/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef MEMORY_H
#define MEMORY_H

#ifndef CONFIG_USER_ONLY

#define DIRTY_MEMORY_VGA       0
#define DIRTY_MEMORY_CODE      1
#define DIRTY_MEMORY_MIGRATION 2
#define DIRTY_MEMORY_NUM       3        /* num of dirty bits */

#include <stdint.h>
#include <stdbool.h>
#include "qemu-common.h"
#include "exec/cpu-common.h"
#ifndef CONFIG_USER_ONLY
#include "exec/hwaddr.h"
#endif
#include "qemu/queue.h"
#include "qemu/int128.h"
#include "qemu/notify.h"
#include "qapi/error.h"
#include "qom/object.h"

#define MAX_PHYS_ADDR_SPACE_BITS 62
#define MAX_PHYS_ADDR            (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)

#define TYPE_MEMORY_REGION "qemu:memory-region"
#define MEMORY_REGION(obj) \
        OBJECT_CHECK(MemoryRegion, (obj), TYPE_MEMORY_REGION)

typedef struct MemoryRegionOps MemoryRegionOps;
typedef struct MemoryRegionMmio MemoryRegionMmio;

struct MemoryRegionMmio {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
};

typedef struct IOMMUTLBEntry IOMMUTLBEntry;

/* See address_space_translate: bit 0 is read, bit 1 is write.  */
typedef enum {
    IOMMU_NONE = 0,
    IOMMU_RO   = 1,
    IOMMU_WO   = 2,
    IOMMU_RW   = 3,
} IOMMUAccessFlags;

struct IOMMUTLBEntry {
    AddressSpace    *target_as;
    hwaddr           iova;
    hwaddr           translated_addr;
    hwaddr           addr_mask;  /* 0xfff = 4k translation */
    IOMMUAccessFlags perm;
};

/*
 * Memory region callbacks
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     hwaddr addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  hwaddr addr,
                  uint64_t data,
                  unsigned size);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specifies bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
        /*
         * If present, and returns #false, the transaction is not accepted
         * by the device (and results in machine dependent behaviour such
         * as a machine check exception).
         */
        bool (*accepts)(void *opaque, hwaddr addr,
                        unsigned size, bool is_write);
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented.  Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;

    /* If .read and .write are not present, old_mmio may be used for
     * backwards compatibility with old mmio registration.
     */
    const MemoryRegionMmio old_mmio;
};

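/*
 * Example (illustrative sketch only, not part of this API): a minimal MMIO
 * device might fill in the MemoryRegionOps callbacks and register them with
 * memory_region_init_io().  The device state type, register layout and names
 * below are hypothetical.
 *
 *   static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
 *   {
 *       MyDevState *s = opaque;
 *       return s->regs[addr >> 2];
 *   }
 *
 *   static void mydev_write(void *opaque, hwaddr addr,
 *                           uint64_t data, unsigned size)
 *   {
 *       MyDevState *s = opaque;
 *       s->regs[addr >> 2] = data;
 *   }
 *
 *   static const MemoryRegionOps mydev_ops = {
 *       .read = mydev_read,
 *       .write = mydev_write,
 *       .endianness = DEVICE_NATIVE_ENDIAN,
 *       .valid.min_access_size = 4,
 *       .valid.max_access_size = 4,
 *   };
 *
 *   memory_region_init_io(&s->iomem, OBJECT(s), &mydev_ops, s,
 *                         "mydev-mmio", 0x1000);
 */
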
typedef struct MemoryRegionIOMMUOps MemoryRegionIOMMUOps;

struct MemoryRegionIOMMUOps {
    /* Return a TLB entry that contains a given address. */
    IOMMUTLBEntry (*translate)(MemoryRegion *iommu, hwaddr addr, bool is_write);
};

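/*
 * Example (illustrative sketch only): a trivial translate callback that
 * returns an identity mapping at page granularity.  The function name and
 * the lookup logic are hypothetical; address_space_memory is assumed to be
 * the system memory address space declared elsewhere in QEMU.
 *
 *   static IOMMUTLBEntry my_iommu_translate(MemoryRegion *iommu, hwaddr addr,
 *                                           bool is_write)
 *   {
 *       IOMMUTLBEntry entry = {
 *           .target_as = &address_space_memory,
 *           .iova = addr & ~(hwaddr)0xfff,
 *           .translated_addr = addr & ~(hwaddr)0xfff,
 *           .addr_mask = 0xfff,
 *           .perm = IOMMU_RW,
 *       };
 *       return entry;
 *   }
 *
 *   static const MemoryRegionIOMMUOps my_iommu_ops = {
 *       .translate = my_iommu_translate,
 *   };
 */
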
typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;

struct MemoryRegion {
    Object parent_obj;
    /* All fields are private - violators will be prosecuted */
    const MemoryRegionOps *ops;
    const MemoryRegionIOMMUOps *iommu_ops;
    void *opaque;
    MemoryRegion *container;
    Int128 size;
    hwaddr addr;
    void (*destructor)(MemoryRegion *mr);
    ram_addr_t ram_addr;
    uint64_t align;
    bool subpage;
    bool terminates;
    bool romd_mode;
    bool ram;
    bool skip_dump;
    bool readonly; /* For RAM regions */
    bool enabled;
    bool rom_device;
    bool warning_printed; /* For reservations */
    bool flush_coalesced_mmio;
    MemoryRegion *alias;
    hwaddr alias_offset;
    int32_t priority;
    bool may_overlap;
    QTAILQ_HEAD(subregions, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
    const char *name;
    uint8_t dirty_log_mask;
    unsigned ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
    NotifierList iommu_notify;
};

/**
 * MemoryListener: callbacks structure for updates to the physical memory map
 *
 * Allows a component to adjust to changes in the guest-visible memory map.
 * Use with memory_listener_register() and memory_listener_unregister().
 */
struct MemoryListener {
    void (*begin)(MemoryListener *listener);
    void (*commit)(MemoryListener *listener);
    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_global_start)(MemoryListener *listener);
    void (*log_global_stop)(MemoryListener *listener);
    void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*coalesced_mmio_add)(MemoryListener *listener, MemoryRegionSection *section,
                               hwaddr addr, hwaddr len);
    void (*coalesced_mmio_del)(MemoryListener *listener, MemoryRegionSection *section,
                               hwaddr addr, hwaddr len);
    /* Lower = earlier (during add), later (during del) */
    unsigned priority;
    AddressSpace *address_space_filter;
    QTAILQ_ENTRY(MemoryListener) link;
};

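/*
 * Example (illustrative sketch only): a component that mirrors the
 * guest-visible memory map might register a listener with region_add and
 * region_del callbacks.  The callback bodies and the listener name are
 * hypothetical; address_space_memory is assumed to be the system memory
 * address space declared elsewhere in QEMU.
 *
 *   static void my_region_add(MemoryListener *listener,
 *                             MemoryRegionSection *section)
 *   {
 *       // Track the newly mapped range, e.g. for an accelerator backend.
 *   }
 *
 *   static void my_region_del(MemoryListener *listener,
 *                             MemoryRegionSection *section)
 *   {
 *       // Drop state for the unmapped range.
 *   }
 *
 *   static MemoryListener my_listener = {
 *       .region_add = my_region_add,
 *       .region_del = my_region_del,
 *       .priority = 10,
 *   };
 *
 *   memory_listener_register(&my_listener, &address_space_memory);
 */
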
/**
 * AddressSpace: describes a mapping of addresses to #MemoryRegion objects
 */
struct AddressSpace {
    /* All fields are private. */
    char *name;
    MemoryRegion *root;
    struct FlatView *current_map;
    int ioeventfd_nb;
    struct MemoryRegionIoeventfd *ioeventfds;
    struct AddressSpaceDispatch *dispatch;
    struct AddressSpaceDispatch *next_dispatch;
    MemoryListener dispatch_listener;

    QTAILQ_ENTRY(AddressSpace) address_spaces_link;
};

/**
 * MemoryRegionSection: describes a fragment of a #MemoryRegion
 *
 * @mr: the region, or %NULL if empty
 * @address_space: the address space the region is mapped in
 * @offset_within_region: the beginning of the section, relative to @mr's start
 * @size: the size of the section; will not exceed @mr's boundaries
 * @offset_within_address_space: the address of the first byte of the section
 *     relative to the region's address space
 * @readonly: writes to this section are ignored
 */
struct MemoryRegionSection {
    MemoryRegion *mr;
    AddressSpace *address_space;
    hwaddr offset_within_region;
    Int128 size;
    hwaddr offset_within_address_space;
    bool readonly;
};

/**
 * memory_region_init: Initialize a memory region
 *
 * The region typically acts as a container for other memory regions.  Use
 * memory_region_add_subregion() to add subregions.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region; any subregions beyond this size will be clipped
 */
void memory_region_init(MemoryRegion *mr,
                        struct Object *owner,
                        const char *name,
                        uint64_t size);

/**
 * memory_region_ref: Add 1 to a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function adds a reference to the owner.
 *
 * All MemoryRegions must have an owner if they can disappear, even if the
 * device they belong to operates exclusively under the BQL.  This is because
 * the region could be returned at any time by memory_region_find, and this
 * is usually under guest control.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_ref(MemoryRegion *mr);

/**
 * memory_region_unref: Remove 1 from a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function removes a reference to the owner and possibly destroys it.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_unref(MemoryRegion *mr);

/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(MemoryRegion *mr,
                           struct Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size);

/**
 * memory_region_init_ram:  Initialize RAM memory region.  Accesses into the
 *                          region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_ram(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);

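/*
 * Example (illustrative sketch only): allocating a RAM region and mapping it
 * into the system memory container.  The variable names, the size and the use
 * of get_system_memory() as the destination container are assumptions.
 *
 *   MemoryRegion *ram = g_new(MemoryRegion, 1);
 *
 *   memory_region_init_ram(ram, NULL, "my-board.ram", 128 * 1024 * 1024,
 *                          &error_abort);
 *   memory_region_add_subregion(get_system_memory(), 0, ram);
 */
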
/**
 * memory_region_init_resizeable_ram:  Initialize memory region with resizeable
 *                                     RAM.  Accesses into the region will
 *                                     modify memory directly.  Only an initial
 *                                     portion of this RAM is actually used.
 *                                     The used size can change across reboots.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: used size of the region.
 * @max_size: max size of the region.
 * @resized: callback to notify owner about used size change.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       struct Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp);
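
/*
 * Example (illustrative sketch only): a firmware-controlled table that may
 * grow up to a fixed maximum.  The sizes, the region name and the resize
 * callback below are hypothetical.
 *
 *   static void my_table_resized(const char *id, uint64_t length, void *host)
 *   {
 *       // React to the new used size, e.g. update bookkeeping for migration.
 *   }
 *
 *   memory_region_init_resizeable_ram(&s->table_mr, OBJECT(s), "my-table",
 *                                     4096, 64 * 4096,
 *                                     my_table_resized, &error_abort);
 */
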
#ifdef __linux__
/**
 * memory_region_init_ram_from_file:  Initialize RAM memory region with a
 *                                    mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @share: %true if memory must be mmaped with the MAP_SHARED flag
 * @path: the path in which to allocate the RAM.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      bool share,
                                      const char *path,
                                      Error **errp);
#endif

/**
 * memory_region_init_ram_ptr:  Initialize RAM memory region from a
 *                              user-provided pointer.  Accesses into the
 *                              region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                struct Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr);

/**
 * memory_region_init_alias: Initialize a memory region that aliases all or a
 *                           part of another memory region.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @orig: the region to be referenced; @mr will be equivalent to
 *        @orig between @offset and @offset + @size - 1.
 * @offset: start of the section in @orig to be referenced.
 * @size: size of the region.
 */
void memory_region_init_alias(MemoryRegion *mr,
                              struct Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size);

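/*
 * Example (illustrative sketch only): exposing the low 1 MB of a RAM region
 * at a second guest-physical address by aliasing it.  The names, addresses
 * and the use of get_system_memory() are hypothetical.
 *
 *   MemoryRegion *alias = g_new(MemoryRegion, 1);
 *
 *   memory_region_init_alias(alias, NULL, "ram-below-1m", ram,
 *                            0, 0x100000);
 *   memory_region_add_subregion(get_system_memory(), 0xe0000000, alias);
 */
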
/**
 * memory_region_init_rom_device:  Initialize a ROM memory region.  Writes are
 *                                 handled via callbacks.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: the name of the region.
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_device(MemoryRegion *mr,
                                   struct Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp);

/**
 * memory_region_init_reservation: Initialize a memory region that reserves
 *                                 I/O space.
 *
 * A reservation region primarily serves debugging purposes.  It claims I/O
 * space that is not supposed to be handled by QEMU itself.  Any access via
 * the memory API will cause an abort().
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_reservation(MemoryRegion *mr,
                                    struct Object *owner,
                                    const char *name,
                                    uint64_t size);

/**
 * memory_region_init_iommu: Initialize a memory region that translates
 *                           addresses
 *
 * An IOMMU region translates addresses and forwards accesses to a target
 * memory region.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @ops: a function that translates addresses into the @target region
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_iommu(MemoryRegion *mr,
                              struct Object *owner,
                              const MemoryRegionIOMMUOps *ops,
                              const char *name,
                              uint64_t size);

/**
 * memory_region_owner: get a memory region's owner.
 *
 * @mr: the memory region being queried.
 */
struct Object *memory_region_owner(MemoryRegion *mr);

/**
 * memory_region_size: get a memory region's size.
 *
 * @mr: the memory region being queried.
 */
uint64_t memory_region_size(MemoryRegion *mr);

/**
 * memory_region_is_ram: check whether a memory region is random access
 *
 * Returns %true if a memory region is random access.
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_ram(MemoryRegion *mr);

/**
 * memory_region_is_skip_dump: check whether a memory region should not be
 *                             dumped
 *
 * Returns %true if a memory region should not be dumped (e.g. VFIO BAR MMAP).
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_skip_dump(MemoryRegion *mr);

/**
 * memory_region_set_skip_dump: set the skip_dump flag so that dumps will
 *                              ignore this memory region
 *
 * @mr: the memory region being updated
 */
void memory_region_set_skip_dump(MemoryRegion *mr);

/**
 * memory_region_is_romd: check whether a memory region is in ROMD mode
 *
 * Returns %true if a memory region is a ROM device and currently set to allow
 * direct reads.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_romd(MemoryRegion *mr)
{
    return mr->rom_device && mr->romd_mode;
}

/**
 * memory_region_is_iommu: check whether a memory region is an iommu
 *
 * Returns %true if a memory region is an iommu.
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_iommu(MemoryRegion *mr);

/**
 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
 *
 * @mr: the memory region that was changed
 * @entry: the new entry in the IOMMU translation table.  The entry
 *         replaces all old entries for the same virtual I/O address range.
 *         Deleted entries have .@perm == 0.
 */
void memory_region_notify_iommu(MemoryRegion *mr,
                                IOMMUTLBEntry entry);

/**
 * memory_region_register_iommu_notifier: register a notifier for changes to
 * IOMMU translation entries.
 *
 * @mr: the memory region to observe
 * @n: the notifier to be added; the notifier receives a pointer to an
 *     #IOMMUTLBEntry as the opaque value; the pointer ceases to be
 *     valid on exit from the notifier.
 */
void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n);

/**
 * memory_region_unregister_iommu_notifier: unregister a notifier for
 * changes to IOMMU translation entries.
 *
 * @n: the notifier to be removed.
 */
void memory_region_unregister_iommu_notifier(Notifier *n);

/**
 * memory_region_name: get a memory region's name
 *
 * Returns the string that was used to initialize the memory region.
 *
 * @mr: the memory region being queried
 */
const char *memory_region_name(const MemoryRegion *mr);

/**
 * memory_region_is_logging: return whether a memory region is logging writes
 *
 * Returns %true if the memory region is logging writes
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_logging(MemoryRegion *mr);

/**
 * memory_region_is_rom: check whether a memory region is ROM
 *
 * Returns %true if a memory region is read-only memory.
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_rom(MemoryRegion *mr);

/**
 * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
 *
 * Returns a file descriptor backing a file-based RAM memory region,
 * or -1 if the region is not a file-based RAM memory region.
 *
 * @mr: the RAM or alias memory region being queried.
 */
int memory_region_get_fd(MemoryRegion *mr);

/**
 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
 *
 * Returns a host pointer to a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()).  Use with
 * care.
 *
 * @mr: the memory region being queried.
 */
void *memory_region_get_ram_ptr(MemoryRegion *mr);

/**
 * memory_region_set_log: Turn dirty logging on or off for a region.
 *
 * Turns dirty logging on or off for a specified client (display, migration).
 * Only meaningful for RAM regions.
 *
 * @mr: the memory region being updated.
 * @log: whether dirty logging is to be enabled or disabled.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);

/**
 * memory_region_get_dirty: Check whether a range of bytes is dirty
 *                          for a specified client.
 *
 * Checks whether a range of bytes has been written to since the last
 * call to memory_region_reset_dirty() with the same @client.  Dirty logging
 * must be enabled.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size, unsigned client);

/**
 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
 *
 * Marks a range of bytes as dirty, after it has been dirtied outside
 * guest code.
 *
 * @mr: the memory region being dirtied.
 * @addr: the address (relative to the start of the region) being dirtied.
 * @size: size of the range being dirtied.
 */
void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size);

/**
 * memory_region_test_and_clear_dirty: Check whether a range of bytes is dirty
 *                                     for a specified client, and clear it.
 *
 * Checks whether a range of bytes has been written to since the last
 * call to memory_region_reset_dirty() with the same @client.  Dirty logging
 * must be enabled.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
                                        hwaddr size, unsigned client);

/**
 * memory_region_sync_dirty_bitmap: Synchronize a region's dirty bitmap with
 *                                  any external TLBs (e.g. kvm)
 *
 * Flushes dirty information from accelerators such as kvm and vhost-net
 * and makes it available to users of the memory API.
 *
 * @mr: the region being flushed.
 */
void memory_region_sync_dirty_bitmap(MemoryRegion *mr);

/**
 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
 *                            client.
 *
 * Marks a range of pages as no longer dirty.
 *
 * @mr: the region being updated.
 * @addr: the start of the subrange being cleaned.
 * @size: the size of the subrange being cleaned.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client);

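/*
 * Example (illustrative sketch only): a display-style client scanning for
 * pages dirtied by the guest.  The region pointer, page size and the
 * redraw_page() helper are hypothetical.
 *
 *   hwaddr offset;
 *
 *   memory_region_set_log(vram, true, DIRTY_MEMORY_VGA);
 *   ...
 *   memory_region_sync_dirty_bitmap(vram);
 *   for (offset = 0; offset < memory_region_size(vram); offset += 4096) {
 *       if (memory_region_test_and_clear_dirty(vram, offset, 4096,
 *                                              DIRTY_MEMORY_VGA)) {
 *           redraw_page(offset);
 *       }
 *   }
 */
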
/**
 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
 *
 * Allows a memory region to be marked as read-only (turning it into a ROM).
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @readonly: whether the region is to be ROM or RAM.
 */
void memory_region_set_readonly(MemoryRegion *mr, bool readonly);

/**
 * memory_region_rom_device_set_romd: enable/disable ROMD mode
 *
 * Allows a ROM device (initialized with memory_region_init_rom_device()) to be
 * set to ROMD mode (default) or MMIO mode.  When it is in ROMD mode, the
 * device is mapped to guest memory and satisfies read access directly.
 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
 * Writes are always handled by the #MemoryRegion.write function.
 *
 * @mr: the memory region to be updated
 * @romd_mode: %true to put the region into ROMD mode
 */
void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);

/**
 * memory_region_set_coalescing: Enable memory coalescing for the region.
 *
 * Enables writes to a region to be queued for later processing.  MMIO ->write
 * callbacks may be delayed until a non-coalesced MMIO is issued.
 * Only useful for IO regions.  Roughly similar to write-combining hardware.
 *
 * @mr: the memory region to be write coalesced
 */
void memory_region_set_coalescing(MemoryRegion *mr);

/**
 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
 *                               a region.
 *
 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
 * Multiple calls can be issued to coalesce disjoint ranges.
 *
 * @mr: the memory region to be updated.
 * @offset: the start of the range within the region to be coalesced.
 * @size: the size of the subrange to be coalesced.
 */
void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size);

/**
 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
 *
 * Disables any coalescing caused by memory_region_set_coalescing() or
 * memory_region_add_coalescing().  Roughly equivalent to uncacheable memory
 * hardware.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_coalescing(MemoryRegion *mr);

/**
 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
 *                                    accesses.
 *
 * Ensure that pending coalesced MMIO requests are flushed before the memory
 * region is accessed.  This property is automatically enabled for all regions
 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
 *
 * @mr: the memory region to be updated.
 */
void memory_region_set_flush_coalesced(MemoryRegion *mr);

/**
 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
 *                                      accesses.
 *
 * Clear the automatic coalesced MMIO flushing enabled via
 * memory_region_set_flush_coalesced.  Note that this service has no effect on
 * memory regions that have MMIO coalescing enabled for themselves.  For them,
 * automatic flushing will stop once coalescing is disabled.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_flush_coalesced(MemoryRegion *mr);

/**
 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
 *                            is written to a location.
 *
 * Marks a word in an IO region (initialized with memory_region_init_io())
 * as a trigger for an eventfd event.  The I/O callback will not be called.
 * The caller must be prepared to handle failure (that is, take the required
 * action if the callback _is_ called).
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: the #EventNotifier to be triggered when @addr, @size, and @data all match.
 **/
void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);

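/*
 * Example (illustrative sketch only): wiring a doorbell-style register so
 * that a guest write of value 0 kicks an EventNotifier without invoking the
 * MMIO write callback.  The register offset and the notifier field are
 * hypothetical.
 *
 *   event_notifier_init(&s->kick, 0);
 *   memory_region_add_eventfd(&s->iomem, 0x50, 2, true, 0, &s->kick);
 *   ...
 *   memory_region_del_eventfd(&s->iomem, 0x50, 2, true, 0, &s->kick);
 */
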
/**
 * memory_region_del_eventfd: Cancel an eventfd.
 *
 * Cancels an eventfd trigger requested by a previous
 * memory_region_add_eventfd() call.
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: the #EventNotifier to be triggered when @addr, @size, and @data all match.
 */
void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);

/**
 * memory_region_add_subregion: Add a subregion to a container.
 *
 * Adds a subregion at @offset.  The subregion may not overlap with other
 * subregions (except for those explicitly marked as overlapping).  A region
 * may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 */
void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion);

/**
 * memory_region_add_subregion_overlap: Add a subregion to a container
 *                                      with overlap.
 *
 * Adds a subregion at @offset.  The subregion may overlap with other
 * subregions.  Conflicts are resolved by having a higher @priority hide a
 * lower @priority.  Subregions without priority are taken as @priority 0.
 * A region may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 * @priority: used for resolving overlaps; highest priority wins.
 */
void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority);

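/*
 * Example (illustrative sketch only): building a container in which a small
 * MMIO window hides part of a larger background RAM region.  The names,
 * sizes and priorities are hypothetical.
 *
 *   memory_region_init(&s->container, OBJECT(s), "my-bus", 0x10000000);
 *   memory_region_add_subregion(&s->container, 0x0, &s->ram);
 *   memory_region_add_subregion_overlap(&s->container, 0x08000000,
 *                                       &s->mmio_window, 1);
 */
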
/**
 * memory_region_get_ram_addr: Get the ram address associated with a memory
 *                             region
 *
 * DO NOT USE THIS FUNCTION.  This is a temporary workaround while the Xen
 * code is being reworked.
 */
ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);

uint64_t memory_region_get_alignment(const MemoryRegion *mr);

/**
 * memory_region_del_subregion: Remove a subregion.
 *
 * Removes a subregion from its container.
 *
 * @mr: the container to be updated.
 * @subregion: the region being removed; must be a current subregion of @mr.
 */
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion);

/*
 * memory_region_set_enabled: dynamically enable or disable a region
 *
 * Enables or disables a memory region.  A disabled memory region
 * ignores all accesses to itself and its subregions.  It does not
 * obscure sibling subregions with lower priority - it simply behaves as
 * if it was removed from the hierarchy.
 *
 * Regions default to being enabled.
 *
 * @mr: the region to be updated
 * @enabled: whether to enable or disable the region
 */
void memory_region_set_enabled(MemoryRegion *mr, bool enabled);

/*
 * memory_region_set_address: dynamically update the address of a region
 *
 * Dynamically updates the address of a region, relative to its container.
 * May be used on regions that are currently part of a memory hierarchy.
 *
 * @mr: the region to be updated
 * @addr: new address, relative to container region
 */
void memory_region_set_address(MemoryRegion *mr, hwaddr addr);

/*
 * memory_region_set_size: dynamically update the size of a region.
 *
 * Dynamically updates the size of a region.
 *
 * @mr: the region to be updated
 * @size: used size of the region.
 */
void memory_region_set_size(MemoryRegion *mr, uint64_t size);

/*
 * memory_region_set_alias_offset: dynamically update a memory alias's offset
 *
 * Dynamically updates the offset into the target region that an alias points
 * to, as if the fourth argument to memory_region_init_alias() has changed.
 *
 * @mr: the #MemoryRegion to be updated; should be an alias.
 * @offset: the new offset into the target memory region
 */
void memory_region_set_alias_offset(MemoryRegion *mr,
                                    hwaddr offset);

/**
 * memory_region_present: checks if an address relative to a @container
 * translates into a #MemoryRegion within @container
 *
 * Answer whether a #MemoryRegion within @container covers the address
 * @addr.
 *
 * @container: a #MemoryRegion within which @addr is a relative address
 * @addr: the area within @container to be searched
 */
bool memory_region_present(MemoryRegion *container, hwaddr addr);

/**
 * memory_region_is_mapped: returns true if #MemoryRegion is mapped
 * into any address space.
 *
 * @mr: a #MemoryRegion which should be checked if it's mapped
 */
bool memory_region_is_mapped(MemoryRegion *mr);

/**
 * memory_region_find: translate an address/size relative to a
 * MemoryRegion into a #MemoryRegionSection.
 *
 * Locates the first #MemoryRegion within @mr that overlaps the range
 * given by @addr and @size.
 *
 * Returns a #MemoryRegionSection that describes a contiguous overlap.
 * It will have the following characteristics:
 *     .@size = 0 iff no overlap was found
 *     .@mr is non-%NULL iff an overlap was found
 *
 * Remember that in the return value the @offset_within_region is
 * relative to the returned region (in the .@mr field), not to the
 * @mr argument.
 *
 * Similarly, the .@offset_within_address_space is relative to the
 * address space that contains both regions, the passed and the
 * returned one.  However, in the special case where the @mr argument
 * has no container (and thus is the root of the address space), the
 * following will hold:
 *     .@offset_within_address_space >= @addr
 *     .@offset_within_address_space + .@size <= @addr + @size
 *
 * @mr: a MemoryRegion within which @addr is a relative address
 * @addr: start of the area within @mr to be searched
 * @size: size of the area to be searched
 */
MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size);

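/*
 * Example (illustrative sketch only): locating whatever backs a given offset
 * of a container region.  The container pointer is hypothetical, and the
 * final memory_region_unref() assumes the caller is expected to drop a
 * reference on the result, as the memory_region_ref() documentation above
 * suggests.
 *
 *   MemoryRegionSection section = memory_region_find(container, 0x1000, 4);
 *
 *   if (section.mr) {
 *       // section.offset_within_region is relative to section.mr.
 *       ...
 *       memory_region_unref(section.mr);
 *   }
 */
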
/**
 * address_space_sync_dirty_bitmap: synchronize the dirty log for all memory
 *
 * Synchronizes the dirty page log for an entire address space.
 * @as: the address space that contains the memory being synchronized
 */
void address_space_sync_dirty_bitmap(AddressSpace *as);

/**
 * memory_region_transaction_begin: Start a transaction.
 *
 * During a transaction, changes will be accumulated and made visible
 * only when the transaction ends (is committed).
 */
void memory_region_transaction_begin(void);

/**
 * memory_region_transaction_commit: Commit a transaction and make changes
 *                                   visible to the guest.
 */
void memory_region_transaction_commit(void);

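/*
 * Example (illustrative sketch only): batching several topology updates so
 * that listeners and the guest observe them as a single change.  The regions
 * involved are hypothetical.
 *
 *   memory_region_transaction_begin();
 *   memory_region_set_enabled(&s->bank0, false);
 *   memory_region_set_address(&s->bank1, 0x20000000);
 *   memory_region_transaction_commit();
 */
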
/**
 * memory_listener_register: register callbacks to be called when memory
 *                           sections are mapped or unmapped into an address
 *                           space
 *
 * @listener: an object containing the callbacks to be called
 * @filter: if non-%NULL, only regions in this address space will be observed
 */
void memory_listener_register(MemoryListener *listener, AddressSpace *filter);

/**
 * memory_listener_unregister: undo the effect of memory_listener_register()
 *
 * @listener: an object containing the callbacks to be removed
 */
void memory_listener_unregister(MemoryListener *listener);

/**
 * memory_global_dirty_log_start: begin dirty logging for all regions
 */
void memory_global_dirty_log_start(void);

/**
 * memory_global_dirty_log_stop: end dirty logging for all regions
 */
void memory_global_dirty_log_stop(void);

void mtree_info(fprintf_function mon_printf, void *f);

/**
 * address_space_init: initializes an address space
 *
 * @as: an uninitialized #AddressSpace
 * @root: a #MemoryRegion that routes addresses for the address space
 * @name: an address space name.  The name is only used for debugging
 *        output.
 */
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);

/**
 * address_space_destroy: destroy an address space
 *
 * Releases all resources associated with an address space.  After an address
 * space is destroyed, its root memory region (given by address_space_init())
 * may be destroyed as well.
 *
 * @as: address space to be destroyed
 */
void address_space_destroy(AddressSpace *as);

/**
 * address_space_rw: read from or write to an address space.
 *
 * Return true if the operation hit any unassigned memory or encountered an
 * IOMMU fault.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 * @len: the number of bytes to transfer
 * @is_write: indicates the transfer direction
 */
bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write);

/**
 * address_space_write: write to address space.
 *
 * Return true if the operation hit any unassigned memory or encountered an
 * IOMMU fault.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 * @len: the number of bytes to transfer
 */
bool address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len);

/**
 * address_space_read: read from an address space.
 *
 * Return true if the operation hit any unassigned memory or encountered an
 * IOMMU fault.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 * @len: the number of bytes to transfer
 */
bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len);

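/*
 * Example (illustrative sketch only): a DMA-capable device model copying a
 * descriptor out of guest memory and writing back a status byte.  The
 * descriptor type, addresses and offsets are hypothetical;
 * address_space_memory is assumed to be the system memory address space
 * declared elsewhere in QEMU.
 *
 *   MyDescriptor desc;
 *   uint8_t status = 1;
 *
 *   if (address_space_read(&address_space_memory, desc_addr,
 *                          (uint8_t *)&desc, sizeof(desc))) {
 *       // The read hit unassigned memory or an IOMMU fault.
 *       return;
 *   }
 *   address_space_write(&address_space_memory, desc_addr + status_offset,
 *                       &status, 1);
 */
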
/* address_space_translate: translate an address range in an address space
 * into a #MemoryRegion and an address range within that region
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @xlat: pointer to address within the returned memory region section's
 *        #MemoryRegion.
 * @len: pointer to length
 * @is_write: indicates the transfer direction
 */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *len,
                                      bool is_write);

/* address_space_access_valid: check for validity of accessing an address
 * space range
 *
 * Check whether memory is assigned to the given address space range, and
 * access is permitted by any IOMMU regions that are active for the address
 * space.
 *
 * For now, addr and len should be aligned to a page size.  This limitation
 * will be lifted in the future.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @len: length of the area to be checked
 * @is_write: indicates the transfer direction
 */
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write);

/* address_space_map: map a physical memory region into a host virtual address
 *
 * May map a subset of the requested range, given by and returned in @plen.
 * May return %NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @plen: pointer to length of buffer; updated on return
 * @is_write: indicates the transfer direction
 */
void *address_space_map(AddressSpace *as, hwaddr addr,
                        hwaddr *plen, bool is_write);

/* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
 *
 * Will also mark the memory as dirty if @is_write == %true.  @access_len gives
 * the amount of memory that was actually read or written by the caller.
 *
 * @as: #AddressSpace used
 * @buffer: host pointer as returned by address_space_map()
 * @len: buffer length as returned by address_space_map()
 * @access_len: amount of data actually transferred
 * @is_write: indicates the transfer direction
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len);

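/*
 * Example (illustrative sketch only): zero-copy write access to a guest
 * buffer.  The guest address, length and fill_buffer() helper are
 * hypothetical, and address_space_memory is assumed to be the system memory
 * address space declared elsewhere in QEMU.
 *
 *   hwaddr plen = len;
 *   void *host = address_space_map(&address_space_memory, guest_addr,
 *                                  &plen, true);
 *
 *   if (host && plen == len) {
 *       fill_buffer(host, plen);      // write into the mapped guest RAM
 *       address_space_unmap(&address_space_memory, host, plen, true, plen);
 *   } else if (host) {
 *       // Mapping was truncated; unmap without claiming any access.
 *       address_space_unmap(&address_space_memory, host, plen, true, 0);
 *   }
 */
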
#endif

#endif