/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef MEMORY_H
#define MEMORY_H

#ifndef CONFIG_USER_ONLY

#include <stdint.h>
#include <stdbool.h>
#include "qemu-common.h"
#include "cpu-common.h"
#include "targphys.h"
#include "qemu-queue.h"
#include "iorange.h"
#include "ioport.h"
#include "int128.h"

typedef struct MemoryRegionOps MemoryRegionOps;
typedef struct MemoryRegion MemoryRegion;
typedef struct MemoryRegionPortio MemoryRegionPortio;
typedef struct MemoryRegionMmio MemoryRegionMmio;

/* Must match *_DIRTY_FLAGS in cpu-all.h. To be replaced with dynamic
 * registration.
 */
#define DIRTY_MEMORY_VGA       0
#define DIRTY_MEMORY_CODE      1
#define DIRTY_MEMORY_MIGRATION 3

struct MemoryRegionMmio {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
};

/* Internal use; thunks between old-style IORange and MemoryRegions. */
typedef struct MemoryRegionIORange MemoryRegionIORange;
struct MemoryRegionIORange {
    IORange iorange;
    MemoryRegion *mr;
    target_phys_addr_t offset;
};

/*
 * Memory region callbacks
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     target_phys_addr_t addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  target_phys_addr_t addr,
                  uint64_t data,
                  unsigned size);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specifies bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported. Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
        /*
         * If present and returns false, the transaction is not accepted
         * by the device (and results in machine dependent behaviour such
         * as a machine check exception).
         */
        bool (*accepts)(void *opaque, target_phys_addr_t addr,
                        unsigned size, bool is_write);
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented. Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented. Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported. Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;

    /* If .read and .write are not present, old_portio may be used for
     * backwards compatibility with old portio registration
     */
    const MemoryRegionPortio *old_portio;
    /* If .read and .write are not present, old_mmio may be used for
     * backwards compatibility with old mmio registration
     */
    const MemoryRegionMmio old_mmio;
};

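/*
 * Example (illustrative sketch only; the mydev_* names, MyDevState and the
 * register layout are invented for this comment and are not part of the API):
 * a device typically provides read/write callbacks and describes its access
 * constraints in a static MemoryRegionOps table.
 *
 *   static uint64_t mydev_read(void *opaque, target_phys_addr_t addr,
 *                              unsigned size)
 *   {
 *       MyDevState *s = opaque;              // hypothetical device state
 *       switch (addr) {
 *       case 0x0:
 *           return s->status;
 *       default:
 *           return 0;
 *       }
 *   }
 *
 *   static void mydev_write(void *opaque, target_phys_addr_t addr,
 *                           uint64_t data, unsigned size)
 *   {
 *       MyDevState *s = opaque;
 *       if (addr == 0x4) {
 *           s->control = data;
 *       }
 *   }
 *
 *   static const MemoryRegionOps mydev_ops = {
 *       .read = mydev_read,
 *       .write = mydev_write,
 *       .endianness = DEVICE_NATIVE_ENDIAN,
 *       .valid = {
 *           .min_access_size = 4,
 *           .max_access_size = 4,
 *       },
 *   };
 */
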
typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;

struct MemoryRegion {
    /* All fields are private - violators will be prosecuted */
    const MemoryRegionOps *ops;
    void *opaque;
    MemoryRegion *parent;
    Int128 size;
    target_phys_addr_t addr;
    void (*destructor)(MemoryRegion *mr);
    ram_addr_t ram_addr;
    bool subpage;
    bool terminates;
    bool readable;
    bool ram;
    bool readonly; /* For RAM regions */
    bool enabled;
    bool rom_device;
    bool warning_printed; /* For reservations */
    bool flush_coalesced_mmio;
    MemoryRegion *alias;
    target_phys_addr_t alias_offset;
    unsigned priority;
    bool may_overlap;
    QTAILQ_HEAD(subregions, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
    const char *name;
    uint8_t dirty_log_mask;
    unsigned ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
};

struct MemoryRegionPortio {
    uint32_t offset;
    uint32_t len;
    unsigned size;
    IOPortReadFunc *read;
    IOPortWriteFunc *write;
};

#define PORTIO_END_OF_LIST() { }

typedef struct MemoryRegionSection MemoryRegionSection;

/**
 * MemoryRegionSection: describes a fragment of a #MemoryRegion
 *
 * @mr: the region, or %NULL if empty
 * @address_space: the address space the region is mapped in
 * @offset_within_region: the beginning of the section, relative to @mr's start
 * @size: the size of the section; will not exceed @mr's boundaries
 * @offset_within_address_space: the address of the first byte of the section
 *     relative to the region's address space
 * @readonly: writes to this section are ignored
 */
struct MemoryRegionSection {
    MemoryRegion *mr;
    MemoryRegion *address_space;
    target_phys_addr_t offset_within_region;
    uint64_t size;
    target_phys_addr_t offset_within_address_space;
    bool readonly;
};

typedef struct MemoryListener MemoryListener;

/**
 * MemoryListener: callbacks structure for updates to the physical memory map
 *
 * Allows a component to adjust to changes in the guest-visible memory map.
 * Use with memory_listener_register() and memory_listener_unregister().
 */
struct MemoryListener {
    void (*begin)(MemoryListener *listener);
    void (*commit)(MemoryListener *listener);
    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_global_start)(MemoryListener *listener);
    void (*log_global_stop)(MemoryListener *listener);
    void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    /* Lower = earlier (during add), later (during del) */
    unsigned priority;
    MemoryRegion *address_space_filter;
    QTAILQ_ENTRY(MemoryListener) link;
};

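/*
 * Example (illustrative sketch only; the my_* names are invented, and whether
 * unused callbacks may be left NULL or must be empty stubs depends on the
 * memory core version): a component that wants to follow changes to the
 * guest-visible memory map fills in the callbacks it needs and registers the
 * listener with memory_listener_register(), declared below.
 *
 *   static void my_region_add(MemoryListener *listener,
 *                             MemoryRegionSection *section)
 *   {
 *       // a new section has become visible in the observed address space
 *   }
 *
 *   static MemoryListener my_listener = {
 *       .region_add = my_region_add,
 *       .priority = 10,
 *   };
 *
 *   memory_listener_register(&my_listener, NULL);    // NULL: observe all
 */
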
/**
 * memory_region_init: Initialize a memory region
 *
 * The region typically acts as a container for other memory regions. Use
 * memory_region_add_subregion() to add subregions.
 *
 * @mr: the #MemoryRegion to be initialized
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region; any subregions beyond this size will be clipped
 */
void memory_region_init(MemoryRegion *mr,
                        const char *name,
                        uint64_t size);
/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(MemoryRegion *mr,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size);

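/*
 * Example (illustrative sketch; mydev_ops, MyDevState, the "mydev-mmio" name
 * and the 0x1000 size are invented for this comment): a device usually embeds
 * a MemoryRegion in its state structure and initializes it once at init time:
 *
 *   MyDevState *s = ...;
 *
 *   memory_region_init_io(&s->mmio, &mydev_ops, s, "mydev-mmio", 0x1000);
 *   // the region is then mapped by bus/board code, e.g. with
 *   // memory_region_add_subregion() (declared later in this file).
 */
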
/**
 * memory_region_init_ram:  Initialize RAM memory region.  Accesses into the
 *                          region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @name: the name of the region.
 * @size: size of the region.
 */
void memory_region_init_ram(MemoryRegion *mr,
                            const char *name,
                            uint64_t size);

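/*
 * Example (illustrative sketch; the sysmem, ram and ram_size names are
 * invented for this comment): board code commonly allocates a RAM region and
 * maps it into its system memory container:
 *
 *   MemoryRegion *ram = g_new(MemoryRegion, 1);
 *
 *   memory_region_init_ram(ram, "board.ram", ram_size);
 *   memory_region_add_subregion(sysmem, 0, ram);
 */
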
/**
 * memory_region_init_ram_ptr:  Initialize RAM memory region from a
 *                              user-provided pointer.  Accesses into the
 *                              region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @name: the name of the region.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                const char *name,
                                uint64_t size,
                                void *ptr);

/**
 * memory_region_init_alias: Initialize a memory region that aliases all or a
 *                           part of another memory region.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @name: used for debugging; not visible to the user or ABI
 * @orig: the region to be referenced; @mr will be equivalent to
 *        @orig between @offset and @offset + @size - 1.
 * @offset: start of the section in @orig to be referenced.
 * @size: size of the region.
 */
void memory_region_init_alias(MemoryRegion *mr,
                              const char *name,
                              MemoryRegion *orig,
                              target_phys_addr_t offset,
                              uint64_t size);

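/*
 * Example (illustrative sketch; the names and addresses are invented): an
 * alias can expose part of an existing RAM region a second time at another
 * guest address without duplicating the backing storage:
 *
 *   MemoryRegion *alias = g_new(MemoryRegion, 1);
 *
 *   memory_region_init_alias(alias, "ram-below-1m", ram, 0, 0x100000);
 *   memory_region_add_subregion(sysmem, 0xe0000000, alias);
 */
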
/**
 * memory_region_init_rom_device:  Initialize a ROM memory region.  Writes are
 *                                 handled via callbacks.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @ops: callbacks for write access handling.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: the name of the region.
 * @size: size of the region.
 */
void memory_region_init_rom_device(MemoryRegion *mr,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size);

/**
 * memory_region_init_reservation: Initialize a memory region that reserves
 *                                 I/O space.
 *
 * A reservation region primarily serves debugging purposes. It claims I/O
 * space that is not supposed to be handled by QEMU itself. Any access via
 * the memory API will cause an abort().
 *
 * @mr: the #MemoryRegion to be initialized
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_reservation(MemoryRegion *mr,
                                    const char *name,
                                    uint64_t size);
/**
 * memory_region_destroy: Destroy a memory region and reclaim all resources.
 *
 * @mr: the region to be destroyed. May not currently be a subregion
 *      (see memory_region_add_subregion()) or referenced in an alias
 *      (see memory_region_init_alias()).
 */
void memory_region_destroy(MemoryRegion *mr);

/**
 * memory_region_size: get a memory region's size.
 *
 * @mr: the memory region being queried.
 */
uint64_t memory_region_size(MemoryRegion *mr);

/**
 * memory_region_is_ram: check whether a memory region is random access
 *
 * Returns %true if a memory region is random access.
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_ram(MemoryRegion *mr);

/**
 * memory_region_is_romd: check whether a memory region is ROMD
 *
 * Returns %true if a memory region is ROMD and currently set to allow
 * direct reads.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_romd(MemoryRegion *mr)
{
    return mr->rom_device && mr->readable;
}

/**
 * memory_region_name: get a memory region's name
 *
 * Returns the string that was used to initialize the memory region.
 *
 * @mr: the memory region being queried
 */
const char *memory_region_name(MemoryRegion *mr);

/**
 * memory_region_is_logging: return whether a memory region is logging writes
 *
 * Returns %true if the memory region is logging writes
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_logging(MemoryRegion *mr);

/**
 * memory_region_is_rom: check whether a memory region is ROM
 *
 * Returns %true if a memory region is read-only memory.
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_rom(MemoryRegion *mr);

/**
 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
 *
 * Returns a host pointer to a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()). Use with
 * care.
 *
 * @mr: the memory region being queried.
 */
void *memory_region_get_ram_ptr(MemoryRegion *mr);

/**
 * memory_region_set_log: Turn dirty logging on or off for a region.
 *
 * Turns dirty logging on or off for a specified client (display, migration).
 * Only meaningful for RAM regions.
 *
 * @mr: the memory region being updated.
 * @log: whether dirty logging is to be enabled or disabled.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);

/**
 * memory_region_get_dirty: Check whether a range of bytes is dirty
 *                          for a specified client.
 *
 * Checks whether a range of bytes has been written to since the last
 * call to memory_region_reset_dirty() with the same @client. Dirty logging
 * must be enabled.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                             target_phys_addr_t size, unsigned client);

/**
 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
 *
 * Marks a range of bytes as dirty, after it has been dirtied outside
 * guest code.
 *
 * @mr: the memory region being dirtied.
 * @addr: the address (relative to the start of the region) being dirtied.
 * @size: size of the range being dirtied.
 */
void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                             target_phys_addr_t size);

/**
 * memory_region_sync_dirty_bitmap: Synchronize a region's dirty bitmap with
 *                                  any external TLBs (e.g. kvm)
 *
 * Flushes dirty information from accelerators such as kvm and vhost-net
 * and makes it available to users of the memory API.
 *
 * @mr: the region being flushed.
 */
void memory_region_sync_dirty_bitmap(MemoryRegion *mr);

/**
 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
 *                            client.
 *
 * Marks a range of pages as no longer dirty.
 *
 * @mr: the region being updated.
 * @addr: the start of the subrange being cleaned.
 * @size: the size of the subrange being cleaned.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                               target_phys_addr_t size, unsigned client);

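/*
 * Example (illustrative sketch; mr, size and page_size stand for values the
 * caller already has): a migration-style user enables logging once, then
 * periodically pulls the accelerator's dirty info, scans it, and clears it:
 *
 *   target_phys_addr_t addr;
 *
 *   memory_region_set_log(mr, true, DIRTY_MEMORY_MIGRATION);
 *   ...
 *   memory_region_sync_dirty_bitmap(mr);
 *   for (addr = 0; addr < size; addr += page_size) {
 *       if (memory_region_get_dirty(mr, addr, page_size,
 *                                   DIRTY_MEMORY_MIGRATION)) {
 *           // send the page, then mark it clean again
 *           memory_region_reset_dirty(mr, addr, page_size,
 *                                     DIRTY_MEMORY_MIGRATION);
 *       }
 *   }
 */
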
/**
 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
 *
 * Allows a memory region to be marked as read-only (turning it into a ROM).
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @readonly: whether the region is to be ROM or RAM.
 */
void memory_region_set_readonly(MemoryRegion *mr, bool readonly);

/**
 * memory_region_rom_device_set_readable: enable/disable ROM readability
 *
 * Allows a ROM device (initialized with memory_region_init_rom_device())
 * to be marked as readable (default) or not readable. When it is readable,
 * the device is mapped to guest memory. When not readable, reads are
 * forwarded to the #MemoryRegion.read function.
 *
 * @mr: the memory region to be updated
 * @readable: whether reads are satisfied directly (%true) or via callbacks
 *            (%false)
 */
void memory_region_rom_device_set_readable(MemoryRegion *mr, bool readable);

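/*
 * Example (illustrative sketch; flash_ops, the s->flash field and flash_size
 * are invented): a flash device can model "read array" vs. "program" modes by
 * toggling ROMD readability; in array mode reads go straight to the backing
 * RAM, in program mode they go through the callbacks:
 *
 *   memory_region_init_rom_device(&s->flash, &flash_ops, s,
 *                                 "flash", flash_size);
 *   ...
 *   memory_region_rom_device_set_readable(&s->flash, false); // program mode
 *   ...
 *   memory_region_rom_device_set_readable(&s->flash, true);  // array mode
 */
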
/**
 * memory_region_set_coalescing: Enable memory coalescing for the region.
 *
 * Enables writes to a region to be queued for later processing. MMIO ->write
 * callbacks may be delayed until a non-coalesced MMIO is issued.
 * Only useful for IO regions. Roughly similar to write-combining hardware.
 *
 * @mr: the memory region to be write coalesced
 */
void memory_region_set_coalescing(MemoryRegion *mr);

/**
 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
 *                               a region.
 *
 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
 * Multiple calls can be issued to coalesce disjoint ranges.
 *
 * @mr: the memory region to be updated.
 * @offset: the start of the range within the region to be coalesced.
 * @size: the size of the subrange to be coalesced.
 */
void memory_region_add_coalescing(MemoryRegion *mr,
                                  target_phys_addr_t offset,
                                  uint64_t size);

/**
 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
 *
 * Disables any coalescing caused by memory_region_set_coalescing() or
 * memory_region_add_coalescing(). Roughly equivalent to uncacheable memory
 * hardware.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_coalescing(MemoryRegion *mr);

/**
 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
 *                                    accesses.
 *
 * Ensure that pending coalesced MMIO requests are flushed before the memory
 * region is accessed. This property is automatically enabled for all regions
 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
 *
 * @mr: the memory region to be updated.
 */
void memory_region_set_flush_coalesced(MemoryRegion *mr);

/**
 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
 *                                      accesses.
 *
 * Clear the automatic coalesced MMIO flushing enabled via
 * memory_region_set_flush_coalesced. Note that this service has no effect on
 * memory regions that have MMIO coalescing enabled for themselves. For them,
 * automatic flushing will stop once coalescing is disabled.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_flush_coalesced(MemoryRegion *mr);

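/*
 * Example (illustrative sketch; vga_io_ops, s->vram_io and the ranges are
 * invented): a display adapter whose MMIO writes have no immediate side
 * effects can let part of its register window be batched, at the cost of the
 * ->write callbacks running later:
 *
 *   memory_region_init_io(&s->vram_io, &vga_io_ops, s, "vga-io", 0x10000);
 *   memory_region_add_coalescing(&s->vram_io, 0x0, 0x8000);
 *   // a later non-coalesced access (or an explicit flush by the memory
 *   // core) delivers the queued writes in order.
 */
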
/**
 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
 *                            is written to a location.
 *
 * Marks a word in an IO region (initialized with memory_region_init_io())
 * as a trigger for an eventfd event. The I/O callback will not be called.
 * The caller must be prepared to handle failure (that is, take the required
 * action if the callback _is_ called).
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: the #EventNotifier to be triggered when @addr, @size, and @data all
 *     match.
 */
void memory_region_add_eventfd(MemoryRegion *mr,
                               target_phys_addr_t addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);

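/*
 * Example (illustrative sketch; s->notifier, QUEUE_NOTIFY_OFFSET and
 * queue_index are invented, and event_notifier_init() comes from
 * event_notifier.h, not from this header): a virtio-style device can have a
 * "kick" register serviced without running its I/O callback by routing
 * writes of a known value to an EventNotifier:
 *
 *   event_notifier_init(&s->notifier, 0);
 *   memory_region_add_eventfd(&s->mmio,
 *                             QUEUE_NOTIFY_OFFSET,   // invented offset
 *                             2,                     // 16-bit access
 *                             true,                  // also match the data
 *                             queue_index,           // expected value
 *                             &s->notifier);
 */
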
/**
 * memory_region_del_eventfd: Cancel an eventfd.
 *
 * Cancels an eventfd trigger requested by a previous
 * memory_region_add_eventfd() call.
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: the #EventNotifier to be triggered when @addr, @size, and @data all
 *     match.
 */
void memory_region_del_eventfd(MemoryRegion *mr,
                               target_phys_addr_t addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);

/**
 * memory_region_add_subregion: Add a subregion to a container.
 *
 * Adds a subregion at @offset. The subregion may not overlap with other
 * subregions (except for those explicitly marked as overlapping). A region
 * may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 */
void memory_region_add_subregion(MemoryRegion *mr,
                                 target_phys_addr_t offset,
                                 MemoryRegion *subregion);
/**
 * memory_region_add_subregion_overlap: Add a subregion to a container
 *                                      with overlap.
 *
 * Adds a subregion at @offset. The subregion may overlap with other
 * subregions. Conflicts are resolved by having a higher @priority hide a
 * lower @priority. Subregions without priority are taken as @priority 0.
 * A region may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 * @priority: used for resolving overlaps; highest priority wins.
 */
void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         target_phys_addr_t offset,
                                         MemoryRegion *subregion,
                                         unsigned priority);

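/*
 * Example (illustrative sketch; the sysmem, ram and option_rom names and the
 * addresses are invented): mapping a small option ROM over RAM, with the ROM
 * winning where the two overlap:
 *
 *   memory_region_add_subregion(sysmem, 0, ram);
 *   memory_region_add_subregion_overlap(sysmem, 0xc0000, option_rom, 1);
 *
 * Accesses inside the option ROM's range hit option_rom (priority 1);
 * everything else still hits ram (default priority 0).
 */
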
/**
 * memory_region_get_ram_addr: Get the ram address associated with a memory
 *                             region
 *
 * DO NOT USE THIS FUNCTION. This is a temporary workaround while the Xen
 * code is being reworked.
 */
ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);

/**
 * memory_region_del_subregion: Remove a subregion.
 *
 * Removes a subregion from its container.
 *
 * @mr: the container to be updated.
 * @subregion: the region being removed; must be a current subregion of @mr.
 */
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion);

/*
 * memory_region_set_enabled: dynamically enable or disable a region
 *
 * Enables or disables a memory region. A disabled memory region
 * ignores all accesses to itself and its subregions. It does not
 * obscure sibling subregions with lower priority - it simply behaves as
 * if it was removed from the hierarchy.
 *
 * Regions default to being enabled.
 *
 * @mr: the region to be updated
 * @enabled: whether to enable or disable the region
 */
void memory_region_set_enabled(MemoryRegion *mr, bool enabled);

/*
 * memory_region_set_address: dynamically update the address of a region
 *
 * Dynamically updates the address of a region, relative to its parent.
 * May be used on regions that are currently part of a memory hierarchy.
 *
 * @mr: the region to be updated
 * @addr: new address, relative to parent region
 */
void memory_region_set_address(MemoryRegion *mr, target_phys_addr_t addr);

/*
 * memory_region_set_alias_offset: dynamically update a memory alias's offset
 *
 * Dynamically updates the offset into the target region that an alias points
 * to, as if the fourth argument to memory_region_init_alias() had changed.
 *
 * @mr: the #MemoryRegion to be updated; should be an alias.
 * @offset: the new offset into the target memory region
 */
void memory_region_set_alias_offset(MemoryRegion *mr,
                                    target_phys_addr_t offset);

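/*
 * Example (illustrative sketch; s->bank_alias, s->flash, BANK_SIZE and the
 * base address are invented): a banked device can remap which part of its
 * backing region is guest visible by moving an alias's offset, instead of
 * deleting and re-adding subregions:
 *
 *   memory_region_init_alias(&s->bank_alias, "flash-bank",
 *                            &s->flash, 0, BANK_SIZE);
 *   memory_region_add_subregion(sysmem, 0x8000000, &s->bank_alias);
 *   ...
 *   // guest selects bank n:
 *   memory_region_set_alias_offset(&s->bank_alias, n * BANK_SIZE);
 */
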
/**
 * memory_region_find: locate a MemoryRegion in an address space
 *
 * Locates the first #MemoryRegion within an address space given by
 * @address_space that overlaps the range given by @addr and @size.
 *
 * Returns a #MemoryRegionSection that describes a contiguous overlap.
 * It will have the following characteristics:
 *    .@offset_within_address_space >= @addr
 *    .@offset_within_address_space + .@size <= @addr + @size
 *    .@size = 0 iff no overlap was found
 *    .@mr is non-%NULL iff an overlap was found
 *
 * @address_space: a top-level (i.e. parentless) region that contains
 *                 the region to be found
 * @addr: start of the area within @address_space to be searched
 * @size: size of the area to be searched
 */
MemoryRegionSection memory_region_find(MemoryRegion *address_space,
                                       target_phys_addr_t addr, uint64_t size);

/**
 * memory_region_section_addr: get offset within MemoryRegionSection
 *
 * Returns offset within MemoryRegionSection
 *
 * @section: the memory region section being queried
 * @addr: address in address space
 */
static inline target_phys_addr_t
memory_region_section_addr(MemoryRegionSection *section,
                           target_phys_addr_t addr)
{
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return addr;
}

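/*
 * Example (illustrative sketch; sysmem and addr stand for values the caller
 * already has): translating a guest physical address into an offset within
 * the region that currently covers it:
 *
 *   MemoryRegionSection section = memory_region_find(sysmem, addr, 4);
 *
 *   if (section.size) {
 *       target_phys_addr_t offset = memory_region_section_addr(&section, addr);
 *       // section.mr covers addr; offset is relative to section.mr's start
 *   }
 */
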
/**
 * memory_global_sync_dirty_bitmap: synchronize the dirty log for all memory
 *
 * Synchronizes the dirty page log for an entire address space.
 * @address_space: a top-level (i.e. parentless) region that contains the
 *     memory being synchronized
 */
void memory_global_sync_dirty_bitmap(MemoryRegion *address_space);

/**
 * memory_region_transaction_begin: Start a transaction.
 *
 * During a transaction, changes will be accumulated and made visible
 * only when the transaction ends (is committed).
 */
void memory_region_transaction_begin(void);

/**
 * memory_region_transaction_commit: Commit a transaction and make changes
 *                                   visible to the guest.
 */
void memory_region_transaction_commit(void);

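/*
 * Example (illustrative sketch; old_bar, new_bar and new_base are invented
 * names): several related topology updates can be batched so the guest never
 * observes an intermediate state and listeners run only once:
 *
 *   memory_region_transaction_begin();
 *   memory_region_set_enabled(old_bar, false);
 *   memory_region_set_address(new_bar, new_base);
 *   memory_region_set_enabled(new_bar, true);
 *   memory_region_transaction_commit();
 */
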
/**
 * memory_listener_register: register callbacks to be called when memory
 *                           sections are mapped or unmapped into an address
 *                           space
 *
 * @listener: an object containing the callbacks to be called
 * @filter: if non-%NULL, only regions in this address space will be observed
 */
void memory_listener_register(MemoryListener *listener, MemoryRegion *filter);

/**
 * memory_listener_unregister: undo the effect of memory_listener_register()
 *
 * @listener: an object containing the callbacks to be removed
 */
void memory_listener_unregister(MemoryListener *listener);

/**
 * memory_global_dirty_log_start: begin dirty logging for all regions
 */
void memory_global_dirty_log_start(void);

/**
 * memory_global_dirty_log_stop: end dirty logging for all regions
 */
void memory_global_dirty_log_stop(void);

void mtree_info(fprintf_function mon_printf, void *f);

#endif

#endif