/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef MEMORY_H
#define MEMORY_H

#ifndef CONFIG_USER_ONLY

#include <stdint.h>
#include <stdbool.h>
#include "qemu-common.h"
#include "cpu-common.h"
#include "targphys.h"
#include "qemu-queue.h"
#include "iorange.h"
#include "ioport.h"
#include "int128.h"

typedef struct MemoryRegionOps MemoryRegionOps;
typedef struct MemoryRegion MemoryRegion;
typedef struct MemoryRegionPortio MemoryRegionPortio;
typedef struct MemoryRegionMmio MemoryRegionMmio;

/* Must match *_DIRTY_FLAGS in cpu-all.h.  To be replaced with dynamic
 * registration.
 */
#define DIRTY_MEMORY_VGA       0
#define DIRTY_MEMORY_CODE      1
#define DIRTY_MEMORY_MIGRATION 3

struct MemoryRegionMmio {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
};

/*
 * Memory region callbacks
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     target_phys_addr_t addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  target_phys_addr_t addr,
                  uint64_t data,
                  unsigned size);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specify bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
        /*
         * If present, and returns #false, the transaction is not accepted
         * by the device (and results in machine dependent behaviour such
         * as a machine check exception).
         */
        bool (*accepts)(void *opaque, target_phys_addr_t addr,
                        unsigned size, bool is_write);
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented.  Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;

    /* If .read and .write are not present, old_portio may be used for
     * backwards compatibility with old portio registration
     */
    const MemoryRegionPortio *old_portio;
    /* If .read and .write are not present, old_mmio may be used for
     * backwards compatibility with old mmio registration
     */
    const MemoryRegionMmio old_mmio;
};

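/*
 * Example (illustrative sketch, not part of the original header): a device
 * might describe its MMIO behaviour with a MemoryRegionOps instance.  The
 * device type and register layout below are hypothetical.
 *
 *   static uint64_t mydev_read(void *opaque, target_phys_addr_t addr,
 *                              unsigned size)
 *   {
 *       MyDevState *s = opaque;              // hypothetical device state
 *       return s->regs[addr >> 2];           // one 32-bit register per word
 *   }
 *
 *   static void mydev_write(void *opaque, target_phys_addr_t addr,
 *                           uint64_t data, unsigned size)
 *   {
 *       MyDevState *s = opaque;
 *       s->regs[addr >> 2] = data;
 *   }
 *
 *   static const MemoryRegionOps mydev_ops = {
 *       .read = mydev_read,
 *       .write = mydev_write,
 *       .endianness = DEVICE_NATIVE_ENDIAN,
 *       .valid.min_access_size = 4,          // reject sub-word accesses
 *       .valid.max_access_size = 4,
 *   };
 */
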
typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;

struct MemoryRegion {
    /* All fields are private - violators will be prosecuted */
    const MemoryRegionOps *ops;
    void *opaque;
    MemoryRegion *parent;
    Int128 size;
    target_phys_addr_t addr;
    target_phys_addr_t offset;
    bool backend_registered;
    void (*destructor)(MemoryRegion *mr);
    ram_addr_t ram_addr;
    IORange iorange;
    bool terminates;
    bool readable;
    bool ram;
    bool readonly; /* For RAM regions */
    bool enabled;
    MemoryRegion *alias;
    target_phys_addr_t alias_offset;
    unsigned priority;
    bool may_overlap;
    QTAILQ_HEAD(subregions, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
    const char *name;
    uint8_t dirty_log_mask;
    unsigned ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
};

struct MemoryRegionPortio {
    uint32_t offset;
    uint32_t len;
    unsigned size;
    IOPortReadFunc *read;
    IOPortWriteFunc *write;
};

#define PORTIO_END_OF_LIST() { }

typedef struct MemoryRegionSection MemoryRegionSection;

/**
 * MemoryRegionSection: describes a fragment of a #MemoryRegion
 *
 * @mr: the region, or %NULL if empty
 * @address_space: the address space the region is mapped in
 * @offset_within_region: the beginning of the section, relative to @mr's start
 * @size: the size of the section; will not exceed @mr's boundaries
 * @offset_within_address_space: the address of the first byte of the section
 *     relative to the region's address space
 */
struct MemoryRegionSection {
    MemoryRegion *mr;
    MemoryRegion *address_space;
    target_phys_addr_t offset_within_region;
    uint64_t size;
    target_phys_addr_t offset_within_address_space;
};

typedef struct MemoryListener MemoryListener;

/**
 * MemoryListener: callbacks structure for updates to the physical memory map
 *
 * Allows a component to adjust to changes in the guest-visible memory map.
 * Use with memory_listener_register() and memory_listener_unregister().
 */
struct MemoryListener {
    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_global_start)(MemoryListener *listener);
    void (*log_global_stop)(MemoryListener *listener);
    QLIST_ENTRY(MemoryListener) link;
};

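/*
 * Example (illustrative sketch, not in the original header): a component can
 * observe guest-visible mappings by providing callbacks (unused ones as no-op
 * stubs, depending on what the core requires) and registering the listener.
 * The callback body is hypothetical.
 *
 *   static void mytracker_region_add(MemoryListener *listener,
 *                                    MemoryRegionSection *section)
 *   {
 *       // react to a new guest-visible mapping, e.g. set up shadow state
 *   }
 *
 *   static MemoryListener mytracker_listener = {
 *       .region_add = mytracker_region_add,
 *       // remaining callbacks filled in analogously
 *   };
 *
 *   // typically done once during setup:
 *   //   memory_listener_register(&mytracker_listener);
 */
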
/**
 * memory_region_init: Initialize a memory region
 *
 * The region typically acts as a container for other memory regions.  Use
 * memory_region_add_subregion() to add subregions.
 *
 * @mr: the #MemoryRegion to be initialized
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region; any subregions beyond this size will be clipped
 */
void memory_region_init(MemoryRegion *mr,
                        const char *name,
                        uint64_t size);
/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(MemoryRegion *mr,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size);

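/*
 * Example (illustrative sketch): wiring the hypothetical mydev_ops above to a
 * 4KB MMIO window embedded in a device state structure.
 *
 *   MyDevState *s = ...;                     // hypothetical device state
 *   memory_region_init_io(&s->mmio, &mydev_ops, s, "mydev-mmio", 0x1000);
 *   // the region is then mapped with memory_region_add_subregion(), below
 */
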
/**
 * memory_region_init_ram:  Initialize RAM memory region.  Accesses into the
 *                          region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @dev: a device associated with the region; may be %NULL.
 * @name: the name of the region; the pair (@dev, @name) must be globally
 *        unique.  The name is part of the save/restore ABI and so cannot be
 *        changed.
 * @size: size of the region.
 */
void memory_region_init_ram(MemoryRegion *mr,
                            DeviceState *dev, /* FIXME: layering violation */
                            const char *name,
                            uint64_t size);

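/*
 * Example (illustrative sketch): allocating a 128MB RAM region for a board.
 * The variable name is hypothetical.
 *
 *   static MemoryRegion ram;
 *   memory_region_init_ram(&ram, NULL, "myboard.ram", 128 * 1024 * 1024);
 */
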
/**
 * memory_region_init_ram_ptr:  Initialize RAM memory region from a
 *                              user-provided pointer.  Accesses into the
 *                              region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @dev: a device associated with the region; may be %NULL.
 * @name: the name of the region; the pair (@dev, @name) must be globally
 *        unique.  The name is part of the save/restore ABI and so cannot be
 *        changed.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                DeviceState *dev, /* FIXME: layering violation */
                                const char *name,
                                uint64_t size,
                                void *ptr);

/**
 * memory_region_init_alias: Initialize a memory region that aliases all or a
 *                           part of another memory region.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @name: used for debugging; not visible to the user or ABI
 * @orig: the region to be referenced; @mr will be equivalent to
 *        @orig between @offset and @offset + @size - 1.
 * @offset: start of the section in @orig to be referenced.
 * @size: size of the region.
 */
void memory_region_init_alias(MemoryRegion *mr,
                              const char *name,
                              MemoryRegion *orig,
                              target_phys_addr_t offset,
                              uint64_t size);

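/*
 * Example (illustrative sketch): exposing the first 1MB of the hypothetical
 * "ram" region above at a second guest address (e.g. a legacy low-memory
 * mirror) by mapping an alias alongside the original.
 *
 *   static MemoryRegion ram_alias;
 *   memory_region_init_alias(&ram_alias, "myboard.ram-low", &ram,
 *                            0, 0x100000);
 *   // map &ram and &ram_alias at different offsets in the same container
 */
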
/**
 * memory_region_init_rom_device:  Initialize a ROM memory region.  Writes are
 *                                 handled via callbacks.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @ops: callbacks for write access handling.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @dev: a device associated with the region; may be %NULL.
 * @name: the name of the region; the pair (@dev, @name) must be globally
 *        unique.  The name is part of the save/restore ABI and so cannot be
 *        changed.
 * @size: size of the region.
 */
void memory_region_init_rom_device(MemoryRegion *mr,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   DeviceState *dev, /* FIXME: layering violation */
                                   const char *name,
                                   uint64_t size);

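/*
 * Example (illustrative sketch): a flash-like device whose reads come straight
 * from backing RAM while writes are trapped so the device can emulate a
 * programming command sequence.  mydev_flash_ops and the size are
 * hypothetical.
 *
 *   memory_region_init_rom_device(&s->flash, &mydev_flash_ops, s,
 *                                 NULL, "mydev.flash", 2 * 1024 * 1024);
 */
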
/**
 * memory_region_destroy: Destroy a memory region and reclaim all resources.
 *
 * @mr: the region to be destroyed.  May not currently be a subregion
 *      (see memory_region_add_subregion()) or referenced in an alias
 *      (see memory_region_init_alias()).
 */
void memory_region_destroy(MemoryRegion *mr);

/**
 * memory_region_size: get a memory region's size.
 *
 * @mr: the memory region being queried.
 */
uint64_t memory_region_size(MemoryRegion *mr);

/**
 * memory_region_is_ram: check whether a memory region is random access
 *
 * Returns %true if a memory region is random access.
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_ram(MemoryRegion *mr);

/**
 * memory_region_is_logging: return whether a memory region is logging writes
 *
 * Returns %true if the memory region is logging writes
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_logging(MemoryRegion *mr);

/**
 * memory_region_is_rom: check whether a memory region is ROM
 *
 * Returns %true if a memory region is read-only memory.
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_rom(MemoryRegion *mr);

/**
 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
 *
 * Returns a host pointer to a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()).  Use with
 * care.
 *
 * @mr: the memory region being queried.
 */
void *memory_region_get_ram_ptr(MemoryRegion *mr);

/**
 * memory_region_set_offset: Sets an offset to be added to MemoryRegionOps
 *                           callbacks.
 *
 * This function is deprecated and should not be used in new code.
 */
void memory_region_set_offset(MemoryRegion *mr, target_phys_addr_t offset);

/**
 * memory_region_set_log: Turn dirty logging on or off for a region.
 *
 * Turns dirty logging on or off for a specified client (display, migration).
 * Only meaningful for RAM regions.
 *
 * @mr: the memory region being updated.
 * @log: whether dirty logging is to be enabled or disabled.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);

/**
 * memory_region_get_dirty: Check whether a page is dirty for a specified
 *                          client.
 *
 * Checks whether a page has been written to since the last
 * call to memory_region_reset_dirty() with the same @client.  Dirty logging
 * must be enabled.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                             unsigned client);

/**
 * memory_region_set_dirty: Mark a page as dirty in a memory region.
 *
 * Marks a page as dirty, after it has been dirtied outside guest code.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being dirtied.
 */
void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr);

/**
 * memory_region_sync_dirty_bitmap: Synchronize a region's dirty bitmap with
 *                                  any external TLBs (e.g. kvm)
 *
 * Flushes dirty information from accelerators such as kvm and vhost-net
 * and makes it available to users of the memory API.
 *
 * @mr: the region being flushed.
 */
void memory_region_sync_dirty_bitmap(MemoryRegion *mr);

/**
 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
 *                            client.
 *
 * Marks a range of pages as no longer dirty.
 *
 * @mr: the region being updated.
 * @addr: the start of the subrange being cleaned.
 * @size: the size of the subrange being cleaned.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                               target_phys_addr_t size, unsigned client);

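/*
 * Example (illustrative sketch): how a display device might track which VRAM
 * pages changed since the last refresh.  The region, size, and page size are
 * hypothetical.
 *
 *   target_phys_addr_t addr;
 *
 *   memory_region_set_log(&s->vram, true, DIRTY_MEMORY_VGA);
 *   ...
 *   // on each refresh:
 *   memory_region_sync_dirty_bitmap(&s->vram);          // pull from kvm etc.
 *   for (addr = 0; addr < vram_size; addr += 4096) {
 *       if (memory_region_get_dirty(&s->vram, addr, DIRTY_MEMORY_VGA)) {
 *           // redraw the page at 'addr'
 *       }
 *   }
 *   memory_region_reset_dirty(&s->vram, 0, vram_size, DIRTY_MEMORY_VGA);
 */
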
/**
 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
 *
 * Allows a memory region to be marked as read-only (turning it into a ROM).
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @readonly: whether the region is to be ROM or RAM.
 */
void memory_region_set_readonly(MemoryRegion *mr, bool readonly);

/**
 * memory_region_rom_device_set_readable: enable/disable ROM readability
 *
 * Allows a ROM device (initialized with memory_region_init_rom_device()) to
 * be marked as readable (default) or not readable.  When it is readable,
 * the device is mapped to guest memory.  When not readable, reads are
 * forwarded to the #MemoryRegion.read function.
 *
 * @mr: the memory region to be updated
 * @readable: whether reads are satisfied directly (%true) or via callbacks
 *            (%false)
 */
void memory_region_rom_device_set_readable(MemoryRegion *mr, bool readable);

/**
 * memory_region_set_coalescing: Enable memory coalescing for the region.
 *
 * Enables writes to a region to be queued for later processing.  MMIO ->write
 * callbacks may be delayed until a non-coalesced MMIO is issued.
 * Only useful for IO regions.  Roughly similar to write-combining hardware.
 *
 * @mr: the memory region to be write coalesced
 */
void memory_region_set_coalescing(MemoryRegion *mr);

/**
 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
 *                               a region.
 *
 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
 * Multiple calls can be issued to coalesce disjoint ranges.
 *
 * @mr: the memory region to be updated.
 * @offset: the start of the range within the region to be coalesced.
 * @size: the size of the subrange to be coalesced.
 */
void memory_region_add_coalescing(MemoryRegion *mr,
                                  target_phys_addr_t offset,
                                  uint64_t size);

/**
 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
 *
 * Disables any coalescing caused by memory_region_set_coalescing() or
 * memory_region_add_coalescing().  Roughly equivalent to uncacheable memory
 * hardware.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_coalescing(MemoryRegion *mr);

/**
 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
 *                            is written to a location.
 *
 * Marks a word in an IO region (initialized with memory_region_init_io())
 * as a trigger for an eventfd event.  The I/O callback will not be called.
 * The caller must be prepared to handle failure (that is, take the required
 * action if the callback _is_ called).
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @fd: the eventfd to be triggered when @addr, @size, and @data all match.
 **/
void memory_region_add_eventfd(MemoryRegion *mr,
                               target_phys_addr_t addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               int fd);

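/*
 * Example (illustrative sketch): a virtio-style device letting the guest kick
 * the host by writing a queue index to a doorbell register, delivered through
 * an eventfd instead of the MMIO callback.  The offset, size, and fd are
 * hypothetical.
 *
 *   memory_region_add_eventfd(&s->mmio,
 *                             0x10,         // doorbell register offset
 *                             2,            // 16-bit access
 *                             true,         // only trigger on matching data
 *                             0,            // ... i.e. writes of queue 0
 *                             s->kick_fd);  // eventfd to signal
 */
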
/**
 * memory_region_del_eventfd: Cancel an eventfd.
 *
 * Cancels an eventfd trigger requested by a previous
 * memory_region_add_eventfd() call.
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @fd: the eventfd to be triggered when @addr, @size, and @data all match.
 */
void memory_region_del_eventfd(MemoryRegion *mr,
                               target_phys_addr_t addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               int fd);
/**
 * memory_region_add_subregion: Add a subregion to a container.
 *
 * Adds a subregion at @offset.  The subregion may not overlap with other
 * subregions (except for those explicitly marked as overlapping).  A region
 * may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 */
void memory_region_add_subregion(MemoryRegion *mr,
                                 target_phys_addr_t offset,
                                 MemoryRegion *subregion);
/**
 * memory_region_add_subregion_overlap: Add a subregion to a container,
 *                                      with overlap.
 *
 * Adds a subregion at @offset.  The subregion may overlap with other
 * subregions.  Conflicts are resolved by having a higher @priority hide a
 * lower @priority.  Subregions without priority are taken as @priority 0.
 * A region may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 * @priority: used for resolving overlaps; highest priority wins.
 */
void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         target_phys_addr_t offset,
                                         MemoryRegion *subregion,
                                         unsigned priority);

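/*
 * Example (illustrative sketch): building a small hierarchy.  A container
 * holds the hypothetical RAM region at 0 and the device MMIO window at
 * 0x10000000; a higher-priority overlay hides part of the RAM while mapped.
 *
 *   static MemoryRegion sysmem, overlay;
 *   memory_region_init(&sysmem, "system", 0x100000000ULL);   // 4GB container
 *   memory_region_add_subregion(&sysmem, 0, &ram);
 *   memory_region_add_subregion(&sysmem, 0x10000000, &s->mmio);
 *   // the overlay hides whatever part of 'ram' it covers while mapped:
 *   memory_region_add_subregion_overlap(&sysmem, 0x8000, &overlay, 1);
 */
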
/**
 * memory_region_get_ram_addr: Get the ram address associated with a memory
 *                             region
 *
 * DO NOT USE THIS FUNCTION.  This is a temporary workaround while the Xen
 * code is being reworked.
 */
ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);

/**
 * memory_region_del_subregion: Remove a subregion.
 *
 * Removes a subregion from its container.
 *
 * @mr: the container to be updated.
 * @subregion: the region being removed; must be a current subregion of @mr.
 */
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion);

/*
 * memory_region_set_enabled: dynamically enable or disable a region
 *
 * Enables or disables a memory region.  A disabled memory region
 * ignores all accesses to itself and its subregions.  It does not
 * obscure sibling subregions with lower priority - it simply behaves as
 * if it was removed from the hierarchy.
 *
 * Regions default to being enabled.
 *
 * @mr: the region to be updated
 * @enabled: whether to enable or disable the region
 */
void memory_region_set_enabled(MemoryRegion *mr, bool enabled);

/*
 * memory_region_set_address: dynamically update the address of a region
 *
 * Dynamically updates the address of a region, relative to its parent.
 * May be used on regions that are currently part of a memory hierarchy.
 *
 * @mr: the region to be updated
 * @addr: new address, relative to parent region
 */
void memory_region_set_address(MemoryRegion *mr, target_phys_addr_t addr);

/*
 * memory_region_set_alias_offset: dynamically update a memory alias's offset
 *
 * Dynamically updates the offset into the target region that an alias points
 * to, as if the fourth argument to memory_region_init_alias() had changed.
 *
 * @mr: the #MemoryRegion to be updated; should be an alias.
 * @offset: the new offset into the target memory region
 */
void memory_region_set_alias_offset(MemoryRegion *mr,
                                    target_phys_addr_t offset);

/**
 * memory_region_find: locate a MemoryRegion in an address space
 *
 * Locates the first #MemoryRegion within an address space given by
 * @address_space that overlaps the range given by @addr and @size.
 *
 * Returns a #MemoryRegionSection that describes a contiguous overlap.
 * It will have the following characteristics:
 *    .@offset_within_address_space >= @addr
 *    .@offset_within_address_space + .@size <= @addr + @size
 *    .@size = 0 iff no overlap was found
 *    .@mr is non-%NULL iff an overlap was found
 *
 * @address_space: a top-level (i.e. parentless) region that contains
 *    the region to be found
 * @addr: start of the area within @address_space to be searched
 * @size: size of the area to be searched
 */
MemoryRegionSection memory_region_find(MemoryRegion *address_space,
                                       target_phys_addr_t addr, uint64_t size);

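/*
 * Example (illustrative sketch): asking what is mapped at a given guest
 * physical address in the hypothetical "sysmem" hierarchy above.
 *
 *   MemoryRegionSection section;
 *
 *   section = memory_region_find(&sysmem, 0x10000010, 4);
 *   if (section.size > 0) {
 *       // section.mr is the region backing the access,
 *       // section.offset_within_region the offset inside it
 *   }
 */
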
/**
 * memory_global_sync_dirty_bitmap: synchronize the dirty log for all memory
 *
 * Synchronizes the dirty page log for an entire address space.
 * @address_space: a top-level (i.e. parentless) region that contains the
 *     memory being synchronized
 */
void memory_global_sync_dirty_bitmap(MemoryRegion *address_space);

/**
 * memory_region_transaction_begin: Start a transaction.
 *
 * During a transaction, changes will be accumulated and made visible
 * only when the transaction ends (is committed).
 */
void memory_region_transaction_begin(void);

/**
 * memory_region_transaction_commit: Commit a transaction and make changes
 *                                   visible to the guest.
 */
void memory_region_transaction_commit(void);

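/*
 * Example (illustrative sketch): batching several layout changes, e.g. while
 * reprogramming a hypothetical PCI BAR, so the guest never observes an
 * intermediate state and listeners are updated only once.
 *
 *   memory_region_transaction_begin();
 *   memory_region_set_enabled(&s->bar_mem, false);
 *   memory_region_set_address(&s->bar_mem, new_base);
 *   memory_region_set_enabled(&s->bar_mem, true);
 *   memory_region_transaction_commit();
 */
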
/**
 * memory_listener_register: register callbacks to be called when memory
 *                           sections are mapped or unmapped into an address
 *                           space
 *
 * @listener: an object containing the callbacks to be called
 */
void memory_listener_register(MemoryListener *listener);

/**
 * memory_listener_unregister: undo the effect of memory_listener_register()
 *
 * @listener: an object containing the callbacks to be removed
 */
void memory_listener_unregister(MemoryListener *listener);

/**
 * memory_global_dirty_log_start: begin dirty logging for all regions
 */
void memory_global_dirty_log_start(void);

/**
 * memory_global_dirty_log_stop: end dirty logging for all regions
 */
void memory_global_dirty_log_stop(void);

void mtree_info(fprintf_function mon_printf, void *f);

#endif

#endif