1 /*
2 * Physical memory management API
3 *
4 * Copyright 2011 Red Hat, Inc. and/or its affiliates
5 *
6 * Authors:
7 * Avi Kivity <avi@redhat.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
11 *
12 */
13
14 #ifndef MEMORY_H
15 #define MEMORY_H
16
17 #ifndef CONFIG_USER_ONLY
18
19 #include <stdint.h>
20 #include <stdbool.h>
21 #include "qemu-common.h"
22 #include "exec/cpu-common.h"
23 #include "exec/hwaddr.h"
24 #include "qemu/queue.h"
25 #include "exec/iorange.h"
26 #include "exec/ioport.h"
27 #include "qemu/int128.h"
28
29 #define MAX_PHYS_ADDR_SPACE_BITS 62
30 #define MAX_PHYS_ADDR (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)
31
32 typedef struct MemoryRegionOps MemoryRegionOps;
33 typedef struct MemoryRegionPortio MemoryRegionPortio;
34 typedef struct MemoryRegionMmio MemoryRegionMmio;
35
36 /* Must match *_DIRTY_FLAGS in cpu-all.h. To be replaced with dynamic
37 * registration.
38 */
39 #define DIRTY_MEMORY_VGA 0
40 #define DIRTY_MEMORY_CODE 1
41 #define DIRTY_MEMORY_MIGRATION 3
42
43 struct MemoryRegionMmio {
44 CPUReadMemoryFunc *read[3];
45 CPUWriteMemoryFunc *write[3];
46 };
47
48 /* Internal use; thunks between old-style IORange and MemoryRegions. */
49 typedef struct MemoryRegionIORange MemoryRegionIORange;
50 struct MemoryRegionIORange {
51 IORange iorange;
52 MemoryRegion *mr;
53 hwaddr offset;
54 };
55
56 /*
57 * Memory region callbacks
58 */
59 struct MemoryRegionOps {
60 /* Read from the memory region. @addr is relative to @mr; @size is
61 * in bytes. */
62 uint64_t (*read)(void *opaque,
63 hwaddr addr,
64 unsigned size);
65 /* Write to the memory region. @addr is relative to @mr; @size is
66 * in bytes. */
67 void (*write)(void *opaque,
68 hwaddr addr,
69 uint64_t data,
70 unsigned size);
71
72 enum device_endian endianness;
73 /* Guest-visible constraints: */
74 struct {
75 /* If nonzero, specify bounds on access sizes beyond which a machine
76 * check is thrown.
77 */
78 unsigned min_access_size;
79 unsigned max_access_size;
80 /* If true, unaligned accesses are supported. Otherwise unaligned
81 * accesses throw machine checks.
82 */
83 bool unaligned;
84 /*
85 * If present and it returns #false, the transaction is not accepted
86 * by the device (and results in machine dependent behaviour such
87 * as a machine check exception).
88 */
89 bool (*accepts)(void *opaque, hwaddr addr,
90 unsigned size, bool is_write);
91 } valid;
92 /* Internal implementation constraints: */
93 struct {
94 /* If nonzero, specifies the minimum size implemented. Smaller sizes
95 * will be rounded upwards and a partial result will be returned.
96 */
97 unsigned min_access_size;
98 /* If nonzero, specifies the maximum size implemented. Larger sizes
99 * will be done as a series of accesses with smaller sizes.
100 */
101 unsigned max_access_size;
102 /* If true, unaligned accesses are supported. Otherwise all accesses
103 * are converted to (possibly multiple) naturally aligned accesses.
104 */
105 bool unaligned;
106 } impl;
107
108 /* If .read and .write are not present, old_portio may be used for
109 * backwards compatibility with old portio registration
110 */
111 const MemoryRegionPortio *old_portio;
112 /* If .read and .write are not present, old_mmio may be used for
113 * backwards compatibility with old mmio registration
114 */
115 const MemoryRegionMmio old_mmio;
116 };
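
/*
 * Example (not part of the API): a minimal sketch of a MemoryRegionOps for a
 * hypothetical device with a single 32-bit status register at offset 0.  The
 * device type, register layout and semantics are invented for illustration.
 */
typedef struct ExampleDeviceState {
    uint32_t status;
} ExampleDeviceState;

static uint64_t example_dev_read(void *opaque, hwaddr addr, unsigned size)
{
    ExampleDeviceState *s = opaque;

    switch (addr) {
    case 0x0:                       /* STATUS register */
        return s->status;
    default:                        /* unimplemented registers read as zero */
        return 0;
    }
}

static void example_dev_write(void *opaque, hwaddr addr, uint64_t data,
                              unsigned size)
{
    ExampleDeviceState *s = opaque;

    if (addr == 0x0) {              /* writing STATUS clears the written bits */
        s->status &= ~(uint32_t)data;
    }
}

static const MemoryRegionOps example_dev_ops = {
    .read = example_dev_read,
    .write = example_dev_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 4,       /* accept only aligned 32-bit accesses */
        .max_access_size = 4,
    },
};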
117
118 typedef struct CoalescedMemoryRange CoalescedMemoryRange;
119 typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;
120
121 struct MemoryRegion {
122 /* All fields are private - violators will be prosecuted */
123 const MemoryRegionOps *ops;
124 void *opaque;
125 MemoryRegion *parent;
126 Int128 size;
127 hwaddr addr;
128 void (*destructor)(MemoryRegion *mr);
129 ram_addr_t ram_addr;
130 bool subpage;
131 bool terminates;
132 bool romd_mode;
133 bool ram;
134 bool readonly; /* For RAM regions */
135 bool enabled;
136 bool rom_device;
137 bool warning_printed; /* For reservations */
138 bool flush_coalesced_mmio;
139 MemoryRegion *alias;
140 hwaddr alias_offset;
141 unsigned priority;
142 bool may_overlap;
143 QTAILQ_HEAD(subregions, MemoryRegion) subregions;
144 QTAILQ_ENTRY(MemoryRegion) subregions_link;
145 QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
146 const char *name;
147 uint8_t dirty_log_mask;
148 unsigned ioeventfd_nb;
149 MemoryRegionIoeventfd *ioeventfds;
150 };
151
152 struct MemoryRegionPortio {
153 uint32_t offset;
154 uint32_t len;
155 unsigned size;
156 IOPortReadFunc *read;
157 IOPortWriteFunc *write;
158 };
159
160 #define PORTIO_END_OF_LIST() { }
161
162 /**
163 * AddressSpace: describes a mapping of addresses to #MemoryRegion objects
164 */
165 struct AddressSpace {
166 /* All fields are private. */
167 const char *name;
168 MemoryRegion *root;
169 struct FlatView *current_map;
170 int ioeventfd_nb;
171 struct MemoryRegionIoeventfd *ioeventfds;
172 struct AddressSpaceDispatch *dispatch;
173 QTAILQ_ENTRY(AddressSpace) address_spaces_link;
174 };
175
176 /**
177 * MemoryRegionSection: describes a fragment of a #MemoryRegion
178 *
179 * @mr: the region, or %NULL if empty
180 * @address_space: the address space the region is mapped in
181 * @offset_within_region: the beginning of the section, relative to @mr's start
182 * @size: the size of the section; will not exceed @mr's boundaries
183 * @offset_within_address_space: the address of the first byte of the section
184 * relative to the region's address space
185 * @readonly: writes to this section are ignored
186 */
187 struct MemoryRegionSection {
188 MemoryRegion *mr;
189 AddressSpace *address_space;
190 hwaddr offset_within_region;
191 uint64_t size;
192 hwaddr offset_within_address_space;
193 bool readonly;
194 };
195
196 typedef struct MemoryListener MemoryListener;
197
198 /**
199 * MemoryListener: callbacks structure for updates to the physical memory map
200 *
201 * Allows a component to adjust to changes in the guest-visible memory map.
202 * Use with memory_listener_register() and memory_listener_unregister().
203 */
204 struct MemoryListener {
205 void (*begin)(MemoryListener *listener);
206 void (*commit)(MemoryListener *listener);
207 void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
208 void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
209 void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
210 void (*log_start)(MemoryListener *listener, MemoryRegionSection *section);
211 void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section);
212 void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
213 void (*log_global_start)(MemoryListener *listener);
214 void (*log_global_stop)(MemoryListener *listener);
215 void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
216 bool match_data, uint64_t data, EventNotifier *e);
217 void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
218 bool match_data, uint64_t data, EventNotifier *e);
219 void (*coalesced_mmio_add)(MemoryListener *listener, MemoryRegionSection *section,
220 hwaddr addr, hwaddr len);
221 void (*coalesced_mmio_del)(MemoryListener *listener, MemoryRegionSection *section,
222 hwaddr addr, hwaddr len);
223 /* Lower = earlier (during add), later (during del) */
224 unsigned priority;
225 AddressSpace *address_space_filter;
226 QTAILQ_ENTRY(MemoryListener) link;
227 };
228
229 /**
230 * memory_region_init: Initialize a memory region
231 *
232 * The region typically acts as a container for other memory regions. Use
233 * memory_region_add_subregion() to add subregions.
234 *
235 * @mr: the #MemoryRegion to be initialized
236 * @name: used for debugging; not visible to the user or ABI
237 * @size: size of the region; any subregions beyond this size will be clipped
238 */
239 void memory_region_init(MemoryRegion *mr,
240 const char *name,
241 uint64_t size);
242 /**
243 * memory_region_init_io: Initialize an I/O memory region.
244 *
245 * Accesses into the region will cause the callbacks in @ops to be called.
246 * If @size is nonzero, subregions will be clipped to @size.
247 *
248 * @mr: the #MemoryRegion to be initialized.
249 * @ops: a structure containing read and write callbacks to be used when
250 * I/O is performed on the region.
251 * @opaque: passed to the read and write callbacks of the @ops structure.
252 * @name: used for debugging; not visible to the user or ABI
253 * @size: size of the region.
254 */
255 void memory_region_init_io(MemoryRegion *mr,
256 const MemoryRegionOps *ops,
257 void *opaque,
258 const char *name,
259 uint64_t size);
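
/*
 * Example (illustrative only): initializing an MMIO region with the
 * hypothetical example_dev_ops sketched above.  The region name and size are
 * made up; mapping the region into a container is shown further below with
 * memory_region_add_subregion().
 */
static void example_dev_init_mmio(ExampleDeviceState *s, MemoryRegion *mmio)
{
    memory_region_init_io(mmio, &example_dev_ops, s, "example-dev-mmio",
                          0x1000);
}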
260
261 /**
262 * memory_region_init_ram: Initialize RAM memory region. Accesses into the
263 * region will modify memory directly.
264 *
265 * @mr: the #MemoryRegion to be initialized.
266 * @name: the name of the region.
267 * @size: size of the region.
268 */
269 void memory_region_init_ram(MemoryRegion *mr,
270 const char *name,
271 uint64_t size);
272
273 /**
274 * memory_region_init_ram_ptr: Initialize RAM memory region from a
275 * user-provided pointer. Accesses into the
276 * region will modify memory directly.
277 *
278 * @mr: the #MemoryRegion to be initialized.
279 * @name: the name of the region.
280 * @size: size of the region.
281 * @ptr: memory to be mapped; must contain at least @size bytes.
282 */
283 void memory_region_init_ram_ptr(MemoryRegion *mr,
284 const char *name,
285 uint64_t size,
286 void *ptr);
287
288 /**
289 * memory_region_init_alias: Initialize a memory region that aliases all or a
290 * part of another memory region.
291 *
292 * @mr: the #MemoryRegion to be initialized.
293 * @name: used for debugging; not visible to the user or ABI
294 * @orig: the region to be referenced; @mr will be equivalent to
295 * @orig between @offset and @offset + @size - 1.
296 * @offset: start of the section in @orig to be referenced.
297 * @size: size of the region.
298 */
299 void memory_region_init_alias(MemoryRegion *mr,
300 const char *name,
301 MemoryRegion *orig,
302 hwaddr offset,
303 uint64_t size);
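
/*
 * Example (illustrative only): exposing the first 128KiB of a RAM region a
 * second time at a legacy address through an alias.  Names and addresses are
 * invented; the final mapping step is only sketched in a comment because
 * memory_region_add_subregion() is declared further below.
 */
static void example_init_low_ram_alias(MemoryRegion *alias, MemoryRegion *ram)
{
    memory_region_init_alias(alias, "ram-low-alias", ram, 0, 128 * 1024);
    /* ... then: memory_region_add_subregion(parent, 0x000e0000, alias); */
}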
304
305 /**
306 * memory_region_init_rom_device: Initialize a ROM memory region. Writes are
307 * handled via callbacks.
308 *
309 * @mr: the #MemoryRegion to be initialized.
310 * @ops: callbacks for write access handling.
311 * @name: the name of the region.
312 * @size: size of the region.
313 */
314 void memory_region_init_rom_device(MemoryRegion *mr,
315 const MemoryRegionOps *ops,
316 void *opaque,
317 const char *name,
318 uint64_t size);
319
320 /**
321 * memory_region_init_reservation: Initialize a memory region that reserves
322 * I/O space.
323 *
324 * A reservation region primarily serves debugging purposes. It claims I/O
325 * space that is not supposed to be handled by QEMU itself. Any access via
326 * the memory API will cause an abort().
327 *
328 * @mr: the #MemoryRegion to be initialized
329 * @name: used for debugging; not visible to the user or ABI
330 * @size: size of the region.
331 */
332 void memory_region_init_reservation(MemoryRegion *mr,
333 const char *name,
334 uint64_t size);
335 /**
336 * memory_region_destroy: Destroy a memory region and reclaim all resources.
337 *
338 * @mr: the region to be destroyed. May not currently be a subregion
339 * (see memory_region_add_subregion()) or referenced in an alias
340 * (see memory_region_init_alias()).
341 */
342 void memory_region_destroy(MemoryRegion *mr);
343
344 /**
345 * memory_region_size: get a memory region's size.
346 *
347 * @mr: the memory region being queried.
348 */
349 uint64_t memory_region_size(MemoryRegion *mr);
350
351 /**
352 * memory_region_is_ram: check whether a memory region is random access
353 *
354 * Returns %true if a memory region is random access.
355 *
356 * @mr: the memory region being queried
357 */
358 bool memory_region_is_ram(MemoryRegion *mr);
359
360 /**
361 * memory_region_is_romd: check whether a memory region is in ROMD mode
362 *
363 * Returns %true if a memory region is a ROM device and currently set to allow
364 * direct reads.
365 *
366 * @mr: the memory region being queried
367 */
368 static inline bool memory_region_is_romd(MemoryRegion *mr)
369 {
370 return mr->rom_device && mr->romd_mode;
371 }
372
373 /**
374 * memory_region_name: get a memory region's name
375 *
376 * Returns the string that was used to initialize the memory region.
377 *
378 * @mr: the memory region being queried
379 */
380 const char *memory_region_name(MemoryRegion *mr);
381
382 /**
383 * memory_region_is_logging: return whether a memory region is logging writes
384 *
385 * Returns %true if the memory region is logging writes
386 *
387 * @mr: the memory region being queried
388 */
389 bool memory_region_is_logging(MemoryRegion *mr);
390
391 /**
392 * memory_region_is_rom: check whether a memory region is ROM
393 *
394 * Returns %true if a memory region is read-only memory.
395 *
396 * @mr: the memory region being queried
397 */
398 bool memory_region_is_rom(MemoryRegion *mr);
399
400 /**
401 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
402 *
403 * Returns a host pointer to a RAM memory region (created with
404 * memory_region_init_ram() or memory_region_init_ram_ptr()). Use with
405 * care.
406 *
407 * @mr: the memory region being queried.
408 */
409 void *memory_region_get_ram_ptr(MemoryRegion *mr);
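
/*
 * Example (illustrative only): allocating a RAM-backed region and pre-filling
 * it through the host pointer, e.g. with a firmware image the caller already
 * holds in @blob.  The region name is made up, and memcpy() is assumed to be
 * available via the headers pulled in by qemu-common.h.
 */
static void example_init_prefilled_ram(MemoryRegion *mr, const void *blob,
                                       uint64_t blob_size)
{
    memory_region_init_ram(mr, "example-firmware", blob_size);
    memcpy(memory_region_get_ram_ptr(mr), blob, blob_size);
    /* optionally: memory_region_set_readonly(mr, true); (declared below) */
}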
410
411 /**
412 * memory_region_set_log: Turn dirty logging on or off for a region.
413 *
414 * Turns dirty logging on or off for a specified client (display, migration).
415 * Only meaningful for RAM regions.
416 *
417 * @mr: the memory region being updated.
418 * @log: whether dirty logging is to be enabled or disabled.
419 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
420 * %DIRTY_MEMORY_VGA.
421 */
422 void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
423
424 /**
425 * memory_region_get_dirty: Check whether a range of bytes is dirty
426 * for a specified client.
427 *
428 * Checks whether a range of bytes has been written to since the last
429 * call to memory_region_reset_dirty() with the same @client. Dirty logging
430 * must be enabled.
431 *
432 * @mr: the memory region being queried.
433 * @addr: the address (relative to the start of the region) being queried.
434 * @size: the size of the range being queried.
435 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
436 * %DIRTY_MEMORY_VGA.
437 */
438 bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
439 hwaddr size, unsigned client);
440
441 /**
442 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
443 *
444 * Marks a range of bytes as dirty, after it has been dirtied outside
445 * guest code.
446 *
447 * @mr: the memory region being dirtied.
448 * @addr: the address (relative to the start of the region) being dirtied.
449 * @size: size of the range being dirtied.
450 */
451 void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
452 hwaddr size);
453
454 /**
455 * memory_region_test_and_clear_dirty: Check whether a range of bytes is dirty
456 * for a specified client, and clear the corresponding dirty flags.
457 *
458 * Checks whether a range of bytes has been written to since the last
459 * call to memory_region_reset_dirty() with the same @client. Dirty logging
460 * must be enabled.
461 *
462 * @mr: the memory region being queried.
463 * @addr: the address (relative to the start of the region) being queried.
464 * @size: the size of the range being queried.
465 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
466 * %DIRTY_MEMORY_VGA.
467 */
468 bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
469 hwaddr size, unsigned client);
470 /**
471 * memory_region_sync_dirty_bitmap: Synchronize a region's dirty bitmap with
472 * any external TLBs (e.g. kvm)
473 *
474 * Flushes dirty information from accelerators such as kvm and vhost-net
475 * and makes it available to users of the memory API.
476 *
477 * @mr: the region being flushed.
478 */
479 void memory_region_sync_dirty_bitmap(MemoryRegion *mr);
480
481 /**
482 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
483 * client.
484 *
485 * Marks a range of pages as no longer dirty.
486 *
487 * @mr: the region being updated.
488 * @addr: the start of the subrange being cleaned.
489 * @size: the size of the subrange being cleaned.
490 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
491 * %DIRTY_MEMORY_VGA.
492 */
493 void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
494 hwaddr size, unsigned client);
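
/*
 * Example (illustrative only): how a display device might consume the VGA
 * dirty log for a RAM-backed framebuffer, assuming dirty logging was enabled
 * earlier with memory_region_set_log(vram, true, DIRTY_MEMORY_VGA).  The
 * scanline geometry is invented.
 */
static void example_update_display(MemoryRegion *vram)
{
    const hwaddr stride = 1024 * 4;            /* bytes per scanline */
    int y;

    memory_region_sync_dirty_bitmap(vram);     /* pull updates from e.g. kvm */
    for (y = 0; y < 768; y++) {
        if (memory_region_get_dirty(vram, y * stride, stride,
                                    DIRTY_MEMORY_VGA)) {
            /* ... redraw scanline y from the framebuffer ... */
            memory_region_reset_dirty(vram, y * stride, stride,
                                      DIRTY_MEMORY_VGA);
        }
    }
}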
495
496 /**
497 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
498 *
499 * Allows a memory region to be marked as read-only (turning it into a ROM).
500 * Only useful on RAM regions.
501 *
502 * @mr: the region being updated.
503 * @readonly: whether the region is to be ROM or RAM.
504 */
505 void memory_region_set_readonly(MemoryRegion *mr, bool readonly);
506
507 /**
508 * memory_region_rom_device_set_romd: enable/disable ROMD mode
509 *
510 * Allows a ROM device (initialized with memory_region_init_rom_device()) to be
511 * set to ROMD mode (default) or MMIO mode. When it is in ROMD mode, the
512 * device is mapped to guest memory and satisfies read access directly.
513 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
514 * Writes are always handled by the #MemoryRegion.write function.
515 *
516 * @mr: the memory region to be updated
517 * @romd_mode: %true to put the region into ROMD mode
518 */
519 void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);
520
521 /**
522 * memory_region_set_coalescing: Enable memory coalescing for the region.
523 *
524 * Enables writes to a region to be queued for later processing. MMIO ->write
525 * callbacks may be delayed until a non-coalesced MMIO is issued.
526 * Only useful for IO regions. Roughly similar to write-combining hardware.
527 *
528 * @mr: the memory region to be write coalesced
529 */
530 void memory_region_set_coalescing(MemoryRegion *mr);
531
532 /**
533 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
534 * a region.
535 *
536 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
537 * Multiple calls can be issued to coalesce disjoint ranges.
538 *
539 * @mr: the memory region to be updated.
540 * @offset: the start of the range within the region to be coalesced.
541 * @size: the size of the subrange to be coalesced.
542 */
543 void memory_region_add_coalescing(MemoryRegion *mr,
544 hwaddr offset,
545 uint64_t size);
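
/*
 * Example (illustrative only): coalescing writes to the data window of a
 * hypothetical VGA-like MMIO region while leaving its control registers
 * uncoalesced.  The offsets are invented.
 */
static void example_enable_coalescing(MemoryRegion *mmio)
{
    /* queue writes to the 0x400-byte data window starting at offset 0x100 */
    memory_region_add_coalescing(mmio, 0x100, 0x400);
}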
546
547 /**
548 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
549 *
550 * Disables any coalescing caused by memory_region_set_coalescing() or
551 * memory_region_add_coalescing(). Roughly equivalent to uncacheable memory
552 * hardware.
553 *
554 * @mr: the memory region to be updated.
555 */
556 void memory_region_clear_coalescing(MemoryRegion *mr);
557
558 /**
559 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
560 * accesses.
561 *
562 * Ensures that pending coalesced MMIO requests are flushed before the memory
563 * region is accessed. This property is automatically enabled for all regions
564 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
565 *
566 * @mr: the memory region to be updated.
567 */
568 void memory_region_set_flush_coalesced(MemoryRegion *mr);
569
570 /**
571 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
572 * accesses.
573 *
574 * Clear the automatic coalesced MMIO flushing enabled via
575 * memory_region_set_flush_coalesced(). Note that this service has no effect on
576 * memory regions that have MMIO coalescing enabled for themselves. For them,
577 * automatic flushing will stop once coalescing is disabled.
578 *
579 * @mr: the memory region to be updated.
580 */
581 void memory_region_clear_flush_coalesced(MemoryRegion *mr);
582
583 /**
584 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
585 * is written to a location.
586 *
587 * Marks a word in an IO region (initialized with memory_region_init_io())
588 * as a trigger for an eventfd event. The I/O callback will not be called.
589 * The caller must be prepared to handle failure (that is, take the required
590 * action if the callback _is_ called).
591 *
592 * @mr: the memory region being updated.
593 * @addr: the address within @mr that is to be monitored
594 * @size: the size of the access to trigger the eventfd
595 * @match_data: whether to match against @data, instead of just @addr
596 * @data: the data to match against the guest write
597 * @e: event notifier to be triggered when @addr, @size, and @data all match.
598 **/
599 void memory_region_add_eventfd(MemoryRegion *mr,
600 hwaddr addr,
601 unsigned size,
602 bool match_data,
603 uint64_t data,
604 EventNotifier *e);
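
/*
 * Example (illustrative only): wiring a "doorbell" register to an
 * EventNotifier so that guest writes of a specific value wake another thread
 * without going through the MMIO callbacks.  The offset and match value are
 * invented, and event_notifier_init() from "qemu/event_notifier.h" is assumed
 * to be available here.
 */
static void example_add_doorbell(MemoryRegion *mmio, EventNotifier *notifier)
{
    event_notifier_init(notifier, false);
    memory_region_add_eventfd(mmio,
                              0x40,      /* doorbell register offset */
                              4,         /* 32-bit accesses only */
                              true,      /* match the written value ... */
                              0x1,       /* ... against this constant */
                              notifier);
}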
605
606 /**
607 * memory_region_del_eventfd: Cancel an eventfd.
608 *
609 * Cancels an eventfd trigger requested by a previous
610 * memory_region_add_eventfd() call.
611 *
612 * @mr: the memory region being updated.
613 * @addr: the address within @mr that is to be monitored
614 * @size: the size of the access to trigger the eventfd
615 * @match_data: whether to match against @data, instead of just @addr
616 * @data: the data to match against the guest write
617 * @e: event notifier to be triggered when @addr, @size, and @data all match.
618 */
619 void memory_region_del_eventfd(MemoryRegion *mr,
620 hwaddr addr,
621 unsigned size,
622 bool match_data,
623 uint64_t data,
624 EventNotifier *e);
625
626 /**
627 * memory_region_add_subregion: Add a subregion to a container.
628 *
629 * Adds a subregion at @offset. The subregion may not overlap with other
630 * subregions (except for those explicitly marked as overlapping). A region
631 * may only be added once as a subregion (unless removed with
632 * memory_region_del_subregion()); use memory_region_init_alias() if you
633 * want a region to be a subregion in multiple locations.
634 *
635 * @mr: the region to contain the new subregion; must be a container
636 * initialized with memory_region_init().
637 * @offset: the offset relative to @mr where @subregion is added.
638 * @subregion: the subregion to be added.
639 */
640 void memory_region_add_subregion(MemoryRegion *mr,
641 hwaddr offset,
642 MemoryRegion *subregion);
643 /**
644 * memory_region_add_subregion_overlap: Add a subregion to a container
645 * with overlap.
646 *
647 * Adds a subregion at @offset. The subregion may overlap with other
648 * subregions. Conflicts are resolved by having a higher @priority hide a
649 * lower @priority. Subregions without priority are taken as @priority 0.
650 * A region may only be added once as a subregion (unless removed with
651 * memory_region_del_subregion()); use memory_region_init_alias() if you
652 * want a region to be a subregion in multiple locations.
653 *
654 * @mr: the region to contain the new subregion; must be a container
655 * initialized with memory_region_init().
656 * @offset: the offset relative to @mr where @subregion is added.
657 * @subregion: the subregion to be added.
658 * @priority: used for resolving overlaps; highest priority wins.
659 */
660 void memory_region_add_subregion_overlap(MemoryRegion *mr,
661 hwaddr offset,
662 MemoryRegion *subregion,
663 unsigned priority);
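
/*
 * Example (illustrative only): a container in which a small MMIO window
 * shadows part of a larger RAM region.  The MMIO region is added with a
 * higher priority, so it wins where the two overlap while RAM stays visible
 * everywhere else.  Sizes and offsets are invented; @mmio is assumed to have
 * been initialized with memory_region_init_io().
 */
static void example_build_overlapping_layout(MemoryRegion *container,
                                             MemoryRegion *ram,
                                             MemoryRegion *mmio)
{
    memory_region_init(container, "example-container", 0x10000000);
    memory_region_init_ram(ram, "example-ram", 0x10000000);

    memory_region_add_subregion_overlap(container, 0x0, ram, 0);
    memory_region_add_subregion_overlap(container, 0x8000000, mmio, 1);
}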
664
665 /**
666 * memory_region_get_ram_addr: Get the ram address associated with a memory
667 * region
668 *
669 * DO NOT USE THIS FUNCTION. This is a temporary workaround while the Xen
670 * code is being reworked.
671 */
672 ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
673
674 /**
675 * memory_region_del_subregion: Remove a subregion.
676 *
677 * Removes a subregion from its container.
678 *
679 * @mr: the container to be updated.
680 * @subregion: the region being removed; must be a current subregion of @mr.
681 */
682 void memory_region_del_subregion(MemoryRegion *mr,
683 MemoryRegion *subregion);
684
685 /*
686 * memory_region_set_enabled: dynamically enable or disable a region
687 *
688 * Enables or disables a memory region. A disabled memory region
689 * ignores all accesses to itself and its subregions. It does not
690 * obscure sibling subregions with lower priority - it simply behaves as
691 * if it was removed from the hierarchy.
692 *
693 * Regions default to being enabled.
694 *
695 * @mr: the region to be updated
696 * @enabled: whether to enable or disable the region
697 */
698 void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
699
700 /*
701 * memory_region_set_address: dynamically update the address of a region
702 *
703 * Dynamically updates the address of a region, relative to its parent.
704 * May be used on regions that are currently part of a memory hierarchy.
705 *
706 * @mr: the region to be updated
707 * @addr: new address, relative to parent region
708 */
709 void memory_region_set_address(MemoryRegion *mr, hwaddr addr);
710
711 /*
712 * memory_region_set_alias_offset: dynamically update a memory alias's offset
713 *
714 * Dynamically updates the offset into the target region that an alias points
715 * to, as if the fourth argument to memory_region_init_alias() had changed.
716 *
717 * @mr: the #MemoryRegion to be updated; should be an alias.
718 * @offset: the new offset into the target memory region
719 */
720 void memory_region_set_alias_offset(MemoryRegion *mr,
721 hwaddr offset);
722
723 /**
724 * memory_region_find: translate an address/size relative to a
725 * MemoryRegion into a #MemoryRegionSection.
726 *
727 * Locates the first #MemoryRegion within @mr that overlaps the range
728 * given by @addr and @size.
729 *
730 * Returns a #MemoryRegionSection that describes a contiguous overlap.
731 * It will have the following characteristics:
732 * .@size = 0 iff no overlap was found
733 * .@mr is non-%NULL iff an overlap was found
734 *
735 * Remember that in the return value the @offset_within_region is
736 * relative to the returned region (in the .@mr field), not to the
737 * @mr argument.
738 *
739 * Similarly, the .@offset_within_address_space is relative to the
740 * address space that contains both regions, the passed and the
741 * returned one. However, in the special case where the @mr argument
742 * has no parent (and thus is the root of the address space), the
743 * following will hold:
744 * .@offset_within_address_space >= @addr
745 * .@offset_within_address_space + .@size <= @addr + @size
746 *
747 * @mr: a MemoryRegion within which @addr is a relative address
748 * @addr: start of the area within @mr to be searched
749 * @size: size of the area to be searched
750 */
751 MemoryRegionSection memory_region_find(MemoryRegion *mr,
752 hwaddr addr, uint64_t size);
753
754 /**
755 * address_space_sync_dirty_bitmap: synchronize the dirty log for all memory
756 *
757 * Synchronizes the dirty page log for an entire address space.
758 * @as: the address space that contains the memory being synchronized
759 */
760 void address_space_sync_dirty_bitmap(AddressSpace *as);
761
762 /**
763 * memory_region_transaction_begin: Start a transaction.
764 *
765 * During a transaction, changes will be accumulated and made visible
766 * only when the transaction ends (is committed).
767 */
768 void memory_region_transaction_begin(void);
769
770 /**
771 * memory_region_transaction_commit: Commit a transaction and make changes
772 * visible to the guest.
773 */
774 void memory_region_transaction_commit(void);
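
/*
 * Example (illustrative only): batching related topology changes so that
 * listeners (and the guest) only observe the final state, roughly what a PCI
 * BAR remap needs to do.  The @bar region and @new_base are placeholders.
 */
static void example_remap_region(MemoryRegion *bar, hwaddr new_base)
{
    memory_region_transaction_begin();
    memory_region_set_enabled(bar, false);
    memory_region_set_address(bar, new_base);
    memory_region_set_enabled(bar, true);
    memory_region_transaction_commit();       /* all changes become visible */
}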
775
776 /**
777 * memory_listener_register: register callbacks to be called when memory
778 * sections are mapped into or unmapped from an address
779 * space
780 *
781 * @listener: an object containing the callbacks to be called
782 * @filter: if non-%NULL, only regions in this address space will be observed
783 */
784 void memory_listener_register(MemoryListener *listener, AddressSpace *filter);
785
786 /**
787 * memory_listener_unregister: undo the effect of memory_listener_register()
788 *
789 * @listener: an object containing the callbacks to be removed
790 */
791 void memory_listener_unregister(MemoryListener *listener);
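
/*
 * Example (illustrative only): a MemoryListener that merely reports regions
 * as they are mapped into an address space.  Callbacks left NULL are assumed
 * to be skipped; the address_space_memory global mentioned in the trailing
 * comment comes from "exec/address-spaces.h".
 */
static void example_region_add(MemoryListener *listener,
                               MemoryRegionSection *section)
{
    printf("mapped %s at 0x%" PRIx64 "\n",
           memory_region_name(section->mr),
           (uint64_t)section->offset_within_address_space);
}

static MemoryListener example_listener = {
    .region_add = example_region_add,
    .priority = 10,
};

/* Typically registered once during setup, e.g.:
 *     memory_listener_register(&example_listener, &address_space_memory);
 */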
792
793 /**
794 * memory_global_dirty_log_start: begin dirty logging for all regions
795 */
796 void memory_global_dirty_log_start(void);
797
798 /**
799 * memory_global_dirty_log_stop: end dirty logging for all regions
800 */
801 void memory_global_dirty_log_stop(void);
802
803 void mtree_info(fprintf_function mon_printf, void *f);
804
805 /**
806 * address_space_init: initializes an address space
807 *
808 * @as: an uninitialized #AddressSpace
809 * @root: a #MemoryRegion that routes addresses for the address space
810 */
811 void address_space_init(AddressSpace *as, MemoryRegion *root);
812
813
814 /**
815 * address_space_destroy: destroy an address space
816 *
817 * Releases all resources associated with an address space. After an address space
818 * is destroyed, its root memory region (given by address_space_init()) may be destroyed
819 * as well.
820 *
821 * @as: address space to be destroyed
822 */
823 void address_space_destroy(AddressSpace *as);
824
825 /**
826 * address_space_rw: read from or write to an address space.
827 *
828 * Return true if the operation hit any unassigned memory.
829 *
830 * @as: #AddressSpace to be accessed
831 * @addr: address within that address space
832 * @buf: buffer with the data transferred
833 * @is_write: indicates the transfer direction
834 */
835 bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
836 int len, bool is_write);
837
838 /**
839 * address_space_write: write to address space.
840 *
841 * Return true if the operation hit any unassigned memory.
842 *
843 * @as: #AddressSpace to be accessed
844 * @addr: address within that address space
845 * @buf: buffer with the data transferred
846 */
847 bool address_space_write(AddressSpace *as, hwaddr addr,
848 const uint8_t *buf, int len);
849
850 /**
851 * address_space_read: read from an address space.
852 *
853 * Return true if the operation hit any unassigned memory.
854 *
855 * @as: #AddressSpace to be accessed
856 * @addr: address within that address space
857 * @buf: buffer with the data transferred
858 */
859 bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len);
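
/*
 * Example (illustrative only): writing a value into guest memory and reading
 * it back through the address space API.  @as is assumed to have been set up
 * with address_space_init() and @addr to point at guest RAM.
 */
static bool example_write_then_read(AddressSpace *as, hwaddr addr)
{
    uint32_t value = 0x12345678;
    uint32_t check = 0;

    if (address_space_write(as, addr, (const uint8_t *)&value,
                            sizeof(value))) {
        return false;                     /* hit unassigned memory */
    }
    address_space_read(as, addr, (uint8_t *)&check, sizeof(check));
    return check == value;
}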
860
861 /* address_space_translate: translate an address range within an address space
862 * into a MemoryRegionSection and an address range within that section
863 *
864 * @as: #AddressSpace to be accessed
865 * @addr: address within that address space
866 * @xlat: pointer to address within the returned memory region section's
867 * #MemoryRegion.
868 * @len: pointer to length
869 * @is_write: indicates the transfer direction
870 */
871 MemoryRegionSection *address_space_translate(AddressSpace *as, hwaddr addr,
872 hwaddr *xlat, hwaddr *len,
873 bool is_write);
874
875 /* address_space_access_valid: check for validity of accessing an address
876 * space range
877 *
878 * Check whether memory is assigned to the given address space range.
879 *
880 * For now, addr and len should be aligned to a page size. This limitation
881 * will be lifted in the future.
882 *
883 * @as: #AddressSpace to be accessed
884 * @addr: address within that address space
885 * @len: length of the area to be checked
886 * @is_write: indicates the transfer direction
887 */
888 bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write);
889
890 /* address_space_map: map a physical memory region into a host virtual address
891 *
892 * May map a subset of the requested range, given by and returned in @plen.
893 * May return %NULL if resources needed to perform the mapping are exhausted.
894 * Use only for reads OR writes - not for read-modify-write operations.
895 * Use cpu_register_map_client() to know when retrying the map operation is
896 * likely to succeed.
897 *
898 * @as: #AddressSpace to be accessed
899 * @addr: address within that address space
900 * @plen: pointer to length of buffer; updated on return
901 * @is_write: indicates the transfer direction
902 */
903 void *address_space_map(AddressSpace *as, hwaddr addr,
904 hwaddr *plen, bool is_write);
905
906 /* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
907 *
908 * Will also mark the memory as dirty if @is_write == %true. @access_len gives
909 * the amount of memory that was actually read or written by the caller.
910 *
911 * @as: #AddressSpace used
912 * @buffer: host pointer as returned by address_space_map()
913 * @len: buffer length as returned by address_space_map()
914 * @access_len: amount of data actually transferred
915 * @is_write: indicates the transfer direction
916 */
917 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
918 int is_write, hwaddr access_len);
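
/*
 * Example (illustrative only): the usual map/access/unmap pattern for a
 * device DMA read from guest memory.  A real device must cope with the
 * region being mapped short (@plen coming back smaller than @len) or not at
 * all; this sketch simply gives up in those cases.
 */
static void example_dma_read(AddressSpace *as, hwaddr addr, hwaddr len)
{
    hwaddr plen = len;
    void *buf = address_space_map(as, addr, &plen, false /* is_write */);

    if (!buf || plen < len) {
        if (buf) {
            address_space_unmap(as, buf, plen, false, 0);
        }
        return;                           /* resources exhausted; retry later */
    }
    /* ... consume plen bytes at buf ... */
    address_space_unmap(as, buf, plen, false, plen);
}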
919
920
921 #endif
922
923 #endif