/*
 * (gitweb scrape header, preserved as a comment so the file stays parseable:
 *  git.proxmox.com mirror_qemu.git blob — include/exec/memory.h
 *  commit: "memory: expose alignment used for allocating RAM as
 *  MemoryRegion API")
 */
1 /*
2 * Physical memory management API
3 *
4 * Copyright 2011 Red Hat, Inc. and/or its affiliates
5 *
6 * Authors:
7 * Avi Kivity <avi@redhat.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
11 *
12 */
13
14 #ifndef MEMORY_H
15 #define MEMORY_H
16
17 #ifndef CONFIG_USER_ONLY
18
19 #define DIRTY_MEMORY_VGA 0
20 #define DIRTY_MEMORY_CODE 1
21 #define DIRTY_MEMORY_MIGRATION 2
22 #define DIRTY_MEMORY_NUM 3 /* num of dirty bits */
23
24 #include <stdint.h>
25 #include <stdbool.h>
26 #include "qemu-common.h"
27 #include "exec/cpu-common.h"
28 #ifndef CONFIG_USER_ONLY
29 #include "exec/hwaddr.h"
30 #endif
31 #include "qemu/queue.h"
32 #include "qemu/int128.h"
33 #include "qemu/notify.h"
34 #include "qapi/error.h"
35 #include "qom/object.h"
36
37 #define MAX_PHYS_ADDR_SPACE_BITS 62
38 #define MAX_PHYS_ADDR (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)
39
40 #define TYPE_MEMORY_REGION "qemu:memory-region"
41 #define MEMORY_REGION(obj) \
42 OBJECT_CHECK(MemoryRegion, (obj), TYPE_MEMORY_REGION)
43
44 typedef struct MemoryRegionOps MemoryRegionOps;
45 typedef struct MemoryRegionMmio MemoryRegionMmio;
46
/*
 * Legacy MMIO callback table, used via MemoryRegionOps.old_mmio when no
 * .read/.write callbacks are supplied.  Three handlers per direction —
 * presumably indexed by access size (byte/word/long); TODO confirm against
 * the dispatch code in memory.c.
 */
struct MemoryRegionMmio {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
};
51
52 typedef struct IOMMUTLBEntry IOMMUTLBEntry;
53
/* See address_space_translate: bit 0 is read, bit 1 is write. */
typedef enum {
    IOMMU_NONE = 0, /* no access permitted */
    IOMMU_RO = 1,   /* read-only (bit 0 set) */
    IOMMU_WO = 2,   /* write-only (bit 1 set) */
    IOMMU_RW = 3,   /* read and write (both bits set) */
} IOMMUAccessFlags;
61
/*
 * One IOMMU translation: maps an I/O virtual address range onto an address
 * in @target_as.  Produced by MemoryRegionIOMMUOps.translate and delivered
 * to observers via memory_region_notify_iommu().
 */
struct IOMMUTLBEntry {
    AddressSpace *target_as;    /* address space the translated access targets */
    hwaddr iova;                /* I/O virtual address (input side) */
    hwaddr translated_addr;     /* corresponding address within @target_as */
    hwaddr addr_mask;           /* 0xfff = 4k translation */
    IOMMUAccessFlags perm;      /* permissions; IOMMU_NONE (0) marks a deleted
                                 * entry (see memory_region_notify_iommu) */
};
69
/*
 * Memory region callbacks: device-supplied access hooks plus the
 * access-size/alignment constraints the memory core honours when
 * dispatching guest accesses to this region.
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     hwaddr addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  hwaddr addr,
                  uint64_t data,
                  unsigned size);

    /* Byte order the device presents to the guest. */
    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specify bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
        /*
         * If present, and returns #false, the transaction is not accepted
         * by the device (and results in machine dependent behaviour such
         * as a machine check exception).
         */
        bool (*accepts)(void *opaque, hwaddr addr,
                        unsigned size, bool is_write);
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented.  Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;

    /* If .read and .write are not present, old_mmio may be used for
     * backwards compatibility with old mmio registration
     */
    const MemoryRegionMmio old_mmio;
};
127
128 typedef struct MemoryRegionIOMMUOps MemoryRegionIOMMUOps;
129
/* Callbacks implemented by an IOMMU region (memory_region_init_iommu). */
struct MemoryRegionIOMMUOps {
    /* Return a TLB entry that contains a given address.  @is_write selects
     * the access direction being translated (cf. IOMMUAccessFlags). */
    IOMMUTLBEntry (*translate)(MemoryRegion *iommu, hwaddr addr, bool is_write);
};
134
135 typedef struct CoalescedMemoryRange CoalescedMemoryRange;
136 typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;
137
struct MemoryRegion {
    Object parent_obj;                     /* MemoryRegion is a QOM object */
    /* All fields are private - violators will be prosecuted */
    const MemoryRegionOps *ops;            /* access callbacks and constraints */
    const MemoryRegionIOMMUOps *iommu_ops; /* set for IOMMU regions
                                            * (memory_region_init_iommu) */
    void *opaque;                          /* passed to the @ops callbacks */
    MemoryRegion *container;               /* region this is a subregion of */
    Int128 size;                           /* 128 bits so the full 2^64-byte
                                            * range is representable */
    hwaddr addr;                           /* offset within @container */
    void (*destructor)(MemoryRegion *mr);  /* teardown hook — NOTE(review):
                                            * invocation point not visible
                                            * here; confirm in memory.c */
    ram_addr_t ram_addr;                   /* backing RAM block address (see
                                            * memory_region_get_ram_addr) */
    uint64_t align;                        /* alignment used for allocating
                                            * RAM; exposed via
                                            * memory_region_get_alignment() */
    bool subpage;
    bool terminates;
    bool romd_mode;                        /* ROM device allows direct reads
                                            * (see memory_region_is_romd) */
    bool ram;                              /* see memory_region_is_ram */
    bool skip_dump;                        /* excluded from dumps (see
                                            * memory_region_set_skip_dump) */
    bool readonly; /* For RAM regions */
    bool enabled;                          /* see memory_region_set_enabled;
                                            * regions default to enabled */
    bool rom_device;                       /* created by
                                            * memory_region_init_rom_device */
    bool warning_printed; /* For reservations */
    bool flush_coalesced_mmio;             /* flush coalesced MMIO before
                                            * accesses (set_flush_coalesced) */
    MemoryRegion *alias;                   /* target region when this is an
                                            * alias (memory_region_init_alias) */
    hwaddr alias_offset;                   /* offset into @alias */
    int32_t priority;                      /* resolves overlaps; see
                                            * memory_region_add_subregion_overlap */
    bool may_overlap;                      /* added with the _overlap variant */
    QTAILQ_HEAD(subregions, MemoryRegion) subregions;      /* children */
    QTAILQ_ENTRY(MemoryRegion) subregions_link;            /* link in parent */
    QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
    const char *name;                      /* debug name (memory_region_name) */
    uint8_t dirty_log_mask;                /* mask of DIRTY_MEMORY_* clients
                                            * (see memory_region_set_log) */
    unsigned ioeventfd_nb;                 /* number of entries in @ioeventfds */
    MemoryRegionIoeventfd *ioeventfds;     /* see memory_region_add_eventfd */
    NotifierList iommu_notify;             /* notifiers registered via
                                            * memory_region_register_iommu_notifier */
};
173
/**
 * MemoryListener: callbacks structure for updates to the physical memory map
 *
 * Allows a component to adjust to changes in the guest-visible memory map.
 * Use with memory_listener_register() and memory_listener_unregister().
 * NOTE(review): callbacks appear to be optional (NULL = not interested) —
 * confirm the NULL checks in memory.c before relying on that.
 */
struct MemoryListener {
    /* Bracket a batch of updates (cf. memory_region_transaction_begin). */
    void (*begin)(MemoryListener *listener);
    void (*commit)(MemoryListener *listener);
    /* Topology updates, one call per affected section. */
    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
    /* Dirty-logging control and synchronization (cf. memory_region_set_log). */
    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_global_start)(MemoryListener *listener);
    void (*log_global_stop)(MemoryListener *listener);
    /* Ioeventfd updates (cf. memory_region_add_eventfd). */
    void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    /* Coalesced MMIO range updates (cf. memory_region_add_coalescing). */
    void (*coalesced_mmio_add)(MemoryListener *listener, MemoryRegionSection *section,
                               hwaddr addr, hwaddr len);
    void (*coalesced_mmio_del)(MemoryListener *listener, MemoryRegionSection *section,
                               hwaddr addr, hwaddr len);
    /* Lower = earlier (during add), later (during del) */
    unsigned priority;
    /* Restrict delivery to one address space — NOTE(review): inferred from
     * the name; confirm filtering logic in memory.c. */
    AddressSpace *address_space_filter;
    QTAILQ_ENTRY(MemoryListener) link;  /* link in the global listener list */
};
204
/**
 * AddressSpace: describes a mapping of addresses to #MemoryRegion objects
 */
struct AddressSpace {
    /* All fields are private. */
    char *name;                               /* name of this address space */
    MemoryRegion *root;                       /* root region of the space */
    struct FlatView *current_map;             /* flattened view of @root */
    /* ioeventfds currently active in this address space */
    int ioeventfd_nb;
    struct MemoryRegionIoeventfd *ioeventfds;
    /* Dispatch tables — NOTE(review): the names and the paired
     * begin/commit listener callbacks suggest @next_dispatch is built
     * during a topology update and swapped into @dispatch on commit;
     * confirm in exec.c. */
    struct AddressSpaceDispatch *dispatch;
    struct AddressSpaceDispatch *next_dispatch;
    MemoryListener dispatch_listener;         /* keeps dispatch up to date */

    QTAILQ_ENTRY(AddressSpace) address_spaces_link;  /* global AS list link */
};
221
/**
 * MemoryRegionSection: describes a fragment of a #MemoryRegion
 *
 * Returned by-value from memory_region_find() and passed to
 * #MemoryListener callbacks.
 *
 * @mr: the region, or %NULL if empty
 * @address_space: the address space the region is mapped in
 * @offset_within_region: the beginning of the section, relative to @mr's start
 * @size: the size of the section; will not exceed @mr's boundaries
 * @offset_within_address_space: the address of the first byte of the section
 *     relative to the region's address space
 * @readonly: writes to this section are ignored
 */
struct MemoryRegionSection {
    MemoryRegion *mr;
    AddressSpace *address_space;
    hwaddr offset_within_region;
    Int128 size;          /* Int128: a section may span the whole 2^64 range */
    hwaddr offset_within_address_space;
    bool readonly;
};
241
242 /**
243 * memory_region_init: Initialize a memory region
244 *
245 * The region typically acts as a container for other memory regions. Use
246 * memory_region_add_subregion() to add subregions.
247 *
248 * @mr: the #MemoryRegion to be initialized
249 * @owner: the object that tracks the region's reference count
250 * @name: used for debugging; not visible to the user or ABI
251 * @size: size of the region; any subregions beyond this size will be clipped
252 */
253 void memory_region_init(MemoryRegion *mr,
254 struct Object *owner,
255 const char *name,
256 uint64_t size);
257
258 /**
259 * memory_region_ref: Add 1 to a memory region's reference count
260 *
261 * Whenever memory regions are accessed outside the BQL, they need to be
262 * preserved against hot-unplug. MemoryRegions actually do not have their
263 * own reference count; they piggyback on a QOM object, their "owner".
264 * This function adds a reference to the owner.
265 *
266 * All MemoryRegions must have an owner if they can disappear, even if the
267 * device they belong to operates exclusively under the BQL. This is because
268 * the region could be returned at any time by memory_region_find, and this
269 * is usually under guest control.
270 *
271 * @mr: the #MemoryRegion
272 */
273 void memory_region_ref(MemoryRegion *mr);
274
275 /**
276  * memory_region_unref: Remove 1 from a memory region's reference count
277 *
278 * Whenever memory regions are accessed outside the BQL, they need to be
279 * preserved against hot-unplug. MemoryRegions actually do not have their
280 * own reference count; they piggyback on a QOM object, their "owner".
281 * This function removes a reference to the owner and possibly destroys it.
282 *
283 * @mr: the #MemoryRegion
284 */
285 void memory_region_unref(MemoryRegion *mr);
286
287 /**
288 * memory_region_init_io: Initialize an I/O memory region.
289 *
290 * Accesses into the region will cause the callbacks in @ops to be called.
291  * If @size is nonzero, subregions will be clipped to @size.
292 *
293 * @mr: the #MemoryRegion to be initialized.
294 * @owner: the object that tracks the region's reference count
295 * @ops: a structure containing read and write callbacks to be used when
296 * I/O is performed on the region.
297  * @opaque: passed to the read and write callbacks of the @ops structure.
298 * @name: used for debugging; not visible to the user or ABI
299 * @size: size of the region.
300 */
301 void memory_region_init_io(MemoryRegion *mr,
302 struct Object *owner,
303 const MemoryRegionOps *ops,
304 void *opaque,
305 const char *name,
306 uint64_t size);
307
308 /**
309 * memory_region_init_ram: Initialize RAM memory region. Accesses into the
310 * region will modify memory directly.
311 *
312 * @mr: the #MemoryRegion to be initialized.
313 * @owner: the object that tracks the region's reference count
314 * @name: the name of the region.
315 * @size: size of the region.
316 * @errp: pointer to Error*, to store an error if it happens.
317 */
318 void memory_region_init_ram(MemoryRegion *mr,
319 struct Object *owner,
320 const char *name,
321 uint64_t size,
322 Error **errp);
323
324 #ifdef __linux__
325 /**
326 * memory_region_init_ram_from_file: Initialize RAM memory region with a
327 * mmap-ed backend.
328 *
329 * @mr: the #MemoryRegion to be initialized.
330 * @owner: the object that tracks the region's reference count
331 * @name: the name of the region.
332 * @size: size of the region.
333 * @share: %true if memory must be mmaped with the MAP_SHARED flag
334 * @path: the path in which to allocate the RAM.
335 * @errp: pointer to Error*, to store an error if it happens.
336 */
337 void memory_region_init_ram_from_file(MemoryRegion *mr,
338 struct Object *owner,
339 const char *name,
340 uint64_t size,
341 bool share,
342 const char *path,
343 Error **errp);
344 #endif
345
346 /**
347 * memory_region_init_ram_ptr: Initialize RAM memory region from a
348 * user-provided pointer. Accesses into the
349 * region will modify memory directly.
350 *
351 * @mr: the #MemoryRegion to be initialized.
352 * @owner: the object that tracks the region's reference count
353 * @name: the name of the region.
354 * @size: size of the region.
355 * @ptr: memory to be mapped; must contain at least @size bytes.
356 */
357 void memory_region_init_ram_ptr(MemoryRegion *mr,
358 struct Object *owner,
359 const char *name,
360 uint64_t size,
361 void *ptr);
362
363 /**
364 * memory_region_init_alias: Initialize a memory region that aliases all or a
365 * part of another memory region.
366 *
367 * @mr: the #MemoryRegion to be initialized.
368 * @owner: the object that tracks the region's reference count
369 * @name: used for debugging; not visible to the user or ABI
370 * @orig: the region to be referenced; @mr will be equivalent to
371 * @orig between @offset and @offset + @size - 1.
372 * @offset: start of the section in @orig to be referenced.
373 * @size: size of the region.
374 */
375 void memory_region_init_alias(MemoryRegion *mr,
376 struct Object *owner,
377 const char *name,
378 MemoryRegion *orig,
379 hwaddr offset,
380 uint64_t size);
381
382 /**
383 * memory_region_init_rom_device: Initialize a ROM memory region. Writes are
384 * handled via callbacks.
385 *
386 * @mr: the #MemoryRegion to be initialized.
387 * @owner: the object that tracks the region's reference count
388 * @ops: callbacks for write access handling.
389 * @name: the name of the region.
390 * @size: size of the region.
391 * @errp: pointer to Error*, to store an error if it happens.
392 */
393 void memory_region_init_rom_device(MemoryRegion *mr,
394 struct Object *owner,
395 const MemoryRegionOps *ops,
396 void *opaque,
397 const char *name,
398 uint64_t size,
399 Error **errp);
400
401 /**
402 * memory_region_init_reservation: Initialize a memory region that reserves
403 * I/O space.
404 *
405  * A reservation region primarily serves debugging purposes.  It claims I/O
406 * space that is not supposed to be handled by QEMU itself. Any access via
407 * the memory API will cause an abort().
408 *
409 * @mr: the #MemoryRegion to be initialized
410 * @owner: the object that tracks the region's reference count
411 * @name: used for debugging; not visible to the user or ABI
412 * @size: size of the region.
413 */
414 void memory_region_init_reservation(MemoryRegion *mr,
415 struct Object *owner,
416 const char *name,
417 uint64_t size);
418
419 /**
420 * memory_region_init_iommu: Initialize a memory region that translates
421 * addresses
422 *
423 * An IOMMU region translates addresses and forwards accesses to a target
424 * memory region.
425 *
426 * @mr: the #MemoryRegion to be initialized
427 * @owner: the object that tracks the region's reference count
428 * @ops: a function that translates addresses into the @target region
429 * @name: used for debugging; not visible to the user or ABI
430 * @size: size of the region.
431 */
432 void memory_region_init_iommu(MemoryRegion *mr,
433 struct Object *owner,
434 const MemoryRegionIOMMUOps *ops,
435 const char *name,
436 uint64_t size);
437
438 /**
439 * memory_region_owner: get a memory region's owner.
440 *
441 * @mr: the memory region being queried.
442 */
443 struct Object *memory_region_owner(MemoryRegion *mr);
444
445 /**
446 * memory_region_size: get a memory region's size.
447 *
448 * @mr: the memory region being queried.
449 */
450 uint64_t memory_region_size(MemoryRegion *mr);
451
452 /**
453 * memory_region_is_ram: check whether a memory region is random access
454 *
455  * Returns %true if a memory region is random access.
456 *
457 * @mr: the memory region being queried
458 */
459 bool memory_region_is_ram(MemoryRegion *mr);
460
461 /**
462 * memory_region_is_skip_dump: check whether a memory region should not be
463 * dumped
464 *
465  * Returns %true if a memory region should not be dumped (e.g. VFIO BAR MMAP).
466 *
467 * @mr: the memory region being queried
468 */
469 bool memory_region_is_skip_dump(MemoryRegion *mr);
470
471 /**
472 * memory_region_set_skip_dump: Set skip_dump flag, dump will ignore this memory
473 * region
474 *
475 * @mr: the memory region being queried
476 */
477 void memory_region_set_skip_dump(MemoryRegion *mr);
478
479 /**
480 * memory_region_is_romd: check whether a memory region is in ROMD mode
481 *
482 * Returns %true if a memory region is a ROM device and currently set to allow
483 * direct reads.
484 *
485 * @mr: the memory region being queried
486 */
487 static inline bool memory_region_is_romd(MemoryRegion *mr)
488 {
489 return mr->rom_device && mr->romd_mode;
490 }
491
492 /**
493 * memory_region_is_iommu: check whether a memory region is an iommu
494 *
495  * Returns %true if a memory region is an iommu.
496 *
497 * @mr: the memory region being queried
498 */
499 bool memory_region_is_iommu(MemoryRegion *mr);
500
501 /**
502 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
503 *
504 * @mr: the memory region that was changed
505 * @entry: the new entry in the IOMMU translation table. The entry
506 * replaces all old entries for the same virtual I/O address range.
507 * Deleted entries have .@perm == 0.
508 */
509 void memory_region_notify_iommu(MemoryRegion *mr,
510 IOMMUTLBEntry entry);
511
512 /**
513 * memory_region_register_iommu_notifier: register a notifier for changes to
514 * IOMMU translation entries.
515 *
516 * @mr: the memory region to observe
517 * @n: the notifier to be added; the notifier receives a pointer to an
518 * #IOMMUTLBEntry as the opaque value; the pointer ceases to be
519 * valid on exit from the notifier.
520 */
521 void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n);
522
523 /**
524 * memory_region_unregister_iommu_notifier: unregister a notifier for
525 * changes to IOMMU translation entries.
526 *
527 * @n: the notifier to be removed.
528 */
529 void memory_region_unregister_iommu_notifier(Notifier *n);
530
531 /**
532 * memory_region_name: get a memory region's name
533 *
534 * Returns the string that was used to initialize the memory region.
535 *
536 * @mr: the memory region being queried
537 */
538 const char *memory_region_name(const MemoryRegion *mr);
539
540 /**
541 * memory_region_is_logging: return whether a memory region is logging writes
542 *
543 * Returns %true if the memory region is logging writes
544 *
545 * @mr: the memory region being queried
546 */
547 bool memory_region_is_logging(MemoryRegion *mr);
548
549 /**
550 * memory_region_is_rom: check whether a memory region is ROM
551 *
552  * Returns %true if a memory region is read-only memory.
553 *
554 * @mr: the memory region being queried
555 */
556 bool memory_region_is_rom(MemoryRegion *mr);
557
558 /**
559 * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
560 *
561 * Returns a file descriptor backing a file-based RAM memory region,
562 * or -1 if the region is not a file-based RAM memory region.
563 *
564 * @mr: the RAM or alias memory region being queried.
565 */
566 int memory_region_get_fd(MemoryRegion *mr);
567
568 /**
569 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
570 *
571 * Returns a host pointer to a RAM memory region (created with
572 * memory_region_init_ram() or memory_region_init_ram_ptr()). Use with
573 * care.
574 *
575 * @mr: the memory region being queried.
576 */
577 void *memory_region_get_ram_ptr(MemoryRegion *mr);
578
579 /**
580 * memory_region_set_log: Turn dirty logging on or off for a region.
581 *
582 * Turns dirty logging on or off for a specified client (display, migration).
583 * Only meaningful for RAM regions.
584 *
585 * @mr: the memory region being updated.
586 * @log: whether dirty logging is to be enabled or disabled.
587 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
588 * %DIRTY_MEMORY_VGA.
589 */
590 void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
591
592 /**
593 * memory_region_get_dirty: Check whether a range of bytes is dirty
594 * for a specified client.
595 *
596 * Checks whether a range of bytes has been written to since the last
597 * call to memory_region_reset_dirty() with the same @client. Dirty logging
598 * must be enabled.
599 *
600 * @mr: the memory region being queried.
601 * @addr: the address (relative to the start of the region) being queried.
602 * @size: the size of the range being queried.
603 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
604 * %DIRTY_MEMORY_VGA.
605 */
606 bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
607 hwaddr size, unsigned client);
608
609 /**
610 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
611 *
612 * Marks a range of bytes as dirty, after it has been dirtied outside
613 * guest code.
614 *
615 * @mr: the memory region being dirtied.
616 * @addr: the address (relative to the start of the region) being dirtied.
617 * @size: size of the range being dirtied.
618 */
619 void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
620 hwaddr size);
621
622 /**
623 * memory_region_test_and_clear_dirty: Check whether a range of bytes is dirty
624 * for a specified client. It clears them.
625 *
626 * Checks whether a range of bytes has been written to since the last
627 * call to memory_region_reset_dirty() with the same @client. Dirty logging
628 * must be enabled.
629 *
630 * @mr: the memory region being queried.
631 * @addr: the address (relative to the start of the region) being queried.
632 * @size: the size of the range being queried.
633 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
634 * %DIRTY_MEMORY_VGA.
635 */
636 bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
637 hwaddr size, unsigned client);
638 /**
639 * memory_region_sync_dirty_bitmap: Synchronize a region's dirty bitmap with
640 * any external TLBs (e.g. kvm)
641 *
642 * Flushes dirty information from accelerators such as kvm and vhost-net
643 * and makes it available to users of the memory API.
644 *
645 * @mr: the region being flushed.
646 */
647 void memory_region_sync_dirty_bitmap(MemoryRegion *mr);
648
649 /**
650 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
651 * client.
652 *
653 * Marks a range of pages as no longer dirty.
654 *
655 * @mr: the region being updated.
656 * @addr: the start of the subrange being cleaned.
657 * @size: the size of the subrange being cleaned.
658 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
659 * %DIRTY_MEMORY_VGA.
660 */
661 void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
662 hwaddr size, unsigned client);
663
664 /**
665 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
666 *
667 * Allows a memory region to be marked as read-only (turning it into a ROM).
668 * only useful on RAM regions.
669 *
670 * @mr: the region being updated.
671  * @readonly: whether the region is to be ROM or RAM.
672 */
673 void memory_region_set_readonly(MemoryRegion *mr, bool readonly);
674
675 /**
676 * memory_region_rom_device_set_romd: enable/disable ROMD mode
677 *
678  * Allows a ROM device (initialized with memory_region_init_rom_device()) to
679 * set to ROMD mode (default) or MMIO mode. When it is in ROMD mode, the
680 * device is mapped to guest memory and satisfies read access directly.
681 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
682 * Writes are always handled by the #MemoryRegion.write function.
683 *
684 * @mr: the memory region to be updated
685 * @romd_mode: %true to put the region into ROMD mode
686 */
687 void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);
688
689 /**
690 * memory_region_set_coalescing: Enable memory coalescing for the region.
691 *
692  * Enables writes to a region to be queued for later processing. MMIO ->write
693 * callbacks may be delayed until a non-coalesced MMIO is issued.
694 * Only useful for IO regions. Roughly similar to write-combining hardware.
695 *
696 * @mr: the memory region to be write coalesced
697 */
698 void memory_region_set_coalescing(MemoryRegion *mr);
699
700 /**
701 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
702 * a region.
703 *
704 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
705  * Multiple calls can be issued to coalesce disjoint ranges.
706 *
707 * @mr: the memory region to be updated.
708 * @offset: the start of the range within the region to be coalesced.
709 * @size: the size of the subrange to be coalesced.
710 */
711 void memory_region_add_coalescing(MemoryRegion *mr,
712 hwaddr offset,
713 uint64_t size);
714
715 /**
716 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
717 *
718 * Disables any coalescing caused by memory_region_set_coalescing() or
719  * memory_region_add_coalescing(). Roughly equivalent to uncacheable memory
720 * hardware.
721 *
722 * @mr: the memory region to be updated.
723 */
724 void memory_region_clear_coalescing(MemoryRegion *mr);
725
726 /**
727 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
728 * accesses.
729 *
730 * Ensure that pending coalesced MMIO request are flushed before the memory
731 * region is accessed. This property is automatically enabled for all regions
732 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
733 *
734 * @mr: the memory region to be updated.
735 */
736 void memory_region_set_flush_coalesced(MemoryRegion *mr);
737
738 /**
739 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
740 * accesses.
741 *
742 * Clear the automatic coalesced MMIO flushing enabled via
743 * memory_region_set_flush_coalesced. Note that this service has no effect on
744 * memory regions that have MMIO coalescing enabled for themselves. For them,
745 * automatic flushing will stop once coalescing is disabled.
746 *
747 * @mr: the memory region to be updated.
748 */
749 void memory_region_clear_flush_coalesced(MemoryRegion *mr);
750
751 /**
752 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
753 * is written to a location.
754 *
755 * Marks a word in an IO region (initialized with memory_region_init_io())
756 * as a trigger for an eventfd event. The I/O callback will not be called.
757 * The caller must be prepared to handle failure (that is, take the required
758 * action if the callback _is_ called).
759 *
760 * @mr: the memory region being updated.
761 * @addr: the address within @mr that is to be monitored
762 * @size: the size of the access to trigger the eventfd
763 * @match_data: whether to match against @data, instead of just @addr
764 * @data: the data to match against the guest write
765  * @e: the eventfd to be triggered when @addr, @size, and @data all match.
766 **/
767 void memory_region_add_eventfd(MemoryRegion *mr,
768 hwaddr addr,
769 unsigned size,
770 bool match_data,
771 uint64_t data,
772 EventNotifier *e);
773
774 /**
775 * memory_region_del_eventfd: Cancel an eventfd.
776 *
777 * Cancels an eventfd trigger requested by a previous
778 * memory_region_add_eventfd() call.
779 *
780 * @mr: the memory region being updated.
781 * @addr: the address within @mr that is to be monitored
782 * @size: the size of the access to trigger the eventfd
783 * @match_data: whether to match against @data, instead of just @addr
784 * @data: the data to match against the guest write
785  * @e: the eventfd to be triggered when @addr, @size, and @data all match.
786 */
787 void memory_region_del_eventfd(MemoryRegion *mr,
788 hwaddr addr,
789 unsigned size,
790 bool match_data,
791 uint64_t data,
792 EventNotifier *e);
793
794 /**
795 * memory_region_add_subregion: Add a subregion to a container.
796 *
797 * Adds a subregion at @offset. The subregion may not overlap with other
798 * subregions (except for those explicitly marked as overlapping). A region
799 * may only be added once as a subregion (unless removed with
800 * memory_region_del_subregion()); use memory_region_init_alias() if you
801 * want a region to be a subregion in multiple locations.
802 *
803 * @mr: the region to contain the new subregion; must be a container
804 * initialized with memory_region_init().
805 * @offset: the offset relative to @mr where @subregion is added.
806 * @subregion: the subregion to be added.
807 */
808 void memory_region_add_subregion(MemoryRegion *mr,
809 hwaddr offset,
810 MemoryRegion *subregion);
811 /**
812 * memory_region_add_subregion_overlap: Add a subregion to a container
813 * with overlap.
814 *
815 * Adds a subregion at @offset. The subregion may overlap with other
816 * subregions. Conflicts are resolved by having a higher @priority hide a
817 * lower @priority. Subregions without priority are taken as @priority 0.
818 * A region may only be added once as a subregion (unless removed with
819 * memory_region_del_subregion()); use memory_region_init_alias() if you
820 * want a region to be a subregion in multiple locations.
821 *
822 * @mr: the region to contain the new subregion; must be a container
823 * initialized with memory_region_init().
824 * @offset: the offset relative to @mr where @subregion is added.
825 * @subregion: the subregion to be added.
826 * @priority: used for resolving overlaps; highest priority wins.
827 */
828 void memory_region_add_subregion_overlap(MemoryRegion *mr,
829 hwaddr offset,
830 MemoryRegion *subregion,
831 int priority);
832
833 /**
834 * memory_region_get_ram_addr: Get the ram address associated with a memory
835 * region
836 *
837 * DO NOT USE THIS FUNCTION. This is a temporary workaround while the Xen
838 * code is being reworked.
839 */
840 ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
841
/**
 * memory_region_get_alignment: Get the alignment used when allocating the
 * region's RAM — presumably mr->align, 0 when no specific alignment was
 * requested; TODO confirm against the implementation in memory.c.
 *
 * @mr: the memory region being queried
 */
842 uint64_t memory_region_get_alignment(const MemoryRegion *mr);
843 /**
844 * memory_region_del_subregion: Remove a subregion.
845 *
846 * Removes a subregion from its container.
847 *
848 * @mr: the container to be updated.
849 * @subregion: the region being removed; must be a current subregion of @mr.
850 */
851 void memory_region_del_subregion(MemoryRegion *mr,
852 MemoryRegion *subregion);
853
854 /*
855 * memory_region_set_enabled: dynamically enable or disable a region
856 *
857 * Enables or disables a memory region. A disabled memory region
858 * ignores all accesses to itself and its subregions. It does not
859 * obscure sibling subregions with lower priority - it simply behaves as
860 * if it was removed from the hierarchy.
861 *
862 * Regions default to being enabled.
863 *
864 * @mr: the region to be updated
865 * @enabled: whether to enable or disable the region
866 */
867 void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
868
869 /*
870 * memory_region_set_address: dynamically update the address of a region
871 *
872 * Dynamically updates the address of a region, relative to its container.
873  * May be used on regions that are currently part of a memory hierarchy.
874 *
875 * @mr: the region to be updated
876 * @addr: new address, relative to container region
877 */
878 void memory_region_set_address(MemoryRegion *mr, hwaddr addr);
879
880 /*
881 * memory_region_set_alias_offset: dynamically update a memory alias's offset
882 *
883 * Dynamically updates the offset into the target region that an alias points
884 * to, as if the fourth argument to memory_region_init_alias() has changed.
885 *
886 * @mr: the #MemoryRegion to be updated; should be an alias.
887 * @offset: the new offset into the target memory region
888 */
889 void memory_region_set_alias_offset(MemoryRegion *mr,
890 hwaddr offset);
891
892 /**
893 * memory_region_present: checks if an address relative to a @container
894 * translates into #MemoryRegion within @container
895 *
896 * Answer whether a #MemoryRegion within @container covers the address
897 * @addr.
898 *
899 * @container: a #MemoryRegion within which @addr is a relative address
900 * @addr: the area within @container to be searched
901 */
902 bool memory_region_present(MemoryRegion *container, hwaddr addr);
903
904 /**
905 * memory_region_is_mapped: returns true if #MemoryRegion is mapped
906 * into any address space.
907 *
908 * @mr: a #MemoryRegion which should be checked if it's mapped
909 */
910 bool memory_region_is_mapped(MemoryRegion *mr);
911
912 /**
913 * memory_region_find: translate an address/size relative to a
914 * MemoryRegion into a #MemoryRegionSection.
915 *
916 * Locates the first #MemoryRegion within @mr that overlaps the range
917 * given by @addr and @size.
918 *
919 * Returns a #MemoryRegionSection that describes a contiguous overlap.
920 * It will have the following characteristics:
921 * .@size = 0 iff no overlap was found
922 * .@mr is non-%NULL iff an overlap was found
923 *
924 * Remember that in the return value the @offset_within_region is
925 * relative to the returned region (in the .@mr field), not to the
926 * @mr argument.
927 *
928 * Similarly, the .@offset_within_address_space is relative to the
929 * address space that contains both regions, the passed and the
930 * returned one. However, in the special case where the @mr argument
931 * has no container (and thus is the root of the address space), the
932 * following will hold:
933 * .@offset_within_address_space >= @addr
934 * .@offset_within_address_space + .@size <= @addr + @size
935 *
936 * @mr: a MemoryRegion within which @addr is a relative address
937 * @addr: start of the area within @mr to be searched
938 * @size: size of the area to be searched
939 */
940 MemoryRegionSection memory_region_find(MemoryRegion *mr,
941 hwaddr addr, uint64_t size);
942
943 /**
944 * address_space_sync_dirty_bitmap: synchronize the dirty log for all memory
945 *
946 * Synchronizes the dirty page log for an entire address space.
947 * @as: the address space that contains the memory being synchronized
948 */
949 void address_space_sync_dirty_bitmap(AddressSpace *as);
950
951 /**
952 * memory_region_transaction_begin: Start a transaction.
953 *
954 * During a transaction, changes will be accumulated and made visible
955 * only when the transaction ends (is committed).
956 */
957 void memory_region_transaction_begin(void);
958
959 /**
960 * memory_region_transaction_commit: Commit a transaction and make changes
961 * visible to the guest.
962 */
963 void memory_region_transaction_commit(void);
964
965 /**
966 * memory_listener_register: register callbacks to be called when memory
967 * sections are mapped or unmapped into an address
968 * space
969 *
970 * @listener: an object containing the callbacks to be called
971 * @filter: if non-%NULL, only regions in this address space will be observed
972 */
973 void memory_listener_register(MemoryListener *listener, AddressSpace *filter);
974
975 /**
976 * memory_listener_unregister: undo the effect of memory_listener_register()
977 *
978 * @listener: an object containing the callbacks to be removed
979 */
980 void memory_listener_unregister(MemoryListener *listener);
981
982 /**
983 * memory_global_dirty_log_start: begin dirty logging for all regions
984 */
985 void memory_global_dirty_log_start(void);
986
987 /**
988 * memory_global_dirty_log_stop: end dirty logging for all regions
989 */
990 void memory_global_dirty_log_stop(void);
991
992 void mtree_info(fprintf_function mon_printf, void *f);
993
994 /**
995 * address_space_init: initializes an address space
996 *
997 * @as: an uninitialized #AddressSpace
998 * @root: a #MemoryRegion that routes addresses for the address space
999 * @name: an address space name. The name is only used for debugging
1000 * output.
1001 */
1002 void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);
1003
1004
1005 /**
1006 * address_space_destroy: destroy an address space
1007 *
1008 * Releases all resources associated with an address space. After an address space
1009 * is destroyed, its root memory region (given by address_space_init()) may be destroyed
1010 * as well.
1011 *
1012 * @as: address space to be destroyed
1013 */
1014 void address_space_destroy(AddressSpace *as);
1015
1016 /**
1017 * address_space_rw: read from or write to an address space.
1018 *
1019 * Return true if the operation hit any unassigned memory or encountered an
1020 * IOMMU fault.
1021 *
1022 * @as: #AddressSpace to be accessed
1023 * @addr: address within that address space
1024 * @buf: buffer with the data transferred
1024 * @len: the number of bytes to read or write
1025 * @is_write: indicates the transfer direction
1026 */
1027 bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
1028 int len, bool is_write);
1029
1030 /**
1031 * address_space_write: write to address space.
1032 *
1033 * Return true if the operation hit any unassigned memory or encountered an
1034 * IOMMU fault.
1035 *
1036 * @as: #AddressSpace to be accessed
1037 * @addr: address within that address space
1038 * @buf: buffer with the data transferred
1038 * @len: the number of bytes to write
1039 */
1040 bool address_space_write(AddressSpace *as, hwaddr addr,
1041 const uint8_t *buf, int len);
1042
1043 /**
1044 * address_space_read: read from an address space.
1045 *
1046 * Return true if the operation hit any unassigned memory or encountered an
1047 * IOMMU fault.
1048 *
1049 * @as: #AddressSpace to be accessed
1050 * @addr: address within that address space
1051 * @buf: buffer with the data transferred
1051 * @len: the number of bytes to read
1052 */
1053 bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len);
1054
1055 /* address_space_translate: translate an address range into an address space
1056 * into a MemoryRegion and an address range into that section
1057 *
1058 * @as: #AddressSpace to be accessed
1059 * @addr: address within that address space
1060 * @xlat: pointer to address within the returned memory region section's
1061 * #MemoryRegion.
1062 * @len: pointer to length
1063 * @is_write: indicates the transfer direction
1064 */
1065 MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
1066 hwaddr *xlat, hwaddr *len,
1067 bool is_write);
1068
1069 /* address_space_access_valid: check for validity of accessing an address
1070 * space range
1071 *
1072 * Check whether memory is assigned to the given address space range, and
1073 * access is permitted by any IOMMU regions that are active for the address
1074 * space.
1075 *
1076 * For now, addr and len should be aligned to a page size. This limitation
1077 * will be lifted in the future.
1078 *
1079 * @as: #AddressSpace to be accessed
1080 * @addr: address within that address space
1081 * @len: length of the area to be checked
1082 * @is_write: indicates the transfer direction
1083 */
1084 bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write);
1085
1086 /* address_space_map: map a physical memory region into a host virtual address
1087 *
1088 * May map a subset of the requested range, given by and returned in @plen.
1089 * May return %NULL if resources needed to perform the mapping are exhausted.
1090 * Use only for reads OR writes - not for read-modify-write operations.
1091 * Use cpu_register_map_client() to know when retrying the map operation is
1092 * likely to succeed.
1093 *
1094 * @as: #AddressSpace to be accessed
1095 * @addr: address within that address space
1096 * @plen: pointer to length of buffer; updated on return
1097 * @is_write: indicates the transfer direction
1098 */
1099 void *address_space_map(AddressSpace *as, hwaddr addr,
1100 hwaddr *plen, bool is_write);
1101
1102 /* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
1103 *
1104 * Will also mark the memory as dirty if @is_write == %true. @access_len gives
1105 * the amount of memory that was actually read or written by the caller.
1106 *
1107 * @as: #AddressSpace used
1108 * @buffer: host pointer as returned by address_space_map()
1109 * @len: buffer length as returned by address_space_map()
1110 * @access_len: amount of data actually transferred
1111 * @is_write: indicates the transfer direction
1112 */
1113 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
1114 int is_write, hwaddr access_len);
1115
1116
1117 #endif
1118
1119 #endif