/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#ifndef CONFIG_USER_ONLY

#include "exec/cpu-common.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
#include "exec/memop.h"
#include "exec/ramlist.h"
#include "qemu/bswap.h"
#include "qemu/queue.h"
#include "qemu/int128.h"
#include "qemu/notify.h"
#include "qom/object.h"
#define RAM_ADDR_INVALID (~(ram_addr_t)0)

#define MAX_PHYS_ADDR_SPACE_BITS 62
#define MAX_PHYS_ADDR            (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)

#define TYPE_MEMORY_REGION "qemu:memory-region"
#define MEMORY_REGION(obj) \
        OBJECT_CHECK(MemoryRegion, (obj), TYPE_MEMORY_REGION)

#define TYPE_IOMMU_MEMORY_REGION "qemu:iommu-memory-region"
#define IOMMU_MEMORY_REGION(obj) \
        OBJECT_CHECK(IOMMUMemoryRegion, (obj), TYPE_IOMMU_MEMORY_REGION)
#define IOMMU_MEMORY_REGION_CLASS(klass) \
        OBJECT_CLASS_CHECK(IOMMUMemoryRegionClass, (klass), \
                           TYPE_IOMMU_MEMORY_REGION)
#define IOMMU_MEMORY_REGION_GET_CLASS(obj) \
        OBJECT_GET_CLASS(IOMMUMemoryRegionClass, (obj), \
                         TYPE_IOMMU_MEMORY_REGION)
extern bool global_dirty_log;

typedef struct MemoryRegionOps MemoryRegionOps;
typedef struct MemoryRegionMmio MemoryRegionMmio;

struct MemoryRegionMmio {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
};

typedef struct IOMMUTLBEntry IOMMUTLBEntry;
/* See address_space_translate: bit 0 is read, bit 1 is write.  */
typedef enum {
    IOMMU_NONE = 0,
    IOMMU_RO   = 1,
    IOMMU_WO   = 2,
    IOMMU_RW   = 3,
} IOMMUAccessFlags;

#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))

struct IOMMUTLBEntry {
    AddressSpace    *target_as;
    hwaddr           iova;
    hwaddr           translated_addr;
    hwaddr           addr_mask;  /* 0xfff = 4k translation */
    IOMMUAccessFlags perm;
};
/*
 * Bitmap for different IOMMUNotifier capabilities. Each notifier can
 * register with one or multiple IOMMU Notifier capability bit(s).
 */
typedef enum {
    IOMMU_NOTIFIER_NONE = 0,
    /* Notify cache invalidations */
    IOMMU_NOTIFIER_UNMAP = 0x1,
    /* Notify entry changes (newly created entries) */
    IOMMU_NOTIFIER_MAP = 0x2,
} IOMMUNotifierFlag;

#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)

struct IOMMUNotifier;
typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
                            IOMMUTLBEntry *data);

struct IOMMUNotifier {
    IOMMUNotify notify;
    IOMMUNotifierFlag notifier_flags;
    /* Notify for address space range start <= addr <= end */
    hwaddr start;
    hwaddr end;
    int iommu_idx;
    QLIST_ENTRY(IOMMUNotifier) node;
};
typedef struct IOMMUNotifier IOMMUNotifier;
/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

/* UFFDIO_ZEROPAGE is available on this RAMBlock to atomically
 * zero the page and wake waiting processes.
 * (Set during postcopy)
 */
#define RAM_UF_ZEROPAGE (1 << 3)

/* RAM can be migrated */
#define RAM_MIGRATABLE (1 << 4)

/* RAM is a persistent kind of memory */
#define RAM_PMEM (1 << 5)
static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
                                       IOMMUNotifierFlag flags,
                                       hwaddr start, hwaddr end,
                                       int iommu_idx)
{
    n->notify = fn;
    n->notifier_flags = flags;
    n->start = start;
    n->end = end;
    n->iommu_idx = iommu_idx;
}
/*
 * Memory region callbacks
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     hwaddr addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  hwaddr addr,
                  uint64_t data,
                  unsigned size);

    MemTxResult (*read_with_attrs)(void *opaque,
                                   hwaddr addr,
                                   uint64_t *data,
                                   unsigned size,
                                   MemTxAttrs attrs);
    MemTxResult (*write_with_attrs)(void *opaque,
                                    hwaddr addr,
                                    uint64_t data,
                                    unsigned size,
                                    MemTxAttrs attrs);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specify bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
        /*
         * If present, and returns #false, the transaction is not accepted
         * by the device (and results in machine dependent behaviour such
         * as a machine check exception).
         */
        bool (*accepts)(void *opaque, hwaddr addr,
                        unsigned size, bool is_write,
                        MemTxAttrs attrs);
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented.  Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;
};
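/*
 * Example (illustrative sketch, not part of this header): a minimal
 * MemoryRegionOps for a single 32-bit device register, and the
 * memory_region_init_io() call that wires it up during device realize.
 * MyDevState, mydev_ops, "mydev-mmio" and the 0x1000 size are hypothetical.
 *
 *     typedef struct MyDevState {
 *         MemoryRegion iomem;
 *         uint32_t reg;
 *     } MyDevState;
 *
 *     static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *         return s->reg;               // same value for every offset, for brevity
 *     }
 *
 *     static void mydev_write(void *opaque, hwaddr addr, uint64_t data,
 *                             unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *         s->reg = data;
 *     }
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *         .valid.min_access_size = 4,
 *         .valid.max_access_size = 4,
 *     };
 *
 *     // during realize, with "owner" being the device's Object:
 *     //   memory_region_init_io(&s->iomem, owner, &mydev_ops, s,
 *     //                         "mydev-mmio", 0x1000);
 */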
typedef struct MemoryRegionClass {
    /* private */
    ObjectClass parent_class;
} MemoryRegionClass;


enum IOMMUMemoryRegionAttr {
    IOMMU_ATTR_SPAPR_TCE_FD
};

/*
 * IOMMUMemoryRegionClass:
 *
 * All IOMMU implementations need to subclass TYPE_IOMMU_MEMORY_REGION
 * and provide an implementation of at least the @translate method here
 * to handle requests to the memory region. Other methods are optional.
 *
 * The IOMMU implementation must use the IOMMU notifier infrastructure
 * to report whenever mappings are changed, by calling
 * memory_region_notify_iommu() (or, if necessary, by calling
 * memory_region_notify_one() for each registered notifier).
 *
 * Conceptually an IOMMU provides a mapping from input address
 * to an output TLB entry. If the IOMMU is aware of memory transaction
 * attributes and the output TLB entry depends on the transaction
 * attributes, we represent this using IOMMU indexes. Each index
 * selects a particular translation table that the IOMMU has:
 *   @attrs_to_index returns the IOMMU index for a set of transaction attributes
 *   @translate takes an input address and an IOMMU index
 * and the mapping returned can only depend on the input address and the
 * IOMMU index.
 *
 * Most IOMMUs don't care about the transaction attributes and support
 * only a single IOMMU index. A more complex IOMMU might have one index
 * for secure transactions and one for non-secure transactions.
 */
typedef struct IOMMUMemoryRegionClass {
    /* private */
    MemoryRegionClass parent_class;

    /*
     * Return a TLB entry that contains a given address.
     *
     * The IOMMUAccessFlags indicated via @flag are optional and may
     * be specified as IOMMU_NONE to indicate that the caller needs
     * the full translation information for both reads and writes. If
     * the access flags are specified then the IOMMU implementation
     * may use this as an optimization, to stop doing a page table
     * walk as soon as it knows that the requested permissions are not
     * allowed. If IOMMU_NONE is passed then the IOMMU must do the
     * full page table walk and report the permissions in the returned
     * IOMMUTLBEntry. (Note that this implies that an IOMMU may not
     * return different mappings for reads and writes.)
     *
     * The returned information remains valid while the caller is
     * holding the big QEMU lock or is inside an RCU critical section;
     * if the caller wishes to cache the mapping beyond that it must
     * register an IOMMU notifier so it can invalidate its cached
     * information when the IOMMU mapping changes.
     *
     * @iommu: the IOMMUMemoryRegion
     * @hwaddr: address to be translated within the memory region
     * @flag: requested access permissions
     * @iommu_idx: IOMMU index for the translation
     */
    IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
                               IOMMUAccessFlags flag, int iommu_idx);
    /* Returns minimum supported page size in bytes.
     * If this method is not provided then the minimum is assumed to
     * be TARGET_PAGE_SIZE.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);
    /* Called when IOMMU Notifier flag changes (ie when the set of
     * events which IOMMU users are requesting notification for changes).
     * Optional method -- need not be provided if the IOMMU does not
     * need to know exactly which events must be notified.
     *
     * @iommu: the IOMMUMemoryRegion
     * @old_flags: events which previously needed to be notified
     * @new_flags: events which now need to be notified
     */
    void (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
                                IOMMUNotifierFlag old_flags,
                                IOMMUNotifierFlag new_flags);
    /* Called to handle memory_region_iommu_replay().
     *
     * The default implementation of memory_region_iommu_replay() is to
     * call the IOMMU translate method for every page in the address space
     * with flag == IOMMU_NONE and then call the notifier if translate
     * returns a valid mapping. If this method is implemented then it
     * overrides the default behaviour, and must provide the full semantics
     * of memory_region_iommu_replay(), by calling @notifier for every
     * translation present in the IOMMU.
     *
     * Optional method -- an IOMMU only needs to provide this method
     * if the default is inefficient or produces undesirable side effects.
     *
     * Note: this is not related to record-and-replay functionality.
     */
    void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);
    /* Get IOMMU misc attributes. This is an optional method that
     * can be used to allow users of the IOMMU to get implementation-specific
     * information. The IOMMU implements this method to handle calls
     * by IOMMU users to memory_region_iommu_get_attr() by filling in
     * the arbitrary data pointer for any IOMMUMemoryRegionAttr values that
     * the IOMMU supports. If the method is unimplemented then
     * memory_region_iommu_get_attr() will always return -EINVAL.
     *
     * @iommu: the IOMMUMemoryRegion
     * @attr: attribute being queried
     * @data: memory to fill in with the attribute data
     *
     * Returns 0 on success, or a negative errno; in particular
     * returns -EINVAL for unrecognized or unimplemented attribute types.
     */
    int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr,
                    void *data);

    /* Return the IOMMU index to use for a given set of transaction attributes.
     *
     * Optional method: if an IOMMU only supports a single IOMMU index then
     * the default implementation of memory_region_iommu_attrs_to_index()
     * will return 0.
     *
     * The indexes supported by an IOMMU must be contiguous, starting at 0.
     *
     * @iommu: the IOMMUMemoryRegion
     * @attrs: memory transaction attributes
     */
    int (*attrs_to_index)(IOMMUMemoryRegion *iommu, MemTxAttrs attrs);

    /* Return the number of IOMMU indexes this IOMMU supports.
     *
     * Optional method: if this method is not provided, then
     * memory_region_iommu_num_indexes() will return 1, indicating that
     * only a single IOMMU index is supported.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    int (*num_indexes)(IOMMUMemoryRegion *iommu);
} IOMMUMemoryRegionClass;
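/*
 * Example (illustrative sketch, not part of this header): the shape of a
 * trivial @translate implementation for a hypothetical identity-mapping
 * IOMMU with a 4K minimum page size, using the global address_space_memory
 * from "exec/address-spaces.h" as the target.  A real IOMMU would walk its
 * translation tables here and honour @flag and @iommu_idx.
 *
 *     static IOMMUTLBEntry my_iommu_translate(IOMMUMemoryRegion *iommu,
 *                                             hwaddr addr,
 *                                             IOMMUAccessFlags flag,
 *                                             int iommu_idx)
 *     {
 *         IOMMUTLBEntry entry = {
 *             .target_as = &address_space_memory,
 *             .iova = addr & ~(hwaddr)0xfff,
 *             .translated_addr = addr & ~(hwaddr)0xfff,
 *             .addr_mask = 0xfff,
 *             .perm = IOMMU_RW,
 *         };
 *         return entry;
 *     }
 */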
typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;

struct MemoryRegion {
    Object parent_obj;

    /* All fields are private - violators will be prosecuted */

    /* The following fields should fit in a cache line */
    bool romd_mode;
    bool ram;
    bool readonly; /* For RAM regions */
    bool nonvolatile;
    bool rom_device;
    bool flush_coalesced_mmio;
    uint8_t dirty_log_mask;
    bool is_iommu;

    const MemoryRegionOps *ops;
    MemoryRegion *container;
    MemoryRegion *alias;
    void (*destructor)(MemoryRegion *mr);
    bool warning_printed; /* For reservations */
    uint8_t vga_logging_count;
    QTAILQ_HEAD(, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(, CoalescedMemoryRange) coalesced;
    unsigned ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
};
struct IOMMUMemoryRegion {
    MemoryRegion parent_obj;

    QLIST_HEAD(, IOMMUNotifier) iommu_notify;
    IOMMUNotifierFlag iommu_notify_flags;
};

#define IOMMU_NOTIFIER_FOREACH(n, mr) \
    QLIST_FOREACH((n), &(mr)->iommu_notify, node)

/**
 * MemoryListener: callbacks structure for updates to the physical memory map
 *
 * Allows a component to adjust to changes in the guest-visible memory map.
 * Use with memory_listener_register() and memory_listener_unregister().
 */
struct MemoryListener {
    void (*begin)(MemoryListener *listener);
    void (*commit)(MemoryListener *listener);
    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
                      int old, int new);
    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
                     int old, int new);
    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_clear)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_global_start)(MemoryListener *listener);
    void (*log_global_stop)(MemoryListener *listener);
    void (*log_global_after_sync)(MemoryListener *listener);
    void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*coalesced_io_add)(MemoryListener *listener, MemoryRegionSection *section,
                             hwaddr addr, hwaddr len);
    void (*coalesced_io_del)(MemoryListener *listener, MemoryRegionSection *section,
                             hwaddr addr, hwaddr len);
    /* Lower = earlier (during add), later (during del) */
    unsigned priority;
    AddressSpace *address_space;
    QTAILQ_ENTRY(MemoryListener) link;
    QTAILQ_ENTRY(MemoryListener) link_as;
};
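/*
 * Example (illustrative sketch, not part of this header): a listener that
 * logs every section added to one address space.  my_region_add and
 * my_listener are hypothetical; qemu_log() is assumed from "qemu/log.h".
 *
 *     static void my_region_add(MemoryListener *listener,
 *                               MemoryRegionSection *section)
 *     {
 *         qemu_log("added region %s\n", memory_region_name(section->mr));
 *     }
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *         .priority = 10,
 *     };
 *
 *     // observe only the system memory address space:
 *     //   memory_listener_register(&my_listener, &address_space_memory);
 */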
/**
 * AddressSpace: describes a mapping of addresses to #MemoryRegion objects
 */
struct AddressSpace {
    /* All fields are private. */

    /* Accessed via RCU.  */
    struct FlatView *current_map;

    struct MemoryRegionIoeventfd *ioeventfds;
    QTAILQ_HEAD(, MemoryListener) listeners;
    QTAILQ_ENTRY(AddressSpace) address_spaces_link;
};

typedef struct AddressSpaceDispatch AddressSpaceDispatch;
typedef struct FlatRange FlatRange;

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    unsigned nr_allocated;
    struct AddressSpaceDispatch *dispatch;
};

static inline FlatView *address_space_to_flatview(AddressSpace *as)
{
    return atomic_rcu_read(&as->current_map);
}
/**
 * MemoryRegionSection: describes a fragment of a #MemoryRegion
 *
 * @mr: the region, or %NULL if empty
 * @fv: the flat view of the address space the region is mapped in
 * @offset_within_region: the beginning of the section, relative to @mr's start
 * @size: the size of the section; will not exceed @mr's boundaries
 * @offset_within_address_space: the address of the first byte of the section
 *     relative to the region's address space
 * @readonly: writes to this section are ignored
 * @nonvolatile: this section is non-volatile
 */
struct MemoryRegionSection {
    Int128 size;
    MemoryRegion *mr;
    FlatView *fv;
    hwaddr offset_within_region;
    hwaddr offset_within_address_space;
    bool readonly;
    bool nonvolatile;
};
/**
 * memory_region_init: Initialize a memory region
 *
 * The region typically acts as a container for other memory regions.  Use
 * memory_region_add_subregion() to add subregions.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region; any subregions beyond this size will be clipped
 */
void memory_region_init(MemoryRegion *mr,
                        struct Object *owner,
                        const char *name,
                        uint64_t size);
/**
 * memory_region_ref: Add 1 to a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function adds a reference to the owner.
 *
 * All MemoryRegions must have an owner if they can disappear, even if the
 * device they belong to operates exclusively under the BQL.  This is because
 * the region could be returned at any time by memory_region_find, and this
 * is usually under guest control.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_ref(MemoryRegion *mr);

/**
 * memory_region_unref: Remove 1 from a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function removes a reference to the owner and possibly destroys it.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_unref(MemoryRegion *mr);
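/*
 * Example (illustrative sketch, not part of this header): a component that
 * caches a pointer to somebody else's region and accesses it outside the
 * BQL pins the owner for as long as it holds the pointer.  "mr" here is a
 * region pointer obtained elsewhere.
 *
 *     memory_region_ref(mr);      // pin the owner while we keep the pointer
 *     // ... later, possibly outside the BQL, use mr ...
 *     memory_region_unref(mr);    // drop the pin when done
 */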
/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(MemoryRegion *mr,
                           struct Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size);

/**
 * memory_region_init_ram_nomigrate:  Initialize RAM memory region.  Accesses
 *                                    into the region will modify memory
 *                                    directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);
/**
 * memory_region_init_ram_shared_nomigrate:  Initialize RAM memory region.
 *                                           Accesses into the region will
 *                                           modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @share: allow remapping RAM to different addresses
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function is similar to memory_region_init_ram_nomigrate.
 * The only difference is part of the RAM region can be remapped.
 */
void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
                                             struct Object *owner,
                                             const char *name,
                                             uint64_t size,
                                             bool share,
                                             Error **errp);

/**
 * memory_region_init_resizeable_ram:  Initialize memory region with resizeable
 *                                     RAM.  Accesses into the region will
 *                                     modify memory directly.  Only an initial
 *                                     portion of this RAM is actually used.
 *                                     The used size can change across reboots.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: used size of the region.
 * @max_size: max size of the region.
 * @resized: callback to notify owner about used size change.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       struct Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp);
/**
 * memory_region_init_ram_from_file:  Initialize RAM memory region with a
 *                                    mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @align: alignment of the region base address; if 0, the default alignment
 *         (getpagesize()) will be used.
 * @ram_flags: Memory region features:
 *             - RAM_SHARED: memory must be mmap-ed with the MAP_SHARED flag
 *             - RAM_PMEM: the memory is persistent memory
 *             Other bits are ignored now.
 * @path: the path in which to allocate the RAM.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      uint64_t align,
                                      uint32_t ram_flags,
                                      const char *path,
                                      Error **errp);

/**
 * memory_region_init_ram_from_fd:  Initialize RAM memory region with a
 *                                  mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @share: %true if memory must be mmap-ed with the MAP_SHARED flag
 * @fd: the fd to mmap.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    struct Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    bool share,
                                    int fd,
                                    Error **errp);
/**
 * memory_region_init_ram_ptr:  Initialize RAM memory region from a
 *                              user-provided pointer.  Accesses into the
 *                              region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                struct Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr);

/**
 * memory_region_init_ram_device_ptr:  Initialize RAM device memory region from
 *                                     a user-provided pointer.
 *
 * A RAM device represents a mapping to a physical device, such as to a PCI
 * MMIO BAR of a vfio-pci assigned device.  The memory region may be mapped
 * into the VM address space and access to the region will modify memory
 * directly.  However, the memory region should not be included in a memory
 * dump (device may not be enabled/mapped at the time of the dump), and
 * operations incompatible with manipulating MMIO should be avoided.  Replaces
 * skip_dump flag.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 * (For RAM device memory regions, migrating the contents rarely makes sense.)
 */
void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       struct Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr);
/**
 * memory_region_init_alias: Initialize a memory region that aliases all or a
 *                           part of another memory region.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @orig: the region to be referenced; @mr will be equivalent to
 *        @orig between @offset and @offset + @size - 1.
 * @offset: start of the section in @orig to be referenced.
 * @size: size of the region.
 */
void memory_region_init_alias(MemoryRegion *mr,
                              struct Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size);

/**
 * memory_region_init_rom_nomigrate: Initialize a ROM memory region.
 *
 * This has the same effect as calling memory_region_init_ram_nomigrate()
 * and then marking the resulting region read-only with
 * memory_region_set_readonly().
 *
 * Note that this function does not do anything to cause the data in the
 * RAM side of the memory region to be migrated; that is the responsibility
 * of the caller.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);
/**
 * memory_region_init_rom_device_nomigrate:  Initialize a ROM memory region.
 *                                           Writes are handled via callbacks.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM side of the memory region to be migrated; that is the responsibility
 * of the caller.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             struct Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp);

/**
 * memory_region_init_iommu: Initialize a memory region of a custom type
 *                           that translates addresses
 *
 * An IOMMU region translates addresses and forwards accesses to a target
 * memory region.
 *
 * The IOMMU implementation must define a subclass of TYPE_IOMMU_MEMORY_REGION.
 * @_iommu_mr should be a pointer to enough memory for an instance of
 * that subclass, @instance_size is the size of that subclass, and
 * @mrtypename is its name. This function will initialize @_iommu_mr as an
 * instance of the subclass, and its methods will then be called to handle
 * accesses to the memory region. See the documentation of
 * #IOMMUMemoryRegionClass for further details.
 *
 * @_iommu_mr: the #IOMMUMemoryRegion to be initialized
 * @instance_size: the IOMMUMemoryRegion subclass instance size
 * @mrtypename: the type name of the #IOMMUMemoryRegion
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size);
/**
 * memory_region_init_ram - Initialize RAM memory region.  Accesses into the
 *                          region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count (must be
 *         TYPE_DEVICE or a subclass of TYPE_DEVICE, or NULL)
 * @name: name of the memory region
 * @size: size of the region in bytes
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * This function allocates RAM for a board model or device, and
 * arranges for it to be migrated (by calling vmstate_register_ram()
 * if @owner is a DeviceState, or vmstate_register_ram_global() if
 * @owner is NULL).
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 */
void memory_region_init_ram(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);
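/*
 * Example (illustrative sketch, not part of this header): a board model
 * allocating 128MB of migratable RAM and mapping it at guest address 0.
 * "board.ram" is a hypothetical name; MiB, get_system_memory() and
 * error_fatal are assumed from "qemu/units.h", the memory API and
 * "qapi/error.h" respectively.
 *
 *     MemoryRegion *ram = g_new(MemoryRegion, 1);
 *
 *     memory_region_init_ram(ram, NULL, "board.ram", 128 * MiB, &error_fatal);
 *     memory_region_add_subregion(get_system_memory(), 0, ram);
 */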
/**
 * memory_region_init_rom: Initialize a ROM memory region.
 *
 * This has the same effect as calling memory_region_init_ram()
 * and then marking the resulting region read-only with
 * memory_region_set_readonly(). This includes arranging for the
 * contents to be migrated.
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);

/**
 * memory_region_init_rom_device:  Initialize a ROM memory region.
 *                                 Writes are handled via callbacks.
 *
 * This function initializes a memory region backed by RAM for reads
 * and callbacks for writes, and arranges for the RAM backing to
 * be migrated (by calling vmstate_register_ram()
 * if @owner is a DeviceState, or vmstate_register_ram_global() if
 * @owner is NULL).
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @name: Region name, becomes part of RAMBlock name used in migration stream
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_device(MemoryRegion *mr,
                                   struct Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp);
/**
 * memory_region_owner: get a memory region's owner.
 *
 * @mr: the memory region being queried.
 */
struct Object *memory_region_owner(MemoryRegion *mr);

/**
 * memory_region_size: get a memory region's size.
 *
 * @mr: the memory region being queried.
 */
uint64_t memory_region_size(MemoryRegion *mr);

/**
 * memory_region_is_ram: check whether a memory region is random access
 *
 * Returns %true if a memory region is random access.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_ram(MemoryRegion *mr)
{
    return mr->ram;
}
/**
 * memory_region_is_ram_device: check whether a memory region is a ram device
 *
 * Returns %true if a memory region is a device-backed RAM region
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_ram_device(MemoryRegion *mr);

/**
 * memory_region_is_romd: check whether a memory region is in ROMD mode
 *
 * Returns %true if a memory region is a ROM device and currently set to allow
 * direct reads.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_romd(MemoryRegion *mr)
{
    return mr->rom_device && mr->romd_mode;
}
/**
 * memory_region_get_iommu: check whether a memory region is an iommu
 *
 * Returns pointer to IOMMUMemoryRegion if a memory region is an iommu,
 * otherwise NULL.
 *
 * @mr: the memory region being queried
 */
static inline IOMMUMemoryRegion *memory_region_get_iommu(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_iommu(mr->alias);
    }
    if (mr->is_iommu) {
        return (IOMMUMemoryRegion *) mr;
    }
    return NULL;
}

/**
 * memory_region_get_iommu_class_nocheck: returns iommu memory region class
 *   if an iommu or NULL if not
 *
 * Returns pointer to IOMMUMemoryRegionClass if a memory region is an iommu,
 * otherwise NULL. This is fast path avoiding QOM checking, use with caution.
 *
 * @mr: the memory region being queried
 */
static inline IOMMUMemoryRegionClass *memory_region_get_iommu_class_nocheck(
    IOMMUMemoryRegion *iommu_mr)
{
    return (IOMMUMemoryRegionClass *) (((Object *)iommu_mr)->class);
}

#define memory_region_is_iommu(mr) (memory_region_get_iommu(mr) != NULL)
/**
 * memory_region_iommu_get_min_page_size: get minimum supported page size
 *                                        for an iommu
 *
 * Returns minimum supported page size for an iommu.
 *
 * @iommu_mr: the memory region being queried
 */
uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr);
/**
 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
 *
 * The notification type will be decided by entry.perm bits:
 *
 * - For UNMAP (cache invalidation) notifies: set entry.perm to IOMMU_NONE.
 * - For MAP (newly added entry) notifies: set entry.perm to the
 *   permission of the page (which is definitely !IOMMU_NONE).
 *
 * Note: for any IOMMU implementation, an in-place mapping change
 * should be notified with an UNMAP followed by a MAP.
 *
 * @iommu_mr: the memory region that was changed
 * @iommu_idx: the IOMMU index for the translation table which has changed
 * @entry: the new entry in the IOMMU translation table.  The entry
 *         replaces all old entries for the same virtual I/O address range.
 *         Deleted entries have .@perm == 0.
 */
void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
                                int iommu_idx,
                                IOMMUTLBEntry entry);
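/*
 * Example (illustrative sketch, not part of this header): an IOMMU model
 * reporting an in-place change of a 4K mapping as an UNMAP notification
 * followed by a MAP notification, on IOMMU index 0.  iommu_mr, iova and
 * new_pa are hypothetical; address_space_memory is assumed from
 * "exec/address-spaces.h".
 *
 *     IOMMUTLBEntry entry = {
 *         .target_as = &address_space_memory,
 *         .iova = iova,
 *         .translated_addr = 0,
 *         .addr_mask = 0xfff,
 *         .perm = IOMMU_NONE,            // UNMAP: invalidate the old entry
 *     };
 *     memory_region_notify_iommu(iommu_mr, 0, entry);
 *
 *     entry.translated_addr = new_pa;
 *     entry.perm = IOMMU_RW;             // MAP: publish the new entry
 *     memory_region_notify_iommu(iommu_mr, 0, entry);
 */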
/**
 * memory_region_notify_one: notify a change in an IOMMU translation
 *                           entry to a single notifier
 *
 * This works just like memory_region_notify_iommu(), but it only
 * notifies a specific notifier, not all of them.
 *
 * @notifier: the notifier to be notified
 * @entry: the new entry in the IOMMU translation table.  The entry
 *         replaces all old entries for the same virtual I/O address range.
 *         Deleted entries have .@perm == 0.
 */
void memory_region_notify_one(IOMMUNotifier *notifier,
                              IOMMUTLBEntry *entry);

/**
 * memory_region_register_iommu_notifier: register a notifier for changes to
 * IOMMU translation entries.
 *
 * @mr: the memory region to observe
 * @n: the IOMMUNotifier to be added; the notify callback receives a
 *     pointer to an #IOMMUTLBEntry as the opaque value; the pointer
 *     ceases to be valid on exit from the notifier.
 */
void memory_region_register_iommu_notifier(MemoryRegion *mr,
                                           IOMMUNotifier *n);
/**
 * memory_region_iommu_replay: replay existing IOMMU translations to
 * a notifier with the minimum page granularity returned by
 * mr->iommu_ops->get_min_page_size().
 *
 * Note: this is not related to record-and-replay functionality.
 *
 * @iommu_mr: the memory region to observe
 * @n: the notifier to which to replay iommu mappings
 */
void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n);
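/*
 * Example (illustrative sketch, not part of this header): a backend that
 * wants MAP and UNMAP events for the whole 64-bit input range of an IOMMU
 * region, plus an initial dump of the current mappings.  my_iommu_notify
 * and my_notifier are hypothetical; HWADDR_MAX comes from "exec/hwaddr.h".
 *
 *     static IOMMUNotifier my_notifier;
 *
 *     iommu_notifier_init(&my_notifier, my_iommu_notify,
 *                         IOMMU_NOTIFIER_ALL, 0, HWADDR_MAX, 0);
 *     memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr),
 *                                           &my_notifier);
 *     memory_region_iommu_replay(iommu_mr, &my_notifier);
 */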
/**
 * memory_region_unregister_iommu_notifier: unregister a notifier for
 * changes to IOMMU translation entries.
 *
 * @mr: the memory region which was observed and for which notify_stopped()
 *      needs to be called
 * @n: the notifier to be removed.
 */
void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n);

/**
 * memory_region_iommu_get_attr: return an IOMMU attr if get_attr() is
 * defined on the IOMMU.
 *
 * Returns 0 on success, or a negative errno otherwise. In particular,
 * -EINVAL indicates that the IOMMU does not support the requested
 * attribute.
 *
 * @iommu_mr: the memory region
 * @attr: the requested attribute
 * @data: a pointer to the requested attribute data
 */
int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
                                 enum IOMMUMemoryRegionAttr attr,
                                 void *data);

/**
 * memory_region_iommu_attrs_to_index: return the IOMMU index to
 * use for translations with the given memory transaction attributes.
 *
 * @iommu_mr: the memory region
 * @attrs: the memory transaction attributes
 */
int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
                                       MemTxAttrs attrs);
/**
 * memory_region_iommu_num_indexes: return the total number of IOMMU
 * indexes that this IOMMU supports.
 *
 * @iommu_mr: the memory region
 */
int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr);

/**
 * memory_region_name: get a memory region's name
 *
 * Returns the string that was used to initialize the memory region.
 *
 * @mr: the memory region being queried
 */
const char *memory_region_name(const MemoryRegion *mr);

/**
 * memory_region_is_logging: return whether a memory region is logging writes
 *
 * Returns %true if the memory region is logging writes for the given client
 *
 * @mr: the memory region being queried
 * @client: the client being queried
 */
bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);

/**
 * memory_region_get_dirty_log_mask: return the clients for which a
 * memory region is logging writes.
 *
 * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
 * are the bit indices.
 *
 * @mr: the memory region being queried
 */
uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);
/**
 * memory_region_is_rom: check whether a memory region is ROM
 *
 * Returns %true if a memory region is read-only memory.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_rom(MemoryRegion *mr)
{
    return mr->ram && mr->readonly;
}

/**
 * memory_region_is_nonvolatile: check whether a memory region is non-volatile
 *
 * Returns %true if a memory region is non-volatile memory.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_nonvolatile(MemoryRegion *mr)
{
    return mr->nonvolatile;
}

/**
 * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
 *
 * Returns a file descriptor backing a file-based RAM memory region,
 * or -1 if the region is not a file-based RAM memory region.
 *
 * @mr: the RAM or alias memory region being queried.
 */
int memory_region_get_fd(MemoryRegion *mr);
/**
 * memory_region_from_host: Convert a pointer into a RAM memory region
 * and an offset within it.
 *
 * Given a host pointer inside a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()), return
 * the MemoryRegion and the offset within it.
 *
 * Use with care; by the time this function returns, the returned pointer is
 * not protected by RCU anymore.  If the caller is not within an RCU critical
 * section and does not hold the iothread lock, it must have other means of
 * protecting the pointer, such as a reference to the region that includes
 * the incoming ram_addr_t.
 *
 * @ptr: the host pointer to be converted
 * @offset: the offset within memory region
 */
MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);

/**
 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
 *
 * Returns a host pointer to a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()).
 *
 * Use with care; by the time this function returns, the returned pointer is
 * not protected by RCU anymore.  If the caller is not within an RCU critical
 * section and does not hold the iothread lock, it must have other means of
 * protecting the pointer, such as a reference to the region that includes
 * the incoming ram_addr_t.
 *
 * @mr: the memory region being queried.
 */
void *memory_region_get_ram_ptr(MemoryRegion *mr);
/* memory_region_ram_resize: Resize a RAM region.
 *
 * Only legal before guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * @mr: a memory region created with @memory_region_init_resizeable_ram.
 * @newsize: the new size of the region
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
                              Error **errp);

/**
 * memory_region_set_log: Turn dirty logging on or off for a region.
 *
 * Turns dirty logging on or off for a specified client (display, migration).
 * Only meaningful for RAM regions.
 *
 * @mr: the memory region being updated.
 * @log: whether dirty logging is to be enabled or disabled.
 * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
 */
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);

/**
 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
 *
 * Marks a range of bytes as dirty, after it has been dirtied outside
 * guest code.
 *
 * @mr: the memory region being dirtied.
 * @addr: the address (relative to the start of the region) being dirtied.
 * @size: size of the range being dirtied.
 */
void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size);
/**
 * memory_region_clear_dirty_bitmap - clear dirty bitmap for memory range
 *
 * This function is called when the caller wants to clear the remote
 * dirty bitmap of a memory range within the memory region.  This can
 * be used by e.g. KVM to manually clear dirty log when
 * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT is declared as supported by the
 * host kernel.
 *
 * @mr:     the memory region to clear the dirty log upon
 * @start:  start address offset within the memory region
 * @len:    length of the memory region to clear dirty bitmap
 */
void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
                                      hwaddr len);

/**
 * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
 *                                         bitmap and clear it.
 *
 * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
 * returns the snapshot.  The snapshot can then be used to query dirty
 * status, using memory_region_snapshot_get_dirty.  Snapshotting allows
 * querying the same page multiple times, which is especially useful for
 * display updates where the scanlines often are not page aligned.
 *
 * The dirty bitmap region which gets copied into the snapshot (and
 * cleared afterwards) can be larger than requested.  The boundaries
 * are rounded up/down so complete bitmap longs (covering 64 pages on
 * 64bit hosts) can be copied over into the bitmap snapshot.  Which
 * isn't a problem for display updates as the extra pages are outside
 * the visible area, and in case the visible area changes a full
 * display redraw is due anyway.  Should other use cases for this
 * function emerge we might have to revisit this implementation
 * detail.
 *
 * Use g_free to release DirtyBitmapSnapshot.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA.
 */
DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            hwaddr size,
                                                            unsigned client);
/**
 * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty
 *                                   in the specified dirty bitmap snapshot.
 *
 * @mr: the memory region being queried.
 * @snap: the dirty bitmap snapshot
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 */
bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
                                      DirtyBitmapSnapshot *snap,
                                      hwaddr addr, hwaddr size);
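/*
 * Example (illustrative sketch, not part of this header): a display update
 * loop using a dirty bitmap snapshot.  vram_mr, fb_size, page_size and
 * redraw_page() are hypothetical.
 *
 *     DirtyBitmapSnapshot *snap;
 *     hwaddr off;
 *
 *     snap = memory_region_snapshot_and_clear_dirty(vram_mr, 0, fb_size,
 *                                                   DIRTY_MEMORY_VGA);
 *     for (off = 0; off < fb_size; off += page_size) {
 *         if (memory_region_snapshot_get_dirty(vram_mr, snap, off, page_size)) {
 *             redraw_page(off);
 *         }
 *     }
 *     g_free(snap);
 */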
/**
 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
 *                            client.
 *
 * Marks a range of pages as no longer dirty.
 *
 * @mr: the region being updated.
 * @addr: the start of the subrange being cleaned.
 * @size: the size of the subrange being cleaned.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client);

/**
 * memory_region_flush_rom_device: Mark a range of pages dirty and invalidate
 *                                 TBs (for self-modifying code).
 *
 * The MemoryRegionOps->write() callback of a ROM device must use this function
 * to mark byte ranges that have been modified internally, such as by directly
 * accessing the memory returned by memory_region_get_ram_ptr().
 *
 * This function marks the range dirty and invalidates TBs so that TCG can
 * detect self-modifying code.
 *
 * @mr: the region being flushed.
 * @addr: the start, relative to the start of the region, of the range being
 *        flushed.
 * @size: the size, in bytes, of the range being flushed.
 */
void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size);
/**
 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
 *
 * Allows a memory region to be marked as read-only (turning it into a ROM).
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @readonly: whether the region is to be ROM or RAM.
 */
void memory_region_set_readonly(MemoryRegion *mr, bool readonly);

/**
 * memory_region_set_nonvolatile: Turn a memory region non-volatile
 *
 * Allows a memory region to be marked as non-volatile.
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @nonvolatile: whether the region is to be non-volatile.
 */
void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile);

/**
 * memory_region_rom_device_set_romd: enable/disable ROMD mode
 *
 * Allows a ROM device (initialized with memory_region_init_rom_device()) to
 * be set to ROMD mode (default) or MMIO mode.  When it is in ROMD mode, the
 * device is mapped to guest memory and satisfies read access directly.
 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
 * Writes are always handled by the #MemoryRegion.write function.
 *
 * @mr: the memory region to be updated
 * @romd_mode: %true to put the region into ROMD mode
 */
void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);
/**
 * memory_region_set_coalescing: Enable memory coalescing for the region.
 *
 * Enables writes to a region to be queued for later processing.  MMIO ->write
 * callbacks may be delayed until a non-coalesced MMIO is issued.
 * Only useful for IO regions.  Roughly similar to write-combining hardware.
 *
 * @mr: the memory region to be write coalesced
 */
void memory_region_set_coalescing(MemoryRegion *mr);

/**
 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
 *                               a region.
 *
 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
 * Multiple calls can be issued to coalesce disjoint ranges.
 *
 * @mr: the memory region to be updated.
 * @offset: the start of the range within the region to be coalesced.
 * @size: the size of the subrange to be coalesced.
 */
void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size);

/**
 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
 *
 * Disables any coalescing caused by memory_region_set_coalescing() or
 * memory_region_add_coalescing().  Roughly equivalent to uncacheable memory
 * types.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_coalescing(MemoryRegion *mr);

/**
 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
 *                                    accesses.
 *
 * Ensure that pending coalesced MMIO requests are flushed before the memory
 * region is accessed. This property is automatically enabled for all regions
 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
 *
 * @mr: the memory region to be updated.
 */
void memory_region_set_flush_coalesced(MemoryRegion *mr);
/**
 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
 *                                      accesses.
 *
 * Clear the automatic coalesced MMIO flushing enabled via
 * memory_region_set_flush_coalesced. Note that this service has no effect on
 * memory regions that have MMIO coalescing enabled for themselves. For them,
 * automatic flushing will stop once coalescing is disabled.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_flush_coalesced(MemoryRegion *mr);

/**
 * memory_region_clear_global_locking: Declares that access processing does
 *                                     not depend on the QEMU global lock.
 *
 * By clearing this property, accesses to the memory region will be processed
 * outside of QEMU's global lock (unless the lock is held when issuing the
 * access request). In this case, the device model implementing the access
 * handlers is responsible for synchronization of concurrency.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_global_locking(MemoryRegion *mr);
/**
 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
 *                            is written to a location.
 *
 * Marks a word in an IO region (initialized with memory_region_init_io())
 * as a trigger for an eventfd event.  The I/O callback will not be called.
 * The caller must be prepared to handle failure (that is, take the required
 * action if the callback _is_ called).
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: event notifier to be triggered when @addr, @size, and @data all match.
 */
void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);
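/*
 * Example (illustrative sketch, not part of this header): wiring a doorbell
 * register at offset 0x40 of a device's MMIO region to an EventNotifier, so
 * that a guest write of the value 1 kicks an iothread instead of taking the
 * MMIO ->write callback.  s->iomem and s->doorbell are hypothetical;
 * event_notifier_init() is assumed from "qemu/event_notifier.h".
 *
 *     event_notifier_init(&s->doorbell, 0);
 *     memory_region_add_eventfd(&s->iomem, 0x40, 4,
 *                               true,          // only trigger on matching data
 *                               1,             // the value the guest must write
 *                               &s->doorbell);
 */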
/**
 * memory_region_del_eventfd: Cancel an eventfd.
 *
 * Cancels an eventfd trigger requested by a previous
 * memory_region_add_eventfd() call.
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: event notifier to be triggered when @addr, @size, and @data all match.
 */
void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);

/**
 * memory_region_add_subregion: Add a subregion to a container.
 *
 * Adds a subregion at @offset.  The subregion may not overlap with other
 * subregions (except for those explicitly marked as overlapping).  A region
 * may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 */
void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion);
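/*
 * Example (illustrative sketch, not part of this header): building a small
 * hierarchy: a 4K container holding a device's MMIO region at offset 0 and
 * a higher-priority overlay at the same offset.  owner, s->iomem,
 * s->msix_table and "bar0" are hypothetical.
 *
 *     MemoryRegion container;
 *
 *     memory_region_init(&container, owner, "bar0", 0x1000);
 *     memory_region_add_subregion(&container, 0, &s->iomem);
 *     // the MSI-X table overlays the low part of the BAR and wins on conflict:
 *     memory_region_add_subregion_overlap(&container, 0, &s->msix_table, 1);
 */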
/**
 * memory_region_add_subregion_overlap: Add a subregion to a container
 *                                      with overlap.
 *
 * Adds a subregion at @offset.  The subregion may overlap with other
 * subregions.  Conflicts are resolved by having a higher @priority hide a
 * lower @priority. Subregions without priority are taken as @priority 0.
 * A region may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 * @priority: used for resolving overlaps; highest priority wins.
 */
void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority);

/**
 * memory_region_get_ram_addr: Get the ram address associated with a memory
 *                             region
 */
ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);

uint64_t memory_region_get_alignment(const MemoryRegion *mr);

/**
 * memory_region_del_subregion: Remove a subregion.
 *
 * Removes a subregion from its container.
 *
 * @mr: the container to be updated.
 * @subregion: the region being removed; must be a current subregion of @mr.
 */
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion);
/**
 * memory_region_set_enabled: dynamically enable or disable a region
 *
 * Enables or disables a memory region.  A disabled memory region
 * ignores all accesses to itself and its subregions.  It does not
 * obscure sibling subregions with lower priority - it simply behaves as
 * if it was removed from the hierarchy.
 *
 * Regions default to being enabled.
 *
 * @mr: the region to be updated
 * @enabled: whether to enable or disable the region
 */
void memory_region_set_enabled(MemoryRegion *mr, bool enabled);

/**
 * memory_region_set_address: dynamically update the address of a region
 *
 * Dynamically updates the address of a region, relative to its container.
 * May be used on regions that are currently part of a memory hierarchy.
 *
 * @mr: the region to be updated
 * @addr: new address, relative to container region
 */
void memory_region_set_address(MemoryRegion *mr, hwaddr addr);

/**
 * memory_region_set_size: dynamically update the size of a region.
 *
 * Dynamically updates the size of a region.
 *
 * @mr: the region to be updated
 * @size: used size of the region.
 */
void memory_region_set_size(MemoryRegion *mr, uint64_t size);

/**
 * memory_region_set_alias_offset: dynamically update a memory alias's offset
 *
 * Dynamically updates the offset into the target region that an alias points
 * to, as if the fourth argument to memory_region_init_alias() had changed.
 *
 * @mr: the #MemoryRegion to be updated; should be an alias.
 * @offset: the new offset into the target memory region
 */
void memory_region_set_alias_offset(MemoryRegion *mr,
                                    hwaddr offset);
/**
 * memory_region_present: checks if an address relative to a @container
 * translates into #MemoryRegion within @container
 *
 * Answer whether a #MemoryRegion within @container covers the address
 * @addr.
 *
 * @container: a #MemoryRegion within which @addr is a relative address
 * @addr: the area within @container to be searched
 */
bool memory_region_present(MemoryRegion *container, hwaddr addr);

/**
 * memory_region_is_mapped: returns true if #MemoryRegion is mapped
 * into any address space.
 *
 * @mr: a #MemoryRegion which should be checked if it's mapped
 */
bool memory_region_is_mapped(MemoryRegion *mr);

/**
 * memory_region_find: translate an address/size relative to a
 * MemoryRegion into a #MemoryRegionSection.
 *
 * Locates the first #MemoryRegion within @mr that overlaps the range
 * given by @addr and @size.
 *
 * Returns a #MemoryRegionSection that describes a contiguous overlap.
 * It will have the following characteristics:
 * - .@size = 0 iff no overlap was found
 * - .@mr is non-%NULL iff an overlap was found
 *
 * Remember that in the return value the @offset_within_region is
 * relative to the returned region (in the .@mr field), not to the
 * passed @mr.
 *
 * Similarly, the .@offset_within_address_space is relative to the
 * address space that contains both regions, the passed and the
 * returned one.  However, in the special case where the @mr argument
 * has no container (and thus is the root of the address space), the
 * following will hold:
 * - .@offset_within_address_space >= @addr
 * - .@offset_within_address_space + .@size <= @addr + @size
 *
 * @mr: a MemoryRegion within which @addr is a relative address
 * @addr: start of the area within @mr to be searched
 * @size: size of the area to be searched
 */
MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size);
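/*
 * Example (illustrative sketch, not part of this header): looking up which
 * region backs a 4-byte range of the system memory space.
 * memory_region_find() takes a reference on the returned region; release it
 * with memory_region_unref() when done.  get_system_memory() and addr are
 * assumed to be available.
 *
 *     MemoryRegionSection section;
 *
 *     section = memory_region_find(get_system_memory(), addr, 4);
 *     if (section.mr) {
 *         if (memory_region_is_ram(section.mr)) {
 *             // addr maps to RAM at section.offset_within_region in section.mr
 *         }
 *         memory_region_unref(section.mr);
 *     }
 */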
/**
 * memory_global_dirty_log_sync: synchronize the dirty log for all memory
 *
 * Synchronizes the dirty page log for all address spaces.
 */
void memory_global_dirty_log_sync(void);

/**
 * memory_global_after_dirty_log_sync: synchronize the dirty log for all memory
 *
 * Synchronizes the vCPUs with a thread that is reading the dirty bitmap.
 * This function must be called after the dirty log bitmap is cleared, and
 * before dirty guest memory pages are read.  If you are using
 * #DirtyBitmapSnapshot, memory_region_snapshot_and_clear_dirty() takes
 * care of doing this.
 */
void memory_global_after_dirty_log_sync(void);

/**
 * memory_region_transaction_begin: Start a transaction.
 *
 * During a transaction, changes will be accumulated and made visible
 * only when the transaction ends (is committed).
 */
void memory_region_transaction_begin(void);

/**
 * memory_region_transaction_commit: Commit a transaction and make changes
 *                                   visible to the guest.
 */
void memory_region_transaction_commit(void);
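/*
 * Example (illustrative sketch, not part of this header): batching several
 * layout changes so that listeners (and the guest) see a single atomic
 * update instead of three intermediate states.  bar_mr and new_base are
 * hypothetical.
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(bar_mr, false);
 *     memory_region_set_address(bar_mr, new_base);
 *     memory_region_set_enabled(bar_mr, true);
 *     memory_region_transaction_commit();
 */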
/**
 * memory_listener_register: register callbacks to be called when memory
 * sections are mapped or unmapped into an address
 * space
 *
 * @listener: an object containing the callbacks to be called
 * @filter: if non-%NULL, only regions in this address space will be observed
 */
void memory_listener_register(MemoryListener *listener, AddressSpace *filter);
/**
 * memory_listener_unregister: undo the effect of memory_listener_register()
 *
 * @listener: an object containing the callbacks to be removed
 */
void memory_listener_unregister(MemoryListener *listener);
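/*
 * Example (sketch; the listener and callback names are illustrative): observe
 * sections that appear in the system address space.  Only the callbacks a
 * listener cares about need to be filled in.
 *
 *     static void my_region_add(MemoryListener *listener,
 *                               MemoryRegionSection *section)
 *     {
 *         // called for each section present in the flat view
 *     }
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *         .priority = 10,
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 *     ...
 *     memory_listener_unregister(&my_listener);
 */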
/**
 * memory_global_dirty_log_start: begin dirty logging for all regions
 */
void memory_global_dirty_log_start(void);

/**
 * memory_global_dirty_log_stop: end dirty logging for all regions
 */
void memory_global_dirty_log_stop(void);
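/*
 * Example (sketch of the global dirty-logging sequence a migration-like
 * client might use; error handling and bitmap handling omitted):
 *
 *     memory_global_dirty_log_start();
 *     ...                                   // let the guest run
 *     memory_global_dirty_log_sync();       // pull dirty bits from the log
 *     memory_global_after_dirty_log_sync(); // sync vCPUs before reading pages
 *     ...                                   // walk the dirty bitmap, copy pages
 *     memory_global_dirty_log_stop();
 */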
void mtree_info(bool flatview, bool dispatch_tree, bool owner);
/**
 * memory_region_dispatch_read: perform a read directly to the specified
 * MemoryRegion.
 *
 * @mr: #MemoryRegion to access
 * @addr: address within that region
 * @pval: pointer to uint64_t which the data is written to
 * @op: size, sign, and endianness of the memory operation
 * @attrs: memory transaction attributes to use for the access
 */
MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        MemOp op,
                                        MemTxAttrs attrs);
/**
 * memory_region_dispatch_write: perform a write directly to the specified
 * MemoryRegion.
 *
 * @mr: #MemoryRegion to access
 * @addr: address within that region
 * @data: data to write
 * @op: size, sign, and endianness of the memory operation
 * @attrs: memory transaction attributes to use for the access
 */
MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         MemOp op,
                                         MemTxAttrs attrs);
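/*
 * Example (sketch; "mr" stands in for an already looked-up region): issue a
 * 32-bit little-endian read and write directly against a region, bypassing
 * address space translation.
 *
 *     uint64_t val;
 *     MemTxResult r;
 *
 *     r = memory_region_dispatch_read(mr, 0x10, &val,
 *                                     size_memop(4) | MO_LE,
 *                                     MEMTXATTRS_UNSPECIFIED);
 *     if (r == MEMTX_OK) {
 *         memory_region_dispatch_write(mr, 0x10, val | 1,
 *                                      size_memop(4) | MO_LE,
 *                                      MEMTXATTRS_UNSPECIFIED);
 *     }
 */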
/**
 * address_space_init: initializes an address space
 *
 * @as: an uninitialized #AddressSpace
 * @root: a #MemoryRegion that routes addresses for the address space
 * @name: an address space name.  The name is only used for debugging
 *        output.
 */
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);
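/*
 * Example (sketch; "s" is a hypothetical device state embedding the
 * #AddressSpace and its root region): give a DMA-capable device its own
 * view of memory.
 *
 *     memory_region_init(&s->dma_root, OBJECT(s), "mydev-dma-root", UINT64_MAX);
 *     address_space_init(&s->dma_as, &s->dma_root, "mydev-dma");
 *     ...
 *     address_space_destroy(&s->dma_as);    // on unrealize
 */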
/**
 * address_space_destroy: destroy an address space
 *
 * Releases all resources associated with an address space.  After an address space
 * is destroyed, its root memory region (given by address_space_init()) may be destroyed
 * as well.
 *
 * @as: address space to be destroyed
 */
void address_space_destroy(AddressSpace *as);
/**
 * address_space_remove_listeners: unregister all listeners of an address space
 *
 * Removes all callbacks previously registered with memory_listener_register()
 * for @as.
 *
 * @as: an initialized #AddressSpace
 */
void address_space_remove_listeners(AddressSpace *as);
/**
 * address_space_rw: read from or write to an address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: the number of bytes to read or write
 * @is_write: indicates the transfer direction
 */
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
                             MemTxAttrs attrs, uint8_t *buf,
                             hwaddr len, bool is_write);
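/*
 * Example (sketch; "as" stands in for the target #AddressSpace): read four
 * bytes from a guest-physical address.  The last argument selects the
 * direction; false means read into @buf.
 *
 *     uint8_t buf[4];
 *     MemTxResult r = address_space_rw(as, 0x1000, MEMTXATTRS_UNSPECIFIED,
 *                                      buf, sizeof(buf), false);
 *     if (r != MEMTX_OK) {
 *         // handle unassigned memory / device error / IOMMU fault
 *     }
 */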
/**
 * address_space_write: write to address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: the number of bytes to write
 */
MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
                                MemTxAttrs attrs,
                                const uint8_t *buf, hwaddr len);
/**
 * address_space_write_rom: write to address space, including ROM.
 *
 * This function writes to the specified address space, but will
 * write data to both ROM and RAM. This is used for non-guest
 * writes like writes from the gdb debug stub or initial loading
 * of ROM contents.
 *
 * Note that portions of the write which attempt to write data to
 * a device will be silently ignored -- only real RAM and ROM will
 * be written to.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: the number of bytes to write
 */
MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs,
                                    const uint8_t *buf, hwaddr len);
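/*
 * Example (sketch; "blob", "blob_size" and the address are hypothetical):
 * non-guest code such as a firmware loader can push data into ROM regions
 * this way, where address_space_write() would leave ROM untouched.
 *
 *     address_space_write_rom(&address_space_memory, 0xfffc0000,
 *                             MEMTXATTRS_UNSPECIFIED, blob, blob_size);
 */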
/* address_space_ld*: load from an address space
 * address_space_st*: store to an address space
 *
 * These functions perform a load or store of the byte, word,
 * longword or quad to the specified address within the AddressSpace.
 * The _le suffixed functions treat the data as little endian;
 * _be indicates big endian; no suffix indicates "same endianness
 * as the guest CPU".
 *
 * The "guest CPU endianness" accessors are deprecated for use outside
 * target-* code; devices should be CPU-agnostic and use either the LE
 * or the BE accessors.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @val: data value, for stores
 * @attrs: memory transaction attributes
 * @result: location to write the success/failure of the transaction;
 *   if NULL, this information is discarded
 */

#define SUFFIX
#define ARG1         as
#define ARG1_DECL    AddressSpace *as
#include "exec/memory_ldst.inc.h"

#define SUFFIX
#define ARG1         as
#define ARG1_DECL    AddressSpace *as
#include "exec/memory_ldst_phys.inc.h"
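/*
 * Example (sketch; "as" and "addr" stand for the target #AddressSpace and a
 * guest-physical address): the generated accessors read or write a value of
 * the named width in a single call, e.g. a 32-bit little-endian load/store:
 *
 *     MemTxResult res;
 *     uint32_t v = address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, &res);
 *     if (res == MEMTX_OK) {
 *         address_space_stl_le(as, addr, v | 1, MEMTXATTRS_UNSPECIFIED, &res);
 *     }
 */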
/* A #MemoryRegionSection plus a cached host pointer for fast repeated
 * access to the RAM that backs it. */
struct MemoryRegionCache {
    void *ptr;
    hwaddr xlat;
    hwaddr len;
    FlatView *fv;
    MemoryRegionSection mrs;
    bool is_write;
};

#define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .mrs.mr = NULL })
/* address_space_ld*_cached: load from a cached #MemoryRegion
 * address_space_st*_cached: store into a cached #MemoryRegion
 *
 * These functions perform a load or store of the byte, word,
 * longword or quad to the specified address.  The address is
 * a physical address in the AddressSpace, but it must lie within
 * a #MemoryRegion that was mapped with address_space_cache_init.
 *
 * The _le suffixed functions treat the data as little endian;
 * _be indicates big endian; no suffix indicates "same endianness
 * as the guest CPU".
 *
 * The "guest CPU endianness" accessors are deprecated for use outside
 * target-* code; devices should be CPU-agnostic and use either the LE
 * or the BE accessors.
 *
 * @cache: previously initialized #MemoryRegionCache to be accessed
 * @addr: address within the address space
 * @val: data value, for stores
 * @attrs: memory transaction attributes
 * @result: location to write the success/failure of the transaction;
 *   if NULL, this information is discarded
 */

#define SUFFIX       _cached_slow
#define ARG1         cache
#define ARG1_DECL    MemoryRegionCache *cache
#include "exec/memory_ldst.inc.h"
/* Inline fast path for direct RAM access. */
static inline uint8_t address_space_ldub_cached(MemoryRegionCache *cache,
    hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
{
    assert(addr < cache->len);
    if (likely(cache->ptr)) {
        return ldub_p(cache->ptr + addr);
    } else {
        return address_space_ldub_cached_slow(cache, addr, attrs, result);
    }
}
static inline void address_space_stb_cached(MemoryRegionCache *cache,
    hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
{
    assert(addr < cache->len);
    if (likely(cache->ptr)) {
        stb_p(cache->ptr + addr, val);
    } else {
        address_space_stb_cached_slow(cache, addr, val, attrs, result);
    }
}
#define ENDIANNESS   _le
#include "exec/memory_ldst_cached.inc.h"

#define ENDIANNESS   _be
#include "exec/memory_ldst_cached.inc.h"

#define SUFFIX       _cached
#define ARG1         cache
#define ARG1_DECL    MemoryRegionCache *cache
#include "exec/memory_ldst_phys.inc.h"
/* address_space_cache_init: prepare for repeated access to a physical
 * memory region
 *
 * @cache: #MemoryRegionCache to be filled
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @len: length of buffer
 * @is_write: indicates the transfer direction
 *
 * Will only work with RAM, and may map a subset of the requested range by
 * returning a value that is less than @len.  On failure, return a negative
 * error code.
 *
 * Because it only works with RAM, this function can be used for
 * read-modify-write operations.  In this case, is_write should be %true.
 *
 * Note that addresses passed to the address_space_*_cached functions
 * are relative to @addr.
 */
int64_t address_space_cache_init(MemoryRegionCache *cache,
                                 AddressSpace *as,
                                 hwaddr addr,
                                 hwaddr len,
                                 bool is_write);
/**
 * address_space_cache_invalidate: complete a write to a #MemoryRegionCache
 *
 * @cache: The #MemoryRegionCache to operate on.
 * @addr: The first physical address that was written, relative to the
 * address that was passed to @address_space_cache_init.
 * @access_len: The number of bytes that were written starting at @addr.
 */
void address_space_cache_invalidate(MemoryRegionCache *cache,
                                    hwaddr addr,
                                    hwaddr access_len);
/**
 * address_space_cache_destroy: free a #MemoryRegionCache
 *
 * @cache: The #MemoryRegionCache whose memory should be released.
 */
void address_space_cache_destroy(MemoryRegionCache *cache);
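/*
 * Example (sketch; "as" and "desc_pa" are hypothetical, standing for the
 * target #AddressSpace and the guest-physical address of a small structure
 * that is read and updated repeatedly, virtio-style):
 *
 *     MemoryRegionCache cache = MEMORY_REGION_CACHE_INVALID;
 *     int64_t mapped = address_space_cache_init(&cache, as, desc_pa, 16, true);
 *     if (mapped == 16) {
 *         uint16_t flags = address_space_lduw_le_cached(&cache, 2,
 *                                                       MEMTXATTRS_UNSPECIFIED,
 *                                                       NULL);
 *         address_space_stw_le_cached(&cache, 2, flags | 1,
 *                                     MEMTXATTRS_UNSPECIFIED, NULL);
 *         address_space_cache_invalidate(&cache, 2, sizeof(uint16_t));
 *     }
 *     address_space_cache_destroy(&cache);
 */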
/* address_space_get_iotlb_entry: translate an address into an IOTLB
 * entry.  Should be called from an RCU critical section.
 */
IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
                                            bool is_write, MemTxAttrs attrs);
/* address_space_translate: translate an address range in an address space
 * into a MemoryRegion and an address range into that section.  Should be
 * called from an RCU critical section, to avoid that the last reference
 * to the returned region disappears after address_space_translate returns.
 *
 * @fv: #FlatView to be accessed
 * @addr: address within that address space
 * @xlat: pointer to address within the returned memory region section's
 *        #MemoryRegion.
 * @len: pointer to length
 * @is_write: indicates the transfer direction
 * @attrs: memory attributes
 */
MemoryRegion *flatview_translate(FlatView *fv,
                                 hwaddr addr, hwaddr *xlat,
                                 hwaddr *len, bool is_write,
                                 MemTxAttrs attrs);
static inline MemoryRegion *address_space_translate(AddressSpace *as,
                                                    hwaddr addr, hwaddr *xlat,
                                                    hwaddr *len, bool is_write,
                                                    MemTxAttrs attrs)
{
    return flatview_translate(address_space_to_flatview(as),
                              addr, xlat, len, is_write, attrs);
}
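/*
 * Example (sketch; "as" and "gpa" stand for the #AddressSpace and the
 * guest-physical address being resolved): find the region backing an
 * address.  The caller must hold the RCU read lock so the returned region
 * cannot disappear while it is being used.
 *
 *     hwaddr xlat, plen = 4;
 *     MemoryRegion *mr;
 *
 *     rcu_read_lock();
 *     mr = address_space_translate(as, gpa, &xlat, &plen, false,
 *                                  MEMTXATTRS_UNSPECIFIED);
 *     if (memory_region_is_ram(mr) && plen == 4) {
 *         ...                               // access RAM at offset xlat in mr
 *     }
 *     rcu_read_unlock();
 */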
/* address_space_access_valid: check for validity of accessing an address
 * space range
 *
 * Check whether memory is assigned to the given address space range, and
 * access is permitted by any IOMMU regions that are active for the address
 * space.
 *
 * For now, addr and len should be aligned to a page size.  This limitation
 * will be lifted in the future.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @len: length of the area to be checked
 * @is_write: indicates the transfer direction
 * @attrs: memory attributes
 */
bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len,
                                bool is_write, MemTxAttrs attrs);
/* address_space_map: map a physical memory region into a host virtual address
 *
 * May map a subset of the requested range, given by and returned in @plen.
 * May return %NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @plen: pointer to length of buffer; updated on return
 * @is_write: indicates the transfer direction
 * @attrs: memory attributes
 */
void *address_space_map(AddressSpace *as, hwaddr addr,
                        hwaddr *plen, bool is_write, MemTxAttrs attrs);
/* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
 *
 * Will also mark the memory as dirty if @is_write == %true.  @access_len gives
 * the amount of memory that was actually read or written by the caller.
 *
 * @as: #AddressSpace used
 * @buffer: host pointer as returned by address_space_map()
 * @len: buffer length as returned by address_space_map()
 * @access_len: amount of data actually transferred
 * @is_write: indicates the transfer direction
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len);
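/*
 * Example (sketch; "as", "addr" and "size" are hypothetical inputs): DMA-style
 * direct access to guest memory.  The mapping may cover less than requested,
 * so honour the returned @plen, and report the bytes actually touched when
 * unmapping.
 *
 *     hwaddr plen = size;
 *     void *p = address_space_map(as, addr, &plen, true,
 *                                 MEMTXATTRS_UNSPECIFIED);
 *     if (p) {
 *         memset(p, 0, plen);                        // write plen bytes
 *         address_space_unmap(as, p, plen, true, plen);
 *     }
 */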
/* Internal functions, part of the implementation of address_space_read. */
MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
                                    MemTxAttrs attrs, uint8_t *buf, hwaddr len);
MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
                                   MemTxAttrs attrs, uint8_t *buf,
                                   hwaddr len, hwaddr addr1, hwaddr l,
                                   MemoryRegion *mr);
void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);
/* Internal functions, part of the implementation of address_space_read_cached
 * and address_space_write_cached. */
void address_space_read_cached_slow(MemoryRegionCache *cache,
                                    hwaddr addr, void *buf, hwaddr len);
void address_space_write_cached_slow(MemoryRegionCache *cache,
                                     hwaddr addr, const void *buf, hwaddr len);
static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (is_write) {
        return memory_region_is_ram(mr) &&
               !mr->readonly && !memory_region_is_ram_device(mr);
    } else {
        return (memory_region_is_ram(mr) && !memory_region_is_ram_device(mr)) ||
               memory_region_is_romd(mr);
    }
}
/**
 * address_space_read: read from an address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).  Called within RCU critical section.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline __attribute__((__always_inline__))
MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, uint8_t *buf,
                               hwaddr len)
{
    MemTxResult result = MEMTX_OK;
    hwaddr l, addr1;
    void *ptr;
    MemoryRegion *mr;
    FlatView *fv;

    if (__builtin_constant_p(len)) {
        if (len) {
            fv = address_space_to_flatview(as);
            l = len;
            mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
            if (len == l && memory_access_is_direct(mr, false)) {
                ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
                memcpy(buf, ptr, len);
            } else {
                result = flatview_read_continue(fv, addr, attrs, buf, len,
                                                addr1, l, mr);
            }
        }
    } else {
        result = address_space_read_full(as, addr, attrs, buf, len);
    }
    return result;
}
/**
 * address_space_read_cached: read from a cached RAM region
 *
 * @cache: Cached region to be addressed
 * @addr: address relative to the base of the RAM region
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline void
address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
                          void *buf, hwaddr len)
{
    assert(addr < cache->len && len <= cache->len - addr);
    if (likely(cache->ptr)) {
        memcpy(buf, cache->ptr + addr, len);
    } else {
        address_space_read_cached_slow(cache, addr, buf, len);
    }
}
/**
 * address_space_write_cached: write to a cached RAM region
 *
 * @cache: Cached region to be addressed
 * @addr: address relative to the base of the RAM region
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline void
address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
                           void *buf, hwaddr len)
{
    assert(addr < cache->len && len <= cache->len - addr);
    if (likely(cache->ptr)) {
        memcpy(cache->ptr + addr, buf, len);
    } else {
        address_space_write_cached_slow(cache, addr, buf, len);
    }
}
/* enum device_endian to MemOp. */
static inline MemOp devend_memop(enum device_endian end)
{
    QEMU_BUILD_BUG_ON(DEVICE_HOST_ENDIAN != DEVICE_LITTLE_ENDIAN &&
                      DEVICE_HOST_ENDIAN != DEVICE_BIG_ENDIAN);

#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
    /* Swap if non-host endianness or native (target) endianness */
    return (end == DEVICE_HOST_ENDIAN) ? 0 : MO_BSWAP;
#else
    const int non_host_endianness =
        DEVICE_LITTLE_ENDIAN ^ DEVICE_BIG_ENDIAN ^ DEVICE_HOST_ENDIAN;

    /* In this case, native (target) endianness needs no swap. */
    return (end == non_host_endianness) ? MO_BSWAP : 0;