/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef MEMORY_H
#define MEMORY_H

#ifndef CONFIG_USER_ONLY

#include "exec/cpu-common.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
#include "exec/ramlist.h"
#include "qemu/queue.h"
#include "qemu/int128.h"
#include "qemu/notify.h"
#include "qom/object.h"
#include "qemu/rcu.h"
#include "hw/qdev-core.h"

#define RAM_ADDR_INVALID (~(ram_addr_t)0)

#define MAX_PHYS_ADDR_SPACE_BITS 62
#define MAX_PHYS_ADDR            (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)

#define TYPE_MEMORY_REGION "qemu:memory-region"
#define MEMORY_REGION(obj) \
        OBJECT_CHECK(MemoryRegion, (obj), TYPE_MEMORY_REGION)

#define TYPE_IOMMU_MEMORY_REGION "qemu:iommu-memory-region"
#define IOMMU_MEMORY_REGION(obj) \
        OBJECT_CHECK(IOMMUMemoryRegion, (obj), TYPE_IOMMU_MEMORY_REGION)
#define IOMMU_MEMORY_REGION_CLASS(klass) \
        OBJECT_CLASS_CHECK(IOMMUMemoryRegionClass, (klass), \
                           TYPE_IOMMU_MEMORY_REGION)
#define IOMMU_MEMORY_REGION_GET_CLASS(obj) \
        OBJECT_GET_CLASS(IOMMUMemoryRegionClass, (obj), \
                         TYPE_IOMMU_MEMORY_REGION)

typedef struct MemoryRegionOps MemoryRegionOps;
typedef struct MemoryRegionMmio MemoryRegionMmio;

struct MemoryRegionMmio {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
};

typedef struct IOMMUTLBEntry IOMMUTLBEntry;

/* See address_space_translate: bit 0 is read, bit 1 is write.  */
typedef enum {
    IOMMU_NONE = 0,
    IOMMU_RO   = 1,
    IOMMU_WO   = 2,
    IOMMU_RW   = 3,
} IOMMUAccessFlags;

#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))

struct IOMMUTLBEntry {
    AddressSpace    *target_as;
    hwaddr           iova;
    hwaddr           translated_addr;
    hwaddr           addr_mask;  /* 0xfff = 4k translation */
    IOMMUAccessFlags perm;
};
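
/*
 * Illustrative sketch (not part of the original header; the variables are
 * hypothetical): filling in an IOMMUTLBEntry for a 4K page mapping.
 * addr_mask is a mask of the low address bits, so a 4K page uses 0xfff,
 * and the permission bits are built with IOMMU_ACCESS_FLAG():
 *
 *     IOMMUTLBEntry entry = {
 *         .target_as = &address_space_memory,
 *         .iova = iova & ~0xfffULL,                  // page-aligned input address
 *         .translated_addr = pa & ~0xfffULL,         // page-aligned output address
 *         .addr_mask = 0xfff,                        // 4K translation
 *         .perm = IOMMU_ACCESS_FLAG(true, is_write), // IOMMU_RO or IOMMU_RW
 *     };
 */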

/*
 * Bitmap for different IOMMUNotifier capabilities. Each notifier can
 * register with one or multiple IOMMU Notifier capability bit(s).
 */
typedef enum {
    IOMMU_NOTIFIER_NONE = 0,
    /* Notify cache invalidations */
    IOMMU_NOTIFIER_UNMAP = 0x1,
    /* Notify entry changes (newly created entries) */
    IOMMU_NOTIFIER_MAP = 0x2,
} IOMMUNotifierFlag;

#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)

struct IOMMUNotifier;
typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
                            IOMMUTLBEntry *data);

struct IOMMUNotifier {
    IOMMUNotify notify;
    IOMMUNotifierFlag notifier_flags;
    /* Notify for address space range start <= addr <= end */
    hwaddr start;
    hwaddr end;
    int iommu_idx;
    QLIST_ENTRY(IOMMUNotifier) node;
};
typedef struct IOMMUNotifier IOMMUNotifier;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

/* UFFDIO_ZEROPAGE is available on this RAMBlock to atomically
 * zero the page and wake waiting processes.
 * (Set during postcopy)
 */
#define RAM_UF_ZEROPAGE (1 << 3)

/* RAM can be migrated */
#define RAM_MIGRATABLE (1 << 4)

/* RAM is a persistent kind of memory */
#define RAM_PMEM (1 << 5)

static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
                                       IOMMUNotifierFlag flags,
                                       hwaddr start, hwaddr end,
                                       int iommu_idx)
{
    n->notify = fn;
    n->notifier_flags = flags;
    n->start = start;
    n->end = end;
    n->iommu_idx = iommu_idx;
}
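
/*
 * Illustrative sketch (the callback name and the variable holding the IOMMU
 * region are hypothetical): initializing a notifier that wants both MAP and
 * UNMAP events for the whole address range, then registering it with
 * memory_region_register_iommu_notifier(), declared further below:
 *
 *     static void my_iommu_notify(IOMMUNotifier *n, IOMMUTLBEntry *entry)
 *     {
 *         // entry->perm == IOMMU_NONE indicates an invalidation (UNMAP)
 *     }
 *
 *     IOMMUNotifier n;
 *     iommu_notifier_init(&n, my_iommu_notify, IOMMU_NOTIFIER_ALL,
 *                         0, HWADDR_MAX, 0);   // iommu_idx 0
 *     memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr), &n);
 */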

/*
 * Memory region callbacks
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     hwaddr addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  hwaddr addr,
                  uint64_t data,
                  unsigned size);

    MemTxResult (*read_with_attrs)(void *opaque,
                                   hwaddr addr,
                                   uint64_t *data,
                                   unsigned size,
                                   MemTxAttrs attrs);
    MemTxResult (*write_with_attrs)(void *opaque,
                                    hwaddr addr,
                                    uint64_t data,
                                    unsigned size,
                                    MemTxAttrs attrs);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specify bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
        /*
         * If present, and returns #false, the transaction is not accepted
         * by the device (and results in machine dependent behaviour such
         * as a machine check exception).
         */
        bool (*accepts)(void *opaque, hwaddr addr,
                        unsigned size, bool is_write,
                        MemTxAttrs attrs);
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented.  Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;
};
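
/*
 * Illustrative sketch (the device name, register layout and state struct are
 * hypothetical): a minimal MemoryRegionOps table for an MMIO device that only
 * accepts 32-bit accesses.
 *
 *     static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *         return s->regs[addr >> 2];       // addr is relative to the region
 *     }
 *
 *     static void mydev_write(void *opaque, hwaddr addr,
 *                             uint64_t data, unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *         s->regs[addr >> 2] = data;
 *     }
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *         .valid.min_access_size = 4,      // guest may only do 32-bit accesses
 *         .valid.max_access_size = 4,
 *     };
 */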

enum IOMMUMemoryRegionAttr {
    IOMMU_ATTR_SPAPR_TCE_FD
};

/**
 * IOMMUMemoryRegionClass:
 *
 * All IOMMU implementations need to subclass TYPE_IOMMU_MEMORY_REGION
 * and provide an implementation of at least the @translate method here
 * to handle requests to the memory region. Other methods are optional.
 *
 * The IOMMU implementation must use the IOMMU notifier infrastructure
 * to report whenever mappings are changed, by calling
 * memory_region_notify_iommu() (or, if necessary, by calling
 * memory_region_notify_one() for each registered notifier).
 *
 * Conceptually an IOMMU provides a mapping from input address
 * to an output TLB entry. If the IOMMU is aware of memory transaction
 * attributes and the output TLB entry depends on the transaction
 * attributes, we represent this using IOMMU indexes. Each index
 * selects a particular translation table that the IOMMU has:
 *   @attrs_to_index returns the IOMMU index for a set of transaction attributes
 *   @translate takes an input address and an IOMMU index
 * and the mapping returned can only depend on the input address and the
 * IOMMU index.
 *
 * Most IOMMUs don't care about the transaction attributes and support
 * only a single IOMMU index. A more complex IOMMU might have one index
 * for secure transactions and one for non-secure transactions.
 */
typedef struct IOMMUMemoryRegionClass {
    /* private */
    struct DeviceClass parent_class;

    /*
     * Return a TLB entry that contains a given address.
     *
     * The IOMMUAccessFlags indicated via @flag are optional and may
     * be specified as IOMMU_NONE to indicate that the caller needs
     * the full translation information for both reads and writes. If
     * the access flags are specified then the IOMMU implementation
     * may use this as an optimization, to stop doing a page table
     * walk as soon as it knows that the requested permissions are not
     * allowed. If IOMMU_NONE is passed then the IOMMU must do the
     * full page table walk and report the permissions in the returned
     * IOMMUTLBEntry. (Note that this implies that an IOMMU may not
     * return different mappings for reads and writes.)
     *
     * The returned information remains valid while the caller is
     * holding the big QEMU lock or is inside an RCU critical section;
     * if the caller wishes to cache the mapping beyond that it must
     * register an IOMMU notifier so it can invalidate its cached
     * information when the IOMMU mapping changes.
     *
     * @iommu: the IOMMUMemoryRegion
     * @hwaddr: address to be translated within the memory region
     * @flag: requested access permissions
     * @iommu_idx: IOMMU index for the translation
     */
    IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
                               IOMMUAccessFlags flag, int iommu_idx);
    /* Returns minimum supported page size in bytes.
     * If this method is not provided then the minimum is assumed to
     * be TARGET_PAGE_SIZE.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);
    /* Called when IOMMU Notifier flag changes (i.e. when the set of
     * events which IOMMU users are requesting notification for changes).
     * Optional method -- need not be provided if the IOMMU does not
     * need to know exactly which events must be notified.
     *
     * @iommu: the IOMMUMemoryRegion
     * @old_flags: events which previously needed to be notified
     * @new_flags: events which now need to be notified
     */
    void (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
                                IOMMUNotifierFlag old_flags,
                                IOMMUNotifierFlag new_flags);
    /* Called to handle memory_region_iommu_replay().
     *
     * The default implementation of memory_region_iommu_replay() is to
     * call the IOMMU translate method for every page in the address space
     * with flag == IOMMU_NONE and then call the notifier if translate
     * returns a valid mapping. If this method is implemented then it
     * overrides the default behaviour, and must provide the full semantics
     * of memory_region_iommu_replay(), by calling @notifier for every
     * translation present in the IOMMU.
     *
     * Optional method -- an IOMMU only needs to provide this method
     * if the default is inefficient or produces undesirable side effects.
     *
     * Note: this is not related to record-and-replay functionality.
     */
    void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);

    /* Get IOMMU misc attributes. This is an optional method that
     * can be used to allow users of the IOMMU to get implementation-specific
     * information. The IOMMU implements this method to handle calls
     * by IOMMU users to memory_region_iommu_get_attr() by filling in
     * the arbitrary data pointer for any IOMMUMemoryRegionAttr values that
     * the IOMMU supports. If the method is unimplemented then
     * memory_region_iommu_get_attr() will always return -EINVAL.
     *
     * @iommu: the IOMMUMemoryRegion
     * @attr: attribute being queried
     * @data: memory to fill in with the attribute data
     *
     * Returns 0 on success, or a negative errno; in particular
     * returns -EINVAL for unrecognized or unimplemented attribute types.
     */
    int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr,
                    void *data);

    /* Return the IOMMU index to use for a given set of transaction attributes.
     *
     * Optional method: if an IOMMU only supports a single IOMMU index then
     * the default implementation of memory_region_iommu_attrs_to_index()
     * will return 0.
     *
     * The indexes supported by an IOMMU must be contiguous, starting at 0.
     *
     * @iommu: the IOMMUMemoryRegion
     * @attrs: memory transaction attributes
     */
    int (*attrs_to_index)(IOMMUMemoryRegion *iommu, MemTxAttrs attrs);

    /* Return the number of IOMMU indexes this IOMMU supports.
     *
     * Optional method: if this method is not provided, then
     * memory_region_iommu_num_indexes() will return 1, indicating that
     * only a single IOMMU index is supported.
     *
     * @iommu: the IOMMUMemoryRegion
     */
    int (*num_indexes)(IOMMUMemoryRegion *iommu);
} IOMMUMemoryRegionClass;
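
/*
 * Illustrative sketch (the function name and the elided page-table walk are
 * hypothetical): the shape of a minimal @translate implementation. A real
 * IOMMU would walk its guest-visible page tables here.
 *
 *     static IOMMUTLBEntry my_iommu_translate(IOMMUMemoryRegion *iommu,
 *                                             hwaddr addr,
 *                                             IOMMUAccessFlags flag,
 *                                             int iommu_idx)
 *     {
 *         IOMMUTLBEntry entry = {
 *             .target_as = &address_space_memory,
 *             .iova = addr & ~0xfffULL,
 *             .translated_addr = 0,
 *             .addr_mask = 0xfff,
 *             .perm = IOMMU_NONE,          // default: no valid mapping
 *         };
 *
 *         // ... walk the page tables; on success fill in
 *         // entry.translated_addr and entry.perm ...
 *
 *         return entry;
 *     }
 */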

typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;

struct MemoryRegion {
    Object parent_obj;

    /* All fields are private - violators will be prosecuted */

    /* The following fields should fit in a cache line */
    bool romd_mode;
    bool ram;
    bool subpage;
    bool readonly; /* For RAM regions */
    bool nonvolatile;
    bool rom_device;
    bool flush_coalesced_mmio;
    bool global_locking;
    uint8_t dirty_log_mask;
    bool is_iommu;
    RAMBlock *ram_block;
    Object *owner;

    const MemoryRegionOps *ops;
    void *opaque;
    MemoryRegion *container;
    Int128 size;
    hwaddr addr;
    void (*destructor)(MemoryRegion *mr);
    uint64_t align;
    bool terminates;
    bool ram_device;
    bool enabled;
    bool warning_printed; /* For reservations */
    uint8_t vga_logging_count;
    MemoryRegion *alias;
    hwaddr alias_offset;
    int32_t priority;
    QTAILQ_HEAD(, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(, CoalescedMemoryRange) coalesced;
    const char *name;
    unsigned ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
};

struct IOMMUMemoryRegion {
    MemoryRegion parent_obj;

    QLIST_HEAD(, IOMMUNotifier) iommu_notify;
    IOMMUNotifierFlag iommu_notify_flags;
};

#define IOMMU_NOTIFIER_FOREACH(n, mr) \
    QLIST_FOREACH((n), &(mr)->iommu_notify, node)

/**
 * MemoryListener: callbacks structure for updates to the physical memory map
 *
 * Allows a component to adjust to changes in the guest-visible memory map.
 * Use with memory_listener_register() and memory_listener_unregister().
 */
struct MemoryListener {
    void (*begin)(MemoryListener *listener);
    void (*commit)(MemoryListener *listener);
    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
                      int old, int new);
    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
                     int old, int new);
    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_global_start)(MemoryListener *listener);
    void (*log_global_stop)(MemoryListener *listener);
    void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*coalesced_io_add)(MemoryListener *listener, MemoryRegionSection *section,
                             hwaddr addr, hwaddr len);
    void (*coalesced_io_del)(MemoryListener *listener, MemoryRegionSection *section,
                             hwaddr addr, hwaddr len);
    /* Lower = earlier (during add), later (during del) */
    unsigned priority;
    AddressSpace *address_space;
    QTAILQ_ENTRY(MemoryListener) link;
    QTAILQ_ENTRY(MemoryListener) link_as;
};
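
/*
 * Illustrative sketch (the callback and listener names are hypothetical):
 * a listener that observes every region added to an address space. All
 * callbacks are optional; only the ones you set are invoked.
 *
 *     static void my_region_add(MemoryListener *listener,
 *                               MemoryRegionSection *section)
 *     {
 *         // called for each section of the flattened memory map
 *     }
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *         .priority = 10,
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 */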

/**
 * AddressSpace: describes a mapping of addresses to #MemoryRegion objects
 */
struct AddressSpace {
    /* All fields are private. */
    struct rcu_head rcu;
    char *name;
    MemoryRegion *root;

    /* Accessed via RCU.  */
    struct FlatView *current_map;

    int ioeventfd_nb;
    struct MemoryRegionIoeventfd *ioeventfds;
    QTAILQ_HEAD(, MemoryListener) listeners;
    QTAILQ_ENTRY(AddressSpace) address_spaces_link;
};

typedef struct AddressSpaceDispatch AddressSpaceDispatch;
typedef struct FlatRange FlatRange;

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    struct rcu_head rcu;
    unsigned ref;
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
    struct AddressSpaceDispatch *dispatch;
    MemoryRegion *root;
};

static inline FlatView *address_space_to_flatview(AddressSpace *as)
{
    return atomic_rcu_read(&as->current_map);
}


/**
 * MemoryRegionSection: describes a fragment of a #MemoryRegion
 *
 * @mr: the region, or %NULL if empty
 * @fv: the flat view of the address space the region is mapped in
 * @offset_within_region: the beginning of the section, relative to @mr's start
 * @size: the size of the section; will not exceed @mr's boundaries
 * @offset_within_address_space: the address of the first byte of the section
 *     relative to the region's address space
 * @readonly: writes to this section are ignored
 * @nonvolatile: this section is non-volatile
 */
struct MemoryRegionSection {
    MemoryRegion *mr;
    FlatView *fv;
    hwaddr offset_within_region;
    Int128 size;
    hwaddr offset_within_address_space;
    bool readonly;
    bool nonvolatile;
};
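
/*
 * Illustrative sketch: the two offsets in a MemoryRegionSection describe the
 * same bytes from different vantage points, so a guest-physical address that
 * lies inside the section can be converted to an offset within @mr like this:
 *
 *     hwaddr offset_in_mr = section->offset_within_region +
 *                           (gpa - section->offset_within_address_space);
 */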

/**
 * memory_region_init: Initialize a memory region
 *
 * The region typically acts as a container for other memory regions.  Use
 * memory_region_add_subregion() to add subregions.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region; any subregions beyond this size will be clipped
 */
void memory_region_init(MemoryRegion *mr,
                        struct Object *owner,
                        const char *name,
                        uint64_t size);

/**
 * memory_region_ref: Add 1 to a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function adds a reference to the owner.
 *
 * All MemoryRegions must have an owner if they can disappear, even if the
 * device they belong to operates exclusively under the BQL.  This is because
 * the region could be returned at any time by memory_region_find, and this
 * is usually under guest control.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_ref(MemoryRegion *mr);

/**
 * memory_region_unref: Remove 1 from a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug.  MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function removes a reference to the owner and possibly destroys it.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_unref(MemoryRegion *mr);

/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(MemoryRegion *mr,
                           struct Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size);
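
/*
 * Illustrative sketch (the device state, the mydev_ops table sketched after
 * MemoryRegionOps above, and the addresses are hypothetical): initializing a
 * 4K MMIO region in a device and mapping it into a container:
 *
 *     memory_region_init_io(&s->iomem, OBJECT(s), &mydev_ops, s,
 *                           "mydev-mmio", 0x1000);
 *     memory_region_add_subregion(sysmem, 0x10000000, &s->iomem);
 */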

/**
 * memory_region_init_ram_nomigrate:  Initialize RAM memory region.  Accesses
 *                                    into the region will modify memory
 *                                    directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_nomigrate(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);

/**
 * memory_region_init_ram_shared_nomigrate:  Initialize RAM memory region.
 *                                           Accesses into the region will
 *                                           modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @share: allow remapping RAM to different addresses
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function is similar to memory_region_init_ram_nomigrate.
 * The only difference is that part of the RAM region can be remapped.
 */
void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
                                             struct Object *owner,
                                             const char *name,
                                             uint64_t size,
                                             bool share,
                                             Error **errp);

/**
 * memory_region_init_resizeable_ram:  Initialize memory region with resizeable
 *                                     RAM.  Accesses into the region will
 *                                     modify memory directly.  Only an initial
 *                                     portion of this RAM is actually used.
 *                                     The used size can change across reboots.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: used size of the region.
 * @max_size: max size of the region.
 * @resized: callback to notify owner about used size change.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       struct Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp);
#ifdef CONFIG_POSIX

/**
 * memory_region_init_ram_from_file:  Initialize RAM memory region with a
 *                                    mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @align: alignment of the region base address; if 0, the default alignment
 *         (getpagesize()) will be used.
 * @ram_flags: Memory region features:
 *             - RAM_SHARED: memory must be mmaped with the MAP_SHARED flag
 *             - RAM_PMEM: the memory is persistent memory
 *             Other bits are ignored now.
 * @path: the path in which to allocate the RAM.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      uint64_t align,
                                      uint32_t ram_flags,
                                      const char *path,
                                      Error **errp);

/**
 * memory_region_init_ram_from_fd:  Initialize RAM memory region with a
 *                                  mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @share: %true if memory must be mmaped with the MAP_SHARED flag
 * @fd: the fd to mmap.
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    struct Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    bool share,
                                    int fd,
                                    Error **errp);
#endif

/**
 * memory_region_init_ram_ptr:  Initialize RAM memory region from a
 *                              user-provided pointer.  Accesses into the
 *                              region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                struct Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr);

/**
 * memory_region_init_ram_device_ptr:  Initialize RAM device memory region from
 *                                     a user-provided pointer.
 *
 * A RAM device represents a mapping to a physical device, such as to a PCI
 * MMIO BAR of a vfio-pci assigned device.  The memory region may be mapped
 * into the VM address space and access to the region will modify memory
 * directly.  However, the memory region should not be included in a memory
 * dump (device may not be enabled/mapped at the time of the dump), and
 * operations incompatible with manipulating MMIO should be avoided.  Replaces
 * skip_dump flag.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM memory region to be migrated; that is the responsibility of the caller.
 * (For RAM device memory regions, migrating the contents rarely makes sense.)
 */
void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       struct Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr);

/**
 * memory_region_init_alias: Initialize a memory region that aliases all or a
 *                           part of another memory region.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @orig: the region to be referenced; @mr will be equivalent to
 *        @orig between @offset and @offset + @size - 1.
 * @offset: start of the section in @orig to be referenced.
 * @size: size of the region.
 */
void memory_region_init_alias(MemoryRegion *mr,
                              struct Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size);
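
/*
 * Illustrative sketch (the regions, owner and addresses are hypothetical):
 * mirroring the low 1MB of a RAM region at a second guest-physical address,
 * the classic use for aliases since a region may only be added once as a
 * subregion:
 *
 *     MemoryRegion *alias = g_new0(MemoryRegion, 1);
 *     memory_region_init_alias(alias, OBJECT(s), "ram-low-alias",
 *                              ram_mr, 0, 0x100000);
 *     memory_region_add_subregion(sysmem, 0xe0000000, alias);
 */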

/**
 * memory_region_init_rom_nomigrate: Initialize a ROM memory region.
 *
 * This has the same effect as calling memory_region_init_ram_nomigrate()
 * and then marking the resulting region read-only with
 * memory_region_set_readonly().
 *
 * Note that this function does not do anything to cause the data in the
 * RAM side of the memory region to be migrated; that is the responsibility
 * of the caller.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_nomigrate(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      Error **errp);

/**
 * memory_region_init_rom_device_nomigrate:  Initialize a ROM memory region.
 *                                           Writes are handled via callbacks.
 *
 * Note that this function does not do anything to cause the data in the
 * RAM side of the memory region to be migrated; that is the responsibility
 * of the caller.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
                                             struct Object *owner,
                                             const MemoryRegionOps *ops,
                                             void *opaque,
                                             const char *name,
                                             uint64_t size,
                                             Error **errp);

/**
 * memory_region_init_iommu: Initialize a memory region of a custom type
 *                           that translates addresses
 *
 * An IOMMU region translates addresses and forwards accesses to a target
 * memory region.
 *
 * The IOMMU implementation must define a subclass of TYPE_IOMMU_MEMORY_REGION.
 * @_iommu_mr should be a pointer to enough memory for an instance of
 * that subclass, @instance_size is the size of that subclass, and
 * @mrtypename is its name. This function will initialize @_iommu_mr as an
 * instance of the subclass, and its methods will then be called to handle
 * accesses to the memory region. See the documentation of
 * #IOMMUMemoryRegionClass for further details.
 *
 * @_iommu_mr: the #IOMMUMemoryRegion to be initialized
 * @instance_size: the IOMMUMemoryRegion subclass instance size
 * @mrtypename: the type name of the #IOMMUMemoryRegion
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_iommu(void *_iommu_mr,
                              size_t instance_size,
                              const char *mrtypename,
                              Object *owner,
                              const char *name,
                              uint64_t size);

/**
 * memory_region_init_ram: Initialize RAM memory region.  Accesses into the
 *                         region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count (must be
 *         TYPE_DEVICE or a subclass of TYPE_DEVICE, or NULL)
 * @name: name of the memory region
 * @size: size of the region in bytes
 * @errp: pointer to Error*, to store an error if it happens.
 *
 * This function allocates RAM for a board model or device, and
 * arranges for it to be migrated (by calling vmstate_register_ram()
 * if @owner is a DeviceState, or vmstate_register_ram_global() if
 * @owner is NULL).
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 */
void memory_region_init_ram(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);
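
/*
 * Illustrative sketch (the board-level names and size are hypothetical):
 * allocating migratable board RAM and mapping it at guest address 0:
 *
 *     MemoryRegion *ram = g_new0(MemoryRegion, 1);
 *     memory_region_init_ram(ram, NULL, "board.ram", ram_size, &error_fatal);
 *     memory_region_add_subregion(get_system_memory(), 0, ram);
 */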

/**
 * memory_region_init_rom: Initialize a ROM memory region.
 *
 * This has the same effect as calling memory_region_init_ram()
 * and then marking the resulting region read-only with
 * memory_region_set_readonly(). This includes arranging for the
 * contents to be migrated.
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);

/**
 * memory_region_init_rom_device:  Initialize a ROM memory region.
 *                                 Writes are handled via callbacks.
 *
 * This function initializes a memory region backed by RAM for reads
 * and callbacks for writes, and arranges for the RAM backing to
 * be migrated (by calling vmstate_register_ram()
 * if @owner is a DeviceState, or vmstate_register_ram_global() if
 * @owner is NULL).
 *
 * TODO: Currently we restrict @owner to being either NULL (for
 * global RAM regions with no owner) or devices, so that we can
 * give the RAM block a unique name for migration purposes.
 * We should lift this restriction and allow arbitrary Objects.
 * If you pass a non-NULL non-device @owner then we will assert.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_device(MemoryRegion *mr,
                                   struct Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp);


/**
 * memory_region_owner: get a memory region's owner.
 *
 * @mr: the memory region being queried.
 */
struct Object *memory_region_owner(MemoryRegion *mr);

/**
 * memory_region_size: get a memory region's size.
 *
 * @mr: the memory region being queried.
 */
uint64_t memory_region_size(MemoryRegion *mr);

/**
 * memory_region_is_ram: check whether a memory region is random access
 *
 * Returns %true if a memory region is random access.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_ram(MemoryRegion *mr)
{
    return mr->ram;
}

/**
 * memory_region_is_ram_device: check whether a memory region is a ram device
 *
 * Returns %true if a memory region is a device-backed RAM region
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_ram_device(MemoryRegion *mr);

/**
 * memory_region_is_romd: check whether a memory region is in ROMD mode
 *
 * Returns %true if a memory region is a ROM device and currently set to allow
 * direct reads.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_romd(MemoryRegion *mr)
{
    return mr->rom_device && mr->romd_mode;
}

/**
 * memory_region_get_iommu: check whether a memory region is an iommu
 *
 * Returns pointer to IOMMUMemoryRegion if a memory region is an iommu,
 * otherwise NULL.
 *
 * @mr: the memory region being queried
 */
static inline IOMMUMemoryRegion *memory_region_get_iommu(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_iommu(mr->alias);
    }
    if (mr->is_iommu) {
        return (IOMMUMemoryRegion *) mr;
    }
    return NULL;
}

/**
 * memory_region_get_iommu_class_nocheck: returns iommu memory region class
 *   if an iommu or NULL if not
 *
 * Returns pointer to IOMMUMemoryRegionClass if a memory region is an iommu,
 * otherwise NULL. This is fast path avoiding QOM checking, use with caution.
 *
 * @iommu_mr: the memory region being queried
 */
static inline IOMMUMemoryRegionClass *memory_region_get_iommu_class_nocheck(
        IOMMUMemoryRegion *iommu_mr)
{
    return (IOMMUMemoryRegionClass *) (((Object *)iommu_mr)->class);
}

#define memory_region_is_iommu(mr) (memory_region_get_iommu(mr) != NULL)

/**
 * memory_region_iommu_get_min_page_size: get minimum supported page size
 *   for an iommu
 *
 * Returns minimum supported page size for an iommu.
 *
 * @iommu_mr: the memory region being queried
 */
uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr);

/**
 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
 *
 * The notification type will be decided by entry.perm bits:
 *
 * - For UNMAP (cache invalidation) notifies: set entry.perm to IOMMU_NONE.
 * - For MAP (newly added entry) notifies: set entry.perm to the
 *   permission of the page (which is definitely !IOMMU_NONE).
 *
 * Note: for any IOMMU implementation, an in-place mapping change
 * should be notified with an UNMAP followed by a MAP.
 *
 * @iommu_mr: the memory region that was changed
 * @iommu_idx: the IOMMU index for the translation table which has changed
 * @entry: the new entry in the IOMMU translation table.  The entry
 *         replaces all old entries for the same virtual I/O address range.
 *         Deleted entries have .@perm == 0.
 */
void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
                                int iommu_idx,
                                IOMMUTLBEntry entry);
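
/*
 * Illustrative sketch (the iova variable is hypothetical): notifying an
 * invalidation of a 4K page. Per the rule above, perm is IOMMU_NONE for an
 * UNMAP notify:
 *
 *     IOMMUTLBEntry entry = {
 *         .target_as = &address_space_memory,
 *         .iova = iova & ~0xfffULL,
 *         .translated_addr = 0,
 *         .addr_mask = 0xfff,
 *         .perm = IOMMU_NONE,
 *     };
 *     memory_region_notify_iommu(iommu_mr, 0, entry);
 */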

/**
 * memory_region_notify_one: notify a change in an IOMMU translation
 *                           entry to a single notifier
 *
 * This works just like memory_region_notify_iommu(), but it only
 * notifies a specific notifier, not all of them.
 *
 * @notifier: the notifier to be notified
 * @entry: the new entry in the IOMMU translation table.  The entry
 *         replaces all old entries for the same virtual I/O address range.
 *         Deleted entries have .@perm == 0.
 */
void memory_region_notify_one(IOMMUNotifier *notifier,
                              IOMMUTLBEntry *entry);

/**
 * memory_region_register_iommu_notifier: register a notifier for changes to
 * IOMMU translation entries.
 *
 * @mr: the memory region to observe
 * @n: the IOMMUNotifier to be added; the notify callback receives a
 *     pointer to an #IOMMUTLBEntry as the opaque value; the pointer
 *     ceases to be valid on exit from the notifier.
 */
void memory_region_register_iommu_notifier(MemoryRegion *mr,
                                           IOMMUNotifier *n);

/**
 * memory_region_iommu_replay: replay existing IOMMU translations to
 * a notifier with the minimum page granularity returned by
 * the #IOMMUMemoryRegionClass get_min_page_size() method.
 *
 * Note: this is not related to record-and-replay functionality.
 *
 * @iommu_mr: the memory region to observe
 * @n: the notifier to which to replay iommu mappings
 */
void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n);

/**
 * memory_region_iommu_replay_all: replay existing IOMMU translations
 * to all the notifiers registered.
 *
 * Note: this is not related to record-and-replay functionality.
 *
 * @iommu_mr: the memory region to observe
 */
void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr);

/**
 * memory_region_unregister_iommu_notifier: unregister a notifier for
 * changes to IOMMU translation entries.
 *
 * @mr: the memory region which was observed and for which notify_stopped()
 *      needs to be called
 * @n: the notifier to be removed.
 */
void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n);

/**
 * memory_region_iommu_get_attr: return an IOMMU attr if get_attr() is
 * defined on the IOMMU.
 *
 * Returns 0 on success, or a negative errno otherwise. In particular,
 * -EINVAL indicates that the IOMMU does not support the requested
 * attribute.
 *
 * @iommu_mr: the memory region
 * @attr: the requested attribute
 * @data: a pointer to the requested attribute data
 */
int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
                                 enum IOMMUMemoryRegionAttr attr,
                                 void *data);

/**
 * memory_region_iommu_attrs_to_index: return the IOMMU index to
 * use for translations with the given memory transaction attributes.
 *
 * @iommu_mr: the memory region
 * @attrs: the memory transaction attributes
 */
int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
                                       MemTxAttrs attrs);

/**
 * memory_region_iommu_num_indexes: return the total number of IOMMU
 * indexes that this IOMMU supports.
 *
 * @iommu_mr: the memory region
 */
int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr);

/**
 * memory_region_name: get a memory region's name
 *
 * Returns the string that was used to initialize the memory region.
 *
 * @mr: the memory region being queried
 */
const char *memory_region_name(const MemoryRegion *mr);

/**
 * memory_region_is_logging: return whether a memory region is logging writes
 *
 * Returns %true if the memory region is logging writes for the given client
 *
 * @mr: the memory region being queried
 * @client: the client being queried
 */
bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);

/**
 * memory_region_get_dirty_log_mask: return the clients for which a
 * memory region is logging writes.
 *
 * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
 * are the bit indices.
 *
 * @mr: the memory region being queried
 */
uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);

/**
 * memory_region_is_rom: check whether a memory region is ROM
 *
 * Returns %true if a memory region is read-only memory.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_rom(MemoryRegion *mr)
{
    return mr->ram && mr->readonly;
}

/**
 * memory_region_is_nonvolatile: check whether a memory region is non-volatile
 *
 * Returns %true if a memory region is non-volatile memory.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_nonvolatile(MemoryRegion *mr)
{
    return mr->nonvolatile;
}

/**
 * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
 *
 * Returns a file descriptor backing a file-based RAM memory region,
 * or -1 if the region is not a file-based RAM memory region.
 *
 * @mr: the RAM or alias memory region being queried.
 */
int memory_region_get_fd(MemoryRegion *mr);

/**
 * memory_region_from_host: Convert a pointer into a RAM memory region
 * and an offset within it.
 *
 * Given a host pointer inside a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()), return
 * the MemoryRegion and the offset within it.
 *
 * Use with care; by the time this function returns, the returned pointer is
 * not protected by RCU anymore.  If the caller is not within an RCU critical
 * section and does not hold the iothread lock, it must have other means of
 * protecting the pointer, such as a reference to the region that includes
 * the incoming ram_addr_t.
 *
 * @ptr: the host pointer to be converted
 * @offset: the offset within memory region
 */
MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);

/**
 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
 *
 * Returns a host pointer to a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()).
 *
 * Use with care; by the time this function returns, the returned pointer is
 * not protected by RCU anymore.  If the caller is not within an RCU critical
 * section and does not hold the iothread lock, it must have other means of
 * protecting the pointer, such as a reference to the region that includes
 * the incoming ram_addr_t.
 *
 * @mr: the memory region being queried.
 */
void *memory_region_get_ram_ptr(MemoryRegion *mr);

/* memory_region_ram_resize: Resize a RAM region.
 *
 * Only legal before guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * @mr: a memory region created with @memory_region_init_resizeable_ram.
 * @newsize: the new size of the region
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
                              Error **errp);

/**
 * memory_region_set_log: Turn dirty logging on or off for a region.
 *
 * Turns dirty logging on or off for a specified client (display, migration).
 * Only meaningful for RAM regions.
 *
 * @mr: the memory region being updated.
 * @log: whether dirty logging is to be enabled or disabled.
 * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
 */
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);

/**
 * memory_region_get_dirty: Check whether a range of bytes is dirty
 *                          for a specified client.
 *
 * Checks whether a range of bytes has been written to since the last
 * call to memory_region_reset_dirty() with the same @client.  Dirty logging
 * must be enabled.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size, unsigned client);

/**
 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
 *
 * Marks a range of bytes as dirty, after it has been dirtied outside
 * guest code.
 *
 * @mr: the memory region being dirtied.
 * @addr: the address (relative to the start of the region) being dirtied.
 * @size: size of the range being dirtied.
 */
void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size);

/**
 * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
 *                                         bitmap and clear it.
 *
 * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
 * returns the snapshot.  The snapshot can then be used to query dirty
 * status, using memory_region_snapshot_get_dirty.  Snapshotting allows
 * querying the same page multiple times, which is especially useful for
 * display updates where the scanlines often are not page aligned.
 *
 * The dirty bitmap region which gets copied into the snapshot (and
 * cleared afterwards) can be larger than requested.  The boundaries
 * are rounded up/down so complete bitmap longs (covering 64 pages on
 * 64-bit hosts) can be copied over into the bitmap snapshot.  This
 * isn't a problem for display updates as the extra pages are outside
 * the visible area, and in case the visible area changes a full
 * display redraw is due anyway.  Should other use cases for this
 * function emerge we might have to revisit this implementation
 * detail.
 *
 * Use g_free to release DirtyBitmapSnapshot.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA.
 */
DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            hwaddr size,
                                                            unsigned client);

/**
 * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty
 *                                   in the specified dirty bitmap snapshot.
 *
 * @mr: the memory region being queried.
 * @snap: the dirty bitmap snapshot
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 */
bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
                                      DirtyBitmapSnapshot *snap,
                                      hwaddr addr, hwaddr size);
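
/*
 * Illustrative sketch (the vram region, its size and the redraw step are
 * hypothetical): the intended snapshot workflow for display code:
 *
 *     DirtyBitmapSnapshot *snap;
 *
 *     snap = memory_region_snapshot_and_clear_dirty(vram, 0, vram_size,
 *                                                   DIRTY_MEMORY_VGA);
 *     for (hwaddr ofs = 0; ofs < vram_size; ofs += TARGET_PAGE_SIZE) {
 *         if (memory_region_snapshot_get_dirty(vram, snap, ofs,
 *                                              TARGET_PAGE_SIZE)) {
 *             // redraw the scanlines covered by this page
 *         }
 *     }
 *     g_free(snap);
 */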

/**
 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
 *                            client.
 *
 * Marks a range of pages as no longer dirty.
 *
 * @mr: the region being updated.
 * @addr: the start of the subrange being cleaned.
 * @size: the size of the subrange being cleaned.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client);

/**
 * memory_region_flush_rom_device: Mark a range of pages dirty and invalidate
 *                                 TBs (for self-modifying code).
 *
 * The MemoryRegionOps->write() callback of a ROM device must use this function
 * to mark byte ranges that have been modified internally, such as by directly
 * accessing the memory returned by memory_region_get_ram_ptr().
 *
 * This function marks the range dirty and invalidates TBs so that TCG can
 * detect self-modifying code.
 *
 * @mr: the region being flushed.
 * @addr: the start, relative to the start of the region, of the range being
 *        flushed.
 * @size: the size, in bytes, of the range being flushed.
 */
void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size);

/**
 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
 *
 * Allows a memory region to be marked as read-only (turning it into a ROM).
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @readonly: whether the region is to be ROM or RAM.
 */
void memory_region_set_readonly(MemoryRegion *mr, bool readonly);

/**
 * memory_region_set_nonvolatile: Turn a memory region non-volatile
 *
 * Allows a memory region to be marked as non-volatile.
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @nonvolatile: whether the region is to be non-volatile.
 */
void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile);

/**
 * memory_region_rom_device_set_romd: enable/disable ROMD mode
 *
 * Allows a ROM device (initialized with memory_region_init_rom_device()) to
 * be set to ROMD mode (default) or MMIO mode.  When it is in ROMD mode, the
 * device is mapped to guest memory and satisfies read access directly.
 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
 * Writes are always handled by the #MemoryRegion.write function.
 *
 * @mr: the memory region to be updated
 * @romd_mode: %true to put the region into ROMD mode
 */
void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);

/**
 * memory_region_set_coalescing: Enable memory coalescing for the region.
 *
 * Enables writes to a region to be queued for later processing. MMIO ->write
 * callbacks may be delayed until a non-coalesced MMIO is issued.
 * Only useful for IO regions.  Roughly similar to write-combining hardware.
 *
 * @mr: the memory region to be write coalesced
 */
void memory_region_set_coalescing(MemoryRegion *mr);

/**
 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
 *                               a region.
 *
 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
 * Multiple calls can be issued to coalesce disjoint ranges.
 *
 * @mr: the memory region to be updated.
 * @offset: the start of the range within the region to be coalesced.
 * @size: the size of the subrange to be coalesced.
 */
void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size);

/**
 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
 *
 * Disables any coalescing caused by memory_region_set_coalescing() or
 * memory_region_add_coalescing().  Roughly equivalent to uncacheable memory
 * hardware.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_coalescing(MemoryRegion *mr);

/**
 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
 *                                    accesses.
 *
 * Ensure that pending coalesced MMIO requests are flushed before the memory
 * region is accessed. This property is automatically enabled for all regions
 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
 *
 * @mr: the memory region to be updated.
 */
void memory_region_set_flush_coalesced(MemoryRegion *mr);

/**
 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
 *                                      accesses.
 *
 * Clear the automatic coalesced MMIO flushing enabled via
 * memory_region_set_flush_coalesced. Note that this service has no effect on
 * memory regions that have MMIO coalescing enabled for themselves. For them,
 * automatic flushing will stop once coalescing is disabled.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_flush_coalesced(MemoryRegion *mr);

/**
 * memory_region_clear_global_locking: Declares that access processing does
 *                                     not depend on the QEMU global lock.
 *
 * By clearing this property, accesses to the memory region will be processed
 * outside of QEMU's global lock (unless the lock is held when issuing the
 * access request).  In this case, the device model implementing the access
 * handlers is responsible for synchronization of concurrency.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_global_locking(MemoryRegion *mr);

/**
 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
 *                            is written to a location.
 *
 * Marks a word in an IO region (initialized with memory_region_init_io())
 * as a trigger for an eventfd event.  The I/O callback will not be called.
 * The caller must be prepared to handle failure (that is, take the required
 * action if the callback _is_ called).
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: event notifier to be triggered when @addr, @size, and @data all match.
 **/
void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);
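
/*
 * Illustrative sketch (the register offset, value and notifier field are
 * hypothetical): a doorbell register where a 2-byte guest write of the value
 * 0 to offset 0x10 signals an EventNotifier instead of invoking the ->write
 * callback:
 *
 *     event_notifier_init(&s->doorbell, 0);
 *     memory_region_add_eventfd(&s->iomem, 0x10, 2,
 *                               true, 0, &s->doorbell);
 */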
1498
1499 /**
1500 * memory_region_del_eventfd: Cancel an eventfd.
1501 *
1502 * Cancels an eventfd trigger requested by a previous
1503 * memory_region_add_eventfd() call.
1504 *
1505 * @mr: the memory region being updated.
1506 * @addr: the address within @mr that is to be monitored
1507 * @size: the size of the access to trigger the eventfd
1508 * @match_data: whether to match against @data, instead of just @addr
1509 * @data: the data to match against the guest write
1510 * @e: event notifier to be triggered when @addr, @size, and @data all match.
1511 */
1512 void memory_region_del_eventfd(MemoryRegion *mr,
1513 hwaddr addr,
1514 unsigned size,
1515 bool match_data,
1516 uint64_t data,
1517 EventNotifier *e);
1518
1519 /**
1520 * memory_region_add_subregion: Add a subregion to a container.
1521 *
1522 * Adds a subregion at @offset. The subregion may not overlap with other
1523 * subregions (except for those explicitly marked as overlapping). A region
1524 * may only be added once as a subregion (unless removed with
1525 * memory_region_del_subregion()); use memory_region_init_alias() if you
1526 * want a region to be a subregion in multiple locations.
1527 *
1528 * @mr: the region to contain the new subregion; must be a container
1529 * initialized with memory_region_init().
1530 * @offset: the offset relative to @mr where @subregion is added.
1531 * @subregion: the subregion to be added.
1532 */
1533 void memory_region_add_subregion(MemoryRegion *mr,
1534 hwaddr offset,
1535 MemoryRegion *subregion);
1536 /**
1537 * memory_region_add_subregion_overlap: Add a subregion to a container
1538 * with overlap.
1539 *
1540 * Adds a subregion at @offset. The subregion may overlap with other
1541 * subregions. Conflicts are resolved by having a higher @priority hide a
1542 * lower @priority. Subregions without priority are taken as @priority 0.
1543 * A region may only be added once as a subregion (unless removed with
1544 * memory_region_del_subregion()); use memory_region_init_alias() if you
1545 * want a region to be a subregion in multiple locations.
1546 *
1547 * @mr: the region to contain the new subregion; must be a container
1548 * initialized with memory_region_init().
1549 * @offset: the offset relative to @mr where @subregion is added.
1550 * @subregion: the subregion to be added.
1551 * @priority: used for resolving overlaps; highest priority wins.
1552 */
1553 void memory_region_add_subregion_overlap(MemoryRegion *mr,
1554 hwaddr offset,
1555 MemoryRegion *subregion,
1556 int priority);
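
/*
 * Illustrative sketch: composing a container in which RAM acts as a low
 * priority background and an MMIO window hides the part of it that they
 * share. The regions sysmem, ram and mmio are hypothetical and assumed to
 * have been initialized elsewhere.
 *
 *     memory_region_add_subregion_overlap(sysmem, 0x0, ram, -1);
 *     memory_region_add_subregion_overlap(sysmem, 0xfee00000, mmio, 1);
 */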
1557
1558 /**
1559 * memory_region_get_ram_addr: Get the ram address associated with a memory
1560 * region
1561 */
1562 ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
1563
1564 uint64_t memory_region_get_alignment(const MemoryRegion *mr);
1565 /**
1566 * memory_region_del_subregion: Remove a subregion.
1567 *
1568 * Removes a subregion from its container.
1569 *
1570 * @mr: the container to be updated.
1571 * @subregion: the region being removed; must be a current subregion of @mr.
1572 */
1573 void memory_region_del_subregion(MemoryRegion *mr,
1574 MemoryRegion *subregion);
1575
1576 /*
1577 * memory_region_set_enabled: dynamically enable or disable a region
1578 *
1579 * Enables or disables a memory region. A disabled memory region
1580 * ignores all accesses to itself and its subregions. It does not
1581 * obscure sibling subregions with lower priority; it simply behaves as
1582 * if it were removed from the hierarchy.
1583 *
1584 * Regions default to being enabled.
1585 *
1586 * @mr: the region to be updated
1587 * @enabled: whether to enable or disable the region
1588 */
1589 void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
1590
1591 /*
1592 * memory_region_set_address: dynamically update the address of a region
1593 *
1594 * Dynamically updates the address of a region, relative to its container.
1595 * May be used on regions that are currently part of a memory hierarchy.
1596 *
1597 * @mr: the region to be updated
1598 * @addr: new address, relative to container region
1599 */
1600 void memory_region_set_address(MemoryRegion *mr, hwaddr addr);
1601
1602 /*
1603 * memory_region_set_size: dynamically update the size of a region.
1604 *
1605 * Dynamically updates the size of a region.
1606 *
1607 * @mr: the region to be updated
1608 * @size: used size of the region.
1609 */
1610 void memory_region_set_size(MemoryRegion *mr, uint64_t size);
1611
1612 /*
1613 * memory_region_set_alias_offset: dynamically update a memory alias's offset
1614 *
1615 * Dynamically updates the offset into the target region that an alias points
1616 * to, as if the @offset argument to memory_region_init_alias() had changed.
1617 *
1618 * @mr: the #MemoryRegion to be updated; should be an alias.
1619 * @offset: the new offset into the target memory region
1620 */
1621 void memory_region_set_alias_offset(MemoryRegion *mr,
1622 hwaddr offset);
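
/*
 * Illustrative sketch: bank switching through an alias. A 16KiB alias into
 * a larger ROM is retargeted when the guest selects a bank; s->bank_alias
 * is a hypothetical alias region created with memory_region_init_alias().
 *
 *     static void foo_set_bank(FooState *s, unsigned bank)
 *     {
 *         memory_region_set_alias_offset(&s->bank_alias, bank * 0x4000);
 *     }
 */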
1623
1624 /**
1625 * memory_region_present: checks if an address relative to a @container
1626 * translates into a #MemoryRegion within @container
1627 *
1628 * Answers whether a #MemoryRegion within @container covers the address
1629 * @addr.
1630 *
1631 * @container: a #MemoryRegion within which @addr is a relative address
1632 * @addr: the area within @container to be searched
1633 */
1634 bool memory_region_present(MemoryRegion *container, hwaddr addr);
1635
1636 /**
1637 * memory_region_is_mapped: returns true if #MemoryRegion is mapped
1638 * into any address space.
1639 *
1640 * @mr: a #MemoryRegion which should be checked if it's mapped
1641 */
1642 bool memory_region_is_mapped(MemoryRegion *mr);
1643
1644 /**
1645 * memory_region_find: translate an address/size relative to a
1646 * MemoryRegion into a #MemoryRegionSection.
1647 *
1648 * Locates the first #MemoryRegion within @mr that overlaps the range
1649 * given by @addr and @size.
1650 *
1651 * Returns a #MemoryRegionSection that describes a contiguous overlap.
1652 * It will have the following characteristics:
1653 * .@size = 0 iff no overlap was found
1654 * .@mr is non-%NULL iff an overlap was found
1655 *
1656 * Remember that in the return value the @offset_within_region is
1657 * relative to the returned region (in the .@mr field), not to the
1658 * @mr argument.
1659 *
1660 * Similarly, the .@offset_within_address_space is relative to the
1661 * address space that contains both regions, the passed and the
1662 * returned one. However, in the special case where the @mr argument
1663 * has no container (and thus is the root of the address space), the
1664 * following will hold:
1665 * .@offset_within_address_space >= @addr
1666 * .@offset_within_address_space + .@size <= @addr + @size
1667 *
1668 * @mr: a MemoryRegion within which @addr is a relative address
1669 * @addr: start of the area within @mr to be searched
1670 * @size: size of the area to be searched
1671 */
1672 MemoryRegionSection memory_region_find(MemoryRegion *mr,
1673 hwaddr addr, uint64_t size);
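
/*
 * Illustrative sketch: probing what backs a small range. Note that
 * memory_region_find() takes a reference on the returned region, which the
 * caller must drop; sysmem is assumed to be the root region here.
 *
 *     MemoryRegionSection mrs = memory_region_find(sysmem, 0x1000, 4);
 *     if (mrs.mr) {
 *         // mrs.offset_within_region is relative to mrs.mr, not sysmem
 *         memory_region_unref(mrs.mr);
 *     }
 */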
1674
1675 /**
1676 * memory_global_dirty_log_sync: synchronize the dirty log for all memory
1677 *
1678 * Synchronizes the dirty page log for all address spaces.
1679 */
1680 void memory_global_dirty_log_sync(void);
1681
1682 /**
1683 * memory_region_transaction_begin: Start a transaction.
1684 *
1685 * During a transaction, changes will be accumulated and made visible
1686 * only when the transaction ends (is committed).
1687 */
1688 void memory_region_transaction_begin(void);
1689
1690 /**
1691 * memory_region_transaction_commit: Commit a transaction and make changes
1692 * visible to the guest.
1693 */
1694 void memory_region_transaction_commit(void);
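
/*
 * Illustrative sketch: batching several topology updates so listeners and
 * the guest observe a single atomic change. The window regions are
 * hypothetical.
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(old_window, false);
 *     memory_region_set_address(new_window, 0xc0000000);
 *     memory_region_set_enabled(new_window, true);
 *     memory_region_transaction_commit();
 */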
1695
1696 /**
1697 * memory_listener_register: register callbacks to be called when memory
1698 * sections are mapped into or unmapped from an
1699 * address space
1700 *
1701 * @listener: an object containing the callbacks to be called
1702 * @filter: if non-%NULL, only regions in this address space will be observed
1703 */
1704 void memory_listener_register(MemoryListener *listener, AddressSpace *filter);
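
/*
 * Illustrative sketch: observing mappings in one address space. The
 * callback and listener below are invented for this example; callbacks
 * left unset are simply not invoked. address_space_memory is the global
 * system address space from "exec/address-spaces.h".
 *
 *     static void foo_region_add(MemoryListener *l, MemoryRegionSection *s)
 *     {
 *         // called for each section mapped into the observed space
 *     }
 *
 *     static MemoryListener foo_listener = {
 *         .region_add = foo_region_add,
 *     };
 *
 *     memory_listener_register(&foo_listener, &address_space_memory);
 */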
1705
1706 /**
1707 * memory_listener_unregister: undo the effect of memory_listener_register()
1708 *
1709 * @listener: an object containing the callbacks to be removed
1710 */
1711 void memory_listener_unregister(MemoryListener *listener);
1712
1713 /**
1714 * memory_global_dirty_log_start: begin dirty logging for all regions
1715 */
1716 void memory_global_dirty_log_start(void);
1717
1718 /**
1719 * memory_global_dirty_log_stop: end dirty logging for all regions
1720 */
1721 void memory_global_dirty_log_stop(void);
1722
1723 void mtree_info(bool flatview, bool dispatch_tree, bool owner);
1724
1725 /**
1726 * memory_region_dispatch_read: perform a read directly to the specified
1727 * MemoryRegion.
1728 *
1729 * @mr: #MemoryRegion to access
1730 * @addr: address within that region
1731 * @pval: pointer to a uint64_t into which the data is written
1732 * @size: size of the access in bytes
1733 * @attrs: memory transaction attributes to use for the access
1734 */
1735 MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
1736 hwaddr addr,
1737 uint64_t *pval,
1738 unsigned size,
1739 MemTxAttrs attrs);
1740 /**
1741 * memory_region_dispatch_write: perform a write directly to the specified
1742 * MemoryRegion.
1743 *
1744 * @mr: #MemoryRegion to access
1745 * @addr: address within that region
1746 * @data: data to write
1747 * @size: size of the access in bytes
1748 * @attrs: memory transaction attributes to use for the access
1749 */
1750 MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
1751 hwaddr addr,
1752 uint64_t data,
1753 unsigned size,
1754 MemTxAttrs attrs);
1755
1756 /**
1757 * address_space_init: initializes an address space
1758 *
1759 * @as: an uninitialized #AddressSpace
1760 * @root: a #MemoryRegion that routes addresses for the address space
1761 * @name: an address space name. The name is only used for debugging
1762 * output.
1763 */
1764 void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);
1765
1766 /**
1767 * address_space_destroy: destroy an address space
1768 *
1769 * Releases all resources associated with an address space. After an address space
1770 * is destroyed, its root memory region (given by address_space_init()) may be destroyed
1771 * as well.
1772 *
1773 * @as: address space to be destroyed
1774 */
1775 void address_space_destroy(AddressSpace *as);
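
/*
 * Illustrative sketch of the address space life cycle, using hypothetical
 * dma_as and root objects:
 *
 *     AddressSpace dma_as;
 *
 *     address_space_init(&dma_as, root, "foo-dma");
 *     // ... issue accesses through dma_as ...
 *     address_space_destroy(&dma_as);
 *     // root may only be finalized after this point
 */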
1776
1777 /**
1778 * address_space_rw: read from or write to an address space.
1779 *
1780 * Return a MemTxResult indicating whether the operation succeeded
1781 * or failed (eg unassigned memory, device rejected the transaction,
1782 * IOMMU fault).
1783 *
1784 * @as: #AddressSpace to be accessed
1785 * @addr: address within that address space
1786 * @attrs: memory transaction attributes
1787 * @buf: buffer with the data transferred
1788 * @len: the number of bytes to read or write
1789 * @is_write: indicates the transfer direction
1790 */
1791 MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
1792 MemTxAttrs attrs, uint8_t *buf,
1793 hwaddr len, bool is_write);
1794
1795 /**
1796 * address_space_write: write to address space.
1797 *
1798 * Return a MemTxResult indicating whether the operation succeeded
1799 * or failed (eg unassigned memory, device rejected the transaction,
1800 * IOMMU fault).
1801 *
1802 * @as: #AddressSpace to be accessed
1803 * @addr: address within that address space
1804 * @attrs: memory transaction attributes
1805 * @buf: buffer with the data transferred
1806 * @len: the number of bytes to write
1807 */
1808 MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
1809 MemTxAttrs attrs,
1810 const uint8_t *buf, hwaddr len);
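
/*
 * Illustrative sketch: a device-initiated write with its result checked.
 * The address space as, the address and the buffer are made up for this
 * example.
 *
 *     uint8_t buf[4] = { 0xde, 0xad, 0xbe, 0xef };
 *
 *     if (address_space_write(as, 0x1000, MEMTXATTRS_UNSPECIFIED,
 *                             buf, sizeof(buf)) != MEMTX_OK) {
 *         // unassigned memory, device error or IOMMU fault
 *     }
 */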
1811
1812 /**
1813 * address_space_write_rom: write to address space, including ROM.
1814 *
1815 * This function writes to the specified address space, but unlike
1816 * address_space_write() it will write data to both ROM and RAM. This is
1817 * used for non-guest writes like writes from the gdb debug stub or
1818 * initial loading of ROM contents.
1819 *
1820 * Note that portions of the write which attempt to write data to
1821 * a device will be silently ignored -- only real RAM and ROM will
1822 * be written to.
1823 *
1824 * Return a MemTxResult indicating whether the operation succeeded
1825 * or failed (eg unassigned memory, device rejected the transaction,
1826 * IOMMU fault).
1827 *
1828 * @as: #AddressSpace to be accessed
1829 * @addr: address within that address space
1830 * @attrs: memory transaction attributes
1831 * @buf: buffer with the data transferred
1832 * @len: the number of bytes to write
1833 */
1834 MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
1835 MemTxAttrs attrs,
1836 const uint8_t *buf, hwaddr len);
1837
1838 /* address_space_ld*: load from an address space
1839 * address_space_st*: store to an address space
1840 *
1841 * These functions perform a load or store of the byte, word,
1842 * longword or quad to the specified address within the AddressSpace.
1843 * The _le suffixed functions treat the data as little endian;
1844 * _be indicates big endian; no suffix indicates "same endianness
1845 * as guest CPU".
1846 *
1847 * The "guest CPU endianness" accessors are deprecated for use outside
1848 * target-* code; devices should be CPU-agnostic and use either the LE
1849 * or the BE accessors.
1850 *
1851 * @as: #AddressSpace to be accessed
1852 * @addr: address within that address space
1853 * @val: data value, for stores
1854 * @attrs: memory transaction attributes
1855 * @result: location to write the success/failure of the transaction;
1856 * if NULL, this information is discarded
1857 */
1858
1859 #define SUFFIX
1860 #define ARG1 as
1861 #define ARG1_DECL AddressSpace *as
1862 #include "exec/memory_ldst.inc.h"
1863
1864 #define SUFFIX
1865 #define ARG1 as
1866 #define ARG1_DECL AddressSpace *as
1867 #include "exec/memory_ldst_phys.inc.h"
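
/*
 * Illustrative sketch: a 32-bit little-endian load and store through an
 * address space, discarding the transaction result by passing NULL. The
 * addresses are made up for this example.
 *
 *     uint32_t v = address_space_ldl_le(as, 0x1000,
 *                                       MEMTXATTRS_UNSPECIFIED, NULL);
 *     address_space_stl_le(as, 0x1004, v + 1,
 *                          MEMTXATTRS_UNSPECIFIED, NULL);
 */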
1868
1869 struct MemoryRegionCache {
1870 void *ptr;
1871 hwaddr xlat;
1872 hwaddr len;
1873 FlatView *fv;
1874 MemoryRegionSection mrs;
1875 bool is_write;
1876 };
1877
1878 #define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .mrs.mr = NULL })
1879
1880
1881 /* address_space_ld*_cached: load from a cached #MemoryRegion
1882 * address_space_st*_cached: store into a cached #MemoryRegion
1883 *
1884 * These functions perform a load or store of the byte, word,
1885 * longword or quad to the specified address. The address is
1886 * a physical address in the AddressSpace, but it must lie within
1887 * a #MemoryRegion that was mapped with address_space_cache_init.
1888 *
1889 * The _le suffixed functions treat the data as little endian;
1890 * _be indicates big endian; no suffix indicates "same endianness
1891 * as guest CPU".
1892 *
1893 * The "guest CPU endianness" accessors are deprecated for use outside
1894 * target-* code; devices should be CPU-agnostic and use either the LE
1895 * or the BE accessors.
1896 *
1897 * @cache: previously initialized #MemoryRegionCache to be accessed
1898 * @addr: address within the address space
1899 * @val: data value, for stores
1900 * @attrs: memory transaction attributes
1901 * @result: location to write the success/failure of the transaction;
1902 * if NULL, this information is discarded
1903 */
1904
1905 #define SUFFIX _cached_slow
1906 #define ARG1 cache
1907 #define ARG1_DECL MemoryRegionCache *cache
1908 #include "exec/memory_ldst.inc.h"
1909
1910 /* Inline fast path for direct RAM access. */
1911 static inline uint8_t address_space_ldub_cached(MemoryRegionCache *cache,
1912 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
1913 {
1914 assert(addr < cache->len);
1915 if (likely(cache->ptr)) {
1916 return ldub_p(cache->ptr + addr);
1917 } else {
1918 return address_space_ldub_cached_slow(cache, addr, attrs, result);
1919 }
1920 }
1921
1922 static inline void address_space_stb_cached(MemoryRegionCache *cache,
1923 hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
1924 {
1925 assert(addr < cache->len);
1926 if (likely(cache->ptr)) {
1927 stb_p(cache->ptr + addr, val);
1928 } else {
1929 address_space_stb_cached_slow(cache, addr, val, attrs, result);
1930 }
1931 }
1932
1933 #define ENDIANNESS _le
1934 #include "exec/memory_ldst_cached.inc.h"
1935
1936 #define ENDIANNESS _be
1937 #include "exec/memory_ldst_cached.inc.h"
1938
1939 #define SUFFIX _cached
1940 #define ARG1 cache
1941 #define ARG1_DECL MemoryRegionCache *cache
1942 #include "exec/memory_ldst_phys.inc.h"
1943
1944 /* address_space_cache_init: prepare for repeated access to a physical
1945 * memory region
1946 *
1947 * @cache: #MemoryRegionCache to be filled
1948 * @as: #AddressSpace to be accessed
1949 * @addr: address within that address space
1950 * @len: length of buffer
1951 * @is_write: indicates the transfer direction
1952 *
1953 * Will only work with RAM, and may map a subset of the requested range by
1954 * returning a value that is less than @len. On failure, returns a negative
1955 * errno value.
1956 *
1957 * Because it only works with RAM, this function can be used for
1958 * read-modify-write operations. In this case, is_write should be %true.
1959 *
1960 * Note that addresses passed to the address_space_*_cached functions
1961 * are relative to @addr.
1962 */
1963 int64_t address_space_cache_init(MemoryRegionCache *cache,
1964 AddressSpace *as,
1965 hwaddr addr,
1966 hwaddr len,
1967 bool is_write);
1968
1969 /**
1970 * address_space_cache_invalidate: complete a write to a #MemoryRegionCache
1971 *
1972 * @cache: The #MemoryRegionCache to operate on.
1973 * @addr: The first physical address that was written, relative to the
1974 * address that was passed to @address_space_cache_init.
1975 * @access_len: The number of bytes that were written starting at @addr.
1976 */
1977 void address_space_cache_invalidate(MemoryRegionCache *cache,
1978 hwaddr addr,
1979 hwaddr access_len);
1980
1981 /**
1982 * address_space_cache_destroy: free a #MemoryRegionCache
1983 *
1984 * @cache: The #MemoryRegionCache whose memory should be released.
1985 */
1986 void address_space_cache_destroy(MemoryRegionCache *cache);
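
/*
 * Illustrative sketch of the cache life cycle for a read-modify-write of a
 * hypothetical 16-byte descriptor located at guest physical address
 * desc_pa:
 *
 *     MemoryRegionCache cache = MEMORY_REGION_CACHE_INVALID;
 *
 *     if (address_space_cache_init(&cache, as, desc_pa, 16, true) == 16) {
 *         uint32_t flags = address_space_ldl_le_cached(&cache, 0,
 *                                          MEMTXATTRS_UNSPECIFIED, NULL);
 *         address_space_stl_le_cached(&cache, 0, flags | 1,
 *                                     MEMTXATTRS_UNSPECIFIED, NULL);
 *         address_space_cache_invalidate(&cache, 0, 4);
 *     }
 *     address_space_cache_destroy(&cache);
 */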
1987
1988 /* address_space_get_iotlb_entry: translate an address into an IOTLB
1989 * entry. Should be called from an RCU critical section.
1990 */
1991 IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
1992 bool is_write, MemTxAttrs attrs);
1993
1994 /* address_space_translate: translate an address range in an address space
1995 * into a MemoryRegion and an address range within that region. Should be
1996 * called from an RCU critical section, to prevent the last reference to
1997 * the returned region from disappearing after address_space_translate returns.
1998 *
1999 * @fv: #FlatView to be accessed
2000 * @addr: address within that address space
2001 * @xlat: pointer to address within the returned memory region section's
2002 * #MemoryRegion.
2003 * @len: pointer to length
2004 * @is_write: indicates the transfer direction
2005 * @attrs: memory attributes
2006 */
2007 MemoryRegion *flatview_translate(FlatView *fv,
2008 hwaddr addr, hwaddr *xlat,
2009 hwaddr *len, bool is_write,
2010 MemTxAttrs attrs);
2011
2012 static inline MemoryRegion *address_space_translate(AddressSpace *as,
2013 hwaddr addr, hwaddr *xlat,
2014 hwaddr *len, bool is_write,
2015 MemTxAttrs attrs)
2016 {
2017 return flatview_translate(address_space_to_flatview(as),
2018 addr, xlat, len, is_write, attrs);
2019 }
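
/*
 * Illustrative sketch: translating under RCU so that the returned region
 * stays valid while it is inspected. The address is made up for this
 * example.
 *
 *     hwaddr xlat, len = 4;
 *     MemoryRegion *mr;
 *
 *     rcu_read_lock();
 *     mr = address_space_translate(as, 0x1000, &xlat, &len,
 *                                  false, MEMTXATTRS_UNSPECIFIED);
 *     if (memory_region_is_ram(mr)) {
 *         // xlat is now an offset into mr, len the usable length
 *     }
 *     rcu_read_unlock();
 */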
2020
2021 /* address_space_access_valid: check for validity of accessing an address
2022 * space range
2023 *
2024 * Check whether memory is assigned to the given address space range, and
2025 * access is permitted by any IOMMU regions that are active for the address
2026 * space.
2027 *
2028 * For now, @addr and @len should be aligned to a page size. This limitation
2029 * will be lifted in the future.
2030 *
2031 * @as: #AddressSpace to be accessed
2032 * @addr: address within that address space
2033 * @len: length of the area to be checked
2034 * @is_write: indicates the transfer direction
2035 * @attrs: memory attributes
2036 */
2037 bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len,
2038 bool is_write, MemTxAttrs attrs);
2039
2040 /* address_space_map: map a physical memory region into a host virtual address
2041 *
2042 * May map a subset of the requested range, given by and returned in @plen.
2043 * May return %NULL if resources needed to perform the mapping are exhausted.
2044 * Use only for reads OR writes - not for read-modify-write operations.
2045 * Use cpu_register_map_client() to know when retrying the map operation is
2046 * likely to succeed.
2047 *
2048 * @as: #AddressSpace to be accessed
2049 * @addr: address within that address space
2050 * @plen: pointer to length of buffer; updated on return
2051 * @is_write: indicates the transfer direction
2052 * @attrs: memory attributes
2053 */
2054 void *address_space_map(AddressSpace *as, hwaddr addr,
2055 hwaddr *plen, bool is_write, MemTxAttrs attrs);
2056
2057 /* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
2058 *
2059 * Will also mark the memory as dirty if @is_write == %true. @access_len gives
2060 * the amount of memory that was actually read or written by the caller.
2061 *
2062 * @as: #AddressSpace used
2063 * @buffer: host pointer as returned by address_space_map()
2064 * @len: buffer length as returned by address_space_map()
2065 * @access_len: amount of data actually transferred
2066 * @is_write: indicates the transfer direction
2067 */
2068 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2069 int is_write, hwaddr access_len);
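
/*
 * Illustrative sketch: zero-copy access to a guest buffer, with the retry
 * path elided. The names as, gpa and size are made up for this example.
 *
 *     hwaddr plen = size;
 *     void *p = address_space_map(as, gpa, &plen, true,
 *                                 MEMTXATTRS_UNSPECIFIED);
 *
 *     if (p) {
 *         memset(p, 0, plen);                 // write into guest memory
 *         address_space_unmap(as, p, plen, true, plen);
 *     }
 */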
2070
2071
2072 /* Internal functions, part of the implementation of address_space_read. */
2073 MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2074 MemTxAttrs attrs, uint8_t *buf, hwaddr len);
2075 MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
2076 MemTxAttrs attrs, uint8_t *buf,
2077 hwaddr len, hwaddr addr1, hwaddr l,
2078 MemoryRegion *mr);
2079 void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);
2080
2081 /* Internal functions, part of the implementation of address_space_read_cached
2082 * and address_space_write_cached. */
2083 void address_space_read_cached_slow(MemoryRegionCache *cache,
2084 hwaddr addr, void *buf, hwaddr len);
2085 void address_space_write_cached_slow(MemoryRegionCache *cache,
2086 hwaddr addr, const void *buf, hwaddr len);
2087
2088 static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
2089 {
2090 if (is_write) {
2091 return memory_region_is_ram(mr) &&
2092 !mr->readonly && !memory_region_is_ram_device(mr);
2093 } else {
2094 return (memory_region_is_ram(mr) && !memory_region_is_ram_device(mr)) ||
2095 memory_region_is_romd(mr);
2096 }
2097 }
2098
2099 /**
2100 * address_space_read: read from an address space.
2101 *
2102 * Return a MemTxResult indicating whether the operation succeeded
2103 * or failed (eg unassigned memory, device rejected the transaction,
2104 * IOMMU fault). Called within RCU critical section.
2105 *
2106 * @as: #AddressSpace to be accessed
2107 * @addr: address within that address space
2108 * @attrs: memory transaction attributes
2109 * @buf: buffer with the data transferred
2110 */
2111 static inline __attribute__((__always_inline__))
2112 MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
2113 MemTxAttrs attrs, uint8_t *buf,
2114 hwaddr len)
2115 {
2116 MemTxResult result = MEMTX_OK;
2117 hwaddr l, addr1;
2118 void *ptr;
2119 MemoryRegion *mr;
2120 FlatView *fv;
2121
2122 if (__builtin_constant_p(len)) {
2123 if (len) {
2124 rcu_read_lock();
2125 fv = address_space_to_flatview(as);
2126 l = len;
2127 mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
2128 if (len == l && memory_access_is_direct(mr, false)) {
2129 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
2130 memcpy(buf, ptr, len);
2131 } else {
2132 result = flatview_read_continue(fv, addr, attrs, buf, len,
2133 addr1, l, mr);
2134 }
2135 rcu_read_unlock();
2136 }
2137 } else {
2138 result = address_space_read_full(as, addr, attrs, buf, len);
2139 }
2140 return result;
2141 }
2142
2143 /**
2144 * address_space_read_cached: read from a cached RAM region
2145 *
2146 * @cache: Cached region to be addressed
2147 * @addr: address relative to the base of the RAM region
2148 * @buf: buffer with the data transferred
2149 * @len: length of the data transferred
2150 */
2151 static inline void
2152 address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
2153 void *buf, hwaddr len)
2154 {
2155 assert(addr < cache->len && len <= cache->len - addr);
2156 if (likely(cache->ptr)) {
2157 memcpy(buf, cache->ptr + addr, len);
2158 } else {
2159 address_space_read_cached_slow(cache, addr, buf, len);
2160 }
2161 }
2162
2163 /**
2164 * address_space_write_cached: write to a cached RAM region
2165 *
2166 * @cache: Cached region to be addressed
2167 * @addr: address relative to the base of the RAM region
2168 * @buf: buffer with the data transferred
2169 * @len: length of the data transferred
2170 */
2171 static inline void
2172 address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
2173 void *buf, hwaddr len)
2174 {
2175 assert(addr < cache->len && len <= cache->len - addr);
2176 if (likely(cache->ptr)) {
2177 memcpy(cache->ptr + addr, buf, len);
2178 } else {
2179 address_space_write_cached_slow(cache, addr, buf, len);
2180 }
2181 }
2182
2183 #endif
2184
2185 #endif