memory: Introduce memory listener hook log_clear()
1 /*
2 * Physical memory management API
3 *
4 * Copyright 2011 Red Hat, Inc. and/or its affiliates
5 *
6 * Authors:
7 * Avi Kivity <avi@redhat.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
11 *
12 */
13
14 #ifndef MEMORY_H
15 #define MEMORY_H
16
17 #ifndef CONFIG_USER_ONLY
18
19 #include "exec/cpu-common.h"
20 #include "exec/hwaddr.h"
21 #include "exec/memattrs.h"
22 #include "exec/ramlist.h"
23 #include "qemu/queue.h"
24 #include "qemu/int128.h"
25 #include "qemu/notify.h"
26 #include "qom/object.h"
27 #include "qemu/rcu.h"
28 #include "hw/qdev-core.h"
29
30 #define RAM_ADDR_INVALID (~(ram_addr_t)0)
31
32 #define MAX_PHYS_ADDR_SPACE_BITS 62
33 #define MAX_PHYS_ADDR (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)
34
35 #define TYPE_MEMORY_REGION "qemu:memory-region"
36 #define MEMORY_REGION(obj) \
37 OBJECT_CHECK(MemoryRegion, (obj), TYPE_MEMORY_REGION)
38
39 #define TYPE_IOMMU_MEMORY_REGION "qemu:iommu-memory-region"
40 #define IOMMU_MEMORY_REGION(obj) \
41 OBJECT_CHECK(IOMMUMemoryRegion, (obj), TYPE_IOMMU_MEMORY_REGION)
42 #define IOMMU_MEMORY_REGION_CLASS(klass) \
43 OBJECT_CLASS_CHECK(IOMMUMemoryRegionClass, (klass), \
44 TYPE_IOMMU_MEMORY_REGION)
45 #define IOMMU_MEMORY_REGION_GET_CLASS(obj) \
46 OBJECT_GET_CLASS(IOMMUMemoryRegionClass, (obj), \
47 TYPE_IOMMU_MEMORY_REGION)
48
49 extern bool global_dirty_log;
50
51 typedef struct MemoryRegionOps MemoryRegionOps;
52 typedef struct MemoryRegionMmio MemoryRegionMmio;
53
54 struct MemoryRegionMmio {
55 CPUReadMemoryFunc *read[3];
56 CPUWriteMemoryFunc *write[3];
57 };
58
59 typedef struct IOMMUTLBEntry IOMMUTLBEntry;
60
61 /* See address_space_translate: bit 0 is read, bit 1 is write. */
62 typedef enum {
63 IOMMU_NONE = 0,
64 IOMMU_RO = 1,
65 IOMMU_WO = 2,
66 IOMMU_RW = 3,
67 } IOMMUAccessFlags;
68
69 #define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))
70
71 struct IOMMUTLBEntry {
72 AddressSpace *target_as;
73 hwaddr iova;
74 hwaddr translated_addr;
75 hwaddr addr_mask; /* 0xfff = 4k translation */
76 IOMMUAccessFlags perm;
77 };
78
79 /*
80 * Bitmap for different IOMMUNotifier capabilities. Each notifier can
81 * register with one or multiple IOMMU Notifier capability bit(s).
82 */
83 typedef enum {
84 IOMMU_NOTIFIER_NONE = 0,
85 /* Notify cache invalidations */
86 IOMMU_NOTIFIER_UNMAP = 0x1,
87 /* Notify entry changes (newly created entries) */
88 IOMMU_NOTIFIER_MAP = 0x2,
89 } IOMMUNotifierFlag;
90
91 #define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)
92
93 struct IOMMUNotifier;
94 typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
95 IOMMUTLBEntry *data);
96
97 struct IOMMUNotifier {
98 IOMMUNotify notify;
99 IOMMUNotifierFlag notifier_flags;
100 /* Notify for address space range start <= addr <= end */
101 hwaddr start;
102 hwaddr end;
103 int iommu_idx;
104 QLIST_ENTRY(IOMMUNotifier) node;
105 };
106 typedef struct IOMMUNotifier IOMMUNotifier;
107
108 /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
109 #define RAM_PREALLOC (1 << 0)
110
111 /* RAM is mmap-ed with MAP_SHARED */
112 #define RAM_SHARED (1 << 1)
113
114 /* Only a portion of RAM (used_length) is actually used, and migrated.
115 * This used_length size can change across reboots.
116 */
117 #define RAM_RESIZEABLE (1 << 2)
118
119 /* UFFDIO_ZEROPAGE is available on this RAMBlock to atomically
120 * zero the page and wake waiting processes.
121 * (Set during postcopy)
122 */
123 #define RAM_UF_ZEROPAGE (1 << 3)
124
125 /* RAM can be migrated */
126 #define RAM_MIGRATABLE (1 << 4)
127
128 /* RAM is a persistent kind of memory */
129 #define RAM_PMEM (1 << 5)
130
131 static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
132 IOMMUNotifierFlag flags,
133 hwaddr start, hwaddr end,
134 int iommu_idx)
135 {
136 n->notify = fn;
137 n->notifier_flags = flags;
138 n->start = start;
139 n->end = end;
140 n->iommu_idx = iommu_idx;
141 }
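
/*
 * Editor's illustrative sketch (not part of the original header): a caller
 * that wants to mirror IOMMU mappings could initialize and register a
 * notifier roughly as follows. The handler name and the iommu_mr pointer
 * are hypothetical.
 *
 *     static void my_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *entry)
 *     {
 *         // With IOMMU_NOTIFIER_UNMAP, entry->perm is IOMMU_NONE; drop any
 *         // cached translation covering
 *         // [entry->iova, entry->iova + entry->addr_mask].
 *     }
 *
 *     IOMMUNotifier n;
 *
 *     iommu_notifier_init(&n, my_unmap_notify, IOMMU_NOTIFIER_UNMAP,
 *                         0, HWADDR_MAX, 0);   // iommu_idx 0
 *     memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr), &n);
 */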
142
143 /*
144 * Memory region callbacks
145 */
146 struct MemoryRegionOps {
147 /* Read from the memory region. @addr is relative to @mr; @size is
148 * in bytes. */
149 uint64_t (*read)(void *opaque,
150 hwaddr addr,
151 unsigned size);
152 /* Write to the memory region. @addr is relative to @mr; @size is
153 * in bytes. */
154 void (*write)(void *opaque,
155 hwaddr addr,
156 uint64_t data,
157 unsigned size);
158
159 MemTxResult (*read_with_attrs)(void *opaque,
160 hwaddr addr,
161 uint64_t *data,
162 unsigned size,
163 MemTxAttrs attrs);
164 MemTxResult (*write_with_attrs)(void *opaque,
165 hwaddr addr,
166 uint64_t data,
167 unsigned size,
168 MemTxAttrs attrs);
169
170 enum device_endian endianness;
171 /* Guest-visible constraints: */
172 struct {
173 /* If nonzero, specify bounds on access sizes beyond which a machine
174 * check is thrown.
175 */
176 unsigned min_access_size;
177 unsigned max_access_size;
178 /* If true, unaligned accesses are supported. Otherwise unaligned
179 * accesses throw machine checks.
180 */
181 bool unaligned;
182 /*
183 * If present, and returns #false, the transaction is not accepted
184 * by the device (and results in machine dependent behaviour such
185 * as a machine check exception).
186 */
187 bool (*accepts)(void *opaque, hwaddr addr,
188 unsigned size, bool is_write,
189 MemTxAttrs attrs);
190 } valid;
191 /* Internal implementation constraints: */
192 struct {
193 /* If nonzero, specifies the minimum size implemented. Smaller sizes
194 * will be rounded upwards and a partial result will be returned.
195 */
196 unsigned min_access_size;
197 /* If nonzero, specifies the maximum size implemented. Larger sizes
198 * will be done as a series of accesses with smaller sizes.
199 */
200 unsigned max_access_size;
201 /* If true, unaligned accesses are supported. Otherwise all accesses
202 * are converted to (possibly multiple) naturally aligned accesses.
203 */
204 bool unaligned;
205 } impl;
206 };
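
/*
 * Editor's illustrative sketch (not part of the original header): a minimal
 * MemoryRegionOps for a hypothetical device exposing one 32-bit register.
 * MyDevState and the mydev_* names are assumptions made for the example.
 *
 *     static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *
 *         return s->reg;              // same value for every offset, for brevity
 *     }
 *
 *     static void mydev_write(void *opaque, hwaddr addr, uint64_t data,
 *                             unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *
 *         s->reg = data;
 *     }
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *         .valid.min_access_size = 4,
 *         .valid.max_access_size = 4,
 *     };
 */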
207
208 enum IOMMUMemoryRegionAttr {
209 IOMMU_ATTR_SPAPR_TCE_FD
210 };
211
212 /**
213 * IOMMUMemoryRegionClass:
214 *
215 * All IOMMU implementations need to subclass TYPE_IOMMU_MEMORY_REGION
216 * and provide an implementation of at least the @translate method here
217 * to handle requests to the memory region. Other methods are optional.
218 *
219 * The IOMMU implementation must use the IOMMU notifier infrastructure
220 * to report whenever mappings are changed, by calling
221 * memory_region_notify_iommu() (or, if necessary, by calling
222 * memory_region_notify_one() for each registered notifier).
223 *
224 * Conceptually an IOMMU provides a mapping from input address
225 * to an output TLB entry. If the IOMMU is aware of memory transaction
226 * attributes and the output TLB entry depends on the transaction
227 * attributes, we represent this using IOMMU indexes. Each index
228 * selects a particular translation table that the IOMMU has:
229 * @attrs_to_index returns the IOMMU index for a set of transaction attributes
230 * @translate takes an input address and an IOMMU index
231 * and the mapping returned can only depend on the input address and the
232 * IOMMU index.
233 *
234 * Most IOMMUs don't care about the transaction attributes and support
235 * only a single IOMMU index. A more complex IOMMU might have one index
236 * for secure transactions and one for non-secure transactions.
237 */
238 typedef struct IOMMUMemoryRegionClass {
239 /* private */
240 struct DeviceClass parent_class;
241
242 /*
243 * Return a TLB entry that contains a given address.
244 *
245 * The IOMMUAccessFlags indicated via @flag are optional and may
246 * be specified as IOMMU_NONE to indicate that the caller needs
247 * the full translation information for both reads and writes. If
248 * the access flags are specified then the IOMMU implementation
249 * may use this as an optimization, to stop doing a page table
250 * walk as soon as it knows that the requested permissions are not
251 * allowed. If IOMMU_NONE is passed then the IOMMU must do the
252 * full page table walk and report the permissions in the returned
253 * IOMMUTLBEntry. (Note that this implies that an IOMMU may not
254 * return different mappings for reads and writes.)
255 *
256 * The returned information remains valid while the caller is
257 * holding the big QEMU lock or is inside an RCU critical section;
258 * if the caller wishes to cache the mapping beyond that it must
259 * register an IOMMU notifier so it can invalidate its cached
260 * information when the IOMMU mapping changes.
261 *
262 * @iommu: the IOMMUMemoryRegion
263  * @addr: address to be translated within the memory region
264 * @flag: requested access permissions
265 * @iommu_idx: IOMMU index for the translation
266 */
267 IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
268 IOMMUAccessFlags flag, int iommu_idx);
269 /* Returns minimum supported page size in bytes.
270 * If this method is not provided then the minimum is assumed to
271 * be TARGET_PAGE_SIZE.
272 *
273 * @iommu: the IOMMUMemoryRegion
274 */
275 uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);
276 /* Called when IOMMU Notifier flag changes (ie when the set of
277 * events which IOMMU users are requesting notification for changes).
278 * Optional method -- need not be provided if the IOMMU does not
279 * need to know exactly which events must be notified.
280 *
281 * @iommu: the IOMMUMemoryRegion
282 * @old_flags: events which previously needed to be notified
283 * @new_flags: events which now need to be notified
284 */
285 void (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
286 IOMMUNotifierFlag old_flags,
287 IOMMUNotifierFlag new_flags);
288 /* Called to handle memory_region_iommu_replay().
289 *
290 * The default implementation of memory_region_iommu_replay() is to
291 * call the IOMMU translate method for every page in the address space
292 * with flag == IOMMU_NONE and then call the notifier if translate
293 * returns a valid mapping. If this method is implemented then it
294 * overrides the default behaviour, and must provide the full semantics
295 * of memory_region_iommu_replay(), by calling @notifier for every
296 * translation present in the IOMMU.
297 *
298 * Optional method -- an IOMMU only needs to provide this method
299 * if the default is inefficient or produces undesirable side effects.
300 *
301 * Note: this is not related to record-and-replay functionality.
302 */
303 void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);
304
305 /* Get IOMMU misc attributes. This is an optional method that
306 * can be used to allow users of the IOMMU to get implementation-specific
307 * information. The IOMMU implements this method to handle calls
308 * by IOMMU users to memory_region_iommu_get_attr() by filling in
309 * the arbitrary data pointer for any IOMMUMemoryRegionAttr values that
310 * the IOMMU supports. If the method is unimplemented then
311 * memory_region_iommu_get_attr() will always return -EINVAL.
312 *
313 * @iommu: the IOMMUMemoryRegion
314 * @attr: attribute being queried
315 * @data: memory to fill in with the attribute data
316 *
317 * Returns 0 on success, or a negative errno; in particular
318 * returns -EINVAL for unrecognized or unimplemented attribute types.
319 */
320 int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr,
321 void *data);
322
323 /* Return the IOMMU index to use for a given set of transaction attributes.
324 *
325 * Optional method: if an IOMMU only supports a single IOMMU index then
326 * the default implementation of memory_region_iommu_attrs_to_index()
327 * will return 0.
328 *
329 * The indexes supported by an IOMMU must be contiguous, starting at 0.
330 *
331 * @iommu: the IOMMUMemoryRegion
332 * @attrs: memory transaction attributes
333 */
334 int (*attrs_to_index)(IOMMUMemoryRegion *iommu, MemTxAttrs attrs);
335
336 /* Return the number of IOMMU indexes this IOMMU supports.
337 *
338 * Optional method: if this method is not provided, then
339 * memory_region_iommu_num_indexes() will return 1, indicating that
340 * only a single IOMMU index is supported.
341 *
342 * @iommu: the IOMMUMemoryRegion
343 */
344 int (*num_indexes)(IOMMUMemoryRegion *iommu);
345 } IOMMUMemoryRegionClass;
346
347 typedef struct CoalescedMemoryRange CoalescedMemoryRange;
348 typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;
349
350 struct MemoryRegion {
351 Object parent_obj;
352
353 /* All fields are private - violators will be prosecuted */
354
355 /* The following fields should fit in a cache line */
356 bool romd_mode;
357 bool ram;
358 bool subpage;
359 bool readonly; /* For RAM regions */
360 bool nonvolatile;
361 bool rom_device;
362 bool flush_coalesced_mmio;
363 bool global_locking;
364 uint8_t dirty_log_mask;
365 bool is_iommu;
366 RAMBlock *ram_block;
367 Object *owner;
368
369 const MemoryRegionOps *ops;
370 void *opaque;
371 MemoryRegion *container;
372 Int128 size;
373 hwaddr addr;
374 void (*destructor)(MemoryRegion *mr);
375 uint64_t align;
376 bool terminates;
377 bool ram_device;
378 bool enabled;
379 bool warning_printed; /* For reservations */
380 uint8_t vga_logging_count;
381 MemoryRegion *alias;
382 hwaddr alias_offset;
383 int32_t priority;
384 QTAILQ_HEAD(, MemoryRegion) subregions;
385 QTAILQ_ENTRY(MemoryRegion) subregions_link;
386 QTAILQ_HEAD(, CoalescedMemoryRange) coalesced;
387 const char *name;
388 unsigned ioeventfd_nb;
389 MemoryRegionIoeventfd *ioeventfds;
390 };
391
392 struct IOMMUMemoryRegion {
393 MemoryRegion parent_obj;
394
395 QLIST_HEAD(, IOMMUNotifier) iommu_notify;
396 IOMMUNotifierFlag iommu_notify_flags;
397 };
398
399 #define IOMMU_NOTIFIER_FOREACH(n, mr) \
400 QLIST_FOREACH((n), &(mr)->iommu_notify, node)
401
402 /**
403 * MemoryListener: callbacks structure for updates to the physical memory map
404 *
405 * Allows a component to adjust to changes in the guest-visible memory map.
406 * Use with memory_listener_register() and memory_listener_unregister().
407 */
408 struct MemoryListener {
409 void (*begin)(MemoryListener *listener);
410 void (*commit)(MemoryListener *listener);
411 void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
412 void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
413 void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
414 void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
415 int old, int new);
416 void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
417 int old, int new);
418 void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
419 void (*log_clear)(MemoryListener *listener, MemoryRegionSection *section);
420 void (*log_global_start)(MemoryListener *listener);
421 void (*log_global_stop)(MemoryListener *listener);
422 void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
423 bool match_data, uint64_t data, EventNotifier *e);
424 void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
425 bool match_data, uint64_t data, EventNotifier *e);
426 void (*coalesced_io_add)(MemoryListener *listener, MemoryRegionSection *section,
427 hwaddr addr, hwaddr len);
428 void (*coalesced_io_del)(MemoryListener *listener, MemoryRegionSection *section,
429 hwaddr addr, hwaddr len);
430 /* Lower = earlier (during add), later (during del) */
431 unsigned priority;
432 AddressSpace *address_space;
433 QTAILQ_ENTRY(MemoryListener) link;
434 QTAILQ_ENTRY(MemoryListener) link_as;
435 };
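
/*
 * Editor's illustrative sketch (not part of the original header): a minimal
 * listener that is told about sections added to the system memory address
 * space. my_region_add and my_listener are hypothetical names; registration
 * would typically happen from device or accelerator setup code.
 *
 *     static void my_region_add(MemoryListener *listener,
 *                               MemoryRegionSection *section)
 *     {
 *         // Invoked between begin() and commit() for each section that is
 *         // new in the flattened view of the address space.
 *     }
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *         .priority = 10,
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 */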
436
437 /**
438 * AddressSpace: describes a mapping of addresses to #MemoryRegion objects
439 */
440 struct AddressSpace {
441 /* All fields are private. */
442 struct rcu_head rcu;
443 char *name;
444 MemoryRegion *root;
445
446 /* Accessed via RCU. */
447 struct FlatView *current_map;
448
449 int ioeventfd_nb;
450 struct MemoryRegionIoeventfd *ioeventfds;
451 QTAILQ_HEAD(, MemoryListener) listeners;
452 QTAILQ_ENTRY(AddressSpace) address_spaces_link;
453 };
454
455 typedef struct AddressSpaceDispatch AddressSpaceDispatch;
456 typedef struct FlatRange FlatRange;
457
458 /* Flattened global view of current active memory hierarchy. Kept in sorted
459 * order.
460 */
461 struct FlatView {
462 struct rcu_head rcu;
463 unsigned ref;
464 FlatRange *ranges;
465 unsigned nr;
466 unsigned nr_allocated;
467 struct AddressSpaceDispatch *dispatch;
468 MemoryRegion *root;
469 };
470
471 static inline FlatView *address_space_to_flatview(AddressSpace *as)
472 {
473 return atomic_rcu_read(&as->current_map);
474 }
475
476
477 /**
478 * MemoryRegionSection: describes a fragment of a #MemoryRegion
479 *
480 * @mr: the region, or %NULL if empty
481 * @fv: the flat view of the address space the region is mapped in
482 * @offset_within_region: the beginning of the section, relative to @mr's start
483 * @size: the size of the section; will not exceed @mr's boundaries
484 * @offset_within_address_space: the address of the first byte of the section
485 * relative to the region's address space
486 * @readonly: writes to this section are ignored
487 * @nonvolatile: this section is non-volatile
488 */
489 struct MemoryRegionSection {
490 MemoryRegion *mr;
491 FlatView *fv;
492 hwaddr offset_within_region;
493 Int128 size;
494 hwaddr offset_within_address_space;
495 bool readonly;
496 bool nonvolatile;
497 };
498
499 /**
500 * memory_region_init: Initialize a memory region
501 *
502 * The region typically acts as a container for other memory regions. Use
503 * memory_region_add_subregion() to add subregions.
504 *
505 * @mr: the #MemoryRegion to be initialized
506 * @owner: the object that tracks the region's reference count
507 * @name: used for debugging; not visible to the user or ABI
508 * @size: size of the region; any subregions beyond this size will be clipped
509 */
510 void memory_region_init(MemoryRegion *mr,
511 struct Object *owner,
512 const char *name,
513 uint64_t size);
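
/*
 * Editor's illustrative sketch (not part of the original header): a container
 * region grouping two already-initialized children. The names (obj,
 * container, mmio, ram) are hypothetical.
 *
 *     MemoryRegion container;
 *
 *     memory_region_init(&container, obj, "mydev-container", 0x10000);
 *     memory_region_add_subregion(&container, 0x0000, &ram);
 *     memory_region_add_subregion(&container, 0x8000, &mmio);
 */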
514
515 /**
516 * memory_region_ref: Add 1 to a memory region's reference count
517 *
518 * Whenever memory regions are accessed outside the BQL, they need to be
519 * preserved against hot-unplug. MemoryRegions actually do not have their
520 * own reference count; they piggyback on a QOM object, their "owner".
521 * This function adds a reference to the owner.
522 *
523 * All MemoryRegions must have an owner if they can disappear, even if the
524 * device they belong to operates exclusively under the BQL. This is because
525 * the region could be returned at any time by memory_region_find, and this
526 * is usually under guest control.
527 *
528 * @mr: the #MemoryRegion
529 */
530 void memory_region_ref(MemoryRegion *mr);
531
532 /**
533  * memory_region_unref: Remove 1 from a memory region's reference count
534 *
535 * Whenever memory regions are accessed outside the BQL, they need to be
536 * preserved against hot-unplug. MemoryRegions actually do not have their
537 * own reference count; they piggyback on a QOM object, their "owner".
538 * This function removes a reference to the owner and possibly destroys it.
539 *
540 * @mr: the #MemoryRegion
541 */
542 void memory_region_unref(MemoryRegion *mr);
543
544 /**
545 * memory_region_init_io: Initialize an I/O memory region.
546 *
547 * Accesses into the region will cause the callbacks in @ops to be called.
548  * If @size is nonzero, subregions will be clipped to @size.
549 *
550 * @mr: the #MemoryRegion to be initialized.
551 * @owner: the object that tracks the region's reference count
552 * @ops: a structure containing read and write callbacks to be used when
553 * I/O is performed on the region.
554 * @opaque: passed to the read and write callbacks of the @ops structure.
555 * @name: used for debugging; not visible to the user or ABI
556 * @size: size of the region.
557 */
558 void memory_region_init_io(MemoryRegion *mr,
559 struct Object *owner,
560 const MemoryRegionOps *ops,
561 void *opaque,
562 const char *name,
563 uint64_t size);
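
/*
 * Editor's illustrative sketch (not part of the original header): registering
 * MMIO callbacks for a device, reusing the hypothetical mydev_ops from the
 * MemoryRegionOps sketch above; s is assumed to be a sysbus device state
 * with a MemoryRegion iomem field.
 *
 *     memory_region_init_io(&s->iomem, OBJECT(s), &mydev_ops, s,
 *                           "mydev-mmio", 0x1000);
 *     sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem);
 */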
564
565 /**
566 * memory_region_init_ram_nomigrate: Initialize RAM memory region. Accesses
567 * into the region will modify memory
568 * directly.
569 *
570 * @mr: the #MemoryRegion to be initialized.
571 * @owner: the object that tracks the region's reference count
572  * @name: Region name, becomes part of RAMBlock name used in migration stream;
573 * must be unique within any device
574 * @size: size of the region.
575 * @errp: pointer to Error*, to store an error if it happens.
576 *
577 * Note that this function does not do anything to cause the data in the
578 * RAM memory region to be migrated; that is the responsibility of the caller.
579 */
580 void memory_region_init_ram_nomigrate(MemoryRegion *mr,
581 struct Object *owner,
582 const char *name,
583 uint64_t size,
584 Error **errp);
585
586 /**
587 * memory_region_init_ram_shared_nomigrate: Initialize RAM memory region.
588 * Accesses into the region will
589 * modify memory directly.
590 *
591 * @mr: the #MemoryRegion to be initialized.
592 * @owner: the object that tracks the region's reference count
593  * @name: Region name, becomes part of RAMBlock name used in migration stream;
594 * must be unique within any device
595 * @size: size of the region.
596 * @share: allow remapping RAM to different addresses
597 * @errp: pointer to Error*, to store an error if it happens.
598 *
599 * Note that this function is similar to memory_region_init_ram_nomigrate.
600  * The only difference is that part of the RAM region can be remapped.
601 */
602 void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr,
603 struct Object *owner,
604 const char *name,
605 uint64_t size,
606 bool share,
607 Error **errp);
608
609 /**
610 * memory_region_init_resizeable_ram: Initialize memory region with resizeable
611 * RAM. Accesses into the region will
612 * modify memory directly. Only an initial
613 * portion of this RAM is actually used.
614 * The used size can change across reboots.
615 *
616 * @mr: the #MemoryRegion to be initialized.
617 * @owner: the object that tracks the region's reference count
618  * @name: Region name, becomes part of RAMBlock name used in migration stream;
619 * must be unique within any device
620 * @size: used size of the region.
621 * @max_size: max size of the region.
622 * @resized: callback to notify owner about used size change.
623 * @errp: pointer to Error*, to store an error if it happens.
624 *
625 * Note that this function does not do anything to cause the data in the
626 * RAM memory region to be migrated; that is the responsibility of the caller.
627 */
628 void memory_region_init_resizeable_ram(MemoryRegion *mr,
629 struct Object *owner,
630 const char *name,
631 uint64_t size,
632 uint64_t max_size,
633 void (*resized)(const char*,
634 uint64_t length,
635 void *host),
636 Error **errp);
637 #ifdef CONFIG_POSIX
638
639 /**
640 * memory_region_init_ram_from_file: Initialize RAM memory region with a
641 * mmap-ed backend.
642 *
643 * @mr: the #MemoryRegion to be initialized.
644 * @owner: the object that tracks the region's reference count
645  * @name: Region name, becomes part of RAMBlock name used in migration stream;
646 * must be unique within any device
647 * @size: size of the region.
648 * @align: alignment of the region base address; if 0, the default alignment
649 * (getpagesize()) will be used.
650 * @ram_flags: Memory region features:
651 * - RAM_SHARED: memory must be mmaped with the MAP_SHARED flag
652 * - RAM_PMEM: the memory is persistent memory
653 * Other bits are ignored now.
654 * @path: the path in which to allocate the RAM.
655 * @errp: pointer to Error*, to store an error if it happens.
656 *
657 * Note that this function does not do anything to cause the data in the
658 * RAM memory region to be migrated; that is the responsibility of the caller.
659 */
660 void memory_region_init_ram_from_file(MemoryRegion *mr,
661 struct Object *owner,
662 const char *name,
663 uint64_t size,
664 uint64_t align,
665 uint32_t ram_flags,
666 const char *path,
667 Error **errp);
668
669 /**
670 * memory_region_init_ram_from_fd: Initialize RAM memory region with a
671 * mmap-ed backend.
672 *
673 * @mr: the #MemoryRegion to be initialized.
674 * @owner: the object that tracks the region's reference count
675 * @name: the name of the region.
676 * @size: size of the region.
677 * @share: %true if memory must be mmaped with the MAP_SHARED flag
678 * @fd: the fd to mmap.
679 * @errp: pointer to Error*, to store an error if it happens.
680 *
681 * Note that this function does not do anything to cause the data in the
682 * RAM memory region to be migrated; that is the responsibility of the caller.
683 */
684 void memory_region_init_ram_from_fd(MemoryRegion *mr,
685 struct Object *owner,
686 const char *name,
687 uint64_t size,
688 bool share,
689 int fd,
690 Error **errp);
691 #endif
692
693 /**
694 * memory_region_init_ram_ptr: Initialize RAM memory region from a
695 * user-provided pointer. Accesses into the
696 * region will modify memory directly.
697 *
698 * @mr: the #MemoryRegion to be initialized.
699 * @owner: the object that tracks the region's reference count
700  * @name: Region name, becomes part of RAMBlock name used in migration stream;
701 * must be unique within any device
702 * @size: size of the region.
703 * @ptr: memory to be mapped; must contain at least @size bytes.
704 *
705 * Note that this function does not do anything to cause the data in the
706 * RAM memory region to be migrated; that is the responsibility of the caller.
707 */
708 void memory_region_init_ram_ptr(MemoryRegion *mr,
709 struct Object *owner,
710 const char *name,
711 uint64_t size,
712 void *ptr);
713
714 /**
715 * memory_region_init_ram_device_ptr: Initialize RAM device memory region from
716 * a user-provided pointer.
717 *
718 * A RAM device represents a mapping to a physical device, such as to a PCI
719  * MMIO BAR of a vfio-pci assigned device.  The memory region may be mapped
720 * into the VM address space and access to the region will modify memory
721 * directly. However, the memory region should not be included in a memory
722 * dump (device may not be enabled/mapped at the time of the dump), and
723 * operations incompatible with manipulating MMIO should be avoided. Replaces
724 * skip_dump flag.
725 *
726 * @mr: the #MemoryRegion to be initialized.
727 * @owner: the object that tracks the region's reference count
728 * @name: the name of the region.
729 * @size: size of the region.
730 * @ptr: memory to be mapped; must contain at least @size bytes.
731 *
732 * Note that this function does not do anything to cause the data in the
733 * RAM memory region to be migrated; that is the responsibility of the caller.
734 * (For RAM device memory regions, migrating the contents rarely makes sense.)
735 */
736 void memory_region_init_ram_device_ptr(MemoryRegion *mr,
737 struct Object *owner,
738 const char *name,
739 uint64_t size,
740 void *ptr);
741
742 /**
743 * memory_region_init_alias: Initialize a memory region that aliases all or a
744 * part of another memory region.
745 *
746 * @mr: the #MemoryRegion to be initialized.
747 * @owner: the object that tracks the region's reference count
748 * @name: used for debugging; not visible to the user or ABI
749 * @orig: the region to be referenced; @mr will be equivalent to
750 * @orig between @offset and @offset + @size - 1.
751 * @offset: start of the section in @orig to be referenced.
752 * @size: size of the region.
753 */
754 void memory_region_init_alias(MemoryRegion *mr,
755 struct Object *owner,
756 const char *name,
757 MemoryRegion *orig,
758 hwaddr offset,
759 uint64_t size);
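
/*
 * Editor's illustrative sketch (not part of the original header): exposing
 * the low 1 MiB of an already-initialized RAM region at a second
 * guest-physical address through an alias. Names and addresses are
 * hypothetical.
 *
 *     MemoryRegion lowmem_alias;
 *
 *     memory_region_init_alias(&lowmem_alias, obj, "lowmem-alias",
 *                              &ram, 0, 0x100000);
 *     memory_region_add_subregion(get_system_memory(), 0xe0000000,
 *                                 &lowmem_alias);
 */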
760
761 /**
762 * memory_region_init_rom_nomigrate: Initialize a ROM memory region.
763 *
764 * This has the same effect as calling memory_region_init_ram_nomigrate()
765 * and then marking the resulting region read-only with
766 * memory_region_set_readonly().
767 *
768 * Note that this function does not do anything to cause the data in the
769 * RAM side of the memory region to be migrated; that is the responsibility
770 * of the caller.
771 *
772 * @mr: the #MemoryRegion to be initialized.
773 * @owner: the object that tracks the region's reference count
774  * @name: Region name, becomes part of RAMBlock name used in migration stream;
775 * must be unique within any device
776 * @size: size of the region.
777 * @errp: pointer to Error*, to store an error if it happens.
778 */
779 void memory_region_init_rom_nomigrate(MemoryRegion *mr,
780 struct Object *owner,
781 const char *name,
782 uint64_t size,
783 Error **errp);
784
785 /**
786 * memory_region_init_rom_device_nomigrate: Initialize a ROM memory region.
787 * Writes are handled via callbacks.
788 *
789 * Note that this function does not do anything to cause the data in the
790 * RAM side of the memory region to be migrated; that is the responsibility
791 * of the caller.
792 *
793 * @mr: the #MemoryRegion to be initialized.
794 * @owner: the object that tracks the region's reference count
795 * @ops: callbacks for write access handling (must not be NULL).
796 * @opaque: passed to the read and write callbacks of the @ops structure.
797  * @name: Region name, becomes part of RAMBlock name used in migration stream;
798 * must be unique within any device
799 * @size: size of the region.
800 * @errp: pointer to Error*, to store an error if it happens.
801 */
802 void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
803 struct Object *owner,
804 const MemoryRegionOps *ops,
805 void *opaque,
806 const char *name,
807 uint64_t size,
808 Error **errp);
809
810 /**
811 * memory_region_init_iommu: Initialize a memory region of a custom type
812 * that translates addresses
813 *
814 * An IOMMU region translates addresses and forwards accesses to a target
815 * memory region.
816 *
817 * The IOMMU implementation must define a subclass of TYPE_IOMMU_MEMORY_REGION.
818 * @_iommu_mr should be a pointer to enough memory for an instance of
819 * that subclass, @instance_size is the size of that subclass, and
820 * @mrtypename is its name. This function will initialize @_iommu_mr as an
821 * instance of the subclass, and its methods will then be called to handle
822 * accesses to the memory region. See the documentation of
823 * #IOMMUMemoryRegionClass for further details.
824 *
825 * @_iommu_mr: the #IOMMUMemoryRegion to be initialized
826 * @instance_size: the IOMMUMemoryRegion subclass instance size
827 * @mrtypename: the type name of the #IOMMUMemoryRegion
828 * @owner: the object that tracks the region's reference count
829 * @name: used for debugging; not visible to the user or ABI
830 * @size: size of the region.
831 */
832 void memory_region_init_iommu(void *_iommu_mr,
833 size_t instance_size,
834 const char *mrtypename,
835 Object *owner,
836 const char *name,
837 uint64_t size);
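
/*
 * Editor's illustrative sketch (not part of the original header): the usual
 * pattern is to register an IOMMU memory region subclass whose class_init
 * fills in the translate method, then initialize an instance embedded in the
 * device state. The my_iommu_* names and s->iommu_mr are hypothetical;
 * my_iommu_translate (not shown) would implement the translate callback.
 *
 *     #define TYPE_MY_IOMMU_MEMORY_REGION "my-iommu-memory-region"
 *
 *     static void my_iommu_class_init(ObjectClass *klass, void *data)
 *     {
 *         IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
 *
 *         imrc->translate = my_iommu_translate;
 *     }
 *
 *     static const TypeInfo my_iommu_info = {
 *         .name = TYPE_MY_IOMMU_MEMORY_REGION,
 *         .parent = TYPE_IOMMU_MEMORY_REGION,
 *         .class_init = my_iommu_class_init,
 *     };
 *
 *     memory_region_init_iommu(&s->iommu_mr, sizeof(s->iommu_mr),
 *                              TYPE_MY_IOMMU_MEMORY_REGION, OBJECT(s),
 *                              "my-iommu", UINT64_MAX);
 */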
838
839 /**
840 * memory_region_init_ram - Initialize RAM memory region. Accesses into the
841 * region will modify memory directly.
842 *
843 * @mr: the #MemoryRegion to be initialized
844 * @owner: the object that tracks the region's reference count (must be
845 * TYPE_DEVICE or a subclass of TYPE_DEVICE, or NULL)
846 * @name: name of the memory region
847 * @size: size of the region in bytes
848 * @errp: pointer to Error*, to store an error if it happens.
849 *
850 * This function allocates RAM for a board model or device, and
851 * arranges for it to be migrated (by calling vmstate_register_ram()
852 * if @owner is a DeviceState, or vmstate_register_ram_global() if
853 * @owner is NULL).
854 *
855 * TODO: Currently we restrict @owner to being either NULL (for
856 * global RAM regions with no owner) or devices, so that we can
857 * give the RAM block a unique name for migration purposes.
858 * We should lift this restriction and allow arbitrary Objects.
859 * If you pass a non-NULL non-device @owner then we will assert.
860 */
861 void memory_region_init_ram(MemoryRegion *mr,
862 struct Object *owner,
863 const char *name,
864 uint64_t size,
865 Error **errp);
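
/*
 * Editor's illustrative sketch (not part of the original header): allocating
 * migratable RAM from a device's realize function. The names, the base
 * address and the 16 MiB size are hypothetical.
 *
 *     memory_region_init_ram(&s->vram, OBJECT(dev), "mydev.vram",
 *                            16 * MiB, &error_fatal);
 *     memory_region_add_subregion(get_system_memory(), 0x90000000, &s->vram);
 */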
866
867 /**
868 * memory_region_init_rom: Initialize a ROM memory region.
869 *
870 * This has the same effect as calling memory_region_init_ram()
871 * and then marking the resulting region read-only with
872 * memory_region_set_readonly(). This includes arranging for the
873 * contents to be migrated.
874 *
875 * TODO: Currently we restrict @owner to being either NULL (for
876 * global RAM regions with no owner) or devices, so that we can
877 * give the RAM block a unique name for migration purposes.
878 * We should lift this restriction and allow arbitrary Objects.
879 * If you pass a non-NULL non-device @owner then we will assert.
880 *
881 * @mr: the #MemoryRegion to be initialized.
882 * @owner: the object that tracks the region's reference count
883  * @name: Region name, becomes part of RAMBlock name used in migration stream;
884 * must be unique within any device
885 * @size: size of the region.
886 * @errp: pointer to Error*, to store an error if it happens.
887 */
888 void memory_region_init_rom(MemoryRegion *mr,
889 struct Object *owner,
890 const char *name,
891 uint64_t size,
892 Error **errp);
893
894 /**
895 * memory_region_init_rom_device: Initialize a ROM memory region.
896 * Writes are handled via callbacks.
897 *
898 * This function initializes a memory region backed by RAM for reads
899 * and callbacks for writes, and arranges for the RAM backing to
900 * be migrated (by calling vmstate_register_ram()
901 * if @owner is a DeviceState, or vmstate_register_ram_global() if
902 * @owner is NULL).
903 *
904 * TODO: Currently we restrict @owner to being either NULL (for
905 * global RAM regions with no owner) or devices, so that we can
906 * give the RAM block a unique name for migration purposes.
907 * We should lift this restriction and allow arbitrary Objects.
908 * If you pass a non-NULL non-device @owner then we will assert.
909 *
910 * @mr: the #MemoryRegion to be initialized.
911 * @owner: the object that tracks the region's reference count
912 * @ops: callbacks for write access handling (must not be NULL).
913  * @name: Region name, becomes part of RAMBlock name used in migration stream;
914 * must be unique within any device
915 * @size: size of the region.
916 * @errp: pointer to Error*, to store an error if it happens.
917 */
918 void memory_region_init_rom_device(MemoryRegion *mr,
919 struct Object *owner,
920 const MemoryRegionOps *ops,
921 void *opaque,
922 const char *name,
923 uint64_t size,
924 Error **errp);
925
926
927 /**
928 * memory_region_owner: get a memory region's owner.
929 *
930 * @mr: the memory region being queried.
931 */
932 struct Object *memory_region_owner(MemoryRegion *mr);
933
934 /**
935 * memory_region_size: get a memory region's size.
936 *
937 * @mr: the memory region being queried.
938 */
939 uint64_t memory_region_size(MemoryRegion *mr);
940
941 /**
942 * memory_region_is_ram: check whether a memory region is random access
943 *
944 * Returns %true if a memory region is random access.
945 *
946 * @mr: the memory region being queried
947 */
948 static inline bool memory_region_is_ram(MemoryRegion *mr)
949 {
950 return mr->ram;
951 }
952
953 /**
954 * memory_region_is_ram_device: check whether a memory region is a ram device
955 *
956  * Returns %true if a memory region is a device-backed RAM region
957 *
958 * @mr: the memory region being queried
959 */
960 bool memory_region_is_ram_device(MemoryRegion *mr);
961
962 /**
963 * memory_region_is_romd: check whether a memory region is in ROMD mode
964 *
965 * Returns %true if a memory region is a ROM device and currently set to allow
966 * direct reads.
967 *
968 * @mr: the memory region being queried
969 */
970 static inline bool memory_region_is_romd(MemoryRegion *mr)
971 {
972 return mr->rom_device && mr->romd_mode;
973 }
974
975 /**
976 * memory_region_get_iommu: check whether a memory region is an iommu
977 *
978 * Returns pointer to IOMMUMemoryRegion if a memory region is an iommu,
979 * otherwise NULL.
980 *
981 * @mr: the memory region being queried
982 */
983 static inline IOMMUMemoryRegion *memory_region_get_iommu(MemoryRegion *mr)
984 {
985 if (mr->alias) {
986 return memory_region_get_iommu(mr->alias);
987 }
988 if (mr->is_iommu) {
989 return (IOMMUMemoryRegion *) mr;
990 }
991 return NULL;
992 }
993
994 /**
995 * memory_region_get_iommu_class_nocheck: returns iommu memory region class
996 * if an iommu or NULL if not
997 *
998 * Returns pointer to IOMMUMemoryRegionClass if a memory region is an iommu,
999  * otherwise NULL. This is a fast path that avoids QOM checking; use with caution.
1000 *
1001  * @iommu_mr: the memory region being queried
1002 */
1003 static inline IOMMUMemoryRegionClass *memory_region_get_iommu_class_nocheck(
1004 IOMMUMemoryRegion *iommu_mr)
1005 {
1006 return (IOMMUMemoryRegionClass *) (((Object *)iommu_mr)->class);
1007 }
1008
1009 #define memory_region_is_iommu(mr) (memory_region_get_iommu(mr) != NULL)
1010
1011 /**
1012 * memory_region_iommu_get_min_page_size: get minimum supported page size
1013 * for an iommu
1014 *
1015 * Returns minimum supported page size for an iommu.
1016 *
1017 * @iommu_mr: the memory region being queried
1018 */
1019 uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr);
1020
1021 /**
1022 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
1023 *
1024 * The notification type will be decided by entry.perm bits:
1025 *
1026 * - For UNMAP (cache invalidation) notifies: set entry.perm to IOMMU_NONE.
1027 * - For MAP (newly added entry) notifies: set entry.perm to the
1028 * permission of the page (which is definitely !IOMMU_NONE).
1029 *
1030 * Note: for any IOMMU implementation, an in-place mapping change
1031 * should be notified with an UNMAP followed by a MAP.
1032 *
1033 * @iommu_mr: the memory region that was changed
1034 * @iommu_idx: the IOMMU index for the translation table which has changed
1035 * @entry: the new entry in the IOMMU translation table. The entry
1036 * replaces all old entries for the same virtual I/O address range.
1037 * Deleted entries have .@perm == 0.
1038 */
1039 void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
1040 int iommu_idx,
1041 IOMMUTLBEntry entry);
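
/*
 * Editor's illustrative sketch (not part of the original header): an emulated
 * IOMMU invalidating a 4 KiB mapping on IOMMU index 0. iommu_mr, as and iova
 * are hypothetical.
 *
 *     IOMMUTLBEntry entry = {
 *         .target_as = as,
 *         .iova = iova & ~0xfffULL,
 *         .translated_addr = 0,
 *         .addr_mask = 0xfff,          // 4 KiB translation
 *         .perm = IOMMU_NONE,          // IOMMU_NONE => UNMAP notification
 *     };
 *
 *     memory_region_notify_iommu(iommu_mr, 0, entry);
 */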
1042
1043 /**
1044 * memory_region_notify_one: notify a change in an IOMMU translation
1045 * entry to a single notifier
1046 *
1047 * This works just like memory_region_notify_iommu(), but it only
1048 * notifies a specific notifier, not all of them.
1049 *
1050 * @notifier: the notifier to be notified
1051 * @entry: the new entry in the IOMMU translation table. The entry
1052 * replaces all old entries for the same virtual I/O address range.
1053 * Deleted entries have .@perm == 0.
1054 */
1055 void memory_region_notify_one(IOMMUNotifier *notifier,
1056 IOMMUTLBEntry *entry);
1057
1058 /**
1059 * memory_region_register_iommu_notifier: register a notifier for changes to
1060 * IOMMU translation entries.
1061 *
1062 * @mr: the memory region to observe
1063 * @n: the IOMMUNotifier to be added; the notify callback receives a
1064 * pointer to an #IOMMUTLBEntry as the opaque value; the pointer
1065 * ceases to be valid on exit from the notifier.
1066 */
1067 void memory_region_register_iommu_notifier(MemoryRegion *mr,
1068 IOMMUNotifier *n);
1069
1070 /**
1071 * memory_region_iommu_replay: replay existing IOMMU translations to
1072 * a notifier with the minimum page granularity returned by
1073  * the IOMMU's get_min_page_size() callback (see #IOMMUMemoryRegionClass).
1074 *
1075 * Note: this is not related to record-and-replay functionality.
1076 *
1077 * @iommu_mr: the memory region to observe
1078 * @n: the notifier to which to replay iommu mappings
1079 */
1080 void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n);
1081
1082 /**
1083 * memory_region_iommu_replay_all: replay existing IOMMU translations
1084 * to all the notifiers registered.
1085 *
1086 * Note: this is not related to record-and-replay functionality.
1087 *
1088 * @iommu_mr: the memory region to observe
1089 */
1090 void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr);
1091
1092 /**
1093 * memory_region_unregister_iommu_notifier: unregister a notifier for
1094 * changes to IOMMU translation entries.
1095 *
1096  * @mr: the memory region which was being observed and from which the
1097  *      notifier is to be removed
1098 * @n: the notifier to be removed.
1099 */
1100 void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
1101 IOMMUNotifier *n);
1102
1103 /**
1104 * memory_region_iommu_get_attr: return an IOMMU attr if get_attr() is
1105 * defined on the IOMMU.
1106 *
1107 * Returns 0 on success, or a negative errno otherwise. In particular,
1108 * -EINVAL indicates that the IOMMU does not support the requested
1109 * attribute.
1110 *
1111 * @iommu_mr: the memory region
1112 * @attr: the requested attribute
1113 * @data: a pointer to the requested attribute data
1114 */
1115 int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr,
1116 enum IOMMUMemoryRegionAttr attr,
1117 void *data);
1118
1119 /**
1120 * memory_region_iommu_attrs_to_index: return the IOMMU index to
1121 * use for translations with the given memory transaction attributes.
1122 *
1123 * @iommu_mr: the memory region
1124 * @attrs: the memory transaction attributes
1125 */
1126 int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr,
1127 MemTxAttrs attrs);
1128
1129 /**
1130 * memory_region_iommu_num_indexes: return the total number of IOMMU
1131 * indexes that this IOMMU supports.
1132 *
1133 * @iommu_mr: the memory region
1134 */
1135 int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr);
1136
1137 /**
1138 * memory_region_name: get a memory region's name
1139 *
1140 * Returns the string that was used to initialize the memory region.
1141 *
1142 * @mr: the memory region being queried
1143 */
1144 const char *memory_region_name(const MemoryRegion *mr);
1145
1146 /**
1147 * memory_region_is_logging: return whether a memory region is logging writes
1148 *
1149 * Returns %true if the memory region is logging writes for the given client
1150 *
1151 * @mr: the memory region being queried
1152 * @client: the client being queried
1153 */
1154 bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);
1155
1156 /**
1157 * memory_region_get_dirty_log_mask: return the clients for which a
1158 * memory region is logging writes.
1159 *
1160 * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
1161 * are the bit indices.
1162 *
1163 * @mr: the memory region being queried
1164 */
1165 uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);
1166
1167 /**
1168 * memory_region_is_rom: check whether a memory region is ROM
1169 *
1170 * Returns %true if a memory region is read-only memory.
1171 *
1172 * @mr: the memory region being queried
1173 */
1174 static inline bool memory_region_is_rom(MemoryRegion *mr)
1175 {
1176 return mr->ram && mr->readonly;
1177 }
1178
1179 /**
1180 * memory_region_is_nonvolatile: check whether a memory region is non-volatile
1181 *
1182  * Returns %true if a memory region is non-volatile memory.
1183 *
1184 * @mr: the memory region being queried
1185 */
1186 static inline bool memory_region_is_nonvolatile(MemoryRegion *mr)
1187 {
1188 return mr->nonvolatile;
1189 }
1190
1191 /**
1192 * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
1193 *
1194 * Returns a file descriptor backing a file-based RAM memory region,
1195 * or -1 if the region is not a file-based RAM memory region.
1196 *
1197 * @mr: the RAM or alias memory region being queried.
1198 */
1199 int memory_region_get_fd(MemoryRegion *mr);
1200
1201 /**
1202 * memory_region_from_host: Convert a pointer into a RAM memory region
1203 * and an offset within it.
1204 *
1205 * Given a host pointer inside a RAM memory region (created with
1206 * memory_region_init_ram() or memory_region_init_ram_ptr()), return
1207 * the MemoryRegion and the offset within it.
1208 *
1209 * Use with care; by the time this function returns, the returned pointer is
1210 * not protected by RCU anymore. If the caller is not within an RCU critical
1211 * section and does not hold the iothread lock, it must have other means of
1212 * protecting the pointer, such as a reference to the region that includes
1213 * the incoming ram_addr_t.
1214 *
1215 * @ptr: the host pointer to be converted
1216 * @offset: the offset within memory region
1217 */
1218 MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);
1219
1220 /**
1221 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
1222 *
1223 * Returns a host pointer to a RAM memory region (created with
1224 * memory_region_init_ram() or memory_region_init_ram_ptr()).
1225 *
1226 * Use with care; by the time this function returns, the returned pointer is
1227 * not protected by RCU anymore. If the caller is not within an RCU critical
1228 * section and does not hold the iothread lock, it must have other means of
1229 * protecting the pointer, such as a reference to the region that includes
1230 * the incoming ram_addr_t.
1231 *
1232 * @mr: the memory region being queried.
1233 */
1234 void *memory_region_get_ram_ptr(MemoryRegion *mr);
1235
1236 /* memory_region_ram_resize: Resize a RAM region.
1237 *
1238 * Only legal before guest might have detected the memory size: e.g. on
1239 * incoming migration, or right after reset.
1240 *
1241 * @mr: a memory region created with @memory_region_init_resizeable_ram.
1242  * @newsize: the new size of the region
1243 * @errp: pointer to Error*, to store an error if it happens.
1244 */
1245 void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
1246 Error **errp);
1247
1248 /**
1249 * memory_region_set_log: Turn dirty logging on or off for a region.
1250 *
1251 * Turns dirty logging on or off for a specified client (display, migration).
1252 * Only meaningful for RAM regions.
1253 *
1254 * @mr: the memory region being updated.
1255 * @log: whether dirty logging is to be enabled or disabled.
1256 * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
1257 */
1258 void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
1259
1260 /**
1261 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
1262 *
1263 * Marks a range of bytes as dirty, after it has been dirtied outside
1264 * guest code.
1265 *
1266 * @mr: the memory region being dirtied.
1267 * @addr: the address (relative to the start of the region) being dirtied.
1268 * @size: size of the range being dirtied.
1269 */
1270 void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
1271 hwaddr size);
1272
1273 /**
1274 * memory_region_clear_dirty_bitmap - clear dirty bitmap for memory range
1275 *
1276 * This function is called when the caller wants to clear the remote
1277 * dirty bitmap of a memory range within the memory region. This can
1278  * be used by e.g. KVM to manually clear the dirty log when
1279  * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT is declared as supported by the host
1280  * kernel.
1281 *
1282 * @mr: the memory region to clear the dirty log upon
1283 * @start: start address offset within the memory region
1284  * @len: length of the range for which to clear the dirty bitmap
1285 */
1286 void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
1287 hwaddr len);
1288
1289 /**
1290 * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
1291 * bitmap and clear it.
1292 *
1293 * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
1294 * returns the snapshot. The snapshot can then be used to query dirty
1295 * status, using memory_region_snapshot_get_dirty. Snapshotting allows
1296 * querying the same page multiple times, which is especially useful for
1297 * display updates where the scanlines often are not page aligned.
1298 *
1299  * The dirty bitmap region which gets copied into the snapshot (and
1300  * cleared afterwards) can be larger than requested.  The boundaries
1301  * are rounded up/down so complete bitmap longs (covering 64 pages on
1302  * 64bit hosts) can be copied over into the bitmap snapshot.  This
1303 * isn't a problem for display updates as the extra pages are outside
1304 * the visible area, and in case the visible area changes a full
1305 * display redraw is due anyway. Should other use cases for this
1306 * function emerge we might have to revisit this implementation
1307 * detail.
1308 *
1309 * Use g_free to release DirtyBitmapSnapshot.
1310 *
1311 * @mr: the memory region being queried.
1312 * @addr: the address (relative to the start of the region) being queried.
1313 * @size: the size of the range being queried.
1314 * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA.
1315 */
1316 DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
1317 hwaddr addr,
1318 hwaddr size,
1319 unsigned client);
1320
1321 /**
1322 * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty
1323 * in the specified dirty bitmap snapshot.
1324 *
1325 * @mr: the memory region being queried.
1326 * @snap: the dirty bitmap snapshot
1327 * @addr: the address (relative to the start of the region) being queried.
1328 * @size: the size of the range being queried.
1329 */
1330 bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
1331 DirtyBitmapSnapshot *snap,
1332 hwaddr addr, hwaddr size);
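
/*
 * Editor's illustrative sketch (not part of the original header): a display
 * device scanning its framebuffer for dirty scanlines. fb_mr, fb_size and
 * stride are hypothetical, and VGA dirty logging is assumed to have been
 * enabled beforehand with memory_region_set_log().
 *
 *     DirtyBitmapSnapshot *snap;
 *     hwaddr ofs;
 *
 *     snap = memory_region_snapshot_and_clear_dirty(fb_mr, 0, fb_size,
 *                                                   DIRTY_MEMORY_VGA);
 *     for (ofs = 0; ofs < fb_size; ofs += stride) {
 *         if (memory_region_snapshot_get_dirty(fb_mr, snap, ofs, stride)) {
 *             // redraw the scanline starting at ofs
 *         }
 *     }
 *     g_free(snap);
 */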
1333
1334 /**
1335 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
1336 * client.
1337 *
1338 * Marks a range of pages as no longer dirty.
1339 *
1340 * @mr: the region being updated.
1341 * @addr: the start of the subrange being cleaned.
1342 * @size: the size of the subrange being cleaned.
1343 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
1344 * %DIRTY_MEMORY_VGA.
1345 */
1346 void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
1347 hwaddr size, unsigned client);
1348
1349 /**
1350 * memory_region_flush_rom_device: Mark a range of pages dirty and invalidate
1351 * TBs (for self-modifying code).
1352 *
1353 * The MemoryRegionOps->write() callback of a ROM device must use this function
1354 * to mark byte ranges that have been modified internally, such as by directly
1355 * accessing the memory returned by memory_region_get_ram_ptr().
1356 *
1357 * This function marks the range dirty and invalidates TBs so that TCG can
1358 * detect self-modifying code.
1359 *
1360 * @mr: the region being flushed.
1361 * @addr: the start, relative to the start of the region, of the range being
1362 * flushed.
1363 * @size: the size, in bytes, of the range being flushed.
1364 */
1365 void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size);
1366
1367 /**
1368 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
1369 *
1370 * Allows a memory region to be marked as read-only (turning it into a ROM).
1371  * Only useful on RAM regions.
1372 *
1373 * @mr: the region being updated.
1374  * @readonly: whether the region is to be ROM or RAM.
1375 */
1376 void memory_region_set_readonly(MemoryRegion *mr, bool readonly);
1377
1378 /**
1379 * memory_region_set_nonvolatile: Turn a memory region non-volatile
1380 *
1381 * Allows a memory region to be marked as non-volatile.
1382  * Only useful on RAM regions.
1383 *
1384 * @mr: the region being updated.
1385  * @nonvolatile: whether the region is to be non-volatile.
1386 */
1387 void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile);
1388
1389 /**
1390 * memory_region_rom_device_set_romd: enable/disable ROMD mode
1391 *
1392  * Allows a ROM device (initialized with memory_region_init_rom_device()) to
1393  * be set to ROMD mode (default) or MMIO mode.  When it is in ROMD mode, the
1394 * device is mapped to guest memory and satisfies read access directly.
1395 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
1396 * Writes are always handled by the #MemoryRegion.write function.
1397 *
1398 * @mr: the memory region to be updated
1399 * @romd_mode: %true to put the region into ROMD mode
1400 */
1401 void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);
1402
1403 /**
1404 * memory_region_set_coalescing: Enable memory coalescing for the region.
1405 *
1406  * Enables writes to a region to be queued for later processing. MMIO ->write
1407 * callbacks may be delayed until a non-coalesced MMIO is issued.
1408 * Only useful for IO regions. Roughly similar to write-combining hardware.
1409 *
1410 * @mr: the memory region to be write coalesced
1411 */
1412 void memory_region_set_coalescing(MemoryRegion *mr);
1413
1414 /**
1415 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
1416 * a region.
1417 *
1418 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
1419  * Multiple calls can be issued to coalesce disjoint ranges.
1420 *
1421 * @mr: the memory region to be updated.
1422 * @offset: the start of the range within the region to be coalesced.
1423 * @size: the size of the subrange to be coalesced.
1424 */
1425 void memory_region_add_coalescing(MemoryRegion *mr,
1426 hwaddr offset,
1427 uint64_t size);
1428
1429 /**
1430 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
1431 *
1432 * Disables any coalescing caused by memory_region_set_coalescing() or
1433  * memory_region_add_coalescing(). Roughly equivalent to uncacheable memory
1434 * hardware.
1435 *
1436 * @mr: the memory region to be updated.
1437 */
1438 void memory_region_clear_coalescing(MemoryRegion *mr);
1439
1440 /**
1441 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
1442 * accesses.
1443 *
1444  * Ensure that pending coalesced MMIO requests are flushed before the memory
1445 * region is accessed. This property is automatically enabled for all regions
1446 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
1447 *
1448 * @mr: the memory region to be updated.
1449 */
1450 void memory_region_set_flush_coalesced(MemoryRegion *mr);
1451
1452 /**
1453 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
1454 * accesses.
1455 *
1456 * Clear the automatic coalesced MMIO flushing enabled via
1457 * memory_region_set_flush_coalesced. Note that this service has no effect on
1458 * memory regions that have MMIO coalescing enabled for themselves. For them,
1459 * automatic flushing will stop once coalescing is disabled.
1460 *
1461 * @mr: the memory region to be updated.
1462 */
1463 void memory_region_clear_flush_coalesced(MemoryRegion *mr);
1464
1465 /**
1466 * memory_region_clear_global_locking: Declares that access processing does
1467 * not depend on the QEMU global lock.
1468 *
1469 * By clearing this property, accesses to the memory region will be processed
1470  * outside of QEMU's global lock (unless the lock is held when issuing the
1471 * access request). In this case, the device model implementing the access
1472 * handlers is responsible for synchronization of concurrency.
1473 *
1474 * @mr: the memory region to be updated.
1475 */
1476 void memory_region_clear_global_locking(MemoryRegion *mr);
1477
1478 /**
1479 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
1480 * is written to a location.
1481 *
1482 * Marks a word in an IO region (initialized with memory_region_init_io())
1483 * as a trigger for an eventfd event. The I/O callback will not be called.
1484 * The caller must be prepared to handle failure (that is, take the required
1485 * action if the callback _is_ called).
1486 *
1487 * @mr: the memory region being updated.
1488 * @addr: the address within @mr that is to be monitored
1489 * @size: the size of the access to trigger the eventfd
1490 * @match_data: whether to match against @data, instead of just @addr
1491 * @data: the data to match against the guest write
1492 * @e: event notifier to be triggered when @addr, @size, and @data all match.
1493 **/
1494 void memory_region_add_eventfd(MemoryRegion *mr,
1495 hwaddr addr,
1496 unsigned size,
1497 bool match_data,
1498 uint64_t data,
1499 EventNotifier *e);
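
/*
 * Editor's illustrative sketch (not part of the original header): wiring a
 * doorbell register to an EventNotifier, so that a 4-byte guest write of the
 * value 1 at offset 0x40 signals the notifier instead of invoking the MMIO
 * write callback. The names and offsets are hypothetical.
 *
 *     EventNotifier doorbell;
 *
 *     event_notifier_init(&doorbell, 0);
 *     memory_region_add_eventfd(&s->iomem, 0x40, 4, true, 1, &doorbell);
 */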
1500
1501 /**
1502 * memory_region_del_eventfd: Cancel an eventfd.
1503 *
1504 * Cancels an eventfd trigger requested by a previous
1505 * memory_region_add_eventfd() call.
1506 *
1507 * @mr: the memory region being updated.
1508 * @addr: the address within @mr that is to be monitored
1509 * @size: the size of the access to trigger the eventfd
1510 * @match_data: whether to match against @data, instead of just @addr
1511 * @data: the data to match against the guest write
1512 * @e: event notifier to be triggered when @addr, @size, and @data all match.
1513 */
1514 void memory_region_del_eventfd(MemoryRegion *mr,
1515 hwaddr addr,
1516 unsigned size,
1517 bool match_data,
1518 uint64_t data,
1519 EventNotifier *e);
1520
1521 /**
1522 * memory_region_add_subregion: Add a subregion to a container.
1523 *
1524 * Adds a subregion at @offset. The subregion may not overlap with other
1525 * subregions (except for those explicitly marked as overlapping). A region
1526 * may only be added once as a subregion (unless removed with
1527 * memory_region_del_subregion()); use memory_region_init_alias() if you
1528 * want a region to be a subregion in multiple locations.
1529 *
1530 * @mr: the region to contain the new subregion; must be a container
1531 * initialized with memory_region_init().
1532 * @offset: the offset relative to @mr where @subregion is added.
1533 * @subregion: the subregion to be added.
1534 */
1535 void memory_region_add_subregion(MemoryRegion *mr,
1536 hwaddr offset,
1537 MemoryRegion *subregion);
1538 /**
1539 * memory_region_add_subregion_overlap: Add a subregion to a container
1540 * with overlap.
1541 *
1542 * Adds a subregion at @offset. The subregion may overlap with other
1543 * subregions. Conflicts are resolved by having a higher @priority hide a
1544 * lower @priority. Subregions without priority are taken as @priority 0.
1545 * A region may only be added once as a subregion (unless removed with
1546 * memory_region_del_subregion()); use memory_region_init_alias() if you
1547 * want a region to be a subregion in multiple locations.
1548 *
1549 * @mr: the region to contain the new subregion; must be a container
1550 * initialized with memory_region_init().
1551 * @offset: the offset relative to @mr where @subregion is added.
1552 * @subregion: the subregion to be added.
1553 * @priority: used for resolving overlaps; highest priority wins.
1554 */
1555 void memory_region_add_subregion_overlap(MemoryRegion *mr,
1556 hwaddr offset,
1557 MemoryRegion *subregion,
1558 int priority);
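
/*
 * Usage sketch (illustrative only; the names "system", "ram", "shadow-rom",
 * the sizes and "owner" are made up): composing a container region.
 * memory_region_init() creates the pure container, and the overlapping
 * subregion with the higher priority hides the portion of RAM underneath it.
 *
 *     MemoryRegion container, ram, rom;
 *
 *     memory_region_init(&container, owner, "system", 0x100000000ULL);
 *     memory_region_init_ram(&ram, owner, "ram", 0x10000000, &error_fatal);
 *     memory_region_init_rom(&rom, owner, "shadow-rom", 0x20000, &error_fatal);
 *
 *     memory_region_add_subregion(&container, 0x0, &ram);
 *     /* ROM overlaps the top of RAM and wins because its priority (1) > 0. */
 *     memory_region_add_subregion_overlap(&container, 0xffe0000, &rom, 1);
 */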
1559
1560 /**
1561 * memory_region_get_ram_addr: Get the ram address associated with a memory
1562 * region
1563 */
1564 ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
1565
1566 uint64_t memory_region_get_alignment(const MemoryRegion *mr);
1567 /**
1568 * memory_region_del_subregion: Remove a subregion.
1569 *
1570 * Removes a subregion from its container.
1571 *
1572 * @mr: the container to be updated.
1573 * @subregion: the region being removed; must be a current subregion of @mr.
1574 */
1575 void memory_region_del_subregion(MemoryRegion *mr,
1576 MemoryRegion *subregion);
1577
1578 /*
1579 * memory_region_set_enabled: dynamically enable or disable a region
1580 *
1581 * Enables or disables a memory region. A disabled memory region
1582 * ignores all accesses to itself and its subregions. It does not
1583 * obscure sibling subregions with lower priority - it simply behaves as
1584 * if it had been removed from the hierarchy.
1585 *
1586 * Regions default to being enabled.
1587 *
1588 * @mr: the region to be updated
1589 * @enabled: whether to enable or disable the region
1590 */
1591 void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
1592
1593 /*
1594 * memory_region_set_address: dynamically update the address of a region
1595 *
1596 * Dynamically updates the address of a region, relative to its container.
1597 * May be used on regions that are currently part of a memory hierarchy.
1598 *
1599 * @mr: the region to be updated
1600 * @addr: new address, relative to container region
1601 */
1602 void memory_region_set_address(MemoryRegion *mr, hwaddr addr);
1603
1604 /*
1605 * memory_region_set_size: dynamically update the size of a region.
1606 *
1607 * Dynamically updates the size of a region.
1608 *
1609 * @mr: the region to be updated
1610 * @size: used size of the region.
1611 */
1612 void memory_region_set_size(MemoryRegion *mr, uint64_t size);
1613
1614 /*
1615 * memory_region_set_alias_offset: dynamically update a memory alias's offset
1616 *
1617 * Dynamically updates the offset into the target region that an alias points
1618 * to, as if the fourth argument to memory_region_init_alias() has changed.
1619 *
1620 * @mr: the #MemoryRegion to be updated; should be an alias.
1621 * @offset: the new offset into the target memory region
1622 */
1623 void memory_region_set_alias_offset(MemoryRegion *mr,
1624 hwaddr offset);
1625
1626 /**
1627 * memory_region_present: checks if an address relative to a @container
1628 * translates into a #MemoryRegion within @container
1629 *
1630 * Answer whether a #MemoryRegion within @container covers the address
1631 * @addr.
1632 *
1633 * @container: a #MemoryRegion within which @addr is a relative address
1634 * @addr: the area within @container to be searched
1635 */
1636 bool memory_region_present(MemoryRegion *container, hwaddr addr);
1637
1638 /**
1639 * memory_region_is_mapped: returns true if #MemoryRegion is mapped
1640 * into any address space.
1641 *
1642 * @mr: a #MemoryRegion which should be checked if it's mapped
1643 */
1644 bool memory_region_is_mapped(MemoryRegion *mr);
1645
1646 /**
1647 * memory_region_find: translate an address/size relative to a
1648 * MemoryRegion into a #MemoryRegionSection.
1649 *
1650 * Locates the first #MemoryRegion within @mr that overlaps the range
1651 * given by @addr and @size.
1652 *
1653 * Returns a #MemoryRegionSection that describes a contiguous overlap.
1654 * It will have the following characteristics:
1655 * .@size = 0 iff no overlap was found
1656 * .@mr is non-%NULL iff an overlap was found
1657 *
1658 * Remember that in the return value the @offset_within_region is
1659 * relative to the returned region (in the .@mr field), not to the
1660 * @mr argument.
1661 *
1662 * Similarly, the .@offset_within_address_space is relative to the
1663 * address space that contains both regions, the passed and the
1664 * returned one. However, in the special case where the @mr argument
1665 * has no container (and thus is the root of the address space), the
1666 * following will hold:
1667 * .@offset_within_address_space >= @addr
1668 * .@offset_within_address_space + .@size <= @addr + @size
1669 *
1670 * @mr: a MemoryRegion within which @addr is a relative address
1671 * @addr: start of the area within @mr to be searched
1672 * @size: size of the area to be searched
1673 */
1674 MemoryRegionSection memory_region_find(MemoryRegion *mr,
1675 hwaddr addr, uint64_t size);
1676
1677 /**
1678 * memory_global_dirty_log_sync: synchronize the dirty log for all memory
1679 *
1680 * Synchronizes the dirty page log for all address spaces.
1681 */
1682 void memory_global_dirty_log_sync(void);
1683
1684 /**
1685 * memory_region_transaction_begin: Start a transaction.
1686 *
1687 * During a transaction, changes will be accumulated and made visible
1688 * only when the transaction ends (is committed).
1689 */
1690 void memory_region_transaction_begin(void);
1691
1692 /**
1693 * memory_region_transaction_commit: Commit a transaction and make changes
1694 * visible to the guest.
1695 */
1696 void memory_region_transaction_commit(void);
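
/*
 * Usage sketch (illustrative only; "dev", its "bar" region and "new_base"
 * are made up): batching topology updates. Moving and re-enabling a BAR-like
 * region inside a single transaction means the guest never observes the
 * intermediate state, and the flat views are rebuilt once instead of once
 * per call.
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_address(&dev->bar, new_base);
 *     memory_region_set_enabled(&dev->bar, true);
 *     memory_region_transaction_commit();
 */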
1697
1698 /**
1699 * memory_listener_register: register callbacks to be called when memory
1700 * sections are mapped into or unmapped from an
1701 * address space
1702 *
1703 * @listener: an object containing the callbacks to be called
1704 * @filter: if non-%NULL, only regions in this address space will be observed
1705 */
1706 void memory_listener_register(MemoryListener *listener, AddressSpace *filter);
1707
1708 /**
1709 * memory_listener_unregister: undo the effect of memory_listener_register()
1710 *
1711 * @listener: an object containing the callbacks to be removed
1712 */
1713 void memory_listener_unregister(MemoryListener *listener);
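
/*
 * Usage sketch (illustrative only; "my_listener" and "my_region_add" are made
 * up, and address_space_memory comes from "exec/address-spaces.h"): a minimal
 * MemoryListener that logs every section mapped into the system address
 * space. Only the callbacks of interest need to be filled in; unused hooks
 * may stay NULL.
 *
 *     static void my_region_add(MemoryListener *listener,
 *                               MemoryRegionSection *section)
 *     {
 *         qemu_log("mapped %s @ 0x%" PRIx64 "\n",
 *                  memory_region_name(section->mr),
 *                  section->offset_within_address_space);
 *     }
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *         .priority = 10,
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 *     ...
 *     memory_listener_unregister(&my_listener);
 */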
1714
1715 /**
1716 * memory_global_dirty_log_start: begin dirty logging for all regions
1717 */
1718 void memory_global_dirty_log_start(void);
1719
1720 /**
1721 * memory_global_dirty_log_stop: end dirty logging for all regions
1722 */
1723 void memory_global_dirty_log_stop(void);
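
/*
 * Usage sketch (illustrative only; the loop condition is made up): the global
 * dirty-log cycle as a migration-style user would drive it. Starting logging
 * arms the listeners' log_start/log_sync hooks; memory_global_dirty_log_sync()
 * then pulls the accelerator's dirty bits into the RAM dirty bitmaps.
 *
 *     memory_global_dirty_log_start();
 *     while (more_dirty_pages_to_send) {
 *         memory_global_dirty_log_sync();
 *         ...                                 (* walk and send dirty pages *)
 *     }
 *     memory_global_dirty_log_stop();
 */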
1724
1725 void mtree_info(bool flatview, bool dispatch_tree, bool owner);
1726
1727 /**
1728 * memory_region_dispatch_read: perform a read directly to the specified
1729 * MemoryRegion.
1730 *
1731 * @mr: #MemoryRegion to access
1732 * @addr: address within that region
1733 * @pval: pointer to a uint64_t into which the read data is written
1734 * @size: size of the access in bytes
1735 * @attrs: memory transaction attributes to use for the access
1736 */
1737 MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
1738 hwaddr addr,
1739 uint64_t *pval,
1740 unsigned size,
1741 MemTxAttrs attrs);
1742 /**
1743 * memory_region_dispatch_write: perform a write directly to the specified
1744 * MemoryRegion.
1745 *
1746 * @mr: #MemoryRegion to access
1747 * @addr: address within that region
1748 * @data: data to write
1749 * @size: size of the access in bytes
1750 * @attrs: memory transaction attributes to use for the access
1751 */
1752 MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
1753 hwaddr addr,
1754 uint64_t data,
1755 unsigned size,
1756 MemTxAttrs attrs);
1757
1758 /**
1759 * address_space_init: initializes an address space
1760 *
1761 * @as: an uninitialized #AddressSpace
1762 * @root: a #MemoryRegion that routes addresses for the address space
1763 * @name: an address space name. The name is only used for debugging
1764 * output.
1765 */
1766 void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);
1767
1768 /**
1769 * address_space_destroy: destroy an address space
1770 *
1771 * Releases all resources associated with an address space. After an address space
1772 * is destroyed, its root memory region (given by address_space_init()) may be destroyed
1773 * as well.
1774 *
1775 * @as: address space to be destroyed
1776 */
1777 void address_space_destroy(AddressSpace *as);
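
/*
 * Usage sketch (illustrative only; "owner" and the names are made up):
 * creating and tearing down a private address space rooted at a device-owned
 * container region.
 *
 *     MemoryRegion root;
 *     AddressSpace as;
 *
 *     memory_region_init(&root, owner, "my-dma-root", UINT64_MAX);
 *     address_space_init(&as, &root, "my-dma-as");
 *     ...
 *     address_space_destroy(&as);     (* after this, &root may be finalized *)
 */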
1778
1779 /**
1780 * address_space_remove_listeners: unregister all listeners of an address space
1781 *
1782 * Removes all callbacks previously registered with memory_listener_register()
1783 * for @as.
1784 *
1785 * @as: an initialized #AddressSpace
1786 */
1787 void address_space_remove_listeners(AddressSpace *as);
1788
1789 /**
1790 * address_space_rw: read from or write to an address space.
1791 *
1792 * Return a MemTxResult indicating whether the operation succeeded
1793 * or failed (eg unassigned memory, device rejected the transaction,
1794 * IOMMU fault).
1795 *
1796 * @as: #AddressSpace to be accessed
1797 * @addr: address within that address space
1798 * @attrs: memory transaction attributes
1799 * @buf: buffer with the data transferred
1800 * @len: the number of bytes to read or write
1801 * @is_write: indicates the transfer direction
1802 */
1803 MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
1804 MemTxAttrs attrs, uint8_t *buf,
1805 hwaddr len, bool is_write);
1806
1807 /**
1808 * address_space_write: write to address space.
1809 *
1810 * Return a MemTxResult indicating whether the operation succeeded
1811 * or failed (eg unassigned memory, device rejected the transaction,
1812 * IOMMU fault).
1813 *
1814 * @as: #AddressSpace to be accessed
1815 * @addr: address within that address space
1816 * @attrs: memory transaction attributes
1817 * @buf: buffer with the data transferred
1818 * @len: the number of bytes to write
1819 */
1820 MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
1821 MemTxAttrs attrs,
1822 const uint8_t *buf, hwaddr len);
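
/*
 * Usage sketch (illustrative only; the guest address and payload are made up):
 * a plain bounce-buffer write into guest memory, checking the MemTxResult.
 * MEMTXATTRS_UNSPECIFIED requests the default transaction attributes.
 *
 *     uint8_t payload[4] = { 0xde, 0xad, 0xbe, 0xef };
 *
 *     if (address_space_write(&address_space_memory, 0x1000,
 *                             MEMTXATTRS_UNSPECIFIED,
 *                             payload, sizeof(payload)) != MEMTX_OK) {
 *         (* unassigned memory, device error or IOMMU fault *)
 *     }
 */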
1823
1824 /**
1825 * address_space_write_rom: write to address space, including ROM.
1826 *
1827 * This function writes to the specified address space and, unlike
1828 * address_space_write(), writes to both ROM and RAM. This is used for non-guest
1829 * writes like writes from the gdb debug stub or initial loading
1830 * of ROM contents.
1831 *
1832 * Note that portions of the write which attempt to write data to
1833 * a device will be silently ignored -- only real RAM and ROM will
1834 * be written to.
1835 *
1836 * Return a MemTxResult indicating whether the operation succeeded
1837 * or failed (eg unassigned memory, device rejected the transaction,
1838 * IOMMU fault).
1839 *
1840 * @as: #AddressSpace to be accessed
1841 * @addr: address within that address space
1842 * @attrs: memory transaction attributes
1843 * @buf: buffer with the data transferred
1844 * @len: the number of bytes to write
1845 */
1846 MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr,
1847 MemTxAttrs attrs,
1848 const uint8_t *buf, hwaddr len);
1849
1850 /* address_space_ld*: load from an address space
1851 * address_space_st*: store to an address space
1852 *
1853 * These functions perform a load or store of the byte, word,
1854 * longword or quad to the specified address within the AddressSpace.
1855 * The _le suffixed functions treat the data as little endian;
1856 * _be indicates big endian; no suffix indicates "same endianness
1857 * as guest CPU".
1858 *
1859 * The "guest CPU endianness" accessors are deprecated for use outside
1860 * target-* code; devices should be CPU-agnostic and use either the LE
1861 * or the BE accessors.
1862 *
1863 * @as: #AddressSpace to be accessed
1864 * @addr: address within that address space
1865 * @val: data value, for stores
1866 * @attrs: memory transaction attributes
1867 * @result: location to write the success/failure of the transaction;
1868 * if NULL, this information is discarded
1869 */
1870
1871 #define SUFFIX
1872 #define ARG1 as
1873 #define ARG1_DECL AddressSpace *as
1874 #include "exec/memory_ldst.inc.h"
1875
1876 #define SUFFIX
1877 #define ARG1 as
1878 #define ARG1_DECL AddressSpace *as
1879 #include "exec/memory_ldst_phys.inc.h"
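
/*
 * Usage sketch (illustrative only; the guest address and value are made up):
 * the generated accessors in use. A device writing a little-endian 32-bit
 * descriptor pointer and reading it back might do the following; @result is
 * discarded by passing NULL.
 *
 *     address_space_stl_le(&address_space_memory, 0x2000, 0x12345678,
 *                          MEMTXATTRS_UNSPECIFIED, NULL);
 *     uint32_t v = address_space_ldl_le(&address_space_memory, 0x2000,
 *                                       MEMTXATTRS_UNSPECIFIED, NULL);
 */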
1880
1881 struct MemoryRegionCache {
1882 void *ptr;
1883 hwaddr xlat;
1884 hwaddr len;
1885 FlatView *fv;
1886 MemoryRegionSection mrs;
1887 bool is_write;
1888 };
1889
1890 #define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .mrs.mr = NULL })
1891
1892
1893 /* address_space_ld*_cached: load from a cached #MemoryRegion
1894 * address_space_st*_cached: store into a cached #MemoryRegion
1895 *
1896 * These functions perform a load or store of the byte, word,
1897 * longword or quad to the specified address. The address is
1898 * a physical address in the AddressSpace, but it must lie within
1899 * a #MemoryRegion that was mapped with address_space_cache_init.
1900 *
1901 * The _le suffixed functions treat the data as little endian;
1902 * _be indicates big endian; no suffix indicates "same endianness
1903 * as guest CPU".
1904 *
1905 * The "guest CPU endianness" accessors are deprecated for use outside
1906 * target-* code; devices should be CPU-agnostic and use either the LE
1907 * or the BE accessors.
1908 *
1909 * @cache: previously initialized #MemoryRegionCache to be accessed
1910 * @addr: address within the address space
1911 * @val: data value, for stores
1912 * @attrs: memory transaction attributes
1913 * @result: location to write the success/failure of the transaction;
1914 * if NULL, this information is discarded
1915 */
1916
1917 #define SUFFIX _cached_slow
1918 #define ARG1 cache
1919 #define ARG1_DECL MemoryRegionCache *cache
1920 #include "exec/memory_ldst.inc.h"
1921
1922 /* Inline fast path for direct RAM access. */
1923 static inline uint8_t address_space_ldub_cached(MemoryRegionCache *cache,
1924 hwaddr addr, MemTxAttrs attrs, MemTxResult *result)
1925 {
1926 assert(addr < cache->len);
1927 if (likely(cache->ptr)) {
1928 return ldub_p(cache->ptr + addr);
1929 } else {
1930 return address_space_ldub_cached_slow(cache, addr, attrs, result);
1931 }
1932 }
1933
1934 static inline void address_space_stb_cached(MemoryRegionCache *cache,
1935 hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result)
1936 {
1937 assert(addr < cache->len);
1938 if (likely(cache->ptr)) {
1939 stb_p(cache->ptr + addr, val);
1940 } else {
1941 address_space_stb_cached_slow(cache, addr, val, attrs, result);
1942 }
1943 }
1944
1945 #define ENDIANNESS _le
1946 #include "exec/memory_ldst_cached.inc.h"
1947
1948 #define ENDIANNESS _be
1949 #include "exec/memory_ldst_cached.inc.h"
1950
1951 #define SUFFIX _cached
1952 #define ARG1 cache
1953 #define ARG1_DECL MemoryRegionCache *cache
1954 #include "exec/memory_ldst_phys.inc.h"
1955
1956 /* address_space_cache_init: prepare for repeated access to a physical
1957 * memory region
1958 *
1959 * @cache: #MemoryRegionCache to be filled
1960 * @as: #AddressSpace to be accessed
1961 * @addr: address within that address space
1962 * @len: length of buffer
1963 * @is_write: indicates the transfer direction
1964 *
1965 * Will only work with RAM, and may map a subset of the requested range by
1966 * returning a value that is less than @len. On failure, return a negative
1967 * errno value.
1968 *
1969 * Because it only works with RAM, this function can be used for
1970 * read-modify-write operations. In this case, is_write should be %true.
1971 *
1972 * Note that addresses passed to the address_space_*_cached functions
1973 * are relative to @addr.
1974 */
1975 int64_t address_space_cache_init(MemoryRegionCache *cache,
1976 AddressSpace *as,
1977 hwaddr addr,
1978 hwaddr len,
1979 bool is_write);
1980
1981 /**
1982 * address_space_cache_invalidate: complete a write to a #MemoryRegionCache
1983 *
1984 * @cache: The #MemoryRegionCache to operate on.
1985 * @addr: The first physical address that was written, relative to the
1986 * address that was passed to @address_space_cache_init.
1987 * @access_len: The number of bytes that were written starting at @addr.
1988 */
1989 void address_space_cache_invalidate(MemoryRegionCache *cache,
1990 hwaddr addr,
1991 hwaddr access_len);
1992
1993 /**
1994 * address_space_cache_destroy: free a #MemoryRegionCache
1995 *
1996 * @cache: The #MemoryRegionCache whose memory should be released.
1997 */
1998 void address_space_cache_destroy(MemoryRegionCache *cache);
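
/*
 * Usage sketch (illustrative only; "ring_gpa" and "ring_len" are made up):
 * caching a guest-RAM structure, for example a virtio-style ring, for
 * repeated access. Note that addresses passed to the *_cached accessors are
 * relative to the base passed to address_space_cache_init().
 *
 *     MemoryRegionCache cache = MEMORY_REGION_CACHE_INVALID;
 *     uint16_t idx;
 *
 *     if (address_space_cache_init(&cache, &address_space_memory,
 *                                  ring_gpa, ring_len, true) < 0) {
 *         return;                            (* not RAM, or out of range *)
 *     }
 *     idx = address_space_lduw_le_cached(&cache, 2,
 *                                        MEMTXATTRS_UNSPECIFIED, NULL);
 *     address_space_stw_le_cached(&cache, 2, idx + 1,
 *                                 MEMTXATTRS_UNSPECIFIED, NULL);
 *     address_space_cache_invalidate(&cache, 2, sizeof(uint16_t));
 *     address_space_cache_destroy(&cache);
 */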
1999
2000 /* address_space_get_iotlb_entry: translate an address into an IOTLB
2001 * entry. Should be called from an RCU critical section.
2002 */
2003 IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
2004 bool is_write, MemTxAttrs attrs);
2005
2006 /* address_space_translate: translate an address range within an address space
2007 * into a MemoryRegion and an address range within that region. Should be
2008 * called from an RCU critical section so that the last reference to the
2009 * returned region cannot disappear after address_space_translate returns.
2010 *
2011 * @fv: #FlatView to be accessed
2012 * @addr: address within that address space
2013 * @xlat: pointer to address within the returned memory region section's
2014 * #MemoryRegion.
2015 * @len: pointer to length
2016 * @is_write: indicates the transfer direction
2017 * @attrs: memory attributes
2018 */
2019 MemoryRegion *flatview_translate(FlatView *fv,
2020 hwaddr addr, hwaddr *xlat,
2021 hwaddr *len, bool is_write,
2022 MemTxAttrs attrs);
2023
2024 static inline MemoryRegion *address_space_translate(AddressSpace *as,
2025 hwaddr addr, hwaddr *xlat,
2026 hwaddr *len, bool is_write,
2027 MemTxAttrs attrs)
2028 {
2029 return flatview_translate(address_space_to_flatview(as),
2030 addr, xlat, len, is_write, attrs);
2031 }
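
/*
 * Usage sketch (illustrative only; "gpa" is a made-up guest physical
 * address): translating an address under the RCU read lock and checking
 * whether the target is RAM before touching it.
 *
 *     hwaddr xlat, len = 4;
 *     MemoryRegion *mr;
 *
 *     rcu_read_lock();
 *     mr = address_space_translate(&address_space_memory, gpa, &xlat, &len,
 *                                  false, MEMTXATTRS_UNSPECIFIED);
 *     if (memory_region_is_ram(mr) && len >= 4) {
 *         (* safe to access the first 4 bytes of the region at @xlat *)
 *     }
 *     rcu_read_unlock();
 */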
2032
2033 /* address_space_access_valid: check for validity of accessing an address
2034 * space range
2035 *
2036 * Check whether memory is assigned to the given address space range, and
2037 * access is permitted by any IOMMU regions that are active for the address
2038 * space.
2039 *
2040 * For now, addr and len should be aligned to a page size. This limitation
2041 * will be lifted in the future.
2042 *
2043 * @as: #AddressSpace to be accessed
2044 * @addr: address within that address space
2045 * @len: length of the area to be checked
2046 * @is_write: indicates the transfer direction
2047 * @attrs: memory attributes
2048 */
2049 bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len,
2050 bool is_write, MemTxAttrs attrs);
2051
2052 /* address_space_map: map a physical memory region into a host virtual address
2053 *
2054 * May map a subset of the requested range, given by and returned in @plen.
2055 * May return %NULL if resources needed to perform the mapping are exhausted.
2056 * Use only for reads OR writes - not for read-modify-write operations.
2057 * Use cpu_register_map_client() to know when retrying the map operation is
2058 * likely to succeed.
2059 *
2060 * @as: #AddressSpace to be accessed
2061 * @addr: address within that address space
2062 * @plen: pointer to length of buffer; updated on return
2063 * @is_write: indicates the transfer direction
2064 * @attrs: memory attributes
2065 */
2066 void *address_space_map(AddressSpace *as, hwaddr addr,
2067 hwaddr *plen, bool is_write, MemTxAttrs attrs);
2068
2069 /* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
2070 *
2071 * Will also mark the memory as dirty if @is_write == %true. @access_len gives
2072 * the amount of memory that was actually read or written by the caller.
2073 *
2074 * @as: #AddressSpace used
2075 * @buffer: host pointer as returned by address_space_map()
2076 * @len: buffer length as returned by address_space_map()
2077 * @access_len: amount of data actually transferred
2078 * @is_write: indicates the transfer direction
2079 */
2080 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2081 int is_write, hwaddr access_len);
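
/*
 * Usage sketch (illustrative only; "gpa", "want" and "local_buf" are made
 * up): a DMA-style read through address_space_map()/address_space_unmap().
 * The mapping may be shorter than requested, so the returned length must be
 * respected.
 *
 *     hwaddr plen = want;
 *     void *host = address_space_map(&address_space_memory, gpa, &plen,
 *                                    false, MEMTXATTRS_UNSPECIFIED);
 *     if (host) {
 *         memcpy(local_buf, host, plen);      (* only @plen bytes are valid *)
 *         address_space_unmap(&address_space_memory, host, plen,
 *                             false, plen);
 *     }
 */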
2082
2083
2084 /* Internal functions, part of the implementation of address_space_read. */
2085 MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
2086 MemTxAttrs attrs, uint8_t *buf, hwaddr len);
2087 MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
2088 MemTxAttrs attrs, uint8_t *buf,
2089 hwaddr len, hwaddr addr1, hwaddr l,
2090 MemoryRegion *mr);
2091 void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);
2092
2093 /* Internal functions, part of the implementation of address_space_read_cached
2094 * and address_space_write_cached. */
2095 void address_space_read_cached_slow(MemoryRegionCache *cache,
2096 hwaddr addr, void *buf, hwaddr len);
2097 void address_space_write_cached_slow(MemoryRegionCache *cache,
2098 hwaddr addr, const void *buf, hwaddr len);
2099
2100 static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
2101 {
2102 if (is_write) {
2103 return memory_region_is_ram(mr) &&
2104 !mr->readonly && !memory_region_is_ram_device(mr);
2105 } else {
2106 return (memory_region_is_ram(mr) && !memory_region_is_ram_device(mr)) ||
2107 memory_region_is_romd(mr);
2108 }
2109 }
2110
2111 /**
2112 * address_space_read: read from an address space.
2113 *
2114 * Return a MemTxResult indicating whether the operation succeeded
2115 * or failed (eg unassigned memory, device rejected the transaction,
2116 * IOMMU fault). Called within RCU critical section.
2117 *
2118 * @as: #AddressSpace to be accessed
2119 * @addr: address within that address space
2120 * @attrs: memory transaction attributes
2121 * @buf: buffer with the data transferred
 * @len: the number of bytes to read
2122 */
2123 static inline __attribute__((__always_inline__))
2124 MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
2125 MemTxAttrs attrs, uint8_t *buf,
2126 hwaddr len)
2127 {
2128 MemTxResult result = MEMTX_OK;
2129 hwaddr l, addr1;
2130 void *ptr;
2131 MemoryRegion *mr;
2132 FlatView *fv;
2133
2134 if (__builtin_constant_p(len)) {
2135 if (len) {
2136 rcu_read_lock();
2137 fv = address_space_to_flatview(as);
2138 l = len;
2139 mr = flatview_translate(fv, addr, &addr1, &l, false, attrs);
2140 if (len == l && memory_access_is_direct(mr, false)) {
2141 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
2142 memcpy(buf, ptr, len);
2143 } else {
2144 result = flatview_read_continue(fv, addr, attrs, buf, len,
2145 addr1, l, mr);
2146 }
2147 rcu_read_unlock();
2148 }
2149 } else {
2150 result = address_space_read_full(as, addr, attrs, buf, len);
2151 }
2152 return result;
2153 }
2154
2155 /**
2156 * address_space_read_cached: read from a cached RAM region
2157 *
2158 * @cache: Cached region to be addressed
2159 * @addr: address relative to the base of the RAM region
2160 * @buf: buffer with the data transferred
2161 * @len: length of the data transferred
2162 */
2163 static inline void
2164 address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
2165 void *buf, hwaddr len)
2166 {
2167 assert(addr < cache->len && len <= cache->len - addr);
2168 if (likely(cache->ptr)) {
2169 memcpy(buf, cache->ptr + addr, len);
2170 } else {
2171 address_space_read_cached_slow(cache, addr, buf, len);
2172 }
2173 }
2174
2175 /**
2176 * address_space_write_cached: write to a cached RAM region
2177 *
2178 * @cache: Cached region to be addressed
2179 * @addr: address relative to the base of the RAM region
2180 * @buf: buffer with the data transferred
2181 * @len: length of the data transferred
2182 */
2183 static inline void
2184 address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
2185 void *buf, hwaddr len)
2186 {
2187 assert(addr < cache->len && len <= cache->len - addr);
2188 if (likely(cache->ptr)) {
2189 memcpy(cache->ptr + addr, buf, len);
2190 } else {
2191 address_space_write_cached_slow(cache, addr, buf, len);
2192 }
2193 }
2194
2195 #endif
2196
2197 #endif