093bc2cd AK |
1 | /* |
2 | * Physical memory management API | |
3 | * | |
4 | * Copyright 2011 Red Hat, Inc. and/or its affiliates | |
5 | * | |
6 | * Authors: | |
7 | * Avi Kivity <avi@redhat.com> | |
8 | * | |
9 | * This work is licensed under the terms of the GNU GPL, version 2. See | |
10 | * the COPYING file in the top-level directory. | |
11 | * | |
12 | */ | |
13 | ||
14 | #ifndef MEMORY_H | |
15 | #define MEMORY_H | |
16 | ||
17 | #ifndef CONFIG_USER_ONLY | |
18 | ||
022c62cb PB |
19 | #include "exec/cpu-common.h" |
20 | #include "exec/hwaddr.h" | |
cc05c43a | 21 | #include "exec/memattrs.h" |
e67c9046 | 22 | #include "exec/memop.h" |
0987d735 | 23 | #include "exec/ramlist.h" |
1b53ecd9 | 24 | #include "qemu/bswap.h" |
1de7afc9 | 25 | #include "qemu/queue.h" |
1de7afc9 | 26 | #include "qemu/int128.h" |
06866575 | 27 | #include "qemu/notify.h" |
b4fefef9 | 28 | #include "qom/object.h" |
374f2981 | 29 | #include "qemu/rcu.h" |
093bc2cd | 30 | |
07bdaa41 PB |
31 | #define RAM_ADDR_INVALID (~(ram_addr_t)0) |
32 | ||
052e87b0 PB |
33 | #define MAX_PHYS_ADDR_SPACE_BITS 62 |
34 | #define MAX_PHYS_ADDR (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1) | |
35 | ||
b4fefef9 | 36 | #define TYPE_MEMORY_REGION "qemu:memory-region" |
8110fa1d EH |
37 | DECLARE_INSTANCE_CHECKER(MemoryRegion, MEMORY_REGION, |
38 | TYPE_MEMORY_REGION) | |
b4fefef9 | 39 | |
3df9d748 | 40 | #define TYPE_IOMMU_MEMORY_REGION "qemu:iommu-memory-region" |
db1015e9 | 41 | typedef struct IOMMUMemoryRegionClass IOMMUMemoryRegionClass; |
8110fa1d EH |
42 | DECLARE_OBJ_CHECKERS(IOMMUMemoryRegion, IOMMUMemoryRegionClass, |
43 | IOMMU_MEMORY_REGION, TYPE_IOMMU_MEMORY_REGION) | |
3df9d748 | 44 | |
ae7a2bca PX |
45 | extern bool global_dirty_log; |
46 | ||
093bc2cd | 47 | typedef struct MemoryRegionOps MemoryRegionOps; |
74901c3b | 48 | |
f7806925 EA |
49 | struct ReservedRegion { |
50 | hwaddr low; | |
51 | hwaddr high; | |
52 | unsigned type; | |
53 | }; | |
54 | ||
30951157 AK |
55 | typedef struct IOMMUTLBEntry IOMMUTLBEntry; |
56 | ||
57 | /* See address_space_translate: bit 0 is read, bit 1 is write. */ | |
58 | typedef enum { | |
59 | IOMMU_NONE = 0, | |
60 | IOMMU_RO = 1, | |
61 | IOMMU_WO = 2, | |
62 | IOMMU_RW = 3, | |
63 | } IOMMUAccessFlags; | |
64 | ||
f06a696d PX |
65 | #define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0)) |
66 | ||
30951157 AK |
67 | struct IOMMUTLBEntry { |
68 | AddressSpace *target_as; | |
69 | hwaddr iova; | |
70 | hwaddr translated_addr; | |
71 | hwaddr addr_mask; /* 0xfff = 4k translation */ | |
72 | IOMMUAccessFlags perm; | |
73 | }; | |
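/*
 * A hedged sketch (not part of this header): how an IOMMU implementation
 * might fill in an IOMMUTLBEntry describing a single read/write 4K page;
 * "as" and the addresses are hypothetical placeholders.
 *
 *     IOMMUTLBEntry entry = {
 *         .target_as = as,                        // address space of the output
 *         .iova = 0x10000,                        // input (I/O virtual) address
 *         .translated_addr = 0x40000,             // output address
 *         .addr_mask = 0xfff,                     // 4K page: low 12 bits covered
 *         .perm = IOMMU_ACCESS_FLAG(true, true),  // readable and writable
 *     };
 */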
74 | ||
cdb30812 PX |
75 | /* |
76 | * Bitmap for different IOMMUNotifier capabilities. Each notifier can | |
77 | * register with one or multiple IOMMU Notifier capability bit(s). | |
78 | */ | |
79 | typedef enum { | |
80 | IOMMU_NOTIFIER_NONE = 0, | |
81 | /* Notify cache invalidations */ | |
82 | IOMMU_NOTIFIER_UNMAP = 0x1, | |
83 | /* Notify entry changes (newly created entries) */ | |
84 | IOMMU_NOTIFIER_MAP = 0x2, | |
85 | } IOMMUNotifierFlag; | |
86 | ||
87 | #define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP) | |
88 | ||
698feb5e PX |
89 | struct IOMMUNotifier; |
90 | typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier, | |
91 | IOMMUTLBEntry *data); | |
92 | ||
cdb30812 | 93 | struct IOMMUNotifier { |
698feb5e | 94 | IOMMUNotify notify; |
cdb30812 | 95 | IOMMUNotifierFlag notifier_flags; |
698feb5e PX |
96 | /* Notify for address space range start <= addr <= end */ |
97 | hwaddr start; | |
98 | hwaddr end; | |
cb1efcf4 | 99 | int iommu_idx; |
cdb30812 PX |
100 | QLIST_ENTRY(IOMMUNotifier) node; |
101 | }; | |
102 | typedef struct IOMMUNotifier IOMMUNotifier; | |
103 | ||
b0e5de93 JH |
104 | /* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */ |
105 | #define RAM_PREALLOC (1 << 0) | |
106 | ||
107 | /* RAM is mmap-ed with MAP_SHARED */ | |
108 | #define RAM_SHARED (1 << 1) | |
109 | ||
110 | /* Only a portion of RAM (used_length) is actually used and migrated; |
111 | * this used_length can change across reboots. |
112 | */ | |
113 | #define RAM_RESIZEABLE (1 << 2) | |
114 | ||
115 | /* UFFDIO_ZEROPAGE is available on this RAMBlock to atomically | |
116 | * zero the page and wake waiting processes. | |
117 | * (Set during postcopy) | |
118 | */ | |
119 | #define RAM_UF_ZEROPAGE (1 << 3) | |
120 | ||
121 | /* RAM can be migrated */ | |
122 | #define RAM_MIGRATABLE (1 << 4) | |
123 | ||
a4de8552 JH |
124 | /* RAM is a persistent kind of memory |
125 | #define RAM_PMEM (1 << 5) | |
126 | ||
698feb5e PX |
127 | static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn, |
128 | IOMMUNotifierFlag flags, | |
cb1efcf4 PM |
129 | hwaddr start, hwaddr end, |
130 | int iommu_idx) | |
698feb5e PX |
131 | { |
132 | n->notify = fn; | |
133 | n->notifier_flags = flags; | |
134 | n->start = start; | |
135 | n->end = end; | |
cb1efcf4 | 136 | n->iommu_idx = iommu_idx; |
698feb5e PX |
137 | } |
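/*
 * Usage sketch (hypothetical; "my_map_notify" and the IOVA range are
 * placeholders): set up a notifier that wants both MAP and UNMAP events
 * for IOMMU index 0 over a 48-bit IOVA range, to be registered later with
 * memory_region_register_iommu_notifier() declared further down.
 *
 *     static void my_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *entry);
 *
 *     IOMMUNotifier n;
 *     iommu_notifier_init(&n, my_map_notify,
 *                         IOMMU_NOTIFIER_ALL,     // MAP | UNMAP
 *                         0, (1ULL << 48) - 1,    // inclusive start/end
 *                         0);                     // iommu_idx
 */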
138 | ||
093bc2cd AK |
139 | /* |
140 | * Memory region callbacks | |
141 | */ | |
142 | struct MemoryRegionOps { | |
143 | /* Read from the memory region. @addr is relative to @mr; @size is | |
144 | * in bytes. */ | |
145 | uint64_t (*read)(void *opaque, | |
a8170e5e | 146 | hwaddr addr, |
093bc2cd AK |
147 | unsigned size); |
148 | /* Write to the memory region. @addr is relative to @mr; @size is | |
149 | * in bytes. */ | |
150 | void (*write)(void *opaque, | |
a8170e5e | 151 | hwaddr addr, |
093bc2cd AK |
152 | uint64_t data, |
153 | unsigned size); | |
154 | ||
cc05c43a PM |
155 | MemTxResult (*read_with_attrs)(void *opaque, |
156 | hwaddr addr, | |
157 | uint64_t *data, | |
158 | unsigned size, | |
159 | MemTxAttrs attrs); | |
160 | MemTxResult (*write_with_attrs)(void *opaque, | |
161 | hwaddr addr, | |
162 | uint64_t data, | |
163 | unsigned size, | |
164 | MemTxAttrs attrs); | |
165 | ||
093bc2cd AK |
166 | enum device_endian endianness; |
167 | /* Guest-visible constraints: */ | |
168 | struct { | |
169 | /* If nonzero, specifies bounds on access sizes beyond which a machine |
170 | * check is thrown. | |
171 | */ | |
172 | unsigned min_access_size; | |
173 | unsigned max_access_size; | |
174 | /* If true, unaligned accesses are supported. Otherwise unaligned | |
175 | * accesses throw machine checks. | |
176 | */ | |
177 | bool unaligned; | |
897fa7cf AK |
178 | /* |
179 | * If present, and returns #false, the transaction is not accepted | |
180 | * by the device (and results in machine dependent behaviour such | |
181 | * as a machine check exception). | |
182 | */ | |
a8170e5e | 183 | bool (*accepts)(void *opaque, hwaddr addr, |
8372d383 PM |
184 | unsigned size, bool is_write, |
185 | MemTxAttrs attrs); | |
093bc2cd AK |
186 | } valid; |
187 | /* Internal implementation constraints: */ | |
188 | struct { | |
189 | /* If nonzero, specifies the minimum size implemented. Smaller sizes | |
190 | * will be rounded upwards and a partial result will be returned. | |
191 | */ | |
192 | unsigned min_access_size; | |
193 | /* If nonzero, specifies the maximum size implemented. Larger sizes | |
194 | * will be done as a series of accesses with smaller sizes. | |
195 | */ | |
196 | unsigned max_access_size; | |
197 | /* If true, unaligned accesses are supported. Otherwise all accesses | |
198 | * are converted to (possibly multiple) naturally aligned accesses. | |
199 | */ | |
edc1ba7a | 200 | bool unaligned; |
093bc2cd AK |
201 | } impl; |
202 | }; | |
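/*
 * A minimal sketch (not part of this header) of MemoryRegionOps for a
 * single 32-bit device register; "MyDevState" and its "ctrl" field are
 * hypothetical.
 *
 *     static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *         return s->ctrl;                    // one register at offset 0
 *     }
 *
 *     static void mydev_write(void *opaque, hwaddr addr, uint64_t data,
 *                             unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *         s->ctrl = data;
 *     }
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *         .valid.min_access_size = 4,        // reject non-32-bit accesses
 *         .valid.max_access_size = 4,
 *     };
 */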
203 | ||
1b53ecd9 MA |
204 | typedef struct MemoryRegionClass { |
205 | /* private */ | |
206 | ObjectClass parent_class; | |
207 | } MemoryRegionClass; | |
208 | ||
209 | ||
f1334de6 AK |
210 | enum IOMMUMemoryRegionAttr { |
211 | IOMMU_ATTR_SPAPR_TCE_FD | |
212 | }; | |
213 | ||
acbef3cc | 214 | /* |
2ce931d0 PM |
215 | * IOMMUMemoryRegionClass: |
216 | * | |
217 | * All IOMMU implementations need to subclass TYPE_IOMMU_MEMORY_REGION | |
218 | * and provide an implementation of at least the @translate method here | |
219 | * to handle requests to the memory region. Other methods are optional. | |
220 | * | |
221 | * The IOMMU implementation must use the IOMMU notifier infrastructure | |
222 | * to report whenever mappings are changed, by calling | |
223 | * memory_region_notify_iommu() (or, if necessary, by calling | |
224 | * memory_region_notify_one() for each registered notifier). | |
21f40209 PM |
225 | * |
226 | * Conceptually an IOMMU provides a mapping from input address | |
227 | * to an output TLB entry. If the IOMMU is aware of memory transaction | |
228 | * attributes and the output TLB entry depends on the transaction | |
229 | * attributes, we represent this using IOMMU indexes. Each index | |
230 | * selects a particular translation table that the IOMMU has: | |
ffb716f0 | 231 | * |
21f40209 | 232 | * @attrs_to_index returns the IOMMU index for a set of transaction attributes |
ffb716f0 | 233 | * |
21f40209 | 234 | * @translate takes an input address and an IOMMU index |
ffb716f0 | 235 | * |
21f40209 PM |
236 | * and the mapping returned can only depend on the input address and the |
237 | * IOMMU index. | |
238 | * | |
239 | * Most IOMMUs don't care about the transaction attributes and support | |
240 | * only a single IOMMU index. A more complex IOMMU might have one index | |
241 | * for secure transactions and one for non-secure transactions. | |
2ce931d0 | 242 | */ |
db1015e9 | 243 | struct IOMMUMemoryRegionClass { |
ffb716f0 | 244 | /* private: */ |
1b53ecd9 | 245 | MemoryRegionClass parent_class; |
30951157 | 246 | |
ffb716f0 EH |
247 | /* public: */ |
248 | /** | |
249 | * @translate: | |
250 | * | |
2ce931d0 PM |
251 | * Return a TLB entry that contains a given address. |
252 | * | |
253 | * The IOMMUAccessFlags indicated via @flag are optional and may | |
254 | * be specified as IOMMU_NONE to indicate that the caller needs | |
255 | * the full translation information for both reads and writes. If | |
256 | * the access flags are specified then the IOMMU implementation | |
257 | * may use this as an optimization, to stop doing a page table | |
258 | * walk as soon as it knows that the requested permissions are not | |
259 | * allowed. If IOMMU_NONE is passed then the IOMMU must do the | |
260 | * full page table walk and report the permissions in the returned | |
261 | * IOMMUTLBEntry. (Note that this implies that an IOMMU may not | |
262 | * return different mappings for reads and writes.) | |
263 | * | |
264 | * The returned information remains valid while the caller is | |
265 | * holding the big QEMU lock or is inside an RCU critical section; | |
266 | * if the caller wishes to cache the mapping beyond that it must | |
267 | * register an IOMMU notifier so it can invalidate its cached | |
268 | * information when the IOMMU mapping changes. | |
269 | * | |
270 | * @iommu: the IOMMUMemoryRegion | |
ffb716f0 | 271 | * |
2ce931d0 | 272 | * @hwaddr: address to be translated within the memory region |
ffb716f0 EH |
273 | * |
274 | * @flag: requested access permission | |
275 | * | |
2c91bcf2 | 276 | * @iommu_idx: IOMMU index for the translation |
bf55b7af | 277 | */ |
3df9d748 | 278 | IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr, |
2c91bcf2 | 279 | IOMMUAccessFlags flag, int iommu_idx); |
ffb716f0 EH |
280 | /** |
281 | * @get_min_page_size: | |
282 | * | |
283 | * Returns minimum supported page size in bytes. | |
284 | * | |
2ce931d0 PM |
285 | * If this method is not provided then the minimum is assumed to |
286 | * be TARGET_PAGE_SIZE. | |
287 | * | |
288 | * @iommu: the IOMMUMemoryRegion | |
289 | */ | |
3df9d748 | 290 | uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu); |
ffb716f0 EH |
291 | /** |
292 | * @notify_flag_changed: | |
293 | * | |
294 | * Called when IOMMU Notifier flag changes (ie when the set of | |
2ce931d0 PM |
295 | * events which IOMMU users are requesting notification for changes). |
296 | * Optional method -- need not be provided if the IOMMU does not | |
297 | * need to know exactly which events must be notified. | |
298 | * | |
299 | * @iommu: the IOMMUMemoryRegion | |
ffb716f0 | 300 | * |
2ce931d0 | 301 | * @old_flags: events which previously needed to be notified |
ffb716f0 | 302 | * |
2ce931d0 | 303 | * @new_flags: events which now need to be notified |
549d4005 EA |
304 | * |
305 | * Returns 0 on success, or a negative errno; in particular | |
306 | * returns -EINVAL if the new flag bitmap is not supported by the | |
307 | * IOMMU memory region. In case of failure, the method must |
308 | * additionally set @errp. |
2ce931d0 | 309 | */ |
549d4005 EA |
310 | int (*notify_flag_changed)(IOMMUMemoryRegion *iommu, |
311 | IOMMUNotifierFlag old_flags, | |
312 | IOMMUNotifierFlag new_flags, | |
313 | Error **errp); | |
ffb716f0 EH |
314 | /** |
315 | * @replay: | |
316 | * | |
317 | * Called to handle memory_region_iommu_replay(). | |
2ce931d0 PM |
318 | * |
319 | * The default implementation of memory_region_iommu_replay() is to | |
320 | * call the IOMMU translate method for every page in the address space | |
321 | * with flag == IOMMU_NONE and then call the notifier if translate | |
322 | * returns a valid mapping. If this method is implemented then it | |
323 | * overrides the default behaviour, and must provide the full semantics | |
324 | * of memory_region_iommu_replay(), by calling @notifier for every | |
325 | * translation present in the IOMMU. | |
326 | * | |
327 | * Optional method -- an IOMMU only needs to provide this method | |
328 | * if the default is inefficient or produces undesirable side effects. | |
329 | * | |
330 | * Note: this is not related to record-and-replay functionality. | |
331 | */ | |
3df9d748 | 332 | void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier); |
f1334de6 | 333 | |
ffb716f0 EH |
334 | /** |
335 | * @get_attr: | |
336 | * | |
337 | * Get IOMMU misc attributes. This is an optional method that | |
2ce931d0 PM |
338 | * can be used to allow users of the IOMMU to get implementation-specific |
339 | * information. The IOMMU implements this method to handle calls | |
340 | * by IOMMU users to memory_region_iommu_get_attr() by filling in | |
341 | * the arbitrary data pointer for any IOMMUMemoryRegionAttr values that | |
342 | * the IOMMU supports. If the method is unimplemented then | |
343 | * memory_region_iommu_get_attr() will always return -EINVAL. | |
344 | * | |
345 | * @iommu: the IOMMUMemoryRegion | |
ffb716f0 | 346 | * |
2ce931d0 | 347 | * @attr: attribute being queried |
ffb716f0 | 348 | * |
2ce931d0 PM |
349 | * @data: memory to fill in with the attribute data |
350 | * | |
351 | * Returns 0 on success, or a negative errno; in particular | |
352 | * returns -EINVAL for unrecognized or unimplemented attribute types. | |
353 | */ | |
354 | int (*get_attr)(IOMMUMemoryRegion *iommu, enum IOMMUMemoryRegionAttr attr, | |
f1334de6 | 355 | void *data); |
21f40209 | 356 | |
ffb716f0 EH |
357 | /** |
358 | * @attrs_to_index: | |
359 | * | |
360 | * Return the IOMMU index to use for a given set of transaction attributes. | |
21f40209 PM |
361 | * |
362 | * Optional method: if an IOMMU only supports a single IOMMU index then | |
363 | * the default implementation of memory_region_iommu_attrs_to_index() | |
364 | * will return 0. | |
365 | * | |
366 | * The indexes supported by an IOMMU must be contiguous, starting at 0. | |
367 | * | |
368 | * @iommu: the IOMMUMemoryRegion | |
369 | * @attrs: memory transaction attributes | |
370 | */ | |
371 | int (*attrs_to_index)(IOMMUMemoryRegion *iommu, MemTxAttrs attrs); | |
372 | ||
ffb716f0 EH |
373 | /** |
374 | * @num_indexes: | |
375 | * | |
376 | * Return the number of IOMMU indexes this IOMMU supports. | |
21f40209 PM |
377 | * |
378 | * Optional method: if this method is not provided, then | |
379 | * memory_region_iommu_num_indexes() will return 1, indicating that | |
380 | * only a single IOMMU index is supported. | |
381 | * | |
382 | * @iommu: the IOMMUMemoryRegion | |
383 | */ | |
384 | int (*num_indexes)(IOMMUMemoryRegion *iommu); | |
db1015e9 | 385 | }; |
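/*
 * A hedged sketch (hypothetical subclass, not QEMU code) of the one
 * mandatory method: an IOMMU that maps every 4K page 1:1 into
 * &address_space_memory, and a class_init hooking it up. Instances of
 * the subclass are initialized with memory_region_init_iommu(), declared
 * later in this header.
 *
 *     static IOMMUTLBEntry my_iommu_translate(IOMMUMemoryRegion *iommu,
 *                                             hwaddr addr,
 *                                             IOMMUAccessFlags flag,
 *                                             int iommu_idx)
 *     {
 *         IOMMUTLBEntry entry = {
 *             .target_as = &address_space_memory,
 *             .iova = addr & ~(hwaddr)0xfff,
 *             .translated_addr = addr & ~(hwaddr)0xfff,   // identity map
 *             .addr_mask = 0xfff,
 *             .perm = IOMMU_RW,
 *         };
 *         return entry;
 *     }
 *
 *     static void my_iommu_class_init(ObjectClass *klass, void *data)
 *     {
 *         IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
 *
 *         imrc->translate = my_iommu_translate;   // only @translate is required
 *     }
 */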
30951157 | 386 | |
093bc2cd | 387 | typedef struct CoalescedMemoryRange CoalescedMemoryRange; |
3e9d69e7 | 388 | typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd; |
093bc2cd | 389 | |
08226b44 PB |
390 | /** MemoryRegion: |
391 | * | |
392 | * A struct representing a memory region. | |
393 | */ | |
093bc2cd | 394 | struct MemoryRegion { |
b4fefef9 | 395 | Object parent_obj; |
a676854f | 396 | |
08226b44 | 397 | /* private: */ |
a676854f PB |
398 | |
399 | /* The following fields should fit in a cache line */ | |
400 | bool romd_mode; | |
401 | bool ram; | |
402 | bool subpage; | |
403 | bool readonly; /* For RAM regions */ | |
c26763f8 | 404 | bool nonvolatile; |
a676854f PB |
405 | bool rom_device; |
406 | bool flush_coalesced_mmio; | |
a676854f | 407 | uint8_t dirty_log_mask; |
3df9d748 | 408 | bool is_iommu; |
58eaa217 | 409 | RAMBlock *ram_block; |
612263cf | 410 | Object *owner; |
a676854f PB |
411 | |
412 | const MemoryRegionOps *ops; | |
093bc2cd | 413 | void *opaque; |
feca4ac1 | 414 | MemoryRegion *container; |
08dafab4 | 415 | Int128 size; |
a8170e5e | 416 | hwaddr addr; |
545e92e0 | 417 | void (*destructor)(MemoryRegion *mr); |
a2b257d6 | 418 | uint64_t align; |
14a3c10a | 419 | bool terminates; |
21e00fa5 | 420 | bool ram_device; |
6bba19ba | 421 | bool enabled; |
1660e72d | 422 | bool warning_printed; /* For reservations */ |
deb809ed | 423 | uint8_t vga_logging_count; |
093bc2cd | 424 | MemoryRegion *alias; |
a8170e5e | 425 | hwaddr alias_offset; |
d33382da | 426 | int32_t priority; |
b58deb34 | 427 | QTAILQ_HEAD(, MemoryRegion) subregions; |
093bc2cd | 428 | QTAILQ_ENTRY(MemoryRegion) subregions_link; |
b58deb34 | 429 | QTAILQ_HEAD(, CoalescedMemoryRange) coalesced; |
302fa283 | 430 | const char *name; |
3e9d69e7 AK |
431 | unsigned ioeventfd_nb; |
432 | MemoryRegionIoeventfd *ioeventfds; | |
3df9d748 AK |
433 | }; |
434 | ||
435 | struct IOMMUMemoryRegion { | |
436 | MemoryRegion parent_obj; | |
437 | ||
cdb30812 | 438 | QLIST_HEAD(, IOMMUNotifier) iommu_notify; |
5bf3d319 | 439 | IOMMUNotifierFlag iommu_notify_flags; |
093bc2cd AK |
440 | }; |
441 | ||
512fa408 PX |
442 | #define IOMMU_NOTIFIER_FOREACH(n, mr) \ |
443 | QLIST_FOREACH((n), &(mr)->iommu_notify, node) | |
444 | ||
c2fc83e8 | 445 | /** |
301302f0 | 446 | * struct MemoryListener: callbacks structure for updates to the physical memory map |
c2fc83e8 PB |
447 | * |
448 | * Allows a component to adjust to changes in the guest-visible memory map. | |
449 | * Use with memory_listener_register() and memory_listener_unregister(). | |
450 | */ | |
451 | struct MemoryListener { | |
5d248213 PB |
452 | /** |
453 | * @begin: | |
454 | * | |
455 | * Called at the beginning of an address space update transaction. | |
456 | * Followed by calls to #MemoryListener.region_add(), | |
457 | * #MemoryListener.region_del(), #MemoryListener.region_nop(), | |
458 | * #MemoryListener.log_start() and #MemoryListener.log_stop() in | |
459 | * increasing address order. | |
460 | * | |
461 | * @listener: The #MemoryListener. | |
462 | */ | |
c2fc83e8 | 463 | void (*begin)(MemoryListener *listener); |
5d248213 PB |
464 | |
465 | /** | |
466 | * @commit: | |
467 | * | |
468 | * Called at the end of an address space update transaction, | |
469 | * after the last call to #MemoryListener.region_add(), | |
470 | * #MemoryListener.region_del() or #MemoryListener.region_nop(), | |
471 | * #MemoryListener.log_start() and #MemoryListener.log_stop(). | |
472 | * | |
473 | * @listener: The #MemoryListener. | |
474 | */ | |
c2fc83e8 | 475 | void (*commit)(MemoryListener *listener); |
5d248213 PB |
476 | |
477 | /** | |
478 | * @region_add: | |
479 | * | |
480 | * Called during an address space update transaction, | |
481 | * for a section of the address space that is new in this address |
482 | * space since the last transaction. | |
483 | * | |
484 | * @listener: The #MemoryListener. | |
485 | * @section: The new #MemoryRegionSection. | |
486 | */ | |
c2fc83e8 | 487 | void (*region_add)(MemoryListener *listener, MemoryRegionSection *section); |
5d248213 PB |
488 | |
489 | /** | |
490 | * @region_del: | |
491 | * | |
492 | * Called during an address space update transaction, | |
493 | * for a section of the address space that has disappeared in the address | |
494 | * space since the last transaction. | |
495 | * | |
496 | * @listener: The #MemoryListener. | |
497 | * @section: The old #MemoryRegionSection. | |
498 | */ | |
c2fc83e8 | 499 | void (*region_del)(MemoryListener *listener, MemoryRegionSection *section); |
5d248213 PB |
500 | |
501 | /** | |
502 | * @region_nop: | |
503 | * | |
504 | * Called during an address space update transaction, | |
505 | * for a section of the address space that is in the same place in the address | |
506 | * space as in the last transaction. | |
507 | * | |
508 | * @listener: The #MemoryListener. | |
509 | * @section: The #MemoryRegionSection. | |
510 | */ | |
c2fc83e8 | 511 | void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section); |
5d248213 PB |
512 | |
513 | /** | |
514 | * @log_start: | |
515 | * | |
516 | * Called during an address space update transaction, after | |
517 | * one of #MemoryListener.region_add(),#MemoryListener.region_del() or | |
518 | * #MemoryListener.region_nop(), if dirty memory logging clients have | |
519 | * become active since the last transaction. | |
520 | * | |
521 | * @listener: The #MemoryListener. | |
522 | * @section: The #MemoryRegionSection. | |
523 | * @old: A bitmap of dirty memory logging clients that were active in | |
524 | * the previous transaction. | |
525 | * @new: A bitmap of dirty memory logging clients that are active in | |
526 | * the current transaction. | |
527 | */ | |
b2dfd71c PB |
528 | void (*log_start)(MemoryListener *listener, MemoryRegionSection *section, |
529 | int old, int new); | |
5d248213 PB |
530 | |
531 | /** | |
532 | * @log_stop: | |
533 | * | |
534 | * Called during an address space update transaction, after | |
535 | * one of #MemoryListener.region_add(), #MemoryListener.region_del() or | |
536 | * #MemoryListener.region_nop() and possibly after | |
537 | * #MemoryListener.log_start(), if dirty memory logging clients have | |
538 | * become inactive since the last transaction. | |
539 | * | |
540 | * @listener: The #MemoryListener. | |
541 | * @section: The #MemoryRegionSection. | |
542 | * @old: A bitmap of dirty memory logging clients that were active in | |
543 | * the previous transaction. | |
544 | * @new: A bitmap of dirty memory logging clients that are active in | |
545 | * the current transaction. | |
546 | */ | |
b2dfd71c PB |
547 | void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section, |
548 | int old, int new); | |
5d248213 PB |
549 | |
550 | /** | |
551 | * @log_sync: | |
552 | * | |
553 | * Called by memory_region_snapshot_and_clear_dirty() and | |
554 | * memory_global_dirty_log_sync(), before accessing QEMU's "official" | |
555 | * copy of the dirty memory bitmap for a #MemoryRegionSection. | |
556 | * | |
557 | * @listener: The #MemoryListener. | |
558 | * @section: The #MemoryRegionSection. | |
559 | */ | |
c2fc83e8 | 560 | void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section); |
5d248213 PB |
561 | |
562 | /** | |
563 | * @log_clear: | |
564 | * | |
565 | * Called before reading the dirty memory bitmap for a | |
566 | * #MemoryRegionSection. | |
567 | * | |
568 | * @listener: The #MemoryListener. | |
569 | * @section: The #MemoryRegionSection. | |
570 | */ | |
077874e0 | 571 | void (*log_clear)(MemoryListener *listener, MemoryRegionSection *section); |
5d248213 PB |
572 | |
573 | /** | |
574 | * @log_global_start: | |
575 | * | |
576 | * Called by memory_global_dirty_log_start(), which | |
577 | * enables the %DIRTY_LOG_MIGRATION client on all memory regions in | |
578 | * the address space. #MemoryListener.log_global_start() is also | |
579 | * called when a #MemoryListener is added, if global dirty logging is | |
580 | * active at that time. | |
581 | * | |
582 | * @listener: The #MemoryListener. | |
583 | */ | |
c2fc83e8 | 584 | void (*log_global_start)(MemoryListener *listener); |
5d248213 PB |
585 | |
586 | /** | |
587 | * @log_global_stop: | |
588 | * | |
589 | * Called by memory_global_dirty_log_stop(), which | |
590 | * disables the %DIRTY_LOG_MIGRATION client on all memory regions in | |
591 | * the address space. | |
592 | * | |
593 | * @listener: The #MemoryListener. | |
594 | */ | |
c2fc83e8 | 595 | void (*log_global_stop)(MemoryListener *listener); |
5d248213 PB |
596 | |
597 | /** | |
598 | * @log_global_after_sync: | |
599 | * | |
600 | * Called after reading the dirty memory bitmap | |
601 | * for any #MemoryRegionSection. | |
602 | * | |
603 | * @listener: The #MemoryListener. | |
604 | */ | |
9458a9a1 | 605 | void (*log_global_after_sync)(MemoryListener *listener); |
5d248213 PB |
606 | |
607 | /** | |
608 | * @eventfd_add: | |
609 | * | |
610 | * Called during an address space update transaction, | |
611 | * for a section of the address space that has had a new ioeventfd | |
612 | * registration since the last transaction. | |
613 | * | |
614 | * @listener: The #MemoryListener. | |
615 | * @section: The new #MemoryRegionSection. | |
616 | * @match_data: The @match_data parameter for the new ioeventfd. | |
617 | * @data: The @data parameter for the new ioeventfd. | |
618 | * @e: The #EventNotifier parameter for the new ioeventfd. | |
619 | */ | |
c2fc83e8 PB |
620 | void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section, |
621 | bool match_data, uint64_t data, EventNotifier *e); | |
5d248213 PB |
622 | |
623 | /** | |
624 | * @eventfd_del: | |
625 | * | |
626 | * Called during an address space update transaction, | |
627 | * for a section of the address space that has dropped an ioeventfd | |
628 | * registration since the last transaction. | |
629 | * | |
630 | * @listener: The #MemoryListener. | |
631 | * @section: The new #MemoryRegionSection. | |
632 | * @match_data: The @match_data parameter for the dropped ioeventfd. | |
633 | * @data: The @data parameter for the dropped ioeventfd. | |
634 | * @e: The #EventNotifier parameter for the dropped ioeventfd. | |
635 | */ | |
c2fc83e8 PB |
636 | void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section, |
637 | bool match_data, uint64_t data, EventNotifier *e); | |
5d248213 PB |
638 | |
639 | /** | |
640 | * @coalesced_io_add: | |
641 | * | |
642 | * Called during an address space update transaction, | |
643 | * for a section of the address space that has had a new coalesced | |
644 | * MMIO range registration since the last transaction. | |
645 | * | |
646 | * @listener: The #MemoryListener. | |
647 | * @section: The new #MemoryRegionSection. | |
648 | * @addr: The starting address for the coalesced MMIO range. | |
649 | * @len: The length of the coalesced MMIO range. | |
650 | */ | |
e6d34aee | 651 | void (*coalesced_io_add)(MemoryListener *listener, MemoryRegionSection *section, |
c2fc83e8 | 652 | hwaddr addr, hwaddr len); |
5d248213 PB |
653 | |
654 | /** | |
655 | * @coalesced_io_del: | |
656 | * | |
657 | * Called during an address space update transaction, | |
658 | * for a section of the address space that has dropped a coalesced | |
659 | * MMIO range since the last transaction. | |
660 | * | |
661 | * @listener: The #MemoryListener. | |
662 | * @section: The new #MemoryRegionSection. | |
663 | * @addr: The starting address for the coalesced MMIO range. | |
664 | * @len: The length of the coalesced MMIO range. | |
665 | */ | |
e6d34aee | 666 | void (*coalesced_io_del)(MemoryListener *listener, MemoryRegionSection *section, |
c2fc83e8 | 667 | hwaddr addr, hwaddr len); |
5d248213 PB |
668 | /** |
669 | * @priority: | |
670 | * | |
671 | * Govern the order in which memory listeners are invoked. Lower priorities | |
672 | * are invoked earlier for "add" or "start" callbacks, and later for "delete" | |
673 | * or "stop" callbacks. | |
674 | */ | |
c2fc83e8 | 675 | unsigned priority; |
5d248213 PB |
676 | |
677 | /* private: */ | |
d45fa784 | 678 | AddressSpace *address_space; |
c2fc83e8 | 679 | QTAILQ_ENTRY(MemoryListener) link; |
9a54635d | 680 | QTAILQ_ENTRY(MemoryListener) link_as; |
c2fc83e8 PB |
681 | }; |
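/*
 * Usage sketch (hypothetical callbacks): a component that only needs to
 * track sections being mapped and unmapped fills in just those hooks and
 * registers against one address space, e.g. from its realize function.
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *         .region_del = my_region_del,
 *         .priority = 10,                 // after lower-priority listeners
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 */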
682 | ||
9ad2bbc1 | 683 | /** |
301302f0 | 684 | * struct AddressSpace: describes a mapping of addresses to #MemoryRegion objects |
9ad2bbc1 AK |
685 | */ |
686 | struct AddressSpace { | |
08226b44 | 687 | /* private: */ |
374f2981 | 688 | struct rcu_head rcu; |
7dca8043 | 689 | char *name; |
9ad2bbc1 | 690 | MemoryRegion *root; |
374f2981 PB |
691 | |
692 | /* Accessed via RCU. */ | |
9ad2bbc1 | 693 | struct FlatView *current_map; |
374f2981 | 694 | |
9ad2bbc1 AK |
695 | int ioeventfd_nb; |
696 | struct MemoryRegionIoeventfd *ioeventfds; | |
eae3eb3e | 697 | QTAILQ_HEAD(, MemoryListener) listeners; |
0d673e36 | 698 | QTAILQ_ENTRY(AddressSpace) address_spaces_link; |
9ad2bbc1 AK |
699 | }; |
700 | ||
785a507e PB |
701 | typedef struct AddressSpaceDispatch AddressSpaceDispatch; |
702 | typedef struct FlatRange FlatRange; | |
703 | ||
704 | /* Flattened global view of current active memory hierarchy. Kept in sorted | |
705 | * order. | |
706 | */ | |
707 | struct FlatView { | |
708 | struct rcu_head rcu; | |
709 | unsigned ref; | |
710 | FlatRange *ranges; | |
711 | unsigned nr; | |
712 | unsigned nr_allocated; | |
713 | struct AddressSpaceDispatch *dispatch; | |
714 | MemoryRegion *root; | |
715 | }; | |
716 | ||
717 | static inline FlatView *address_space_to_flatview(AddressSpace *as) | |
718 | { | |
d73415a3 | 719 | return qatomic_rcu_read(&as->current_map); |
785a507e PB |
720 | } |
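/*
 * Sketch: the returned FlatView is only stable inside an RCU critical
 * section (or while the big QEMU lock is held), so callers typically do:
 *
 *     RCU_READ_LOCK_GUARD();
 *     FlatView *fv = address_space_to_flatview(as);
 *     // ... use fv before leaving the critical section ...
 */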
721 | ||
16620684 | 722 | |
e2177955 | 723 | /** |
301302f0 | 724 | * struct MemoryRegionSection: describes a fragment of a #MemoryRegion |
e2177955 AK |
725 | * |
726 | * @mr: the region, or %NULL if empty | |
57914ecb | 727 | * @fv: the flat view of the address space the region is mapped in |
e2177955 AK |
728 | * @offset_within_region: the beginning of the section, relative to @mr's start |
729 | * @size: the size of the section; will not exceed @mr's boundaries | |
730 | * @offset_within_address_space: the address of the first byte of the section | |
731 | * relative to the region's address space | |
7a8499e8 | 732 | * @readonly: writes to this section are ignored |
c26763f8 | 733 | * @nonvolatile: this section is non-volatile |
e2177955 AK |
734 | */ |
735 | struct MemoryRegionSection { | |
44f85d32 | 736 | Int128 size; |
e2177955 | 737 | MemoryRegion *mr; |
16620684 | 738 | FlatView *fv; |
a8170e5e | 739 | hwaddr offset_within_region; |
a8170e5e | 740 | hwaddr offset_within_address_space; |
7a8499e8 | 741 | bool readonly; |
c26763f8 | 742 | bool nonvolatile; |
e2177955 AK |
743 | }; |
744 | ||
9366cf02 DDAG |
745 | static inline bool MemoryRegionSection_eq(MemoryRegionSection *a, |
746 | MemoryRegionSection *b) | |
747 | { | |
748 | return a->mr == b->mr && | |
749 | a->fv == b->fv && | |
750 | a->offset_within_region == b->offset_within_region && | |
751 | a->offset_within_address_space == b->offset_within_address_space && | |
752 | int128_eq(a->size, b->size) && | |
753 | a->readonly == b->readonly && | |
754 | a->nonvolatile == b->nonvolatile; | |
755 | } | |
756 | ||
093bc2cd AK |
757 | /** |
758 | * memory_region_init: Initialize a memory region | |
759 | * | |
69ddaf66 | 760 | * The region typically acts as a container for other memory regions. Use |
093bc2cd AK |
761 | * memory_region_add_subregion() to add subregions. |
762 | * | |
763 | * @mr: the #MemoryRegion to be initialized | |
2c9b15ca | 764 | * @owner: the object that tracks the region's reference count |
093bc2cd AK |
765 | * @name: used for debugging; not visible to the user or ABI |
766 | * @size: size of the region; any subregions beyond this size will be clipped | |
767 | */ | |
768 | void memory_region_init(MemoryRegion *mr, | |
2c9b15ca | 769 | struct Object *owner, |
093bc2cd AK |
770 | const char *name, |
771 | uint64_t size); | |
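/*
 * Typical usage sketch (names hypothetical): create a 4K container and
 * place a subregion into it with memory_region_add_subregion(), which is
 * declared further down in this header.
 *
 *     memory_region_init(&s->container, OBJECT(s), "my-container", 0x1000);
 *     memory_region_add_subregion(&s->container, 0, &s->mmio);
 */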
46637be2 PB |
772 | |
773 | /** | |
774 | * memory_region_ref: Add 1 to a memory region's reference count | |
775 | * | |
776 | * Whenever memory regions are accessed outside the BQL, they need to be | |
777 | * preserved against hot-unplug. MemoryRegions actually do not have their | |
778 | * own reference count; they piggyback on a QOM object, their "owner". | |
779 | * This function adds a reference to the owner. | |
780 | * | |
781 | * All MemoryRegions must have an owner if they can disappear, even if the | |
782 | * device they belong to operates exclusively under the BQL. This is because | |
783 | * the region could be returned at any time by memory_region_find, and this | |
784 | * is usually under guest control. | |
785 | * | |
786 | * @mr: the #MemoryRegion | |
787 | */ | |
788 | void memory_region_ref(MemoryRegion *mr); | |
789 | ||
790 | /** | |
791 | * memory_region_unref: Remove 1 from a memory region's reference count |
792 | * | |
793 | * Whenever memory regions are accessed outside the BQL, they need to be | |
794 | * preserved against hot-unplug. MemoryRegions actually do not have their | |
795 | * own reference count; they piggyback on a QOM object, their "owner". | |
796 | * This function removes a reference to the owner and possibly destroys it. | |
797 | * | |
798 | * @mr: the #MemoryRegion | |
799 | */ | |
800 | void memory_region_unref(MemoryRegion *mr); | |
801 | ||
093bc2cd AK |
802 | /** |
803 | * memory_region_init_io: Initialize an I/O memory region. | |
804 | * | |
69ddaf66 | 805 | * Accesses into the region will cause the callbacks in @ops to be called. |
093bc2cd AK |
806 | * If @size is nonzero, subregions will be clipped to @size. |
807 | * | |
808 | * @mr: the #MemoryRegion to be initialized. | |
2c9b15ca | 809 | * @owner: the object that tracks the region's reference count |
093bc2cd AK |
810 | * @ops: a structure containing read and write callbacks to be used when |
811 | * I/O is performed on the region. | |
b6af0975 | 812 | * @opaque: passed to the read and write callbacks of the @ops structure. |
093bc2cd AK |
813 | * @name: used for debugging; not visible to the user or ABI |
814 | * @size: size of the region. | |
815 | */ | |
816 | void memory_region_init_io(MemoryRegion *mr, | |
2c9b15ca | 817 | struct Object *owner, |
093bc2cd AK |
818 | const MemoryRegionOps *ops, |
819 | void *opaque, | |
820 | const char *name, | |
821 | uint64_t size); | |
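/*
 * Usage sketch, continuing the hypothetical "mydev_ops" example shown
 * with MemoryRegionOps above: create a 4K MMIO region backed by those
 * callbacks, with the device state as the opaque pointer.
 *
 *     memory_region_init_io(&s->mmio, OBJECT(s), &mydev_ops, s,
 *                           "mydev-mmio", 0x1000);
 */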
822 | ||
823 | /** | |
1cfe48c1 PM |
824 | * memory_region_init_ram_nomigrate: Initialize RAM memory region. Accesses |
825 | * into the region will modify memory | |
826 | * directly. | |
093bc2cd AK |
827 | * |
828 | * @mr: the #MemoryRegion to be initialized. | |
2c9b15ca | 829 | * @owner: the object that tracks the region's reference count |
e8f5fe2d DDAG |
830 | * @name: Region name, becomes part of RAMBlock name used in migration stream |
831 | * must be unique within any device | |
093bc2cd | 832 | * @size: size of the region. |
49946538 | 833 | * @errp: pointer to Error*, to store an error if it happens. |
a5c0234b PM |
834 | * |
835 | * Note that this function does not do anything to cause the data in the | |
836 | * RAM memory region to be migrated; that is the responsibility of the caller. | |
093bc2cd | 837 | */ |
1cfe48c1 PM |
838 | void memory_region_init_ram_nomigrate(MemoryRegion *mr, |
839 | struct Object *owner, | |
840 | const char *name, | |
841 | uint64_t size, | |
842 | Error **errp); | |
093bc2cd | 843 | |
06329cce MA |
844 | /** |
845 | * memory_region_init_ram_shared_nomigrate: Initialize RAM memory region. | |
846 | * Accesses into the region will | |
847 | * modify memory directly. | |
848 | * | |
849 | * @mr: the #MemoryRegion to be initialized. | |
850 | * @owner: the object that tracks the region's reference count | |
851 | * @name: Region name, becomes part of RAMBlock name used in migration stream | |
852 | * must be unique within any device | |
853 | * @size: size of the region. | |
854 | * @share: allow remapping RAM to different addresses | |
855 | * @errp: pointer to Error*, to store an error if it happens. | |
856 | * | |
857 | * Note that this function is similar to memory_region_init_ram_nomigrate. | |
858 | * The only difference is that part of the RAM region can be remapped. |
859 | */ | |
860 | void memory_region_init_ram_shared_nomigrate(MemoryRegion *mr, | |
861 | struct Object *owner, | |
862 | const char *name, | |
863 | uint64_t size, | |
864 | bool share, | |
865 | Error **errp); | |
866 | ||
60786ef3 MT |
867 | /** |
868 | * memory_region_init_resizeable_ram: Initialize memory region with resizeable | |
869 | * RAM. Accesses into the region will | |
870 | * modify memory directly. Only an initial | |
871 | * portion of this RAM is actually used. | |
872 | * The used size can change across reboots. | |
873 | * | |
874 | * @mr: the #MemoryRegion to be initialized. | |
875 | * @owner: the object that tracks the region's reference count | |
e8f5fe2d DDAG |
876 | * @name: Region name, becomes part of RAMBlock name used in migration stream |
877 | * must be unique within any device | |
60786ef3 MT |
878 | * @size: used size of the region. |
879 | * @max_size: max size of the region. | |
880 | * @resized: callback to notify owner about used size change. | |
881 | * @errp: pointer to Error*, to store an error if it happens. | |
a5c0234b PM |
882 | * |
883 | * Note that this function does not do anything to cause the data in the | |
884 | * RAM memory region to be migrated; that is the responsibility of the caller. | |
60786ef3 MT |
885 | */ |
886 | void memory_region_init_resizeable_ram(MemoryRegion *mr, | |
887 | struct Object *owner, | |
888 | const char *name, | |
889 | uint64_t size, | |
890 | uint64_t max_size, | |
891 | void (*resized)(const char*, | |
892 | uint64_t length, | |
893 | void *host), | |
894 | Error **errp); | |
d5dbde46 | 895 | #ifdef CONFIG_POSIX |
cbfc0171 | 896 | |
0b183fc8 PB |
897 | /** |
898 | * memory_region_init_ram_from_file: Initialize RAM memory region with a | |
899 | * mmap-ed backend. | |
900 | * | |
901 | * @mr: the #MemoryRegion to be initialized. | |
902 | * @owner: the object that tracks the region's reference count | |
e8f5fe2d DDAG |
903 | * @name: Region name, becomes part of RAMBlock name used in migration stream |
904 | * must be unique within any device | |
0b183fc8 | 905 | * @size: size of the region. |
98376843 HZ |
906 | * @align: alignment of the region base address; if 0, the default alignment |
907 | * (getpagesize()) will be used. | |
cbfc0171 JH |
908 | * @ram_flags: Memory region features: |
909 | * - RAM_SHARED: memory must be mmaped with the MAP_SHARED flag | |
a4de8552 | 910 | * - RAM_PMEM: the memory is persistent memory |
cbfc0171 | 911 | Other bits are currently ignored. |
0b183fc8 | 912 | * @path: the path in which to allocate the RAM. |
7f56e740 | 913 | * @errp: pointer to Error*, to store an error if it happens. |
a5c0234b PM |
914 | * |
915 | * Note that this function does not do anything to cause the data in the | |
916 | * RAM memory region to be migrated; that is the responsibility of the caller. | |
0b183fc8 PB |
917 | */ |
918 | void memory_region_init_ram_from_file(MemoryRegion *mr, | |
919 | struct Object *owner, | |
920 | const char *name, | |
921 | uint64_t size, | |
98376843 | 922 | uint64_t align, |
cbfc0171 | 923 | uint32_t ram_flags, |
7f56e740 PB |
924 | const char *path, |
925 | Error **errp); | |
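/*
 * Usage sketch (hypothetical path and size; GiB comes from "qemu/units.h"):
 * back guest RAM with a shared file mapping using the default alignment.
 *
 *     memory_region_init_ram_from_file(mr, owner, "guest.ram", 4 * GiB,
 *                                      0, RAM_SHARED,
 *                                      "/dev/shm/guest-ram", &error_fatal);
 */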
fea617c5 MAL |
926 | |
927 | /** | |
928 | * memory_region_init_ram_from_fd: Initialize RAM memory region with a | |
929 | * mmap-ed backend. | |
930 | * | |
931 | * @mr: the #MemoryRegion to be initialized. | |
932 | * @owner: the object that tracks the region's reference count | |
933 | * @name: the name of the region. | |
934 | * @size: size of the region. | |
935 | * @share: %true if memory must be mmaped with the MAP_SHARED flag | |
936 | * @fd: the fd to mmap. | |
937 | * @errp: pointer to Error*, to store an error if it happens. | |
a5c0234b PM |
938 | * |
939 | * Note that this function does not do anything to cause the data in the | |
940 | * RAM memory region to be migrated; that is the responsibility of the caller. | |
fea617c5 MAL |
941 | */ |
942 | void memory_region_init_ram_from_fd(MemoryRegion *mr, | |
943 | struct Object *owner, | |
944 | const char *name, | |
945 | uint64_t size, | |
946 | bool share, | |
947 | int fd, | |
948 | Error **errp); | |
0b183fc8 PB |
949 | #endif |
950 | ||
093bc2cd | 951 | /** |
1a7e8cae BZ |
952 | * memory_region_init_ram_ptr: Initialize RAM memory region from a |
953 | * user-provided pointer. Accesses into the | |
954 | * region will modify memory directly. | |
093bc2cd AK |
955 | * |
956 | * @mr: the #MemoryRegion to be initialized. | |
2c9b15ca | 957 | * @owner: the object that tracks the region's reference count |
e8f5fe2d DDAG |
958 | * @name: Region name, becomes part of RAMBlock name used in migration stream |
959 | * must be unique within any device | |
093bc2cd AK |
960 | * @size: size of the region. |
961 | * @ptr: memory to be mapped; must contain at least @size bytes. | |
a5c0234b PM |
962 | * |
963 | * Note that this function does not do anything to cause the data in the | |
964 | * RAM memory region to be migrated; that is the responsibility of the caller. | |
093bc2cd AK |
965 | */ |
966 | void memory_region_init_ram_ptr(MemoryRegion *mr, | |
2c9b15ca | 967 | struct Object *owner, |
093bc2cd AK |
968 | const char *name, |
969 | uint64_t size, | |
970 | void *ptr); | |
971 | ||
21e00fa5 AW |
972 | /** |
973 | * memory_region_init_ram_device_ptr: Initialize RAM device memory region from | |
974 | * a user-provided pointer. | |
975 | * | |
976 | * A RAM device represents a mapping to a physical device, such as to a PCI | |
977 | * MMIO BAR of a vfio-pci assigned device. The memory region may be mapped |
978 | * into the VM address space and access to the region will modify memory | |
979 | * directly. However, the memory region should not be included in a memory | |
980 | * dump (device may not be enabled/mapped at the time of the dump), and | |
981 | * operations incompatible with manipulating MMIO should be avoided. Replaces | |
982 | * skip_dump flag. | |
983 | * | |
984 | * @mr: the #MemoryRegion to be initialized. | |
985 | * @owner: the object that tracks the region's reference count | |
986 | * @name: the name of the region. | |
987 | * @size: size of the region. | |
988 | * @ptr: memory to be mapped; must contain at least @size bytes. | |
a5c0234b PM |
989 | * |
990 | * Note that this function does not do anything to cause the data in the | |
991 | * RAM memory region to be migrated; that is the responsibility of the caller. | |
992 | * (For RAM device memory regions, migrating the contents rarely makes sense.) | |
21e00fa5 AW |
993 | */ |
994 | void memory_region_init_ram_device_ptr(MemoryRegion *mr, | |
995 | struct Object *owner, | |
996 | const char *name, | |
997 | uint64_t size, | |
998 | void *ptr); | |
999 | ||
093bc2cd AK |
1000 | /** |
1001 | * memory_region_init_alias: Initialize a memory region that aliases all or a | |
1002 | * part of another memory region. | |
1003 | * | |
1004 | * @mr: the #MemoryRegion to be initialized. | |
2c9b15ca | 1005 | * @owner: the object that tracks the region's reference count |
093bc2cd AK |
1006 | * @name: used for debugging; not visible to the user or ABI |
1007 | * @orig: the region to be referenced; @mr will be equivalent to | |
1008 | * @orig between @offset and @offset + @size - 1. | |
1009 | * @offset: start of the section in @orig to be referenced. | |
1010 | * @size: size of the region. | |
1011 | */ | |
1012 | void memory_region_init_alias(MemoryRegion *mr, | |
2c9b15ca | 1013 | struct Object *owner, |
093bc2cd AK |
1014 | const char *name, |
1015 | MemoryRegion *orig, | |
a8170e5e | 1016 | hwaddr offset, |
093bc2cd | 1017 | uint64_t size); |
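/*
 * Usage sketch (names hypothetical; MiB from "qemu/units.h"): make the
 * first 1 MiB of an existing RAM region visible a second time at another
 * guest-physical address.
 *
 *     memory_region_init_alias(&s->ram_alias, OBJECT(s), "ram-alias",
 *                              &s->ram, 0, 1 * MiB);
 *     memory_region_add_subregion(system_memory, 0x100000, &s->ram_alias);
 */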
d0a9b5bc | 1018 | |
a1777f7f | 1019 | /** |
b59821a9 | 1020 | * memory_region_init_rom_nomigrate: Initialize a ROM memory region. |
a1777f7f | 1021 | * |
b59821a9 | 1022 | * This has the same effect as calling memory_region_init_ram_nomigrate() |
a1777f7f PM |
1023 | * and then marking the resulting region read-only with |
1024 | * memory_region_set_readonly(). | |
1025 | * | |
b59821a9 PM |
1026 | * Note that this function does not do anything to cause the data in the |
1027 | * RAM side of the memory region to be migrated; that is the responsibility | |
1028 | * of the caller. | |
1029 | * | |
a1777f7f PM |
1030 | * @mr: the #MemoryRegion to be initialized. |
1031 | * @owner: the object that tracks the region's reference count | |
e8f5fe2d DDAG |
1032 | * @name: Region name, becomes part of RAMBlock name used in migration stream |
1033 | * must be unique within any device | |
a1777f7f PM |
1034 | * @size: size of the region. |
1035 | * @errp: pointer to Error*, to store an error if it happens. | |
1036 | */ | |
b59821a9 PM |
1037 | void memory_region_init_rom_nomigrate(MemoryRegion *mr, |
1038 | struct Object *owner, | |
1039 | const char *name, | |
1040 | uint64_t size, | |
1041 | Error **errp); | |
a1777f7f | 1042 | |
d0a9b5bc | 1043 | /** |
b59821a9 PM |
1044 | * memory_region_init_rom_device_nomigrate: Initialize a ROM memory region. |
1045 | * Writes are handled via callbacks. | |
1046 | * | |
1047 | * Note that this function does not do anything to cause the data in the | |
1048 | * RAM side of the memory region to be migrated; that is the responsibility | |
1049 | * of the caller. | |
d0a9b5bc AK |
1050 | * |
1051 | * @mr: the #MemoryRegion to be initialized. | |
2c9b15ca | 1052 | * @owner: the object that tracks the region's reference count |
39e0b03d | 1053 | * @ops: callbacks for write access handling (must not be NULL). |
57914ecb | 1054 | * @opaque: passed to the read and write callbacks of the @ops structure. |
e8f5fe2d DDAG |
1055 | * @name: Region name, becomes part of RAMBlock name used in migration stream |
1056 | * must be unique within any device | |
d0a9b5bc | 1057 | * @size: size of the region. |
33e0eb52 | 1058 | * @errp: pointer to Error*, to store an error if it happens. |
d0a9b5bc | 1059 | */ |
b59821a9 PM |
1060 | void memory_region_init_rom_device_nomigrate(MemoryRegion *mr, |
1061 | struct Object *owner, | |
1062 | const MemoryRegionOps *ops, | |
1063 | void *opaque, | |
1064 | const char *name, | |
1065 | uint64_t size, | |
1066 | Error **errp); | |
d0a9b5bc | 1067 | |
30951157 | 1068 | /** |
1221a474 AK |
1069 | * memory_region_init_iommu: Initialize a memory region of a custom type |
1070 | * that translates addresses | |
30951157 AK |
1071 | * |
1072 | * An IOMMU region translates addresses and forwards accesses to a target | |
1073 | * memory region. | |
1074 | * | |
2ce931d0 PM |
1075 | * The IOMMU implementation must define a subclass of TYPE_IOMMU_MEMORY_REGION. |
1076 | * @_iommu_mr should be a pointer to enough memory for an instance of | |
1077 | * that subclass, @instance_size is the size of that subclass, and | |
1078 | * @mrtypename is its name. This function will initialize @_iommu_mr as an | |
1079 | * instance of the subclass, and its methods will then be called to handle | |
1080 | * accesses to the memory region. See the documentation of | |
1081 | * #IOMMUMemoryRegionClass for further details. | |
1082 | * | |
1221a474 AK |
1083 | * @_iommu_mr: the #IOMMUMemoryRegion to be initialized |
1084 | * @instance_size: the IOMMUMemoryRegion subclass instance size | |
57914ecb | 1085 | * @mrtypename: the type name of the #IOMMUMemoryRegion |
2c9b15ca | 1086 | * @owner: the object that tracks the region's reference count |
30951157 AK |
1087 | * @name: used for debugging; not visible to the user or ABI |
1088 | * @size: size of the region. | |
1089 | */ | |
1221a474 AK |
1090 | void memory_region_init_iommu(void *_iommu_mr, |
1091 | size_t instance_size, | |
1092 | const char *mrtypename, | |
1093 | Object *owner, | |
30951157 AK |
1094 | const char *name, |
1095 | uint64_t size); | |
1096 | ||
b08199c6 PM |
1097 | /** |
1098 | * memory_region_init_ram - Initialize RAM memory region. Accesses into the | |
1099 | * region will modify memory directly. | |
1100 | * | |
1101 | * @mr: the #MemoryRegion to be initialized | |
1102 | * @owner: the object that tracks the region's reference count (must be | |
1103 | * TYPE_DEVICE or a subclass of TYPE_DEVICE, or NULL) | |
1104 | * @name: name of the memory region | |
1105 | * @size: size of the region in bytes | |
1106 | * @errp: pointer to Error*, to store an error if it happens. | |
1107 | * | |
1108 | * This function allocates RAM for a board model or device, and | |
1109 | * arranges for it to be migrated (by calling vmstate_register_ram() | |
1110 | * if @owner is a DeviceState, or vmstate_register_ram_global() if | |
1111 | * @owner is NULL). | |
1112 | * | |
1113 | * TODO: Currently we restrict @owner to being either NULL (for | |
1114 | * global RAM regions with no owner) or devices, so that we can | |
1115 | * give the RAM block a unique name for migration purposes. | |
1116 | * We should lift this restriction and allow arbitrary Objects. | |
1117 | * If you pass a non-NULL non-device @owner then we will assert. | |
1118 | */ | |
1119 | void memory_region_init_ram(MemoryRegion *mr, | |
1120 | struct Object *owner, | |
1121 | const char *name, | |
1122 | uint64_t size, | |
1123 | Error **errp); | |
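/*
 * Usage sketch for a board model (names hypothetical; MiB from
 * "qemu/units.h"): allocate 128 MiB of migratable RAM and map it at
 * guest-physical address 0.
 *
 *     memory_region_init_ram(&s->ram, OBJECT(dev), "board.ram",
 *                            128 * MiB, &error_fatal);
 *     memory_region_add_subregion(get_system_memory(), 0, &s->ram);
 */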
1124 | ||
1125 | /** | |
1126 | * memory_region_init_rom: Initialize a ROM memory region. | |
1127 | * | |
1128 | * This has the same effect as calling memory_region_init_ram() | |
1129 | * and then marking the resulting region read-only with | |
1130 | * memory_region_set_readonly(). This includes arranging for the | |
1131 | * contents to be migrated. | |
1132 | * | |
1133 | * TODO: Currently we restrict @owner to being either NULL (for | |
1134 | * global RAM regions with no owner) or devices, so that we can | |
1135 | * give the RAM block a unique name for migration purposes. | |
1136 | * We should lift this restriction and allow arbitrary Objects. | |
1137 | * If you pass a non-NULL non-device @owner then we will assert. | |
1138 | * | |
1139 | * @mr: the #MemoryRegion to be initialized. | |
1140 | * @owner: the object that tracks the region's reference count | |
1141 | * @name: Region name, becomes part of RAMBlock name used in migration stream | |
1142 | * must be unique within any device | |
1143 | * @size: size of the region. | |
1144 | * @errp: pointer to Error*, to store an error if it happens. | |
1145 | */ | |
1146 | void memory_region_init_rom(MemoryRegion *mr, | |
1147 | struct Object *owner, | |
1148 | const char *name, | |
1149 | uint64_t size, | |
1150 | Error **errp); | |
1151 | ||
1152 | /** | |
1153 | * memory_region_init_rom_device: Initialize a ROM memory region. | |
1154 | * Writes are handled via callbacks. | |
1155 | * | |
1156 | * This function initializes a memory region backed by RAM for reads | |
1157 | * and callbacks for writes, and arranges for the RAM backing to | |
1158 | * be migrated (by calling vmstate_register_ram() | |
1159 | * if @owner is a DeviceState, or vmstate_register_ram_global() if | |
1160 | * @owner is NULL). | |
1161 | * | |
1162 | * TODO: Currently we restrict @owner to being either NULL (for | |
1163 | * global RAM regions with no owner) or devices, so that we can | |
1164 | * give the RAM block a unique name for migration purposes. | |
1165 | * We should lift this restriction and allow arbitrary Objects. | |
1166 | * If you pass a non-NULL non-device @owner then we will assert. | |
1167 | * | |
1168 | * @mr: the #MemoryRegion to be initialized. | |
1169 | * @owner: the object that tracks the region's reference count | |
1170 | * @ops: callbacks for write access handling (must not be NULL). | |
5d248213 | 1171 | * @opaque: passed to the read and write callbacks of the @ops structure. |
b08199c6 PM |
1172 | * @name: Region name, becomes part of RAMBlock name used in migration stream |
1173 | * must be unique within any device | |
1174 | * @size: size of the region. | |
1175 | * @errp: pointer to Error*, to store an error if it happens. | |
1176 | */ | |
1177 | void memory_region_init_rom_device(MemoryRegion *mr, | |
1178 | struct Object *owner, | |
1179 | const MemoryRegionOps *ops, | |
1180 | void *opaque, | |
1181 | const char *name, | |
1182 | uint64_t size, | |
1183 | Error **errp); | |
1184 | ||
1185 | ||
803c0816 PB |
1186 | /** |
1187 | * memory_region_owner: get a memory region's owner. | |
1188 | * | |
1189 | * @mr: the memory region being queried. | |
1190 | */ | |
1191 | struct Object *memory_region_owner(MemoryRegion *mr); | |
1192 | ||
093bc2cd AK |
1193 | /** |
1194 | * memory_region_size: get a memory region's size. | |
1195 | * | |
1196 | * @mr: the memory region being queried. | |
1197 | */ | |
1198 | uint64_t memory_region_size(MemoryRegion *mr); | |
1199 | ||
8ea9252a AK |
1200 | /** |
1201 | * memory_region_is_ram: check whether a memory region is random access | |
1202 | * | |
847b31f0 | 1203 | * Returns %true if a memory region is random access. |
8ea9252a AK |
1204 | * |
1205 | * @mr: the memory region being queried | |
1206 | */ | |
1619d1fe PB |
1207 | static inline bool memory_region_is_ram(MemoryRegion *mr) |
1208 | { | |
1209 | return mr->ram; | |
1210 | } | |
8ea9252a | 1211 | |
e4dc3f59 | 1212 | /** |
21e00fa5 | 1213 | * memory_region_is_ram_device: check whether a memory region is a ram device |
e4dc3f59 | 1214 | * |
847b31f0 | 1215 | * Returns %true if a memory region is a device backed ram region |
e4dc3f59 ND |
1216 | * |
1217 | * @mr: the memory region being queried | |
1218 | */ | |
21e00fa5 | 1219 | bool memory_region_is_ram_device(MemoryRegion *mr); |
e4dc3f59 | 1220 | |
fd062573 | 1221 | /** |
5f9a5ea1 | 1222 | * memory_region_is_romd: check whether a memory region is in ROMD mode |
fd062573 | 1223 | * |
5f9a5ea1 | 1224 | * Returns %true if a memory region is a ROM device and currently set to allow |
fd062573 BS |
1225 | * direct reads. |
1226 | * | |
1227 | * @mr: the memory region being queried | |
1228 | */ | |
1229 | static inline bool memory_region_is_romd(MemoryRegion *mr) | |
1230 | { | |
5f9a5ea1 | 1231 | return mr->rom_device && mr->romd_mode; |
fd062573 BS |
1232 | } |
1233 | ||
30951157 | 1234 | /** |
3df9d748 | 1235 | * memory_region_get_iommu: check whether a memory region is an iommu |
30951157 | 1236 | * |
3df9d748 AK |
1237 | * Returns pointer to IOMMUMemoryRegion if a memory region is an iommu, |
1238 | * otherwise NULL. | |
30951157 AK |
1239 | * |
1240 | * @mr: the memory region being queried | |
1241 | */ | |
3df9d748 | 1242 | static inline IOMMUMemoryRegion *memory_region_get_iommu(MemoryRegion *mr) |
1619d1fe | 1243 | { |
12d37882 | 1244 | if (mr->alias) { |
3df9d748 AK |
1245 | return memory_region_get_iommu(mr->alias); |
1246 | } | |
1247 | if (mr->is_iommu) { | |
1248 | return (IOMMUMemoryRegion *) mr; | |
12d37882 | 1249 | } |
3df9d748 | 1250 | return NULL; |
1619d1fe PB |
1251 | } |
1252 | ||
1221a474 AK |
1253 | /** |
1254 | * memory_region_get_iommu_class_nocheck: returns iommu memory region class | |
1255 | * if an iommu or NULL if not | |
1256 | * | |
57914ecb JZ |
1257 | * Returns pointer to IOMMUMemoryRegionClass if a memory region is an iommu, |
1258 | * otherwise NULL. This is a fast path that avoids QOM checking; use with caution. |
1221a474 | 1259 | * |
5d248213 | 1260 | * @iommu_mr: the memory region being queried |
1221a474 AK |
1261 | */ |
1262 | static inline IOMMUMemoryRegionClass *memory_region_get_iommu_class_nocheck( | |
1263 | IOMMUMemoryRegion *iommu_mr) | |
1264 | { | |
1265 | return (IOMMUMemoryRegionClass *) (((Object *)iommu_mr)->class); | |
1266 | } | |
1267 | ||
3df9d748 | 1268 | #define memory_region_is_iommu(mr) (memory_region_get_iommu(mr) != NULL) |
30951157 | 1269 | |
f682e9c2 AK |
1270 | /** |
1271 | * memory_region_iommu_get_min_page_size: get minimum supported page size | |
1272 | * for an iommu | |
1273 | * | |
1274 | * Returns minimum supported page size for an iommu. | |
1275 | * | |
3df9d748 | 1276 | * @iommu_mr: the memory region being queried |
f682e9c2 | 1277 | */ |
3df9d748 | 1278 | uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr); |
f682e9c2 | 1279 | |
06866575 DG |
1280 | /** |
1281 | * memory_region_notify_iommu: notify a change in an IOMMU translation entry. | |
1282 | * | |
cdb30812 PX |
1283 | * The notification type will be decided by entry.perm bits: |
1284 | * | |
1285 | * - For UNMAP (cache invalidation) notifications: set entry.perm to IOMMU_NONE. |
1286 | * - For MAP (newly added entry) notifications: set entry.perm to the |
1287 | * permission of the page (which is definitely !IOMMU_NONE). | |
1288 | * | |
1289 | * Note: for any IOMMU implementation, an in-place mapping change | |
1290 | * should be notified with an UNMAP followed by a MAP. | |
1291 | * | |
3df9d748 | 1292 | * @iommu_mr: the memory region that was changed |
cb1efcf4 | 1293 | * @iommu_idx: the IOMMU index for the translation table which has changed |
06866575 DG |
1294 | * @entry: the new entry in the IOMMU translation table. The entry |
1295 | * replaces all old entries for the same virtual I/O address range. | |
1296 | * Deleted entries have .@perm == 0. | |
1297 | */ | |
3df9d748 | 1298 | void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr, |
cb1efcf4 | 1299 | int iommu_idx, |
06866575 DG |
1300 | IOMMUTLBEntry entry); |
1301 | ||
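/*
 * Illustrative sketch (not part of the API; iommu_mr, iova and new_pa are
 * assumed to exist in the caller): an in-place remap of one 4K page is
 * notified as an UNMAP followed by a MAP, as described above.
 *
 *     IOMMUTLBEntry entry = {
 *         .target_as = &address_space_memory,
 *         .iova = iova & ~0xfffULL,
 *         .translated_addr = 0,
 *         .addr_mask = 0xfff,
 *         .perm = IOMMU_NONE,          // UNMAP: invalidate the old entry
 *     };
 *     memory_region_notify_iommu(iommu_mr, 0, entry);
 *
 *     entry.translated_addr = new_pa & ~0xfffULL;
 *     entry.perm = IOMMU_RW;           // MAP: announce the new entry
 *     memory_region_notify_iommu(iommu_mr, 0, entry);
 */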
bd2bfa4c PX |
1302 | /** |
1303 | * memory_region_notify_one: notify a change in an IOMMU translation | |
1304 | * entry to a single notifier | |
1305 | * | |
1306 | * This works just like memory_region_notify_iommu(), but it only | |
1307 | * notifies a specific notifier, not all of them. | |
1308 | * | |
1309 | * @notifier: the notifier to be notified | |
1310 | * @entry: the new entry in the IOMMU translation table. The entry | |
1311 | * replaces all old entries for the same virtual I/O address range. | |
1312 | * Deleted entries have .@perm == 0. | |
1313 | */ | |
1314 | void memory_region_notify_one(IOMMUNotifier *notifier, | |
1315 | IOMMUTLBEntry *entry); | |
1316 | ||
06866575 DG |
1317 | /** |
1318 | * memory_region_register_iommu_notifier: register a notifier for changes to | |
1319 | * IOMMU translation entries. | |
1320 | * | |
549d4005 EA |
1321 | * Returns 0 on success, or a negative errno otherwise. In particular, |
1322 | * -EINVAL indicates that at least one of the attributes of the notifier | |
1323 | * is not supported (flag/range) by the IOMMU memory region. In case of | |
1324 | * error, an error object must be set in @errp. | |
1325 | * | |
06866575 | 1326 | * @mr: the memory region to observe |
cdb30812 PX |
1327 | * @n: the IOMMUNotifier to be added; the notify callback receives a |
1328 | * pointer to an #IOMMUTLBEntry as the opaque value; the pointer | |
1329 | * ceases to be valid on exit from the notifier. | |
5d248213 | 1330 | * @errp: pointer to Error*, to store an error if it happens. |
06866575 | 1331 | */ |
549d4005 EA |
1332 | int memory_region_register_iommu_notifier(MemoryRegion *mr, |
1333 | IOMMUNotifier *n, Error **errp); | |
06866575 | 1334 | |
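/*
 * Usage sketch (the callback name and the covered range are assumptions;
 * iommu_notifier_init() is the usual way to fill in the IOMMUNotifier
 * before registering it):
 *
 *     IOMMUNotifier n;
 *     Error *local_err = NULL;
 *
 *     iommu_notifier_init(&n, my_iommu_map_notify, IOMMU_NOTIFIER_ALL,
 *                         0, HWADDR_MAX, 0);
 *     if (memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr),
 *                                               &n, &local_err) < 0) {
 *         error_report_err(local_err);
 *     }
 */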
a788f227 DG |
1335 | /** |
1336 | * memory_region_iommu_replay: replay existing IOMMU translations to | |
f682e9c2 AK |
1337 | * a notifier with the minimum page granularity returned by |
1338 | * memory_region_iommu_get_min_page_size(). | |
a788f227 | 1339 | * |
2ce931d0 PM |
1340 | * Note: this is not related to record-and-replay functionality. |
1341 | * | |
3df9d748 | 1342 | * @iommu_mr: the memory region to observe |
a788f227 | 1343 | * @n: the notifier to which to replay iommu mappings |
a788f227 | 1344 | */ |
3df9d748 | 1345 | void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n); |
a788f227 | 1346 | |
06866575 DG |
1347 | /** |
1348 | * memory_region_unregister_iommu_notifier: unregister a notifier for | |
1349 | * changes to IOMMU translation entries. | |
1350 | * | |
d22d8956 AK |
1351 | * @mr: the memory region which was observed and for which notify_stopped() | |
1352 | * needs to be called | |
06866575 DG |
1353 | * @n: the notifier to be removed. |
1354 | */ | |
cdb30812 PX |
1355 | void memory_region_unregister_iommu_notifier(MemoryRegion *mr, |
1356 | IOMMUNotifier *n); | |
06866575 | 1357 | |
f1334de6 AK |
1358 | /** |
1359 | * memory_region_iommu_get_attr: return an IOMMU attr if get_attr() is | |
1360 | * defined on the IOMMU. | |
1361 | * | |
2ce931d0 PM |
1362 | * Returns 0 on success, or a negative errno otherwise. In particular, |
1363 | * -EINVAL indicates that the IOMMU does not support the requested | |
1364 | * attribute. | |
f1334de6 AK |
1365 | * |
1366 | * @iommu_mr: the memory region | |
1367 | * @attr: the requested attribute | |
1368 | * @data: a pointer to the requested attribute data | |
1369 | */ | |
1370 | int memory_region_iommu_get_attr(IOMMUMemoryRegion *iommu_mr, | |
1371 | enum IOMMUMemoryRegionAttr attr, | |
1372 | void *data); | |
1373 | ||
21f40209 PM |
1374 | /** |
1375 | * memory_region_iommu_attrs_to_index: return the IOMMU index to | |
1376 | * use for translations with the given memory transaction attributes. | |
1377 | * | |
1378 | * @iommu_mr: the memory region | |
1379 | * @attrs: the memory transaction attributes | |
1380 | */ | |
1381 | int memory_region_iommu_attrs_to_index(IOMMUMemoryRegion *iommu_mr, | |
1382 | MemTxAttrs attrs); | |
1383 | ||
1384 | /** | |
1385 | * memory_region_iommu_num_indexes: return the total number of IOMMU | |
1386 | * indexes that this IOMMU supports. | |
1387 | * | |
1388 | * @iommu_mr: the memory region | |
1389 | */ | |
1390 | int memory_region_iommu_num_indexes(IOMMUMemoryRegion *iommu_mr); | |
1391 | ||
8991c79b AK |
1392 | /** |
1393 | * memory_region_name: get a memory region's name | |
1394 | * | |
1395 | * Returns the string that was used to initialize the memory region. | |
1396 | * | |
1397 | * @mr: the memory region being queried | |
1398 | */ | |
5d546d4b | 1399 | const char *memory_region_name(const MemoryRegion *mr); |
8991c79b | 1400 | |
55043ba3 AK |
1401 | /** |
1402 | * memory_region_is_logging: return whether a memory region is logging writes | |
1403 | * | |
2d1a35be | 1404 | * Returns %true if the memory region is logging writes for the given client |
55043ba3 AK |
1405 | * |
1406 | * @mr: the memory region being queried | |
2d1a35be | 1407 | * @client: the client being queried |
55043ba3 | 1408 | */ |
2d1a35be PB |
1409 | bool memory_region_is_logging(MemoryRegion *mr, uint8_t client); |
1410 | ||
1411 | /** | |
1412 | * memory_region_get_dirty_log_mask: return the clients for which a | |
1413 | * memory region is logging writes. | |
1414 | * | |
677e7805 PB |
1415 | * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants |
1416 | * are the bit indices. | |
2d1a35be PB |
1417 | * |
1418 | * @mr: the memory region being queried | |
1419 | */ | |
1420 | uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr); | |
55043ba3 | 1421 | |
ce7923da AK |
1422 | /** |
1423 | * memory_region_is_rom: check whether a memory region is ROM | |
1424 | * | |
847b31f0 | 1425 | * Returns %true if a memory region is read-only memory. |
ce7923da AK |
1426 | * |
1427 | * @mr: the memory region being queried | |
1428 | */ | |
1619d1fe PB |
1429 | static inline bool memory_region_is_rom(MemoryRegion *mr) |
1430 | { | |
1431 | return mr->ram && mr->readonly; | |
1432 | } | |
1433 | ||
c26763f8 MAL |
1434 | /** |
1435 | * memory_region_is_nonvolatile: check whether a memory region is non-volatile | |
1436 | * | |
1437 | * Returns %true if a memory region is non-volatile memory. | |
1438 | * | |
1439 | * @mr: the memory region being queried | |
1440 | */ | |
1441 | static inline bool memory_region_is_nonvolatile(MemoryRegion *mr) | |
1442 | { | |
1443 | return mr->nonvolatile; | |
1444 | } | |
ce7923da | 1445 | |
a35ba7be PB |
1446 | /** |
1447 | * memory_region_get_fd: Get a file descriptor backing a RAM memory region. | |
1448 | * | |
1449 | * Returns a file descriptor backing a file-based RAM memory region, | |
1450 | * or -1 if the region is not a file-based RAM memory region. | |
1451 | * | |
1452 | * @mr: the RAM or alias memory region being queried. | |
1453 | */ | |
1454 | int memory_region_get_fd(MemoryRegion *mr); | |
1455 | ||
07bdaa41 PB |
1456 | /** |
1457 | * memory_region_from_host: Convert a pointer into a RAM memory region | |
1458 | * and an offset within it. | |
1459 | * | |
1460 | * Given a host pointer inside a RAM memory region (created with | |
1461 | * memory_region_init_ram() or memory_region_init_ram_ptr()), return | |
1462 | * the MemoryRegion and the offset within it. | |
1463 | * | |
1464 | * Use with care; by the time this function returns, the returned pointer is | |
1465 | * not protected by RCU anymore. If the caller is not within an RCU critical | |
1466 | * section and does not hold the iothread lock, it must have other means of | |
1467 | * protecting the pointer, such as a reference to the region that includes | |
1468 | * the incoming ram_addr_t. | |
1469 | * | |
57914ecb JZ |
1470 | * @ptr: the host pointer to be converted |
1471 | * @offset: the offset within memory region | |
07bdaa41 PB |
1472 | */ |
1473 | MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset); | |
1474 | ||
093bc2cd AK |
1475 | /** |
1476 | * memory_region_get_ram_ptr: Get a pointer into a RAM memory region. | |
1477 | * | |
1478 | * Returns a host pointer to a RAM memory region (created with | |
49b24afc PB |
1479 | * memory_region_init_ram() or memory_region_init_ram_ptr()). |
1480 | * | |
1481 | * Use with care; by the time this function returns, the returned pointer is | |
1482 | * not protected by RCU anymore. If the caller is not within an RCU critical | |
1483 | * section and does not hold the iothread lock, it must have other means of | |
1484 | * protecting the pointer, such as a reference to the region that includes | |
1485 | * the incoming ram_addr_t. | |
093bc2cd AK |
1486 | * |
1487 | * @mr: the memory region being queried. | |
1488 | */ | |
1489 | void *memory_region_get_ram_ptr(MemoryRegion *mr); | |
1490 | ||
37d7c084 PB |
1491 | /* memory_region_ram_resize: Resize a RAM region. |
1492 | * | |
1493 | * Only legal before guest might have detected the memory size: e.g. on | |
1494 | * incoming migration, or right after reset. | |
1495 | * | |
1496 | * @mr: a memory region created with @memory_region_init_resizeable_ram. | |
1497 | * @newsize: the new size the region | |
1498 | * @errp: pointer to Error*, to store an error if it happens. | |
1499 | */ | |
1500 | void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize, | |
1501 | Error **errp); | |
9ecc996a PMD |
1502 | |
1503 | /** | |
1504 | * memory_region_msync: Synchronize selected address range of | |
1505 | * a memory mapped region | |
1506 | * | |
1507 | * @mr: the memory region to be synchronized | |
1508 | * @addr: the initial address of the range to be synchronized | |
1509 | * @size: the size of the range to be synchronized | |
1510 | */ | |
1511 | void memory_region_msync(MemoryRegion *mr, hwaddr addr, hwaddr size); | |
1512 | ||
61c490e2 | 1513 | /** |
9ecc996a | 1514 | * memory_region_writeback: Trigger cache writeback for |
5d248213 | 1515 | * selected address range |
61c490e2 | 1516 | * |
5d248213 PB |
1517 | * @mr: the memory region to be updated |
1518 | * @addr: the initial address of the range to be written back | |
1519 | * @size: the size of the range to be written back | |
61c490e2 | 1520 | */ |
4dfe59d1 | 1521 | void memory_region_writeback(MemoryRegion *mr, hwaddr addr, hwaddr size); |
37d7c084 | 1522 | |
093bc2cd AK |
1523 | /** |
1524 | * memory_region_set_log: Turn dirty logging on or off for a region. | |
1525 | * | |
1526 | * Turns dirty logging on or off for a specified client (display, migration). | |
1527 | * Only meaningful for RAM regions. | |
1528 | * | |
1529 | * @mr: the memory region being updated. | |
1530 | * @log: whether dirty logging is to be enabled or disabled. | |
dbddac6d | 1531 | * @client: the user of the logging information; %DIRTY_MEMORY_VGA only. |
093bc2cd AK |
1532 | */ |
1533 | void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client); | |
1534 | ||
093bc2cd | 1535 | /** |
fd4aa979 | 1536 | * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region. |
093bc2cd | 1537 | * |
fd4aa979 BS |
1538 | * Marks a range of bytes as dirty, after it has been dirtied outside |
1539 | * guest code. | |
093bc2cd | 1540 | * |
fd4aa979 | 1541 | * @mr: the memory region being dirtied. |
093bc2cd | 1542 | * @addr: the address (relative to the start of the region) being dirtied. |
fd4aa979 | 1543 | * @size: size of the range being dirtied. |
093bc2cd | 1544 | */ |
a8170e5e AK |
1545 | void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr, |
1546 | hwaddr size); | |
093bc2cd | 1547 | |
077874e0 PX |
1548 | /** |
1549 | * memory_region_clear_dirty_bitmap - clear dirty bitmap for memory range | |
1550 | * | |
1551 | * This function is called when the caller wants to clear the remote | |
1552 | * dirty bitmap of a memory range within the memory region. This can | |
1553 | * be used by e.g. KVM to manually clear the dirty log when | |
1554 | * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT support is declared by the host | |
1555 | * kernel. | |
1556 | * | |
1557 | * @mr: the memory region to clear the dirty log upon | |
1558 | * @start: start address offset within the memory region | |
1559 | * @len: length of the memory region to clear dirty bitmap | |
1560 | */ | |
1561 | void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start, | |
1562 | hwaddr len); | |
1563 | ||
8deaf12c GH |
1564 | /** |
1565 | * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty | |
1566 | * bitmap and clear it. | |
1567 | * | |
1568 | * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and | |
1569 | * returns the snapshot. The snapshot can then be used to query dirty | |
77302fb5 PB |
1570 | * status, using memory_region_snapshot_get_dirty. Snapshotting allows |
1571 | * querying the same page multiple times, which is especially useful for | |
1572 | * display updates where the scanlines often are not page aligned. | |
8deaf12c GH |
1573 | * |
1574 | * The dirty bitmap region which gets copied into the snapshot (and | |
1575 | * cleared afterwards) can be larger than requested. The boundaries | |
1576 | * are rounded up/down so complete bitmap longs (covering 64 pages on | |
1577 | * 64-bit hosts) can be copied over into the bitmap snapshot. This | |
1578 | * isn't a problem for display updates as the extra pages are outside | |
1579 | * the visible area, and in case the visible area changes a full | |
1580 | * display redraw is due anyway. Should other use cases for this | |
1581 | * function emerge we might have to revisit this implementation | |
1582 | * detail. | |
1583 | * | |
1584 | * Use g_free to release DirtyBitmapSnapshot. | |
1585 | * | |
1586 | * @mr: the memory region being queried. | |
1587 | * @addr: the address (relative to the start of the region) being queried. | |
1588 | * @size: the size of the range being queried. | |
1589 | * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA. | |
1590 | */ | |
1591 | DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr, | |
1592 | hwaddr addr, | |
1593 | hwaddr size, | |
1594 | unsigned client); | |
1595 | ||
1596 | /** | |
1597 | * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty | |
1598 | * in the specified dirty bitmap snapshot. | |
1599 | * | |
1600 | * @mr: the memory region being queried. | |
1601 | * @snap: the dirty bitmap snapshot | |
1602 | * @addr: the address (relative to the start of the region) being queried. | |
1603 | * @size: the size of the range being queried. | |
1604 | */ | |
1605 | bool memory_region_snapshot_get_dirty(MemoryRegion *mr, | |
1606 | DirtyBitmapSnapshot *snap, | |
1607 | hwaddr addr, hwaddr size); | |
1608 | ||
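/*
 * Typical pattern in a display device (sketch; vram, fb_size, stride and
 * the redraw helper are assumptions): take one snapshot per refresh, then
 * query each scanline against it so the same page can be tested repeatedly.
 *
 *     DirtyBitmapSnapshot *snap;
 *
 *     snap = memory_region_snapshot_and_clear_dirty(&vram, 0, fb_size,
 *                                                   DIRTY_MEMORY_VGA);
 *     for (int y = 0; y < height; y++) {
 *         if (memory_region_snapshot_get_dirty(&vram, snap,
 *                                              y * stride, stride)) {
 *             redraw_scanline(y);
 *         }
 *     }
 *     g_free(snap);
 */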
093bc2cd AK |
1609 | /** |
1610 | * memory_region_reset_dirty: Mark a range of pages as clean, for a specified | |
1611 | * client. | |
1612 | * | |
1613 | * Marks a range of pages as no longer dirty. | |
1614 | * | |
1615 | * @mr: the region being updated. | |
1616 | * @addr: the start of the subrange being cleaned. | |
1617 | * @size: the size of the subrange being cleaned. | |
1618 | * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or | |
1619 | * %DIRTY_MEMORY_VGA. | |
1620 | */ | |
a8170e5e AK |
1621 | void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr, |
1622 | hwaddr size, unsigned client); | |
093bc2cd | 1623 | |
047be4ed SH |
1624 | /** |
1625 | * memory_region_flush_rom_device: Mark a range of pages dirty and invalidate | |
1626 | * TBs (for self-modifying code). | |
1627 | * | |
1628 | * The MemoryRegionOps->write() callback of a ROM device must use this function | |
1629 | * to mark byte ranges that have been modified internally, such as by directly | |
1630 | * accessing the memory returned by memory_region_get_ram_ptr(). | |
1631 | * | |
1632 | * This function marks the range dirty and invalidates TBs so that TCG can | |
1633 | * detect self-modifying code. | |
1634 | * | |
1635 | * @mr: the region being flushed. | |
1636 | * @addr: the start, relative to the start of the region, of the range being | |
1637 | * flushed. | |
1638 | * @size: the size, in bytes, of the range being flushed. | |
1639 | */ | |
1640 | void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size); | |
1641 | ||
093bc2cd AK |
1642 | /** |
1643 | * memory_region_set_readonly: Turn a memory region read-only (or read-write) | |
1644 | * | |
1645 | * Allows a memory region to be marked as read-only (turning it into a ROM). | |
1646 | * Only useful on RAM regions. | |
1647 | * | |
1648 | * @mr: the region being updated. | |
1649 | * @readonly: whether the region is to be ROM or RAM. | |
1650 | */ | |
1651 | void memory_region_set_readonly(MemoryRegion *mr, bool readonly); | |
1652 | ||
c26763f8 MAL |
1653 | /** |
1654 | * memory_region_set_nonvolatile: Turn a memory region non-volatile | |
1655 | * | |
1656 | * Allows a memory region to be marked as non-volatile. | |
1657 | * Only useful on RAM regions. | |
1658 | * | |
1659 | * @mr: the region being updated. | |
1660 | * @nonvolatile: whether the region is to be non-volatile. | |
1661 | */ | |
1662 | void memory_region_set_nonvolatile(MemoryRegion *mr, bool nonvolatile); | |
1663 | ||
d0a9b5bc | 1664 | /** |
5f9a5ea1 | 1665 | * memory_region_rom_device_set_romd: enable/disable ROMD mode |
d0a9b5bc AK |
1666 | * |
1667 | * Allows a ROM device (initialized with memory_region_init_rom_device()) to be | |
5f9a5ea1 JK |
1668 | * set to ROMD mode (default) or MMIO mode. When it is in ROMD mode, the |
1669 | * device is mapped to guest memory and satisfies read access directly. | |
1670 | * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function. | |
1671 | * Writes are always handled by the #MemoryRegion.write function. | |
d0a9b5bc AK |
1672 | * |
1673 | * @mr: the memory region to be updated | |
5f9a5ea1 | 1674 | * @romd_mode: %true to put the region into ROMD mode |
d0a9b5bc | 1675 | */ |
5f9a5ea1 | 1676 | void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode); |
d0a9b5bc | 1677 | |
093bc2cd AK |
1678 | /** |
1679 | * memory_region_set_coalescing: Enable memory coalescing for the region. | |
1680 | * | |
1681 | * Enables writes to a region to be queued for later processing. MMIO ->write | |
1682 | * callbacks may be delayed until a non-coalesced MMIO is issued. | |
1683 | * Only useful for IO regions. Roughly similar to write-combining hardware. | |
1684 | * | |
1685 | * @mr: the memory region to be write coalesced | |
1686 | */ | |
1687 | void memory_region_set_coalescing(MemoryRegion *mr); | |
1688 | ||
1689 | /** | |
1690 | * memory_region_add_coalescing: Enable memory coalescing for a sub-range of | |
1691 | * a region. | |
1692 | * | |
1693 | * Like memory_region_set_coalescing(), but works on a sub-range of a region. | |
1694 | * Multiple calls can be issued to coalesce disjoint ranges. | |
1695 | * | |
1696 | * @mr: the memory region to be updated. | |
1697 | * @offset: the start of the range within the region to be coalesced. | |
1698 | * @size: the size of the subrange to be coalesced. | |
1699 | */ | |
1700 | void memory_region_add_coalescing(MemoryRegion *mr, | |
a8170e5e | 1701 | hwaddr offset, |
093bc2cd AK |
1702 | uint64_t size); |
1703 | ||
1704 | /** | |
1705 | * memory_region_clear_coalescing: Disable MMIO coalescing for the region. | |
1706 | * | |
1707 | * Disables any coalescing caused by memory_region_set_coalescing() or | |
1708 | * memory_region_add_coalescing(). Roughly equivalent to uncacheable memory | |
1709 | * hardware. | |
1710 | * | |
1711 | * @mr: the memory region to be updated. | |
1712 | */ | |
1713 | void memory_region_clear_coalescing(MemoryRegion *mr); | |
1714 | ||
d410515e JK |
1715 | /** |
1716 | * memory_region_set_flush_coalesced: Enforce memory coalescing flush before | |
1717 | * accesses. | |
1718 | * | |
1719 | * Ensure that pending coalesced MMIO request are flushed before the memory | |
1720 | * region is accessed. This property is automatically enabled for all regions | |
1721 | * passed to memory_region_set_coalescing() and memory_region_add_coalescing(). | |
1722 | * | |
1723 | * @mr: the memory region to be updated. | |
1724 | */ | |
1725 | void memory_region_set_flush_coalesced(MemoryRegion *mr); | |
1726 | ||
1727 | /** | |
1728 | * memory_region_clear_flush_coalesced: Disable memory coalescing flush before | |
1729 | * accesses. | |
1730 | * | |
1731 | * Clear the automatic coalesced MMIO flushing enabled via | |
1732 | * memory_region_set_flush_coalesced. Note that this service has no effect on | |
1733 | * memory regions that have MMIO coalescing enabled for themselves. For them, | |
1734 | * automatic flushing will stop once coalescing is disabled. | |
1735 | * | |
1736 | * @mr: the memory region to be updated. | |
1737 | */ | |
1738 | void memory_region_clear_flush_coalesced(MemoryRegion *mr); | |
1739 | ||
3e9d69e7 AK |
1740 | /** |
1741 | * memory_region_add_eventfd: Request an eventfd to be triggered when a word | |
1742 | * is written to a location. | |
1743 | * | |
1744 | * Marks a word in an IO region (initialized with memory_region_init_io()) | |
1745 | * as a trigger for an eventfd event. The I/O callback will not be called. | |
69ddaf66 | 1746 | * The caller must be prepared to handle failure (that is, take the required |
3e9d69e7 AK |
1747 | * action if the callback _is_ called). |
1748 | * | |
1749 | * @mr: the memory region being updated. | |
1750 | * @addr: the address within @mr that is to be monitored | |
1751 | * @size: the size of the access to trigger the eventfd | |
1752 | * @match_data: whether to match against @data, instead of just @addr | |
1753 | * @data: the data to match against the guest write | |
57914ecb | 1754 | * @e: event notifier to be triggered when @addr, @size, and @data all match. |
3e9d69e7 AK |
1755 | **/ |
1756 | void memory_region_add_eventfd(MemoryRegion *mr, | |
a8170e5e | 1757 | hwaddr addr, |
3e9d69e7 AK |
1758 | unsigned size, |
1759 | bool match_data, | |
1760 | uint64_t data, | |
753d5e14 | 1761 | EventNotifier *e); |
3e9d69e7 AK |
1762 | |
1763 | /** | |
69ddaf66 | 1764 | * memory_region_del_eventfd: Cancel an eventfd. |
3e9d69e7 | 1765 | * |
69ddaf66 ASRJ |
1766 | * Cancels an eventfd trigger requested by a previous |
1767 | * memory_region_add_eventfd() call. | |
3e9d69e7 AK |
1768 | * |
1769 | * @mr: the memory region being updated. | |
1770 | * @addr: the address within @mr that is to be monitored | |
1771 | * @size: the size of the access to trigger the eventfd | |
1772 | * @match_data: whether to match against @data, instead of just @addr | |
1773 | * @data: the data to match against the guest write | |
57914ecb | 1774 | * @e: event notifier to be triggered when @addr, @size, and @data all match. |
3e9d69e7 AK |
1775 | */ |
1776 | void memory_region_del_eventfd(MemoryRegion *mr, | |
a8170e5e | 1777 | hwaddr addr, |
3e9d69e7 AK |
1778 | unsigned size, |
1779 | bool match_data, | |
1780 | uint64_t data, | |
753d5e14 PB |
1781 | EventNotifier *e); |
1782 | ||
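/*
 * Sketch of an ioeventfd-style doorbell (DOORBELL_OFFSET, dev->mmio and the
 * matched value are assumptions): a 4-byte guest write of 1 signals the
 * EventNotifier instead of invoking the region's write callback.
 *
 *     EventNotifier doorbell;
 *
 *     event_notifier_init(&doorbell, 0);
 *     memory_region_add_eventfd(&dev->mmio, DOORBELL_OFFSET, 4,
 *                               true, 1, &doorbell);
 *     ...
 *     memory_region_del_eventfd(&dev->mmio, DOORBELL_OFFSET, 4,
 *                               true, 1, &doorbell);
 *     event_notifier_cleanup(&doorbell);
 */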
093bc2cd | 1783 | /** |
69ddaf66 | 1784 | * memory_region_add_subregion: Add a subregion to a container. |
093bc2cd | 1785 | * |
69ddaf66 | 1786 | * Adds a subregion at @offset. The subregion may not overlap with other |
093bc2cd AK |
1787 | * subregions (except for those explicitly marked as overlapping). A region |
1788 | * may only be added once as a subregion (unless removed with | |
1789 | * memory_region_del_subregion()); use memory_region_init_alias() if you | |
1790 | * want a region to be a subregion in multiple locations. | |
1791 | * | |
1792 | * @mr: the region to contain the new subregion; must be a container | |
1793 | * initialized with memory_region_init(). | |
1794 | * @offset: the offset relative to @mr where @subregion is added. | |
1795 | * @subregion: the subregion to be added. | |
1796 | */ | |
1797 | void memory_region_add_subregion(MemoryRegion *mr, | |
a8170e5e | 1798 | hwaddr offset, |
093bc2cd AK |
1799 | MemoryRegion *subregion); |
1800 | /** | |
1a7e8cae BZ |
1801 | * memory_region_add_subregion_overlap: Add a subregion to a container |
1802 | * with overlap. | |
093bc2cd | 1803 | * |
69ddaf66 | 1804 | * Adds a subregion at @offset. The subregion may overlap with other |
093bc2cd AK |
1805 | * subregions. Conflicts are resolved by having a higher @priority hide a |
1806 | * lower @priority. Subregions without priority are taken as @priority 0. | |
1807 | * A region may only be added once as a subregion (unless removed with | |
1808 | * memory_region_del_subregion()); use memory_region_init_alias() if you | |
1809 | * want a region to be a subregion in multiple locations. | |
1810 | * | |
1811 | * @mr: the region to contain the new subregion; must be a container | |
1812 | * initialized with memory_region_init(). | |
1813 | * @offset: the offset relative to @mr where @subregion is added. | |
1814 | * @subregion: the subregion to be added. | |
1815 | * @priority: used for resolving overlaps; highest priority wins. | |
1816 | */ | |
1817 | void memory_region_add_subregion_overlap(MemoryRegion *mr, | |
a8170e5e | 1818 | hwaddr offset, |
093bc2cd | 1819 | MemoryRegion *subregion, |
a1ff8ae0 | 1820 | int priority); |
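/*
 * Sketch of a small hierarchy (the region names and sizes are assumptions):
 * RAM fills the container at priority 0 and a boot ROM hides its first
 * 128K at priority 1.
 *
 *     memory_region_init(&container, owner, "board", 4 * GiB);
 *     memory_region_add_subregion(&container, 0, &ram);
 *     memory_region_add_subregion_overlap(&container, 0, &boot_rom, 1);
 */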
e34911c4 AK |
1821 | |
1822 | /** | |
1823 | * memory_region_get_ram_addr: Get the ram address associated with a memory | |
1824 | * region | |
5d248213 PB |
1825 | * |
1826 | * @mr: the region to be queried | |
e34911c4 | 1827 | */ |
7ebb2745 | 1828 | ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr); |
e34911c4 | 1829 | |
a2b257d6 | 1830 | uint64_t memory_region_get_alignment(const MemoryRegion *mr); |
093bc2cd AK |
1831 | /** |
1832 | * memory_region_del_subregion: Remove a subregion. | |
1833 | * | |
1834 | * Removes a subregion from its container. | |
1835 | * | |
1836 | * @mr: the container to be updated. | |
1837 | * @subregion: the region being removed; must be a current subregion of @mr. | |
1838 | */ | |
1839 | void memory_region_del_subregion(MemoryRegion *mr, | |
1840 | MemoryRegion *subregion); | |
1841 | ||
6bba19ba AK |
1842 | /* |
1843 | * memory_region_set_enabled: dynamically enable or disable a region | |
1844 | * | |
1845 | * Enables or disables a memory region. A disabled memory region | |
1846 | * ignores all accesses to itself and its subregions. It does not | |
1847 | * obscure sibling subregions with lower priority - it simply behaves as | |
1848 | * if it was removed from the hierarchy. | |
1849 | * | |
1850 | * Regions default to being enabled. | |
1851 | * | |
1852 | * @mr: the region to be updated | |
1853 | * @enabled: whether to enable or disable the region | |
1854 | */ | |
1855 | void memory_region_set_enabled(MemoryRegion *mr, bool enabled); | |
1856 | ||
2282e1af AK |
1857 | /* |
1858 | * memory_region_set_address: dynamically update the address of a region | |
1859 | * | |
feca4ac1 | 1860 | * Dynamically updates the address of a region, relative to its container. |
2282e1af AK |
1861 | * May be used on regions that are currently part of a memory hierarchy. | |
1862 | * | |
1863 | * @mr: the region to be updated | |
feca4ac1 | 1864 | * @addr: new address, relative to container region |
2282e1af | 1865 | */ |
a8170e5e | 1866 | void memory_region_set_address(MemoryRegion *mr, hwaddr addr); |
2282e1af | 1867 | |
e7af4c67 MT |
1868 | /* |
1869 | * memory_region_set_size: dynamically update the size of a region. | |
1870 | * | |
1871 | * Dynamically updates the size of a region. | |
1872 | * | |
1873 | * @mr: the region to be updated | |
1874 | * @size: used size of the region. | |
1875 | */ | |
1876 | void memory_region_set_size(MemoryRegion *mr, uint64_t size); | |
1877 | ||
4703359e AK |
1878 | /* |
1879 | * memory_region_set_alias_offset: dynamically update a memory alias's offset | |
1880 | * | |
1881 | * Dynamically updates the offset into the target region that an alias points | |
1882 | * to, as if the fourth argument to memory_region_init_alias() has changed. | |
1883 | * | |
1884 | * @mr: the #MemoryRegion to be updated; should be an alias. | |
1885 | * @offset: the new offset into the target memory region | |
1886 | */ | |
1887 | void memory_region_set_alias_offset(MemoryRegion *mr, | |
a8170e5e | 1888 | hwaddr offset); |
4703359e | 1889 | |
3ce10901 | 1890 | /** |
feca4ac1 PB |
1891 | * memory_region_present: checks if an address relative to a @container |
1892 | * translates into #MemoryRegion within @container | |
3ce10901 | 1893 | * |
feca4ac1 | 1894 | * Answer whether a #MemoryRegion within @container covers the address |
3ce10901 PB |
1895 | * @addr. |
1896 | * | |
feca4ac1 PB |
1897 | * @container: a #MemoryRegion within which @addr is a relative address |
1898 | * @addr: the area within @container to be searched | |
3ce10901 | 1899 | */ |
feca4ac1 | 1900 | bool memory_region_present(MemoryRegion *container, hwaddr addr); |
3ce10901 | 1901 | |
eed2bacf IM |
1902 | /** |
1903 | * memory_region_is_mapped: returns true if #MemoryRegion is mapped | |
1904 | * into any address space. | |
1905 | * | |
1906 | * @mr: a #MemoryRegion which should be checked if it's mapped | |
1907 | */ | |
1908 | bool memory_region_is_mapped(MemoryRegion *mr); | |
1909 | ||
e2177955 | 1910 | /** |
73034e9e PB |
1911 | * memory_region_find: translate an address/size relative to a |
1912 | * MemoryRegion into a #MemoryRegionSection. | |
e2177955 | 1913 | * |
73034e9e PB |
1914 | * Locates the first #MemoryRegion within @mr that overlaps the range |
1915 | * given by @addr and @size. | |
e2177955 AK |
1916 | * |
1917 | * Returns a #MemoryRegionSection that describes a contiguous overlap. | |
1918 | * It will have the following characteristics: | |
08226b44 PB |
1919 | * - @size = 0 iff no overlap was found |
1920 | * - @mr is non-%NULL iff an overlap was found | |
e2177955 | 1921 | * |
73034e9e PB |
1922 | * Remember that in the return value the @offset_within_region is |
1923 | * relative to the returned region (in the .@mr field), not to the | |
1924 | * @mr argument. | |
1925 | * | |
1926 | * Similarly, the .@offset_within_address_space is relative to the | |
1927 | * address space that contains both regions, the passed and the | |
1928 | * returned one. However, in the special case where the @mr argument | |
feca4ac1 | 1929 | * has no container (and thus is the root of the address space), the |
73034e9e | 1930 | * following will hold: |
08226b44 PB |
1931 | * - @offset_within_address_space >= @addr |
1932 | * - @offset_within_address_space + .@size <= @addr + @size | |
73034e9e PB |
1933 | * |
1934 | * @mr: a MemoryRegion within which @addr is a relative address | |
1935 | * @addr: start of the area within @as to be searched | |
e2177955 AK |
1936 | * @size: size of the area to be searched |
1937 | */ | |
73034e9e | 1938 | MemoryRegionSection memory_region_find(MemoryRegion *mr, |
a8170e5e | 1939 | hwaddr addr, uint64_t size); |
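/*
 * Sketch (sysmem and addr are assumptions): look up what backs a 4-byte
 * physical range and drop the reference taken on the returned region.
 *
 *     MemoryRegionSection sec = memory_region_find(sysmem, addr, 4);
 *
 *     if (sec.mr) {
 *         // sec.offset_within_region and int128_get64(sec.size) describe
 *         // the overlap; release the region when done with it.
 *         memory_region_unref(sec.mr);
 *     }
 */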
e2177955 | 1940 | |
86e775c6 | 1941 | /** |
9c1f8f44 | 1942 | * memory_global_dirty_log_sync: synchronize the dirty log for all memory |
86e775c6 | 1943 | * |
9c1f8f44 | 1944 | * Synchronizes the dirty page log for all address spaces. |
86e775c6 | 1945 | */ |
9c1f8f44 | 1946 | void memory_global_dirty_log_sync(void); |
9458a9a1 PB |
1947 | |
1948 | /** | |
1949 | * memory_global_after_dirty_log_sync: synchronize the dirty log for all memory | |
1950 | * | |
1951 | * Synchronizes the vCPUs with a thread that is reading the dirty bitmap. | |
1952 | * This function must be called after the dirty log bitmap is cleared, and | |
1953 | * before dirty guest memory pages are read. If you are using | |
1954 | * #DirtyBitmapSnapshot, memory_region_snapshot_and_clear_dirty() takes | |
1955 | * care of doing this. | |
1956 | */ | |
1957 | void memory_global_after_dirty_log_sync(void); | |
86e775c6 | 1958 | |
69ddaf66 ASRJ |
1959 | /** |
1960 | * memory_region_transaction_begin: Start a transaction. | |
1961 | * | |
1962 | * During a transaction, changes will be accumulated and made visible | |
dabdf394 | 1963 | * only when the transaction ends (is committed). |
4ef4db86 AK |
1964 | */ |
1965 | void memory_region_transaction_begin(void); | |
69ddaf66 ASRJ |
1966 | |
1967 | /** | |
1968 | * memory_region_transaction_commit: Commit a transaction and make changes | |
1969 | * visible to the guest. | |
4ef4db86 AK |
1970 | */ |
1971 | void memory_region_transaction_commit(void); | |
1972 | ||
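/*
 * Sketch (bar_mr and new_base are assumptions): batch several layout
 * changes so listeners see one combined update at commit time instead of
 * three intermediate states.
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(&bar_mr, false);
 *     memory_region_set_address(&bar_mr, new_base);
 *     memory_region_set_enabled(&bar_mr, true);
 *     memory_region_transaction_commit();
 */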
7664e80c AK |
1973 | /** |
1974 | * memory_listener_register: register callbacks to be called when memory | |
1975 | * sections are mapped or unmapped into an address | |
1976 | * space | |
1977 | * | |
1978 | * @listener: an object containing the callbacks to be called | |
7376e582 | 1979 | * @filter: if non-%NULL, only regions in this address space will be observed |
7664e80c | 1980 | */ |
f6790af6 | 1981 | void memory_listener_register(MemoryListener *listener, AddressSpace *filter); |
7664e80c AK |
1982 | |
1983 | /** | |
1984 | * memory_listener_unregister: undo the effect of memory_listener_register() | |
1985 | * | |
1986 | * @listener: an object containing the callbacks to be removed | |
1987 | */ | |
1988 | void memory_listener_unregister(MemoryListener *listener); | |
1989 | ||
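/*
 * Minimal listener sketch (the callback body is an assumption): log every
 * section that gets mapped into the system address space.
 *
 *     static void dbg_region_add(MemoryListener *l, MemoryRegionSection *s)
 *     {
 *         qemu_log("mapped %s\n", memory_region_name(s->mr));
 *     }
 *
 *     static MemoryListener dbg_listener = {
 *         .region_add = dbg_region_add,
 *     };
 *
 *     memory_listener_register(&dbg_listener, &address_space_memory);
 */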
1990 | /** | |
1991 | * memory_global_dirty_log_start: begin dirty logging for all regions | |
1992 | */ | |
1993 | void memory_global_dirty_log_start(void); | |
1994 | ||
1995 | /** | |
1a7e8cae | 1996 | * memory_global_dirty_log_stop: end dirty logging for all regions |
7664e80c AK |
1997 | */ |
1998 | void memory_global_dirty_log_stop(void); | |
1999 | ||
2261d393 | 2000 | void mtree_info(bool flatview, bool dispatch_tree, bool owner, bool disabled); |
314e2987 | 2001 | |
3b643495 PM |
2002 | /** |
2003 | * memory_region_dispatch_read: perform a read directly to the specified | |
2004 | * MemoryRegion. | |
2005 | * | |
2006 | * @mr: #MemoryRegion to access | |
2007 | * @addr: address within that region | |
2008 | * @pval: pointer to uint64_t which the data is written to | |
e67c9046 | 2009 | * @op: size, sign, and endianness of the memory operation |
3b643495 PM |
2010 | * @attrs: memory transaction attributes to use for the access |
2011 | */ | |
2012 | MemTxResult memory_region_dispatch_read(MemoryRegion *mr, | |
2013 | hwaddr addr, | |
2014 | uint64_t *pval, | |
e67c9046 | 2015 | MemOp op, |
3b643495 PM |
2016 | MemTxAttrs attrs); |
2017 | /** | |
2018 | * memory_region_dispatch_write: perform a write directly to the specified | |
2019 | * MemoryRegion. | |
2020 | * | |
2021 | * @mr: #MemoryRegion to access | |
2022 | * @addr: address within that region | |
2023 | * @data: data to write | |
e67c9046 | 2024 | * @op: size, sign, and endianness of the memory operation |
3b643495 PM |
2025 | * @attrs: memory transaction attributes to use for the access |
2026 | */ | |
2027 | MemTxResult memory_region_dispatch_write(MemoryRegion *mr, | |
2028 | hwaddr addr, | |
2029 | uint64_t data, | |
e67c9046 | 2030 | MemOp op, |
3b643495 PM |
2031 | MemTxAttrs attrs); |
2032 | ||
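/*
 * Sketch (mr and addr are assumptions): read a 32-bit little-endian value
 * straight through a region's access callbacks, bypassing the address
 * space lookup.
 *
 *     uint64_t val;
 *     MemTxResult r = memory_region_dispatch_read(mr, addr, &val,
 *                                                 MO_32 | MO_LE,
 *                                                 MEMTXATTRS_UNSPECIFIED);
 *     if (r != MEMTX_OK) {
 *         // handle the failed transaction
 *     }
 */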
9ad2bbc1 AK |
2033 | /** |
2034 | * address_space_init: initializes an address space | |
2035 | * | |
2036 | * @as: an uninitialized #AddressSpace | |
67cc32eb | 2037 | * @root: a #MemoryRegion that routes addresses for the address space |
7dca8043 AK |
2038 | * @name: an address space name. The name is only used for debugging |
2039 | * output. | |
9ad2bbc1 | 2040 | */ |
7dca8043 | 2041 | void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name); |
9ad2bbc1 | 2042 | |
83f3c251 AK |
2043 | /** |
2044 | * address_space_destroy: destroy an address space | |
2045 | * | |
2046 | * Releases all resources associated with an address space. After an address space | |
2047 | * is destroyed, its root memory region (given by address_space_init()) may be destroyed | |
2048 | * as well. | |
2049 | * | |
2050 | * @as: address space to be destroyed | |
2051 | */ | |
2052 | void address_space_destroy(AddressSpace *as); | |
2053 | ||
a2166410 GK |
2054 | /** |
2055 | * address_space_remove_listeners: unregister all listeners of an address space | |
2056 | * | |
2057 | * Removes all callbacks previously registered with memory_listener_register() | |
2058 | * for @as. | |
2059 | * | |
2060 | * @as: an initialized #AddressSpace | |
2061 | */ | |
2062 | void address_space_remove_listeners(AddressSpace *as); | |
2063 | ||
ac1970fb AK |
2064 | /** |
2065 | * address_space_rw: read from or write to an address space. | |
2066 | * | |
5c9eb028 PM |
2067 | * Return a MemTxResult indicating whether the operation succeeded |
2068 | * or failed (eg unassigned memory, device rejected the transaction, | |
2069 | * IOMMU fault). | |
fd8aaa76 | 2070 | * |
ac1970fb AK |
2071 | * @as: #AddressSpace to be accessed |
2072 | * @addr: address within that address space | |
5c9eb028 | 2073 | * @attrs: memory transaction attributes |
ac1970fb | 2074 | * @buf: buffer with the data transferred |
57914ecb | 2075 | * @len: the number of bytes to read or write |
ac1970fb AK |
2076 | * @is_write: indicates the transfer direction |
2077 | */ | |
5c9eb028 | 2078 | MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, |
daa3dda4 | 2079 | MemTxAttrs attrs, void *buf, |
0c249ff7 | 2080 | hwaddr len, bool is_write); |
ac1970fb AK |
2081 | |
2082 | /** | |
2083 | * address_space_write: write to address space. | |
2084 | * | |
5c9eb028 PM |
2085 | * Return a MemTxResult indicating whether the operation succeeded |
2086 | * or failed (eg unassigned memory, device rejected the transaction, | |
2087 | * IOMMU fault). | |
fd8aaa76 | 2088 | * |
ac1970fb AK |
2089 | * @as: #AddressSpace to be accessed |
2090 | * @addr: address within that address space | |
5c9eb028 | 2091 | * @attrs: memory transaction attributes |
ac1970fb | 2092 | * @buf: buffer with the data transferred |
57914ecb | 2093 | * @len: the number of bytes to write |
ac1970fb | 2094 | */ |
5c9eb028 PM |
2095 | MemTxResult address_space_write(AddressSpace *as, hwaddr addr, |
2096 | MemTxAttrs attrs, | |
daa3dda4 | 2097 | const void *buf, hwaddr len); |
ac1970fb | 2098 | |
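/*
 * Sketch of a device DMA write (desc_addr and rec are assumptions): push a
 * small record into guest memory and check the transaction result.
 *
 *     MemTxResult r;
 *
 *     r = address_space_write(&address_space_memory, desc_addr,
 *                             MEMTXATTRS_UNSPECIFIED, &rec, sizeof(rec));
 *     if (r != MEMTX_OK) {
 *         // report a DMA error in a device-specific way
 *     }
 */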
3c8133f9 PM |
2099 | /** |
2100 | * address_space_write_rom: write to address space, including ROM. | |
2101 | * | |
2102 | * This function writes to the specified address space, but will | |
2103 | * write data to both ROM and RAM. This is used for non-guest | |
2104 | * writes like writes from the gdb debug stub or initial loading | |
2105 | * of ROM contents. | |
2106 | * | |
2107 | * Note that portions of the write which attempt to write data to | |
2108 | * a device will be silently ignored -- only real RAM and ROM will | |
2109 | * be written to. | |
2110 | * | |
2111 | * Return a MemTxResult indicating whether the operation succeeded | |
2112 | * or failed (eg unassigned memory, device rejected the transaction, | |
2113 | * IOMMU fault). | |
2114 | * | |
2115 | * @as: #AddressSpace to be accessed | |
2116 | * @addr: address within that address space | |
2117 | * @attrs: memory transaction attributes | |
2118 | * @buf: buffer with the data transferred | |
2119 | * @len: the number of bytes to write | |
2120 | */ | |
2121 | MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr, | |
2122 | MemTxAttrs attrs, | |
daa3dda4 | 2123 | const void *buf, hwaddr len); |
3c8133f9 | 2124 | |
3cc8f884 | 2125 | /* address_space_ld*: load from an address space |
50013115 PM |
2126 | * address_space_st*: store to an address space |
2127 | * | |
2128 | * These functions perform a load or store of the byte, word, | |
2129 | * longword or quad to the specified address within the AddressSpace. | |
2130 | * The _le suffixed functions treat the data as little endian; | |
2131 | * _be indicates big endian; no suffix indicates "same endianness | |
2132 | * as guest CPU". | |
2133 | * | |
2134 | * The "guest CPU endianness" accessors are deprecated for use outside | |
2135 | * target-* code; devices should be CPU-agnostic and use either the LE | |
2136 | * or the BE accessors. | |
2137 | * | |
2138 | * @as #AddressSpace to be accessed | |
2139 | * @addr: address within that address space | |
2140 | * @val: data value, for stores | |
2141 | * @attrs: memory transaction attributes | |
2142 | * @result: location to write the success/failure of the transaction; | |
2143 | * if NULL, this information is discarded | |
2144 | */ | |
4269c82b PB |
2145 | |
2146 | #define SUFFIX | |
2147 | #define ARG1 as | |
2148 | #define ARG1_DECL AddressSpace *as | |
0979ed01 | 2149 | #include "exec/memory_ldst.h.inc" |
4269c82b PB |
2150 | |
2151 | #define SUFFIX | |
2152 | #define ARG1 as | |
2153 | #define ARG1_DECL AddressSpace *as | |
0979ed01 | 2154 | #include "exec/memory_ldst_phys.h.inc" |
0ce265ff | 2155 | |
1f4e496e | 2156 | struct MemoryRegionCache { |
48564041 | 2157 | void *ptr; |
1f4e496e | 2158 | hwaddr xlat; |
1f4e496e | 2159 | hwaddr len; |
48564041 PB |
2160 | FlatView *fv; |
2161 | MemoryRegionSection mrs; | |
2162 | bool is_write; | |
1f4e496e PB |
2163 | }; |
2164 | ||
48564041 PB |
2165 | #define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .mrs.mr = NULL }) |
2166 | ||
5eba0404 | 2167 | |
4269c82b PB |
2168 | /* address_space_ld*_cached: load from a cached #MemoryRegion |
2169 | * address_space_st*_cached: store into a cached #MemoryRegion | |
2170 | * | |
2171 | * These functions perform a load or store of the byte, word, | |
2172 | * longword or quad to the specified address. The address is | |
2173 | * a physical address in the AddressSpace, but it must lie within | |
2174 | * a #MemoryRegion that was mapped with address_space_cache_init. | |
2175 | * | |
2176 | * The _le suffixed functions treat the data as little endian; | |
2177 | * _be indicates big endian; no suffix indicates "same endianness | |
2178 | * as guest CPU". | |
2179 | * | |
2180 | * The "guest CPU endianness" accessors are deprecated for use outside | |
2181 | * target-* code; devices should be CPU-agnostic and use either the LE | |
2182 | * or the BE accessors. | |
2183 | * | |
2184 | * @cache: previously initialized #MemoryRegionCache to be accessed | |
2185 | * @addr: address within the address space | |
2186 | * @val: data value, for stores | |
2187 | * @attrs: memory transaction attributes | |
2188 | * @result: location to write the success/failure of the transaction; | |
2189 | * if NULL, this information is discarded | |
2190 | */ | |
2191 | ||
48564041 | 2192 | #define SUFFIX _cached_slow |
4269c82b PB |
2193 | #define ARG1 cache |
2194 | #define ARG1_DECL MemoryRegionCache *cache | |
0979ed01 | 2195 | #include "exec/memory_ldst.h.inc" |
4269c82b | 2196 | |
48564041 PB |
2197 | /* Inline fast path for direct RAM access. */ |
2198 | static inline uint8_t address_space_ldub_cached(MemoryRegionCache *cache, | |
2199 | hwaddr addr, MemTxAttrs attrs, MemTxResult *result) | |
2200 | { | |
2201 | assert(addr < cache->len); | |
2202 | if (likely(cache->ptr)) { | |
2203 | return ldub_p(cache->ptr + addr); | |
2204 | } else { | |
2205 | return address_space_ldub_cached_slow(cache, addr, attrs, result); | |
2206 | } | |
2207 | } | |
2208 | ||
2209 | static inline void address_space_stb_cached(MemoryRegionCache *cache, | |
2210 | hwaddr addr, uint32_t val, MemTxAttrs attrs, MemTxResult *result) | |
2211 | { | |
2212 | assert(addr < cache->len); | |
2213 | if (likely(cache->ptr)) { | |
2214 | stb_p(cache->ptr + addr, val); | |
2215 | } else { | |
2216 | address_space_stb_cached_slow(cache, addr, val, attrs, result); | |
2217 | } | |
2218 | } | |
2219 | ||
2220 | #define ENDIANNESS _le | |
0979ed01 | 2221 | #include "exec/memory_ldst_cached.h.inc" |
48564041 PB |
2222 | |
2223 | #define ENDIANNESS _be | |
0979ed01 | 2224 | #include "exec/memory_ldst_cached.h.inc" |
48564041 | 2225 | |
4269c82b PB |
2226 | #define SUFFIX _cached |
2227 | #define ARG1 cache | |
2228 | #define ARG1_DECL MemoryRegionCache *cache | |
0979ed01 | 2229 | #include "exec/memory_ldst_phys.h.inc" |
4269c82b | 2230 | |
1f4e496e PB |
2231 | /* address_space_cache_init: prepare for repeated access to a physical |
2232 | * memory region | |
2233 | * | |
2234 | * @cache: #MemoryRegionCache to be filled | |
2235 | * @as: #AddressSpace to be accessed | |
2236 | * @addr: address within that address space | |
2237 | * @len: length of buffer | |
2238 | * @is_write: indicates the transfer direction | |
2239 | * | |
2240 | * Will only work with RAM, and may map a subset of the requested range by | |
2241 | * returning a value that is less than @len. On failure, return a negative | |
2242 | * errno value. | |
2243 | * | |
2244 | * Because it only works with RAM, this function can be used for | |
2245 | * read-modify-write operations. In this case, is_write should be %true. | |
2246 | * | |
2247 | * Note that addresses passed to the address_space_*_cached functions | |
2248 | * are relative to @addr. | |
2249 | */ | |
2250 | int64_t address_space_cache_init(MemoryRegionCache *cache, | |
2251 | AddressSpace *as, | |
2252 | hwaddr addr, | |
2253 | hwaddr len, | |
2254 | bool is_write); | |
2255 | ||
2256 | /** | |
2257 | * address_space_cache_invalidate: complete a write to a #MemoryRegionCache | |
2258 | * | |
2259 | * @cache: The #MemoryRegionCache to operate on. | |
2260 | * @addr: The first physical address that was written, relative to the | |
2261 | * address that was passed to @address_space_cache_init. | |
2262 | * @access_len: The number of bytes that were written starting at @addr. | |
2263 | */ | |
2264 | void address_space_cache_invalidate(MemoryRegionCache *cache, | |
2265 | hwaddr addr, | |
2266 | hwaddr access_len); | |
2267 | ||
2268 | /** | |
2269 | * address_space_cache_destroy: free a #MemoryRegionCache | |
2270 | * | |
2271 | * @cache: The #MemoryRegionCache whose memory should be released. | |
2272 | */ | |
2273 | void address_space_cache_destroy(MemoryRegionCache *cache); | |
2274 | ||
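/*
 * Sketch of repeated accesses through a cache (ring_pa and ring_size are
 * assumptions; the pattern mirrors how virtio caches its rings):
 *
 *     MemoryRegionCache cache = MEMORY_REGION_CACHE_INVALID;
 *     uint16_t flags;
 *
 *     if (address_space_cache_init(&cache, &address_space_memory,
 *                                  ring_pa, ring_size, false) < 0) {
 *         return;                      // not RAM-backed, use a slow path
 *     }
 *     flags = address_space_lduw_le_cached(&cache, 0,
 *                                          MEMTXATTRS_UNSPECIFIED, NULL);
 *     address_space_cache_destroy(&cache);
 */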
052c8fa9 JW |
2275 | /* address_space_get_iotlb_entry: translate an address into an IOTLB |
2276 | * entry. Should be called from an RCU critical section. | |
2277 | */ | |
2278 | IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr, | |
7446eb07 | 2279 | bool is_write, MemTxAttrs attrs); |
1f4e496e | 2280 | |
149f54b5 | 2281 | /* address_space_translate: translate an address range within an address space | |
41063e1e PB |
2282 | * into a MemoryRegion and an address range within that section. Should be | |
2283 | * called from an RCU critical section, to avoid that the last reference | |
2284 | * to the returned region disappears after address_space_translate returns. | |
149f54b5 | 2285 | * |
57914ecb | 2286 | * @fv: #FlatView to be accessed |
149f54b5 PB |
2287 | * @addr: address within that address space |
2288 | * @xlat: pointer to address within the returned memory region section's | |
2289 | * #MemoryRegion. | |
2290 | * @len: pointer to length | |
2291 | * @is_write: indicates the transfer direction | |
bc6b1cec | 2292 | * @attrs: memory attributes |
149f54b5 | 2293 | */ |
16620684 AK |
2294 | MemoryRegion *flatview_translate(FlatView *fv, |
2295 | hwaddr addr, hwaddr *xlat, | |
efa99a2f PM |
2296 | hwaddr *len, bool is_write, |
2297 | MemTxAttrs attrs); | |
16620684 AK |
2298 | |
2299 | static inline MemoryRegion *address_space_translate(AddressSpace *as, | |
2300 | hwaddr addr, hwaddr *xlat, | |
bc6b1cec PM |
2301 | hwaddr *len, bool is_write, |
2302 | MemTxAttrs attrs) | |
16620684 AK |
2303 | { |
2304 | return flatview_translate(address_space_to_flatview(as), | |
efa99a2f | 2305 | addr, xlat, len, is_write, attrs); |
16620684 | 2306 | } |
149f54b5 | 2307 | |
51644ab7 PB |
2308 | /* address_space_access_valid: check for validity of accessing an address |
2309 | * space range | |
2310 | * | |
30951157 AK |
2311 | * Check whether memory is assigned to the given address space range, and |
2312 | * access is permitted by any IOMMU regions that are active for the address | |
2313 | * space. | |
51644ab7 PB |
2314 | * |
2315 | * For now, addr and len should be aligned to a page size. This limitation | |
2316 | * will be lifted in the future. | |
2317 | * | |
2318 | * @as: #AddressSpace to be accessed | |
2319 | * @addr: address within that address space | |
2320 | * @len: length of the area to be checked | |
2321 | * @is_write: indicates the transfer direction | |
fddffa42 | 2322 | * @attrs: memory attributes |
51644ab7 | 2323 | */ |
0c249ff7 | 2324 | bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len, |
fddffa42 | 2325 | bool is_write, MemTxAttrs attrs); |
51644ab7 | 2326 | |
ac1970fb AK |
2327 | /* address_space_map: map a physical memory region into a host virtual address |
2328 | * | |
2329 | * May map a subset of the requested range, given by and returned in @plen. | |
77f55eac PP |
2330 | * May return %NULL and set *@plen to zero(0), if resources needed to perform |
2331 | * the mapping are exhausted. | |
ac1970fb AK |
2332 | * Use only for reads OR writes - not for read-modify-write operations. |
2333 | * Use cpu_register_map_client() to know when retrying the map operation is | |
2334 | * likely to succeed. | |
2335 | * | |
2336 | * @as: #AddressSpace to be accessed | |
2337 | * @addr: address within that address space | |
2338 | * @plen: pointer to length of buffer; updated on return | |
2339 | * @is_write: indicates the transfer direction | |
f26404fb | 2340 | * @attrs: memory attributes |
ac1970fb | 2341 | */ |
a8170e5e | 2342 | void *address_space_map(AddressSpace *as, hwaddr addr, |
f26404fb | 2343 | hwaddr *plen, bool is_write, MemTxAttrs attrs); |
ac1970fb AK |
2344 | |
2345 | /* address_space_unmap: Unmaps a memory region previously mapped by address_space_map() | |
2346 | * | |
2347 | * Will also mark the memory as dirty if @is_write == %true. @access_len gives | |
2348 | * the amount of memory that was actually read or written by the caller. | |
2349 | * | |
2350 | * @as: #AddressSpace used | |
57914ecb | 2351 | * @buffer: host pointer as returned by address_space_map() |
ac1970fb AK |
2352 | * @len: buffer length as returned by address_space_map() |
2353 | * @access_len: amount of data actually transferred | |
2354 | * @is_write: indicates the transfer direction | |
2355 | */ | |
a8170e5e | 2356 | void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, |
ae5883ab | 2357 | bool is_write, hwaddr access_len); |
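/*
 * Sketch of the map/unmap pairing (as, addr, size and the fill helper are
 * assumptions): obtain a host pointer for a guest buffer, write it, then
 * unmap with the number of bytes actually touched so dirty tracking and
 * bounce buffers are handled correctly.
 *
 *     hwaddr len = size;
 *     void *p = address_space_map(as, addr, &len, true,
 *                                 MEMTXATTRS_UNSPECIFIED);
 *
 *     if (!p) {
 *         return;                      // resources exhausted, retry later
 *     }
 *     fill_buffer(p, len);             // len may be shorter than size
 *     address_space_unmap(as, p, len, true, len);
 */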
ac1970fb AK |
2358 | |
2359 | ||
a203ac70 | 2360 | /* Internal functions, part of the implementation of address_space_read. */ |
b2a44fca | 2361 | MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr, |
daa3dda4 | 2362 | MemTxAttrs attrs, void *buf, hwaddr len); |
16620684 | 2363 | MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr, |
a152be43 | 2364 | MemTxAttrs attrs, void *buf, |
0c249ff7 | 2365 | hwaddr len, hwaddr addr1, hwaddr l, |
16620684 | 2366 | MemoryRegion *mr); |
0878d0e1 | 2367 | void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr); |
3cc8f884 | 2368 | |
48564041 PB |
2369 | /* Internal functions, part of the implementation of address_space_read_cached |
2370 | * and address_space_write_cached. */ | |
38df19fa PMD |
2371 | MemTxResult address_space_read_cached_slow(MemoryRegionCache *cache, |
2372 | hwaddr addr, void *buf, hwaddr len); | |
2373 | MemTxResult address_space_write_cached_slow(MemoryRegionCache *cache, | |
2374 | hwaddr addr, const void *buf, | |
2375 | hwaddr len); | |
48564041 | 2376 | |
3cc8f884 PB |
2377 | static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write) |
2378 | { | |
2379 | if (is_write) { | |
d489ae4a AD |
2380 | return memory_region_is_ram(mr) && !mr->readonly && |
2381 | !mr->rom_device && !memory_region_is_ram_device(mr); | |
3cc8f884 | 2382 | } else { |
4a2e242b AW |
2383 | return (memory_region_is_ram(mr) && !memory_region_is_ram_device(mr)) || |
2384 | memory_region_is_romd(mr); | |
3cc8f884 | 2385 | } |
3cc8f884 PB |
2386 | } |
2387 | ||
2388 | /** | |
2389 | * address_space_read: read from an address space. | |
2390 | * | |
2391 | * Return a MemTxResult indicating whether the operation succeeded | |
2392 | * or failed (eg unassigned memory, device rejected the transaction, | |
b2a44fca | 2393 | * IOMMU fault). Called within RCU critical section. |
3cc8f884 | 2394 | * |
b2a44fca | 2395 | * @as: #AddressSpace to be accessed |
3cc8f884 PB |
2396 | * @addr: address within that address space |
2397 | * @attrs: memory transaction attributes | |
2398 | * @buf: buffer with the data transferred | |
5d248213 | 2399 | * @len: length of the data transferred |
3cc8f884 PB |
2400 | */ |
2401 | static inline __attribute__((__always_inline__)) | |
b2a44fca | 2402 | MemTxResult address_space_read(AddressSpace *as, hwaddr addr, |
daa3dda4 | 2403 | MemTxAttrs attrs, void *buf, |
0c249ff7 | 2404 | hwaddr len) |
3cc8f884 PB |
2405 | { |
2406 | MemTxResult result = MEMTX_OK; | |
2407 | hwaddr l, addr1; | |
2408 | void *ptr; | |
2409 | MemoryRegion *mr; | |
b2a44fca | 2410 | FlatView *fv; |
3cc8f884 PB |
2411 | |
2412 | if (__builtin_constant_p(len)) { | |
2413 | if (len) { | |
293a733d | 2414 | RCU_READ_LOCK_GUARD(); |
b2a44fca | 2415 | fv = address_space_to_flatview(as); |
3cc8f884 | 2416 | l = len; |
efa99a2f | 2417 | mr = flatview_translate(fv, addr, &addr1, &l, false, attrs); |
3cc8f884 | 2418 | if (len == l && memory_access_is_direct(mr, false)) { |
0878d0e1 | 2419 | ptr = qemu_map_ram_ptr(mr->ram_block, addr1); |
3cc8f884 PB |
2420 | memcpy(buf, ptr, len); |
2421 | } else { | |
16620684 AK |
2422 | result = flatview_read_continue(fv, addr, attrs, buf, len, |
2423 | addr1, l, mr); | |
3cc8f884 | 2424 | } |
3cc8f884 PB |
2425 | } |
2426 | } else { | |
b2a44fca | 2427 | result = address_space_read_full(as, addr, attrs, buf, len); |
3cc8f884 PB |
2428 | } |
2429 | return result; | |
2430 | } | |
a203ac70 | 2431 | |
1f4e496e PB |
2432 | /** |
2433 | * address_space_read_cached: read from a cached RAM region | |
2434 | * | |
2435 | * @cache: Cached region to be addressed | |
2436 | * @addr: address relative to the base of the RAM region | |
2437 | * @buf: buffer with the data transferred | |
2438 | * @len: length of the data transferred | |
2439 | */ | |
38df19fa | 2440 | static inline MemTxResult |
1f4e496e | 2441 | address_space_read_cached(MemoryRegionCache *cache, hwaddr addr, |
0c249ff7 | 2442 | void *buf, hwaddr len) |
1f4e496e PB |
2443 | { |
2444 | assert(addr < cache->len && len <= cache->len - addr); | |
48564041 PB |
2445 | if (likely(cache->ptr)) { |
2446 | memcpy(buf, cache->ptr + addr, len); | |
38df19fa | 2447 | return MEMTX_OK; |
48564041 | 2448 | } else { |
38df19fa | 2449 | return address_space_read_cached_slow(cache, addr, buf, len); |
48564041 | 2450 | } |
1f4e496e PB |
2451 | } |
2452 | ||
2453 | /** | |
2454 | * address_space_write_cached: write to a cached RAM region | |
2455 | * | |
2456 | * @cache: Cached region to be addressed | |
2457 | * @addr: address relative to the base of the RAM region | |
2458 | * @buf: buffer with the data transferred | |
2459 | * @len: length of the data transferred | |
2460 | */ | |
38df19fa | 2461 | static inline MemTxResult |
1f4e496e | 2462 | address_space_write_cached(MemoryRegionCache *cache, hwaddr addr, |
daa3dda4 | 2463 | const void *buf, hwaddr len) |
1f4e496e PB |
2464 | { |
2465 | assert(addr < cache->len && len <= cache->len - addr); | |
48564041 PB |
2466 | if (likely(cache->ptr)) { |
2467 | memcpy(cache->ptr + addr, buf, len); | |
38df19fa | 2468 | return MEMTX_OK; |
48564041 | 2469 | } else { |
38df19fa | 2470 | return address_space_write_cached_slow(cache, addr, buf, len); |
48564041 | 2471 | } |
1f4e496e PB |
2472 | } |
2473 | ||
7a3df11c | 2474 | #ifdef NEED_CPU_H |
d5d680ca | 2475 | /* enum device_endian to MemOp. */ |
7a3df11c PB |
2476 | static inline MemOp devend_memop(enum device_endian end) |
2477 | { | |
2478 | QEMU_BUILD_BUG_ON(DEVICE_HOST_ENDIAN != DEVICE_LITTLE_ENDIAN && | |
2479 | DEVICE_HOST_ENDIAN != DEVICE_BIG_ENDIAN); | |
2480 | ||
2481 | #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) | |
2482 | /* Swap if non-host endianness or native (target) endianness */ | |
2483 | return (end == DEVICE_HOST_ENDIAN) ? 0 : MO_BSWAP; | |
2484 | #else | |
2485 | const int non_host_endianness = | |
2486 | DEVICE_LITTLE_ENDIAN ^ DEVICE_BIG_ENDIAN ^ DEVICE_HOST_ENDIAN; | |
2487 | ||
2488 | /* In this case, native (target) endianness needs no swap. */ | |
2489 | return (end == non_host_endianness) ? MO_BSWAP : 0; | |
2490 | #endif | |
2491 | } | |
2492 | #endif | |
d5d680ca | 2493 | |
d24f31db DH |
2494 | /* |
2495 | * Inhibit technologies that require discarding of pages in RAM blocks, e.g., | |
2496 | * to manage the actual amount of memory consumed by the VM (then, the memory | |
2497 | * provided by RAM blocks might be bigger than the desired memory consumption). | |
2498 | * This *must* be set if: | |
2499 | * - Discarding parts of a RAM blocks does not result in the change being | |
2500 | * reflected in the VM and the pages getting freed. | |
2501 | * - All memory in RAM blocks is pinned or duplicated, invalidating any previous | |
2502 | * discards blindly. | |
2503 | * - Discarding parts of a RAM blocks will result in integrity issues (e.g., | |
2504 | * encrypted VMs). | |
2505 | * Technologies that only temporarily pin the current working set of a | |
2506 | * driver are fine, because we don't expect such pages to be discarded | |
2507 | * (esp. based on guest action like balloon inflation). | |
2508 | * | |
2509 | * This is *not* to be used to protect from concurrent discards (esp., | |
2510 | * postcopy). | |
2511 | * | |
2512 | * Returns 0 if successful. Returns -EBUSY if a technology that relies on | |
2513 | * discards to work reliably is active. | |
2514 | */ | |
2515 | int ram_block_discard_disable(bool state); | |
2516 | ||
2517 | /* | |
2518 | * Inhibit technologies that disable discarding of pages in RAM blocks. | |
2519 | * | |
2520 | * Returns 0 if successful. Returns -EBUSY if discards are already set to | |
2521 | * broken. | |
2522 | */ | |
2523 | int ram_block_discard_require(bool state); | |
2524 | ||
2525 | /* | |
2526 | * Test if discarding of memory in ram blocks is disabled. | |
2527 | */ | |
2528 | bool ram_block_discard_is_disabled(void); | |
2529 | ||
2530 | /* | |
2531 | * Test if discarding of memory in ram blocks is required to work reliably. | |
2532 | */ | |
2533 | bool ram_block_discard_is_required(void); | |
2534 | ||
093bc2cd AK |
2535 | #endif |
2536 | ||
2537 | #endif |