/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef MEMORY_H
#define MEMORY_H

#ifndef CONFIG_USER_ONLY

#include "exec/cpu-common.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
#include "exec/ramlist.h"
#include "qemu/queue.h"
#include "qemu/int128.h"
#include "qemu/notify.h"
#include "qom/object.h"
#include "qemu/rcu.h"

#define RAM_ADDR_INVALID (~(ram_addr_t)0)

#define MAX_PHYS_ADDR_SPACE_BITS 62
#define MAX_PHYS_ADDR (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)

#define TYPE_MEMORY_REGION "qemu:memory-region"
#define MEMORY_REGION(obj) \
        OBJECT_CHECK(MemoryRegion, (obj), TYPE_MEMORY_REGION)

typedef struct MemoryRegionOps MemoryRegionOps;
typedef struct MemoryRegionMmio MemoryRegionMmio;

struct MemoryRegionMmio {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
};

typedef struct IOMMUTLBEntry IOMMUTLBEntry;

/* See address_space_translate: bit 0 is read, bit 1 is write. */
typedef enum {
    IOMMU_NONE = 0,
    IOMMU_RO   = 1,
    IOMMU_WO   = 2,
    IOMMU_RW   = 3,
} IOMMUAccessFlags;

#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))

struct IOMMUTLBEntry {
    AddressSpace    *target_as;
    hwaddr           iova;
    hwaddr           translated_addr;
    hwaddr           addr_mask;  /* 0xfff = 4k translation */
    IOMMUAccessFlags perm;
};

/*
 * Bitmap for different IOMMUNotifier capabilities. Each notifier can
 * register for one or more IOMMU notifier capability bits.
 */
typedef enum {
    IOMMU_NOTIFIER_NONE = 0,
    /* Notify cache invalidations */
    IOMMU_NOTIFIER_UNMAP = 0x1,
    /* Notify entry changes (newly created entries) */
    IOMMU_NOTIFIER_MAP = 0x2,
} IOMMUNotifierFlag;

#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)

struct IOMMUNotifier;
typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
                            IOMMUTLBEntry *data);

struct IOMMUNotifier {
    IOMMUNotify notify;
    IOMMUNotifierFlag notifier_flags;
    /* Notify for address space range start <= addr <= end */
    hwaddr start;
    hwaddr end;
    QLIST_ENTRY(IOMMUNotifier) node;
};
typedef struct IOMMUNotifier IOMMUNotifier;

static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
                                       IOMMUNotifierFlag flags,
                                       hwaddr start, hwaddr end)
{
    n->notify = fn;
    n->notifier_flags = flags;
    n->start = start;
    n->end = end;
}

/* New-style MMIO accessors can indicate that the transaction failed.
 * A zero (MEMTX_OK) response means success; anything else is a failure
 * of some kind. The memory subsystem will bitwise-OR together results
 * if it is synthesizing an operation from multiple smaller accesses.
 */
#define MEMTX_OK 0
#define MEMTX_ERROR             (1U << 0) /* device returned an error */
#define MEMTX_DECODE_ERROR      (1U << 1) /* nothing at that address */
typedef uint32_t MemTxResult;

/*
 * Memory region callbacks
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     hwaddr addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  hwaddr addr,
                  uint64_t data,
                  unsigned size);

    MemTxResult (*read_with_attrs)(void *opaque,
                                   hwaddr addr,
                                   uint64_t *data,
                                   unsigned size,
                                   MemTxAttrs attrs);
    MemTxResult (*write_with_attrs)(void *opaque,
                                    hwaddr addr,
                                    uint64_t data,
                                    unsigned size,
                                    MemTxAttrs attrs);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specify bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported. Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
        /*
         * If present, and returns #false, the transaction is not accepted
         * by the device (and results in machine dependent behaviour such
         * as a machine check exception).
         */
        bool (*accepts)(void *opaque, hwaddr addr,
                        unsigned size, bool is_write);
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented. Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented. Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported. Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;

    /* If .read and .write are not present, old_mmio may be used for
     * backwards compatibility with old mmio registration
     */
    const MemoryRegionMmio old_mmio;
};
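
/* Editor's sketch (illustrative, not part of the original header; guarded
 * with #if 0 so it is never compiled): a minimal MemoryRegionOps table for
 * a hypothetical device exposing one 32-bit scratch register. All "demo_*"
 * names are made up; such a table would be passed to memory_region_init_io(),
 * declared further below. DEVICE_NATIVE_ENDIAN comes from the included
 * "exec/cpu-common.h".
 */
#if 0
static uint64_t demo_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    uint32_t *reg = opaque;   /* @opaque as given to memory_region_init_io */
    return *reg;              /* the register reads back its last value */
}

static void demo_mmio_write(void *opaque, hwaddr addr,
                            uint64_t data, unsigned size)
{
    uint32_t *reg = opaque;
    *reg = (uint32_t)data;    /* any in-bounds write updates the register */
}

static const MemoryRegionOps demo_mmio_ops = {
    .read = demo_mmio_read,
    .write = demo_mmio_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 4, /* guest must use 32-bit accesses */
        .max_access_size = 4,
    },
};
#endif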

typedef struct MemoryRegionIOMMUOps MemoryRegionIOMMUOps;

struct MemoryRegionIOMMUOps {
    /*
     * Return a TLB entry that contains a given address. Flag should
     * be the access permission of this translation operation. We can
     * set flag to IOMMU_NONE to mean that we don't need any
     * read/write permission checks, for example during region replay.
     */
    IOMMUTLBEntry (*translate)(MemoryRegion *iommu, hwaddr addr,
                               IOMMUAccessFlags flag);
    /* Returns minimum supported page size */
    uint64_t (*get_min_page_size)(MemoryRegion *iommu);
    /* Called when IOMMU Notifier flag changed */
    void (*notify_flag_changed)(MemoryRegion *iommu,
                                IOMMUNotifierFlag old_flags,
                                IOMMUNotifierFlag new_flags);
    /* Set this up to provide a customized IOMMU replay function */
    void (*replay)(MemoryRegion *iommu, IOMMUNotifier *notifier);
};

typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;

struct MemoryRegion {
    Object parent_obj;

    /* All fields are private - violators will be prosecuted */

    /* The following fields should fit in a cache line */
    bool romd_mode;
    bool ram;
    bool subpage;
    bool readonly; /* For RAM regions */
    bool rom_device;
    bool flush_coalesced_mmio;
    bool global_locking;
    uint8_t dirty_log_mask;
    RAMBlock *ram_block;
    Object *owner;
    const MemoryRegionIOMMUOps *iommu_ops;

    const MemoryRegionOps *ops;
    void *opaque;
    MemoryRegion *container;
    Int128 size;
    hwaddr addr;
    void (*destructor)(MemoryRegion *mr);
    uint64_t align;
    bool terminates;
    bool ram_device;
    bool enabled;
    bool warning_printed; /* For reservations */
    uint8_t vga_logging_count;
    MemoryRegion *alias;
    hwaddr alias_offset;
    int32_t priority;
    QTAILQ_HEAD(subregions, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
    const char *name;
    unsigned ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
    QLIST_HEAD(, IOMMUNotifier) iommu_notify;
    IOMMUNotifierFlag iommu_notify_flags;
};

#define IOMMU_NOTIFIER_FOREACH(n, mr) \
    QLIST_FOREACH((n), &(mr)->iommu_notify, node)

/**
 * MemoryListener: callbacks structure for updates to the physical memory map
 *
 * Allows a component to adjust to changes in the guest-visible memory map.
 * Use with memory_listener_register() and memory_listener_unregister().
 */
struct MemoryListener {
    void (*begin)(MemoryListener *listener);
    void (*commit)(MemoryListener *listener);
    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
                      int old, int new);
    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
                     int old, int new);
    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_global_start)(MemoryListener *listener);
    void (*log_global_stop)(MemoryListener *listener);
    void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*coalesced_mmio_add)(MemoryListener *listener,
                               MemoryRegionSection *section,
                               hwaddr addr, hwaddr len);
    void (*coalesced_mmio_del)(MemoryListener *listener,
                               MemoryRegionSection *section,
                               hwaddr addr, hwaddr len);
    /* Lower = earlier (during add), later (during del) */
    unsigned priority;
    AddressSpace *address_space;
    QTAILQ_ENTRY(MemoryListener) link;
    QTAILQ_ENTRY(MemoryListener) link_as;
};

/**
 * AddressSpace: describes a mapping of addresses to #MemoryRegion objects
 */
struct AddressSpace {
    /* All fields are private. */
    struct rcu_head rcu;
    char *name;
    MemoryRegion *root;
    int ref_count;
    bool malloced;

    /* Accessed via RCU. */
    struct FlatView *current_map;

    int ioeventfd_nb;
    struct MemoryRegionIoeventfd *ioeventfds;
    struct AddressSpaceDispatch *dispatch;
    struct AddressSpaceDispatch *next_dispatch;
    MemoryListener dispatch_listener;
    QTAILQ_HEAD(memory_listeners_as, MemoryListener) listeners;
    QTAILQ_ENTRY(AddressSpace) address_spaces_link;
};

/**
 * MemoryRegionSection: describes a fragment of a #MemoryRegion
 *
 * @mr: the region, or %NULL if empty
 * @address_space: the address space the region is mapped in
 * @offset_within_region: the beginning of the section, relative to @mr's start
 * @size: the size of the section; will not exceed @mr's boundaries
 * @offset_within_address_space: the address of the first byte of the section
 *     relative to the region's address space
 * @readonly: writes to this section are ignored
 */
struct MemoryRegionSection {
    MemoryRegion *mr;
    AddressSpace *address_space;
    hwaddr offset_within_region;
    Int128 size;
    hwaddr offset_within_address_space;
    bool readonly;
};
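
/* Editor's sketch (illustrative, not part of the original header; compiled
 * out): a trivial MemoryListener that counts sections as they are mapped
 * and unmapped. It would be activated with memory_listener_register(),
 * declared further below; the address_space_memory global mentioned in the
 * trailing comment lives in "exec/address-spaces.h".
 */
#if 0
static unsigned demo_section_count;

static void demo_region_add(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    demo_section_count++;     /* a new section became visible to the guest */
}

static void demo_region_del(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    demo_section_count--;     /* the section is going away */
}

static MemoryListener demo_listener = {
    .region_add = demo_region_add,
    .region_del = demo_region_del,
    .priority = 10,           /* called after lower-priority listeners */
};

/* ... memory_listener_register(&demo_listener, &address_space_memory); */
#endif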

/**
 * memory_region_init: Initialize a memory region
 *
 * The region typically acts as a container for other memory regions. Use
 * memory_region_add_subregion() to add subregions.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region; any subregions beyond this size will be clipped
 */
void memory_region_init(MemoryRegion *mr,
                        struct Object *owner,
                        const char *name,
                        uint64_t size);

/**
 * memory_region_ref: Add 1 to a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug. MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function adds a reference to the owner.
 *
 * All MemoryRegions must have an owner if they can disappear, even if the
 * device they belong to operates exclusively under the BQL. This is because
 * the region could be returned at any time by memory_region_find, and this
 * is usually under guest control.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_ref(MemoryRegion *mr);

/**
 * memory_region_unref: Remove 1 from a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug. MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function removes a reference to the owner and possibly destroys it.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_unref(MemoryRegion *mr);

/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(MemoryRegion *mr,
                           struct Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size);

/**
 * memory_region_init_ram: Initialize RAM memory region. Accesses into the
 *                         region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name; becomes part of the RAMBlock name used in the
 *        migration stream, so it must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_ram(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);
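
/* Editor's sketch (illustrative, not part of the original header; compiled
 * out): typical RAM creation in a device or board model. The "demo" names
 * and 64 KiB size are arbitrary; error handling follows the usual QEMU
 * Error ** convention.
 */
#if 0
static void demo_init_ram(Object *owner, MemoryRegion *ram, Error **errp)
{
    /* Creates a 64 KiB RAM block; "demo.ram" becomes part of the
     * RAMBlock name visible in the migration stream.
     */
    memory_region_init_ram(ram, owner, "demo.ram", 64 * 1024, errp);
}
#endif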

/**
 * memory_region_init_resizeable_ram: Initialize memory region with resizeable
 *                                    RAM. Accesses into the region will
 *                                    modify memory directly. Only an initial
 *                                    portion of this RAM is actually used.
 *                                    The used size can change across reboots.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name; becomes part of the RAMBlock name used in the
 *        migration stream, so it must be unique within any device
 * @size: used size of the region.
 * @max_size: max size of the region.
 * @resized: callback to notify owner about used size change.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       struct Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp);
#ifdef __linux__
/**
 * memory_region_init_ram_from_file: Initialize RAM memory region with a
 *                                   mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name; becomes part of the RAMBlock name used in the
 *        migration stream, so it must be unique within any device
 * @size: size of the region.
 * @share: %true if memory must be mmapped with the MAP_SHARED flag
 * @path: the path in which to allocate the RAM.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      bool share,
                                      const char *path,
                                      Error **errp);
#endif

/**
 * memory_region_init_ram_ptr: Initialize RAM memory region from a
 *                             user-provided pointer. Accesses into the
 *                             region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name; becomes part of the RAMBlock name used in the
 *        migration stream, so it must be unique within any device
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                struct Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr);

/**
 * memory_region_init_ram_device_ptr: Initialize RAM device memory region from
 *                                    a user-provided pointer.
 *
 * A RAM device represents a mapping to a physical device, such as to a PCI
 * MMIO BAR of a vfio-pci assigned device. The memory region may be mapped
 * into the VM address space and access to the region will modify memory
 * directly. However, the memory region should not be included in a memory
 * dump (the device may not be enabled/mapped at the time of the dump), and
 * operations incompatible with manipulating MMIO should be avoided. This
 * replaces the skip_dump flag.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 */
void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       struct Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr);

/**
 * memory_region_init_alias: Initialize a memory region that aliases all or a
 *                           part of another memory region.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @orig: the region to be referenced; @mr will be equivalent to
 *        @orig between @offset and @offset + @size - 1.
 * @offset: start of the section in @orig to be referenced.
 * @size: size of the region.
 */
void memory_region_init_alias(MemoryRegion *mr,
                              struct Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size);
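
/* Editor's sketch (illustrative, not part of the original header; compiled
 * out): exposing a 4 KiB window into existing RAM via an alias. Accesses to
 * "demo.window" at offset X land in the aliased RAM at 0x1000 + X.
 */
#if 0
static void demo_make_window(Object *owner, MemoryRegion *ram,
                             MemoryRegion *window)
{
    memory_region_init_alias(window, owner, "demo.window",
                             ram,      /* @orig: region being aliased */
                             0x1000,   /* @offset into @orig */
                             0x1000);  /* @size of the window */
}
#endif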

/**
 * memory_region_init_rom: Initialize a ROM memory region.
 *
 * This has the same effect as calling memory_region_init_ram()
 * and then marking the resulting region read-only with
 * memory_region_set_readonly().
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name; becomes part of the RAMBlock name used in the
 *        migration stream, so it must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);

/**
 * memory_region_init_rom_device: Initialize a ROM memory region. Writes are
 *                                handled via callbacks.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @opaque: passed to the write callbacks of the @ops structure.
 * @name: Region name; becomes part of the RAMBlock name used in the
 *        migration stream, so it must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_device(MemoryRegion *mr,
                                   struct Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp);

/**
 * memory_region_init_reservation: Initialize a memory region that reserves
 *                                 I/O space.
 *
 * A reservation region primarily serves debugging purposes. It claims I/O
 * space that is not supposed to be handled by QEMU itself. Any access via
 * the memory API will cause an abort().
 * This function is deprecated. Use memory_region_init_io() with NULL
 * callbacks instead.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
static inline void memory_region_init_reservation(MemoryRegion *mr,
                                                  Object *owner,
                                                  const char *name,
                                                  uint64_t size)
{
    memory_region_init_io(mr, owner, NULL, mr, name, size);
}

/**
 * memory_region_init_iommu: Initialize a memory region that translates
 *                           addresses
 *
 * An IOMMU region translates addresses and forwards accesses to a target
 * memory region.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @ops: a structure containing the callbacks that translate addresses
 *       for the region
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_iommu(MemoryRegion *mr,
                              struct Object *owner,
                              const MemoryRegionIOMMUOps *ops,
                              const char *name,
                              uint64_t size);

/**
 * memory_region_owner: get a memory region's owner.
 *
 * @mr: the memory region being queried.
 */
struct Object *memory_region_owner(MemoryRegion *mr);

/**
 * memory_region_size: get a memory region's size.
 *
 * @mr: the memory region being queried.
 */
uint64_t memory_region_size(MemoryRegion *mr);

/**
 * memory_region_is_ram: check whether a memory region is random access
 *
 * Returns %true if a memory region is random access.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_ram(MemoryRegion *mr)
{
    return mr->ram;
}

/**
 * memory_region_is_ram_device: check whether a memory region is a ram device
 *
 * Returns %true if a memory region is a device-backed RAM region.
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_ram_device(MemoryRegion *mr);

/**
 * memory_region_is_romd: check whether a memory region is in ROMD mode
 *
 * Returns %true if a memory region is a ROM device and currently set to allow
 * direct reads.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_romd(MemoryRegion *mr)
{
    return mr->rom_device && mr->romd_mode;
}

/**
 * memory_region_is_iommu: check whether a memory region is an iommu
 *
 * Returns %true if a memory region is an iommu.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_iommu(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_is_iommu(mr->alias);
    }
    return mr->iommu_ops;
}


/**
 * memory_region_iommu_get_min_page_size: get minimum supported page size
 *                                        for an iommu
 *
 * Returns minimum supported page size for an iommu.
 *
 * @mr: the memory region being queried
 */
uint64_t memory_region_iommu_get_min_page_size(MemoryRegion *mr);

/**
 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
 *
 * The notification type will be decided by entry.perm bits:
 *
 * - For UNMAP (cache invalidation) notifies: set entry.perm to IOMMU_NONE.
 * - For MAP (newly added entry) notifies: set entry.perm to the
 *   permission of the page (which is definitely !IOMMU_NONE).
 *
 * Note: for any IOMMU implementation, an in-place mapping change
 * should be notified with an UNMAP followed by a MAP.
 *
 * @mr: the memory region that was changed
 * @entry: the new entry in the IOMMU translation table. The entry
 *         replaces all old entries for the same virtual I/O address range.
 *         Deleted entries have .@perm == 0.
 */
void memory_region_notify_iommu(MemoryRegion *mr,
                                IOMMUTLBEntry entry);

/**
 * memory_region_notify_one: notify a change in an IOMMU translation
 *                           entry to a single notifier
 *
 * This works just like memory_region_notify_iommu(), but it only
 * notifies a specific notifier, not all of them.
 *
 * @notifier: the notifier to be notified
 * @entry: the new entry in the IOMMU translation table. The entry
 *         replaces all old entries for the same virtual I/O address range.
 *         Deleted entries have .@perm == 0.
 */
void memory_region_notify_one(IOMMUNotifier *notifier,
                              IOMMUTLBEntry *entry);

/**
 * memory_region_register_iommu_notifier: register a notifier for changes to
 * IOMMU translation entries.
 *
 * @mr: the memory region to observe
 * @n: the IOMMUNotifier to be added; the notify callback receives a
 *     pointer to an #IOMMUTLBEntry as the opaque value; the pointer
 *     ceases to be valid on exit from the notifier.
 */
void memory_region_register_iommu_notifier(MemoryRegion *mr,
                                           IOMMUNotifier *n);
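
/* Editor's sketch (illustrative, not part of the original header; compiled
 * out): registering for both MAP and UNMAP events over a whole 48-bit IOVA
 * range. The callback and range bounds are hypothetical.
 */
#if 0
static void demo_iommu_notify(IOMMUNotifier *n, IOMMUTLBEntry *entry)
{
    /* entry->perm == IOMMU_NONE indicates an invalidation (UNMAP);
     * anything else is a newly valid mapping (MAP). The entry pointer
     * is only valid for the duration of this call.
     */
}

static void demo_watch_iommu(MemoryRegion *iommu_mr, IOMMUNotifier *n)
{
    iommu_notifier_init(n, demo_iommu_notify, IOMMU_NOTIFIER_ALL,
                        0, (1ULL << 48) - 1);
    memory_region_register_iommu_notifier(iommu_mr, n);
}
#endif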

/**
 * memory_region_iommu_replay: replay existing IOMMU translations to
 * a notifier with the minimum page granularity returned by
 * mr->iommu_ops->get_min_page_size().
 *
 * @mr: the memory region to observe
 * @n: the notifier to which to replay iommu mappings
 */
void memory_region_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n);

/**
 * memory_region_iommu_replay_all: replay existing IOMMU translations
 * to all the notifiers registered.
 *
 * @mr: the memory region to observe
 */
void memory_region_iommu_replay_all(MemoryRegion *mr);

/**
 * memory_region_unregister_iommu_notifier: unregister a notifier for
 * changes to IOMMU translation entries.
 *
 * @mr: the memory region which was observed and for which notify_stopped()
 *      needs to be called
 * @n: the notifier to be removed.
 */
void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n);

/**
 * memory_region_name: get a memory region's name
 *
 * Returns the string that was used to initialize the memory region.
 *
 * @mr: the memory region being queried
 */
const char *memory_region_name(const MemoryRegion *mr);

/**
 * memory_region_is_logging: return whether a memory region is logging writes
 *
 * Returns %true if the memory region is logging writes for the given client
 *
 * @mr: the memory region being queried
 * @client: the client being queried
 */
bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);

/**
 * memory_region_get_dirty_log_mask: return the clients for which a
 * memory region is logging writes.
 *
 * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
 * are the bit indices.
 *
 * @mr: the memory region being queried
 */
uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);

/**
 * memory_region_is_rom: check whether a memory region is ROM
 *
 * Returns %true if a memory region is read-only memory.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_rom(MemoryRegion *mr)
{
    return mr->ram && mr->readonly;
}


/**
 * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
 *
 * Returns a file descriptor backing a file-based RAM memory region,
 * or -1 if the region is not a file-based RAM memory region.
 *
 * @mr: the RAM or alias memory region being queried.
 */
int memory_region_get_fd(MemoryRegion *mr);

/**
 * memory_region_set_fd: Mark a RAM memory region as backed by a
 * file descriptor.
 *
 * This function is typically used after memory_region_init_ram_ptr().
 *
 * @mr: the memory region being queried.
 * @fd: the file descriptor that backs @mr.
 */
void memory_region_set_fd(MemoryRegion *mr, int fd);

/**
 * memory_region_from_host: Convert a pointer into a RAM memory region
 * and an offset within it.
 *
 * Given a host pointer inside a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()), return
 * the MemoryRegion and the offset within it.
 *
 * Use with care; by the time this function returns, the returned pointer is
 * not protected by RCU anymore. If the caller is not within an RCU critical
 * section and does not hold the iothread lock, it must have other means of
 * protecting the pointer, such as a reference to the region that includes
 * the incoming ram_addr_t.
 *
 * @ptr: the host pointer to be converted
 * @offset: the offset within the region is stored here
 */
MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);

/**
 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
 *
 * Returns a host pointer to a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()).
 *
 * Use with care; by the time this function returns, the returned pointer is
 * not protected by RCU anymore. If the caller is not within an RCU critical
 * section and does not hold the iothread lock, it must have other means of
 * protecting the pointer, such as a reference to the region that includes
 * the incoming ram_addr_t.
 *
 * @mr: the memory region being queried.
 */
void *memory_region_get_ram_ptr(MemoryRegion *mr);

/* memory_region_ram_resize: Resize a RAM region.
 *
 * Only legal before the guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * @mr: a memory region created with @memory_region_init_resizeable_ram.
 * @newsize: the new size of the region
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
                              Error **errp);

/**
 * memory_region_set_log: Turn dirty logging on or off for a region.
 *
 * Turns dirty logging on or off for a specified client (display, migration).
 * Only meaningful for RAM regions.
 *
 * @mr: the memory region being updated.
 * @log: whether dirty logging is to be enabled or disabled.
 * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
 */
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);

/**
 * memory_region_get_dirty: Check whether a range of bytes is dirty
 *                          for a specified client.
 *
 * Checks whether a range of bytes has been written to since the last
 * call to memory_region_reset_dirty() with the same @client. Dirty logging
 * must be enabled.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size, unsigned client);

/**
 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
 *
 * Marks a range of bytes as dirty, after it has been dirtied outside
 * guest code.
 *
 * @mr: the memory region being dirtied.
 * @addr: the address (relative to the start of the region) being dirtied.
 * @size: size of the range being dirtied.
 */
void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size);

/**
 * memory_region_test_and_clear_dirty: Check whether a range of bytes is dirty
 *                                     for a specified client, and clear it.
 *
 * Checks whether a range of bytes has been written to since the last
 * call to memory_region_reset_dirty() with the same @client. Dirty logging
 * must be enabled.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
                                        hwaddr size, unsigned client);

/**
 * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
 *                                         bitmap and clear it.
 *
 * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
 * returns the snapshot. The snapshot can then be used to query dirty
 * status, using memory_region_snapshot_get_dirty. Unlike
 * memory_region_test_and_clear_dirty this allows querying the same
 * page multiple times, which is especially useful for display updates
 * where the scanlines often are not page aligned.
 *
 * The dirty bitmap region which gets copied into the snapshot (and
 * cleared afterwards) can be larger than requested. The boundaries
 * are rounded up/down so complete bitmap longs (covering 64 pages on
 * 64-bit hosts) can be copied over into the bitmap snapshot. This
 * isn't a problem for display updates as the extra pages are outside
 * the visible area, and in case the visible area changes a full
 * display redraw is due anyway. Should other use cases for this
 * function emerge we might have to revisit this implementation
 * detail.
 *
 * Use g_free to release DirtyBitmapSnapshot.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA.
 */
DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            hwaddr size,
                                                            unsigned client);

/**
 * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty
 *                                   in the specified dirty bitmap snapshot.
 *
 * @mr: the memory region being queried.
 * @snap: the dirty bitmap snapshot
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 */
bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
                                      DirtyBitmapSnapshot *snap,
                                      hwaddr addr, hwaddr size);
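
/* Editor's sketch (illustrative, not part of the original header; compiled
 * out): a display-update style scan over a dirty bitmap snapshot, which,
 * unlike memory_region_test_and_clear_dirty(), may be queried repeatedly.
 * The 4 KiB page stride is illustrative, and the DIRTY_MEMORY_VGA client
 * constant is assumed to come from the included "exec/ramlist.h".
 */
#if 0
static void demo_refresh(MemoryRegion *vram)
{
    hwaddr size = memory_region_size(vram);
    DirtyBitmapSnapshot *snap =
        memory_region_snapshot_and_clear_dirty(vram, 0, size,
                                               DIRTY_MEMORY_VGA);
    hwaddr addr;

    for (addr = 0; addr < size; addr += 4096) {
        if (memory_region_snapshot_get_dirty(vram, snap, addr, 4096)) {
            /* redraw the scanlines backed by this page */
        }
    }
    g_free(snap);
}
#endif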

/**
 * memory_region_sync_dirty_bitmap: Synchronize a region's dirty bitmap with
 *                                  any external TLBs (e.g. kvm)
 *
 * Flushes dirty information from accelerators such as kvm and vhost-net
 * and makes it available to users of the memory API.
 *
 * @mr: the region being flushed.
 */
void memory_region_sync_dirty_bitmap(MemoryRegion *mr);

/**
 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
 *                            client.
 *
 * Marks a range of pages as no longer dirty.
 *
 * @mr: the region being updated.
 * @addr: the start of the subrange being cleaned.
 * @size: the size of the subrange being cleaned.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client);

/**
 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
 *
 * Allows a memory region to be marked as read-only (turning it into a ROM).
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @readonly: whether the region is to be ROM or RAM.
 */
void memory_region_set_readonly(MemoryRegion *mr, bool readonly);

/**
 * memory_region_rom_device_set_romd: enable/disable ROMD mode
 *
 * Allows a ROM device (initialized with memory_region_init_rom_device()) to
 * be set to ROMD mode (default) or MMIO mode. When it is in ROMD mode, the
 * device is mapped to guest memory and satisfies read access directly.
 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
 * Writes are always handled by the #MemoryRegion.write function.
 *
 * @mr: the memory region to be updated
 * @romd_mode: %true to put the region into ROMD mode
 */
void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);

/**
 * memory_region_set_coalescing: Enable memory coalescing for the region.
 *
 * Enables writes to a region to be queued for later processing. MMIO ->write
 * callbacks may be delayed until a non-coalesced MMIO is issued.
 * Only useful for IO regions. Roughly similar to write-combining hardware.
 *
 * @mr: the memory region to be write coalesced
 */
void memory_region_set_coalescing(MemoryRegion *mr);

/**
 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
 *                               a region.
 *
 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
 * Multiple calls can be issued to coalesce disjoint ranges.
 *
 * @mr: the memory region to be updated.
 * @offset: the start of the range within the region to be coalesced.
 * @size: the size of the subrange to be coalesced.
 */
void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size);

/**
 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
 *
 * Disables any coalescing caused by memory_region_set_coalescing() or
 * memory_region_add_coalescing(). Roughly equivalent to uncacheable memory
 * hardware.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_coalescing(MemoryRegion *mr);

/**
 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
 *                                    accesses.
 *
 * Ensure that pending coalesced MMIO requests are flushed before the memory
 * region is accessed. This property is automatically enabled for all regions
 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
 *
 * @mr: the memory region to be updated.
 */
void memory_region_set_flush_coalesced(MemoryRegion *mr);

/**
 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
 *                                      accesses.
 *
 * Clear the automatic coalesced MMIO flushing enabled via
 * memory_region_set_flush_coalesced. Note that this service has no effect on
 * memory regions that have MMIO coalescing enabled for themselves. For them,
 * automatic flushing will stop once coalescing is disabled.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_flush_coalesced(MemoryRegion *mr);

/**
 * memory_region_set_global_locking: Declares that access processing requires
 *                                   QEMU's global lock.
 *
 * When this is invoked, accesses to the memory region will be processed while
 * holding the global lock of QEMU. This is the default behavior of memory
 * regions.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_set_global_locking(MemoryRegion *mr);

/**
 * memory_region_clear_global_locking: Declares that access processing does
 *                                     not depend on the QEMU global lock.
 *
 * By clearing this property, accesses to the memory region will be processed
 * outside of QEMU's global lock (unless the lock is held when issuing the
 * access request). In this case, the device model implementing the access
 * handlers is responsible for synchronizing concurrent accesses.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_global_locking(MemoryRegion *mr);

/**
 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
 *                            is written to a location.
 *
 * Marks a word in an IO region (initialized with memory_region_init_io())
 * as a trigger for an eventfd event. The I/O callback will not be called.
 * The caller must be prepared to handle failure (that is, take the required
 * action if the callback _is_ called).
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: the #EventNotifier to be triggered when @addr, @size, and @data
 *     all match.
 **/
void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);
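
/* Editor's sketch (illustrative, not part of the original header; compiled
 * out): wiring a doorbell register to an eventfd so that a guest write of
 * the value 1 wakes a waiter without running the MMIO callback. The offset
 * and value are hypothetical; EventNotifier setup (event_notifier_init())
 * lives in "qemu/event_notifier.h".
 */
#if 0
static void demo_add_doorbell(MemoryRegion *mmio, EventNotifier *e)
{
    memory_region_add_eventfd(mmio,
                              0x40,   /* offset of the doorbell register */
                              4,      /* 32-bit writes trigger it */
                              true,   /* only when the data matches... */
                              1,      /* ...this value */
                              e);
}
#endif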

/**
 * memory_region_del_eventfd: Cancel an eventfd.
 *
 * Cancels an eventfd trigger requested by a previous
 * memory_region_add_eventfd() call.
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: the #EventNotifier to be triggered when @addr, @size, and @data
 *     all match.
 */
void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);

/**
 * memory_region_add_subregion: Add a subregion to a container.
 *
 * Adds a subregion at @offset. The subregion may not overlap with other
 * subregions (except for those explicitly marked as overlapping). A region
 * may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 */
void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion);
/**
 * memory_region_add_subregion_overlap: Add a subregion to a container
 *                                      with overlap.
 *
 * Adds a subregion at @offset. The subregion may overlap with other
 * subregions. Conflicts are resolved by having a higher @priority hide a
 * lower @priority. Subregions without priority are taken as @priority 0.
 * A region may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 * @priority: used for resolving overlaps; highest priority wins.
 */
void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority);
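
/* Editor's sketch (illustrative, not part of the original header; compiled
 * out): building a container with one plain subregion and one overlapping
 * higher-priority subregion that shadows part of it. Addresses and names
 * are arbitrary.
 */
#if 0
static void demo_build_map(Object *owner, MemoryRegion *sysmem,
                           MemoryRegion *ram, MemoryRegion *mmio)
{
    memory_region_init(sysmem, owner, "demo.sysmem", UINT64_MAX);
    memory_region_add_subregion(sysmem, 0x0, ram);
    /* Priority 1 hides the RAM underneath wherever the two overlap. */
    memory_region_add_subregion_overlap(sysmem, 0x8000, mmio, 1);
}
#endif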

/**
 * memory_region_get_ram_addr: Get the ram address associated with a memory
 *                             region
 *
 * @mr: the memory region being queried
 */
ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);

uint64_t memory_region_get_alignment(const MemoryRegion *mr);
/**
 * memory_region_del_subregion: Remove a subregion.
 *
 * Removes a subregion from its container.
 *
 * @mr: the container to be updated.
 * @subregion: the region being removed; must be a current subregion of @mr.
 */
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion);

/*
 * memory_region_set_enabled: dynamically enable or disable a region
 *
 * Enables or disables a memory region. A disabled memory region
 * ignores all accesses to itself and its subregions. It does not
 * obscure sibling subregions with lower priority - it simply behaves as
 * if it was removed from the hierarchy.
 *
 * Regions default to being enabled.
 *
 * @mr: the region to be updated
 * @enabled: whether to enable or disable the region
 */
void memory_region_set_enabled(MemoryRegion *mr, bool enabled);

/*
 * memory_region_set_address: dynamically update the address of a region
 *
 * Dynamically updates the address of a region, relative to its container.
 * May be used on regions that are currently part of a memory hierarchy.
 *
 * @mr: the region to be updated
 * @addr: new address, relative to container region
 */
void memory_region_set_address(MemoryRegion *mr, hwaddr addr);

/*
 * memory_region_set_size: dynamically update the size of a region.
 *
 * Dynamically updates the size of a region.
 *
 * @mr: the region to be updated
 * @size: used size of the region.
 */
void memory_region_set_size(MemoryRegion *mr, uint64_t size);

/*
 * memory_region_set_alias_offset: dynamically update a memory alias's offset
 *
 * Dynamically updates the offset into the target region that an alias points
 * to, as if the @offset argument to memory_region_init_alias() had changed.
 *
 * @mr: the #MemoryRegion to be updated; should be an alias.
 * @offset: the new offset into the target memory region
 */
void memory_region_set_alias_offset(MemoryRegion *mr,
                                    hwaddr offset);

/**
 * memory_region_present: checks if an address relative to a @container
 * translates into a #MemoryRegion within @container
 *
 * Answers whether a #MemoryRegion within @container covers the address
 * @addr.
 *
 * @container: a #MemoryRegion within which @addr is a relative address
 * @addr: the area within @container to be searched
 */
bool memory_region_present(MemoryRegion *container, hwaddr addr);

/**
 * memory_region_is_mapped: returns true if #MemoryRegion is mapped
 * into any address space.
 *
 * @mr: a #MemoryRegion which should be checked if it's mapped
 */
bool memory_region_is_mapped(MemoryRegion *mr);

/**
 * memory_region_find: translate an address/size relative to a
 * MemoryRegion into a #MemoryRegionSection.
 *
 * Locates the first #MemoryRegion within @mr that overlaps the range
 * given by @addr and @size.
 *
 * Returns a #MemoryRegionSection that describes a contiguous overlap.
 * It will have the following characteristics:
 *    .@size = 0 iff no overlap was found
 *    .@mr is non-%NULL iff an overlap was found
 *
 * Remember that in the return value the @offset_within_region is
 * relative to the returned region (in the .@mr field), not to the
 * @mr argument.
 *
 * Similarly, the .@offset_within_address_space is relative to the
 * address space that contains both regions, the passed and the
 * returned one. However, in the special case where the @mr argument
 * has no container (and thus is the root of the address space), the
 * following will hold:
 *    .@offset_within_address_space >= @addr
 *    .@offset_within_address_space + .@size <= @addr + @size
 *
 * @mr: a MemoryRegion within which @addr is a relative address
 * @addr: start of the area within @mr to be searched
 * @size: size of the area to be searched
 */
MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size);
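
/* Editor's sketch (illustrative, not part of the original header; compiled
 * out): probing whether anything is mapped over a 4 KiB range of @root.
 * The unref assumes memory_region_find takes a reference on the returned
 * region, as QEMU does, though the doc above does not spell that out.
 */
#if 0
static bool demo_range_backed(MemoryRegion *root, hwaddr addr)
{
    MemoryRegionSection section = memory_region_find(root, addr, 4096);

    if (!section.mr) {
        return false;           /* no overlap found */
    }
    /* section.offset_within_region is relative to section.mr here. */
    memory_region_unref(section.mr);
    return true;
}
#endif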

/**
 * memory_global_dirty_log_sync: synchronize the dirty log for all memory
 *
 * Synchronizes the dirty page log for all address spaces.
 */
void memory_global_dirty_log_sync(void);

/**
 * memory_region_transaction_begin: Start a transaction.
 *
 * During a transaction, changes will be accumulated and made visible
 * only when the transaction ends (is committed).
 */
void memory_region_transaction_begin(void);

/**
 * memory_region_transaction_commit: Commit a transaction and make changes
 *                                   visible to the guest.
 */
void memory_region_transaction_commit(void);
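
/* Editor's sketch (illustrative, not part of the original header; compiled
 * out): batching two topology changes so the guest observes them atomically
 * and listeners are updated only once, at commit time.
 */
#if 0
static void demo_remap(MemoryRegion *bar, hwaddr new_addr)
{
    memory_region_transaction_begin();
    memory_region_set_enabled(bar, true);
    memory_region_set_address(bar, new_addr);
    memory_region_transaction_commit();   /* both changes land together */
}
#endif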

/**
 * memory_listener_register: register callbacks to be called when memory
 *                           sections are mapped or unmapped into an address
 *                           space
 *
 * @listener: an object containing the callbacks to be called
 * @filter: if non-%NULL, only regions in this address space will be observed
 */
void memory_listener_register(MemoryListener *listener, AddressSpace *filter);

/**
 * memory_listener_unregister: undo the effect of memory_listener_register()
 *
 * @listener: an object containing the callbacks to be removed
 */
void memory_listener_unregister(MemoryListener *listener);

/**
 * memory_global_dirty_log_start: begin dirty logging for all regions
 */
void memory_global_dirty_log_start(void);

/**
 * memory_global_dirty_log_stop: end dirty logging for all regions
 */
void memory_global_dirty_log_stop(void);

void mtree_info(fprintf_function mon_printf, void *f, bool flatview);

/**
 * memory_region_dispatch_read: perform a read directly to the specified
 *                              MemoryRegion.
 *
 * @mr: #MemoryRegion to access
 * @addr: address within that region
 * @pval: pointer to uint64_t which the data is written to
 * @size: size of the access in bytes
 * @attrs: memory transaction attributes to use for the access
 */
MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size,
                                        MemTxAttrs attrs);
/**
 * memory_region_dispatch_write: perform a write directly to the specified
 *                               MemoryRegion.
 *
 * @mr: #MemoryRegion to access
 * @addr: address within that region
 * @data: data to write
 * @size: size of the access in bytes
 * @attrs: memory transaction attributes to use for the access
 */
MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size,
                                         MemTxAttrs attrs);

/**
 * address_space_init: initializes an address space
 *
 * @as: an uninitialized #AddressSpace
 * @root: a #MemoryRegion that routes addresses for the address space
 * @name: an address space name. The name is only used for debugging
 *        output.
 */
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);
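
/* Editor's sketch (illustrative, not part of the original header; compiled
 * out): giving a DMA-capable device its own address space rooted at the
 * container it is allowed to see. The name is debugging-only.
 */
#if 0
static void demo_create_dma_as(AddressSpace *as, MemoryRegion *root)
{
    address_space_init(as, root, "demo-dma");
}
#endif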

/**
 * address_space_init_shareable: return an address space for a memory region,
 *                               creating it if it does not already exist
 *
 * @root: a #MemoryRegion that routes addresses for the address space
 * @name: an address space name. The name is only used for debugging
 *        output.
 *
 * This function will return a pointer to an existing AddressSpace
 * which was initialized with the specified MemoryRegion, or it will
 * create and initialize one if it does not already exist. The ASes
 * are reference-counted, so the memory will be freed automatically
 * when the AddressSpace is destroyed via address_space_destroy.
 */
AddressSpace *address_space_init_shareable(MemoryRegion *root,
                                           const char *name);

/**
 * address_space_destroy: destroy an address space
 *
 * Releases all resources associated with an address space. After an address
 * space is destroyed, its root memory region (given by address_space_init())
 * may be destroyed as well.
 *
 * @as: address space to be destroyed
 */
void address_space_destroy(AddressSpace *as);

/**
 * address_space_rw: read from or write to an address space.
 *
 * Returns a MemTxResult indicating whether the operation succeeded
 * or failed (e.g. unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 * @is_write: indicates the transfer direction
 */
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
                             MemTxAttrs attrs, uint8_t *buf,
                             int len, bool is_write);

/**
 * address_space_write: write to address space.
 *
 * Returns a MemTxResult indicating whether the operation succeeded
 * or failed (e.g. unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
                                MemTxAttrs attrs,
                                const uint8_t *buf, int len);
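
/* Editor's sketch (illustrative, not part of the original header; compiled
 * out): a DMA store that checks the transaction result instead of assuming
 * the write landed. MEMTXATTRS_UNSPECIFIED comes from the included
 * "exec/memattrs.h".
 */
#if 0
static bool demo_dma_write(AddressSpace *as, hwaddr addr,
                           const uint8_t *buf, int len)
{
    MemTxResult r = address_space_write(as, addr, MEMTXATTRS_UNSPECIFIED,
                                        buf, len);
    /* r may be an OR of MEMTX_ERROR / MEMTX_DECODE_ERROR when the
     * operation was synthesized from multiple smaller accesses.
     */
    return r == MEMTX_OK;
}
#endif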
1457
1458 /* address_space_ld*: load from an address space
1459 * address_space_st*: store to an address space
1460 *
1461 * These functions perform a load or store of the byte, word,
1462 * longword or quad to the specified address within the AddressSpace.
1463 * The _le suffixed functions treat the data as little endian;
1464 * _be indicates big endian; no suffix indicates "same endianness
1465 * as guest CPU".
1466 *
1467 * The "guest CPU endianness" accessors are deprecated for use outside
1468 * target-* code; devices should be CPU-agnostic and use either the LE
1469 * or the BE accessors.
1470 *
1471 * @as #AddressSpace to be accessed
1472 * @addr: address within that address space
1473 * @val: data value, for stores
1474 * @attrs: memory transaction attributes
1475 * @result: location to write the success/failure of the transaction;
1476 * if NULL, this information is discarded
1477 */
1478 uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
1479 MemTxAttrs attrs, MemTxResult *result);
1480 uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
1481 MemTxAttrs attrs, MemTxResult *result);
1482 uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
1483 MemTxAttrs attrs, MemTxResult *result);
1484 uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
1485 MemTxAttrs attrs, MemTxResult *result);
1486 uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
1487 MemTxAttrs attrs, MemTxResult *result);
1488 uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
1489 MemTxAttrs attrs, MemTxResult *result);
1490 uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
1491 MemTxAttrs attrs, MemTxResult *result);
1492 void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
1493 MemTxAttrs attrs, MemTxResult *result);
1494 void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
1495 MemTxAttrs attrs, MemTxResult *result);
1496 void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
1497 MemTxAttrs attrs, MemTxResult *result);
1498 void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
1499 MemTxAttrs attrs, MemTxResult *result);
1500 void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
1501 MemTxAttrs attrs, MemTxResult *result);
1502 void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
1503 MemTxAttrs attrs, MemTxResult *result);
1504 void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
1505 MemTxAttrs attrs, MemTxResult *result);
1506
1507 uint32_t ldub_phys(AddressSpace *as, hwaddr addr);
1508 uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr);
1509 uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr);
1510 uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr);
1511 uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr);
1512 uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr);
1513 uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr);
1514 void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val);
1515 void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
1516 void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val);
1517 void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
1518 void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val);
1519 void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val);
1520 void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val);
1521
1522 struct MemoryRegionCache {
1523 hwaddr xlat;
1524 hwaddr len;
1525 AddressSpace *as;
1526 };
1527
1528 #define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .as = NULL })
1529
1530 /* address_space_cache_init: prepare for repeated access to a physical
1531 * memory region
1532 *
1533 * @cache: #MemoryRegionCache to be filled
1534 * @as: #AddressSpace to be accessed
1535 * @addr: address within that address space
1536 * @len: length of buffer
1537 * @is_write: indicates the transfer direction
1538 *
1539 * Only works with RAM. May map a subset of the requested range, in
1540 * which case the value returned is smaller than @len. On failure,
1541 * returns a negative errno value.
1542 *
1543 * Because it only works with RAM, this function can be used for
1544 * read-modify-write operations. In this case, @is_write should be %true.
1545 *
1546 * Note that addresses passed to the address_space_*_cached functions
1547 * are relative to @addr.
1548 */
1549 int64_t address_space_cache_init(MemoryRegionCache *cache,
1550 AddressSpace *as,
1551 hwaddr addr,
1552 hwaddr len,
1553 bool is_write);
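
/*
 * A typical lifecycle, as a sketch (@vq_addr, @vq_len and @off are
 * hypothetical; error handling is abbreviated):
 *
 *     MemoryRegionCache cache = MEMORY_REGION_CACHE_INVALID;
 *     int64_t mapped = address_space_cache_init(&cache, as, vq_addr,
 *                                               vq_len, true);
 *     if (mapped >= vq_len) {
 *         stw_le_phys_cached(&cache, off, 0x1);
 *         address_space_cache_invalidate(&cache, off, sizeof(uint16_t));
 *         address_space_cache_destroy(&cache);
 *     }
 */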
1554
1555 /**
1556 * address_space_cache_invalidate: complete a write to a #MemoryRegionCache
1557 *
1558 * @cache: The #MemoryRegionCache to operate on.
1559 * @addr: The first physical address that was written, relative to the
1560 * address that was passed to address_space_cache_init().
1561 * @access_len: The number of bytes that were written starting at @addr.
1562 */
1563 void address_space_cache_invalidate(MemoryRegionCache *cache,
1564 hwaddr addr,
1565 hwaddr access_len);
1566
1567 /**
1568 * address_space_cache_destroy: free a #MemoryRegionCache
1569 *
1570 * @cache: The #MemoryRegionCache whose memory should be released.
1571 */
1572 void address_space_cache_destroy(MemoryRegionCache *cache);
1573
1574 /* address_space_ld*_cached: load from a cached #MemoryRegion
1575 * address_space_st*_cached: store into a cached #MemoryRegion
1576 *
1577 * These functions perform a load or store of the byte, word,
1578 * longword or quad at the specified address. The address is
1579 * relative to the base address that was passed to
1580 * address_space_cache_init, and must lie within the mapped range.
1581 *
1582 * The _le suffixed functions treat the data as little endian;
1583 * _be indicates big endian; no suffix indicates "same endianness
1584 * as guest CPU".
1585 *
1586 * The "guest CPU endianness" accessors are deprecated for use outside
1587 * target-* code; devices should be CPU-agnostic and use either the LE
1588 * or the BE accessors.
1589 *
1590 * @cache: previously initialized #MemoryRegionCache to be accessed
1591 * @addr: address relative to the base of the cached region
1592 * @val: data value, for stores
1593 * @attrs: memory transaction attributes
1594 * @result: location to write the success/failure of the transaction;
1595 * if NULL, this information is discarded
1596 */
1597 uint32_t address_space_ldub_cached(MemoryRegionCache *cache, hwaddr addr,
1598 MemTxAttrs attrs, MemTxResult *result);
1599 uint32_t address_space_lduw_le_cached(MemoryRegionCache *cache, hwaddr addr,
1600 MemTxAttrs attrs, MemTxResult *result);
1601 uint32_t address_space_lduw_be_cached(MemoryRegionCache *cache, hwaddr addr,
1602 MemTxAttrs attrs, MemTxResult *result);
1603 uint32_t address_space_ldl_le_cached(MemoryRegionCache *cache, hwaddr addr,
1604 MemTxAttrs attrs, MemTxResult *result);
1605 uint32_t address_space_ldl_be_cached(MemoryRegionCache *cache, hwaddr addr,
1606 MemTxAttrs attrs, MemTxResult *result);
1607 uint64_t address_space_ldq_le_cached(MemoryRegionCache *cache, hwaddr addr,
1608 MemTxAttrs attrs, MemTxResult *result);
1609 uint64_t address_space_ldq_be_cached(MemoryRegionCache *cache, hwaddr addr,
1610 MemTxAttrs attrs, MemTxResult *result);
1611 void address_space_stb_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
1612 MemTxAttrs attrs, MemTxResult *result);
1613 void address_space_stw_le_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
1614 MemTxAttrs attrs, MemTxResult *result);
1615 void address_space_stw_be_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
1616 MemTxAttrs attrs, MemTxResult *result);
1617 void address_space_stl_le_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
1618 MemTxAttrs attrs, MemTxResult *result);
1619 void address_space_stl_be_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
1620 MemTxAttrs attrs, MemTxResult *result);
1621 void address_space_stq_le_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val,
1622 MemTxAttrs attrs, MemTxResult *result);
1623 void address_space_stq_be_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val,
1624 MemTxAttrs attrs, MemTxResult *result);
1625
1626 uint32_t ldub_phys_cached(MemoryRegionCache *cache, hwaddr addr);
1627 uint32_t lduw_le_phys_cached(MemoryRegionCache *cache, hwaddr addr);
1628 uint32_t lduw_be_phys_cached(MemoryRegionCache *cache, hwaddr addr);
1629 uint32_t ldl_le_phys_cached(MemoryRegionCache *cache, hwaddr addr);
1630 uint32_t ldl_be_phys_cached(MemoryRegionCache *cache, hwaddr addr);
1631 uint64_t ldq_le_phys_cached(MemoryRegionCache *cache, hwaddr addr);
1632 uint64_t ldq_be_phys_cached(MemoryRegionCache *cache, hwaddr addr);
1633 void stb_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
1634 void stw_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
1635 void stw_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
1636 void stl_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
1637 void stl_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
1638 void stq_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val);
1639 void stq_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val);
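
/*
 * As with the uncached variants, the *_phys_cached forms use
 * MEMTXATTRS_UNSPECIFIED and discard the MemTxResult. A checked load
 * looks like this (sketch; @off is relative to the cache's base):
 *
 *     MemTxResult res;
 *     uint16_t idx = address_space_lduw_le_cached(cache, off,
 *                                                 MEMTXATTRS_UNSPECIFIED,
 *                                                 &res);
 */
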
1640 /* address_space_get_iotlb_entry: translate an address into an IOTLB
1641 * entry. Should be called from an RCU critical section.
1642 */
1643 IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
1644 bool is_write);
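
/*
 * For example (sketch; the physical address is composed from the
 * translated page address and the page offset given by @addr_mask):
 *
 *     IOMMUTLBEntry iotlb;
 *
 *     rcu_read_lock();
 *     iotlb = address_space_get_iotlb_entry(as, addr, true);
 *     if (iotlb.perm & IOMMU_WO) {
 *         hwaddr phys = (iotlb.translated_addr & ~iotlb.addr_mask)
 *                       | (addr & iotlb.addr_mask);
 *         ...phys is writable within iotlb.target_as...
 *     }
 *     rcu_read_unlock();
 */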
1645
1646 /* address_space_translate: translate an address range within an address
1647 * space into a #MemoryRegion and an address range within that region.
1648 * Should be called from an RCU critical section, so that the last
1649 * reference to the returned region cannot disappear while it is in use.
1650 *
1651 * @as: #AddressSpace to be accessed
1652 * @addr: address within that address space
1653 * @xlat: pointer to the address within the returned #MemoryRegion,
1654 * relative to the region's start
1655 * @len: pointer to length; reduced to what fits in the returned region
1656 * @is_write: indicates the transfer direction
1657 */
1658 MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
1659 hwaddr *xlat, hwaddr *len,
1660 bool is_write);
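
/*
 * For example (sketch):
 *
 *     MemoryRegion *mr;
 *     hwaddr xlat, l = size;
 *
 *     rcu_read_lock();
 *     mr = address_space_translate(as, addr, &xlat, &l, false);
 *     if (memory_region_is_ram(mr)) {
 *         ...up to l bytes can be read directly at offset xlat in mr...
 *     }
 *     rcu_read_unlock();
 */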
1661
1662 /* address_space_access_valid: check for validity of accessing an address
1663 * space range
1664 *
1665 * Check whether memory is assigned to the given address space range, and
1666 * access is permitted by any IOMMU regions that are active for the address
1667 * space.
1668 *
1669 * For now, @addr and @len should be aligned to the page size. This
1670 * limitation will be lifted in the future.
1671 *
1672 * @as: #AddressSpace to be accessed
1673 * @addr: address within that address space
1674 * @len: length of the area to be checked
1675 * @is_write: indicates the transfer direction
1676 */
1677 bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write);
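
/*
 * For example, a DMA controller model might validate a descriptor's
 * buffer before starting a transfer (sketch; names are hypothetical):
 *
 *     if (!address_space_access_valid(as, buf_addr, buf_len, is_write)) {
 *         ...fail the transfer instead of touching the range...
 *     }
 */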
1678
1679 /* address_space_map: map a physical memory region into a host virtual address
1680 *
1681 * May map a subset of the requested range, given by and returned in @plen.
1682 * May return %NULL if resources needed to perform the mapping are exhausted.
1683 * Use only for reads OR writes - not for read-modify-write operations.
1684 * Use cpu_register_map_client() to know when retrying the map operation is
1685 * likely to succeed.
1686 *
1687 * @as: #AddressSpace to be accessed
1688 * @addr: address within that address space
1689 * @plen: pointer to length of buffer; updated on return
1690 * @is_write: indicates the transfer direction
1691 */
1692 void *address_space_map(AddressSpace *as, hwaddr addr,
1693 hwaddr *plen, bool is_write);
1694
1695 /* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
1696 *
1697 * Will also mark the memory as dirty if @is_write == %true. @access_len gives
1698 * the amount of memory that was actually read or written by the caller.
1699 *
1700 * @as: #AddressSpace used
1701 * @buffer: host pointer as returned by address_space_map()
1702 * @len: buffer length as returned by address_space_map()
1703 * @access_len: amount of data actually transferred
1704 * @is_write: indicates the transfer direction
1705 */
1706 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
1707 int is_write, hwaddr access_len);
1708
1709
1710 /* Internal functions, part of the implementation of address_space_read. */
1711 MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
1712 MemTxAttrs attrs, uint8_t *buf,
1713 int len, hwaddr addr1, hwaddr l,
1714 MemoryRegion *mr);
1715 MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
1716 MemTxAttrs attrs, uint8_t *buf, int len);
1717 void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);
1718
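/* Returns true if an access in the given direction can be performed
 * directly on host memory (e.g. with memcpy) rather than through MMIO
 * dispatch: writes require plain writable RAM; reads also allow ROM
 * devices in ROMD mode. RAM device regions are excluded in both
 * directions, since accesses to them must go through their ops.
 */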
1719 static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
1720 {
1721 if (is_write) {
1722 return memory_region_is_ram(mr) &&
1723 !mr->readonly && !memory_region_is_ram_device(mr);
1724 } else {
1725 return (memory_region_is_ram(mr) && !memory_region_is_ram_device(mr)) ||
1726 memory_region_is_romd(mr);
1727 }
1728 }
1729
1730 /**
1731 * address_space_read: read from an address space.
1732 *
1733 * Return a MemTxResult indicating whether the operation succeeded
1734 * or failed (e.g. unassigned memory, device rejected the transaction,
1735 * IOMMU fault).
1736 *
1737 * @as: #AddressSpace to be accessed
1738 * @addr: address within that address space
1739 * @attrs: memory transaction attributes
1740 * @buf: buffer with the data transferred; must hold at least @len bytes
1741 */
1742 static inline __attribute__((__always_inline__))
1743 MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
1744 uint8_t *buf, int len)
1745 {
1746 MemTxResult result = MEMTX_OK;
1747 hwaddr l, addr1;
1748 void *ptr;
1749 MemoryRegion *mr;
1750
1751 if (__builtin_constant_p(len)) {
1752 if (len) {
1753 rcu_read_lock();
1754 l = len;
1755 mr = address_space_translate(as, addr, &addr1, &l, false);
1756 if (len == l && memory_access_is_direct(mr, false)) {
1757 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
1758 memcpy(buf, ptr, len);
1759 } else {
1760 result = address_space_read_continue(as, addr, attrs, buf, len,
1761 addr1, l, mr);
1762 }
1763 rcu_read_unlock();
1764 }
1765 } else {
1766 result = address_space_read_full(as, addr, attrs, buf, len);
1767 }
1768 return result;
1769 }
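
/*
 * For example, reading a 16-byte descriptor (sketch; @desc_addr is
 * hypothetical):
 *
 *     uint8_t desc[16];
 *     MemTxResult res = address_space_read(as, desc_addr,
 *                                          MEMTXATTRS_UNSPECIFIED,
 *                                          desc, sizeof(desc));
 *     if (res != MEMTX_OK) {
 *         ...the read failed; desc may be only partially filled...
 *     }
 *
 * Because sizeof(desc) is a compile-time constant, the direct-RAM fast
 * path above can be fully inlined at the call site.
 */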
1770
1771 /**
1772 * address_space_read_cached: read from a cached RAM region
1773 *
1774 * @cache: Cached region to be addressed
1775 * @addr: address relative to the base of the RAM region
1776 * @buf: buffer with the data transferred
1777 * @len: length of the data transferred
1778 */
1779 static inline void
1780 address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
1781 void *buf, int len)
1782 {
1783 assert(addr < cache->len && len <= cache->len - addr);
1784 address_space_read(cache->as, cache->xlat + addr, MEMTXATTRS_UNSPECIFIED, buf, len);
1785 }
1786
1787 /**
1788 * address_space_write_cached: write to a cached RAM region
1789 *
1790 * @cache: Cached region to be addressed
1791 * @addr: address relative to the base of the RAM region
1792 * @buf: buffer with the data transferred
1793 * @len: length of the data transferred
1794 */
1795 static inline void
1796 address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
1797 void *buf, int len)
1798 {
1799 assert(addr < cache->len && len <= cache->len - addr);
1800 address_space_write(cache->as, cache->xlat + addr, MEMTXATTRS_UNSPECIFIED, buf, len);
1801 }
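
/*
 * For example, a read-modify-write of a 32-bit field at @off, within a
 * cache initialized with @is_write = %true (sketch; @flag is
 * hypothetical):
 *
 *     uint32_t v;
 *
 *     address_space_read_cached(cache, off, &v, sizeof(v));
 *     v |= flag;
 *     address_space_write_cached(cache, off, &v, sizeof(v));
 *     address_space_cache_invalidate(cache, off, sizeof(v));
 */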
1802
1803 #endif /* !CONFIG_USER_ONLY */
1804
1805 #endif /* MEMORY_H */