/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef MEMORY_H
#define MEMORY_H

#ifndef CONFIG_USER_ONLY

#include "exec/cpu-common.h"
#include "exec/hwaddr.h"
#include "exec/memattrs.h"
#include "exec/ramlist.h"
#include "qemu/queue.h"
#include "qemu/int128.h"
#include "qemu/notify.h"
#include "qom/object.h"
#include "qemu/rcu.h"

#define RAM_ADDR_INVALID (~(ram_addr_t)0)

#define MAX_PHYS_ADDR_SPACE_BITS 62
#define MAX_PHYS_ADDR            (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)

#define TYPE_MEMORY_REGION "qemu:memory-region"
#define MEMORY_REGION(obj) \
        OBJECT_CHECK(MemoryRegion, (obj), TYPE_MEMORY_REGION)

typedef struct MemoryRegionOps MemoryRegionOps;
typedef struct MemoryRegionMmio MemoryRegionMmio;

struct MemoryRegionMmio {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
};

typedef struct IOMMUTLBEntry IOMMUTLBEntry;

/* See address_space_translate: bit 0 is read, bit 1 is write. */
typedef enum {
    IOMMU_NONE = 0,
    IOMMU_RO   = 1,
    IOMMU_WO   = 2,
    IOMMU_RW   = 3,
} IOMMUAccessFlags;

#define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))

struct IOMMUTLBEntry {
    AddressSpace    *target_as;
    hwaddr           iova;
    hwaddr           translated_addr;
    hwaddr           addr_mask;  /* 0xfff = 4k translation */
    IOMMUAccessFlags perm;
};

/*
 * Bitmap for different IOMMUNotifier capabilities. Each notifier can
 * register with one or more IOMMU Notifier capability bits.
 */
typedef enum {
    IOMMU_NOTIFIER_NONE = 0,
    /* Notify cache invalidations */
    IOMMU_NOTIFIER_UNMAP = 0x1,
    /* Notify entry changes (newly created entries) */
    IOMMU_NOTIFIER_MAP = 0x2,
} IOMMUNotifierFlag;

#define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)

struct IOMMUNotifier;
typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
                            IOMMUTLBEntry *data);

struct IOMMUNotifier {
    IOMMUNotify notify;
    IOMMUNotifierFlag notifier_flags;
    /* Notify for address space range start <= addr <= end */
    hwaddr start;
    hwaddr end;
    QLIST_ENTRY(IOMMUNotifier) node;
};
typedef struct IOMMUNotifier IOMMUNotifier;

static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
                                       IOMMUNotifierFlag flags,
                                       hwaddr start, hwaddr end)
{
    n->notify = fn;
    n->notifier_flags = flags;
    n->start = start;
    n->end = end;
}

/* New-style MMIO accessors can indicate that the transaction failed.
 * A zero (MEMTX_OK) response means success; anything else is a failure
 * of some kind. The memory subsystem will bitwise-OR together results
 * if it is synthesizing an operation from multiple smaller accesses.
 */
#define MEMTX_OK 0
#define MEMTX_ERROR             (1U << 0) /* device returned an error */
#define MEMTX_DECODE_ERROR      (1U << 1) /* nothing at that address */
typedef uint32_t MemTxResult;

/*
 * Memory region callbacks
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     hwaddr addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  hwaddr addr,
                  uint64_t data,
                  unsigned size);

    MemTxResult (*read_with_attrs)(void *opaque,
                                   hwaddr addr,
                                   uint64_t *data,
                                   unsigned size,
                                   MemTxAttrs attrs);
    MemTxResult (*write_with_attrs)(void *opaque,
                                    hwaddr addr,
                                    uint64_t data,
                                    unsigned size,
                                    MemTxAttrs attrs);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specify bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported. Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
        /*
         * If present, and returns #false, the transaction is not accepted
         * by the device (and results in machine dependent behaviour such
         * as a machine check exception).
         */
        bool (*accepts)(void *opaque, hwaddr addr,
                        unsigned size, bool is_write);
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented. Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented. Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported. Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;

    /* If .read and .write are not present, old_mmio may be used for
     * backwards compatibility with old mmio registration
     */
    const MemoryRegionMmio old_mmio;
};
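
/*
 * For illustration, a minimal MemoryRegionOps for a hypothetical device
 * might look like the sketch below; the "mydev" names and MyDevState are
 * invented for this example and are not part of the API:
 *
 *   static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
 *   {
 *       MyDevState *s = opaque;        // hypothetical device state
 *       return s->regs[addr >> 2];     // 32-bit register file
 *   }
 *
 *   static void mydev_write(void *opaque, hwaddr addr,
 *                           uint64_t data, unsigned size)
 *   {
 *       MyDevState *s = opaque;
 *       s->regs[addr >> 2] = data;
 *   }
 *
 *   static const MemoryRegionOps mydev_ops = {
 *       .read = mydev_read,
 *       .write = mydev_write,
 *       .endianness = DEVICE_NATIVE_ENDIAN,
 *       .valid = {
 *           .min_access_size = 4,      // reject sub-word accesses
 *           .max_access_size = 4,
 *       },
 *   };
 */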

typedef struct MemoryRegionIOMMUOps MemoryRegionIOMMUOps;

struct MemoryRegionIOMMUOps {
    /*
     * Return a TLB entry that contains a given address. @flag should
     * be the access permission of this translation operation. It can
     * be set to IOMMU_NONE to indicate that no read/write permission
     * checks are needed, e.g. during region replay.
     */
    IOMMUTLBEntry (*translate)(MemoryRegion *iommu, hwaddr addr,
                               IOMMUAccessFlags flag);
    /* Returns minimum supported page size */
    uint64_t (*get_min_page_size)(MemoryRegion *iommu);
    /* Called when IOMMU Notifier flag changed */
    void (*notify_flag_changed)(MemoryRegion *iommu,
                                IOMMUNotifierFlag old_flags,
                                IOMMUNotifierFlag new_flags);
    /* Set this up to provide customized IOMMU replay function */
    void (*replay)(MemoryRegion *iommu, IOMMUNotifier *notifier);
};

typedef struct CoalescedMemoryRange CoalescedMemoryRange;
typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;

struct MemoryRegion {
    Object parent_obj;

    /* All fields are private - violators will be prosecuted */

    /* The following fields should fit in a cache line */
    bool romd_mode;
    bool ram;
    bool subpage;
    bool readonly; /* For RAM regions */
    bool rom_device;
    bool flush_coalesced_mmio;
    bool global_locking;
    uint8_t dirty_log_mask;
    RAMBlock *ram_block;
    Object *owner;
    const MemoryRegionIOMMUOps *iommu_ops;

    const MemoryRegionOps *ops;
    void *opaque;
    MemoryRegion *container;
    Int128 size;
    hwaddr addr;
    void (*destructor)(MemoryRegion *mr);
    uint64_t align;
    bool terminates;
    bool ram_device;
    bool enabled;
    bool warning_printed; /* For reservations */
    uint8_t vga_logging_count;
    MemoryRegion *alias;
    hwaddr alias_offset;
    int32_t priority;
    QTAILQ_HEAD(subregions, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
    const char *name;
    unsigned ioeventfd_nb;
    MemoryRegionIoeventfd *ioeventfds;
    QLIST_HEAD(, IOMMUNotifier) iommu_notify;
    IOMMUNotifierFlag iommu_notify_flags;
};

#define IOMMU_NOTIFIER_FOREACH(n, mr) \
    QLIST_FOREACH((n), &(mr)->iommu_notify, node)

/**
 * MemoryListener: callbacks structure for updates to the physical memory map
 *
 * Allows a component to adjust to changes in the guest-visible memory map.
 * Use with memory_listener_register() and memory_listener_unregister().
 */
struct MemoryListener {
    void (*begin)(MemoryListener *listener);
    void (*commit)(MemoryListener *listener);
    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
    void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
                      int old, int new);
    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
                     int old, int new);
    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
    void (*log_global_start)(MemoryListener *listener);
    void (*log_global_stop)(MemoryListener *listener);
    void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
                        bool match_data, uint64_t data, EventNotifier *e);
    void (*coalesced_mmio_add)(MemoryListener *listener,
                               MemoryRegionSection *section,
                               hwaddr addr, hwaddr len);
    void (*coalesced_mmio_del)(MemoryListener *listener,
                               MemoryRegionSection *section,
                               hwaddr addr, hwaddr len);
    /* Lower = earlier (during add), later (during del) */
    unsigned priority;
    AddressSpace *address_space;
    QTAILQ_ENTRY(MemoryListener) link;
    QTAILQ_ENTRY(MemoryListener) link_as;
};

/**
 * AddressSpace: describes a mapping of addresses to #MemoryRegion objects
 */
struct AddressSpace {
    /* All fields are private. */
    struct rcu_head rcu;
    char *name;
    MemoryRegion *root;
    int ref_count;
    bool malloced;

    /* Accessed via RCU. */
    struct FlatView *current_map;

    int ioeventfd_nb;
    struct MemoryRegionIoeventfd *ioeventfds;
    struct AddressSpaceDispatch *dispatch;
    struct AddressSpaceDispatch *next_dispatch;
    MemoryListener dispatch_listener;
    QTAILQ_HEAD(memory_listeners_as, MemoryListener) listeners;
    QTAILQ_ENTRY(AddressSpace) address_spaces_link;
};

/**
 * MemoryRegionSection: describes a fragment of a #MemoryRegion
 *
 * @mr: the region, or %NULL if empty
 * @address_space: the address space the region is mapped in
 * @offset_within_region: the beginning of the section, relative to @mr's start
 * @size: the size of the section; will not exceed @mr's boundaries
 * @offset_within_address_space: the address of the first byte of the section
 *     relative to the region's address space
 * @readonly: writes to this section are ignored
 */
struct MemoryRegionSection {
    MemoryRegion *mr;
    AddressSpace *address_space;
    hwaddr offset_within_region;
    Int128 size;
    hwaddr offset_within_address_space;
    bool readonly;
};

/**
 * memory_region_init: Initialize a memory region
 *
 * The region typically acts as a container for other memory regions. Use
 * memory_region_add_subregion() to add subregions.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region; any subregions beyond this size will be clipped
 */
void memory_region_init(MemoryRegion *mr,
                        struct Object *owner,
                        const char *name,
                        uint64_t size);

/**
 * memory_region_ref: Add 1 to a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug. MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function adds a reference to the owner.
 *
 * All MemoryRegions must have an owner if they can disappear, even if the
 * device they belong to operates exclusively under the BQL. This is because
 * the region could be returned at any time by memory_region_find, and this
 * is usually under guest control.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_ref(MemoryRegion *mr);

/**
 * memory_region_unref: Subtract 1 from a memory region's reference count
 *
 * Whenever memory regions are accessed outside the BQL, they need to be
 * preserved against hot-unplug. MemoryRegions actually do not have their
 * own reference count; they piggyback on a QOM object, their "owner".
 * This function removes a reference to the owner and possibly destroys it.
 *
 * @mr: the #MemoryRegion
 */
void memory_region_unref(MemoryRegion *mr);

/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(MemoryRegion *mr,
                           struct Object *owner,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size);
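
/*
 * As a sketch, assuming the hypothetical mydev_ops and MyDevState from the
 * MemoryRegionOps example above, a device realize function might do:
 *
 *   memory_region_init_io(&s->iomem, OBJECT(s), &mydev_ops, s,
 *                         "mydev-regs", 0x1000);
 *   // the region is then typically mapped with
 *   // memory_region_add_subregion() or a bus-specific helper
 */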

/**
 * memory_region_init_ram: Initialize RAM memory region. Accesses into the
 *                         region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_ram(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);
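
/*
 * A minimal usage sketch (the "board.ram" name is illustrative;
 * error_fatal and get_system_memory() come from other QEMU headers):
 *
 *   MemoryRegion *ram = g_new(MemoryRegion, 1);
 *   memory_region_init_ram(ram, NULL, "board.ram", 256 * 1024 * 1024,
 *                          &error_fatal);
 *   memory_region_add_subregion(get_system_memory(), 0, ram);
 */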

/**
 * memory_region_init_resizeable_ram: Initialize memory region with resizeable
 *                                    RAM. Accesses into the region will
 *                                    modify memory directly. Only an initial
 *                                    portion of this RAM is actually used.
 *                                    The used size can change across reboots.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: used size of the region.
 * @max_size: max size of the region.
 * @resized: callback to notify owner about used size change.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_resizeable_ram(MemoryRegion *mr,
                                       struct Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       uint64_t max_size,
                                       void (*resized)(const char*,
                                                       uint64_t length,
                                                       void *host),
                                       Error **errp);
#ifdef __linux__
/**
 * memory_region_init_ram_from_file: Initialize RAM memory region with a
 *                                   mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @share: %true if memory must be mmaped with the MAP_SHARED flag
 * @path: the path in which to allocate the RAM.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_ram_from_file(MemoryRegion *mr,
                                      struct Object *owner,
                                      const char *name,
                                      uint64_t size,
                                      bool share,
                                      const char *path,
                                      Error **errp);

/**
 * memory_region_init_ram_from_fd: Initialize RAM memory region with a
 *                                 mmap-ed backend.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @share: %true if memory must be mmaped with the MAP_SHARED flag
 * @fd: the fd to mmap.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_ram_from_fd(MemoryRegion *mr,
                                    struct Object *owner,
                                    const char *name,
                                    uint64_t size,
                                    bool share,
                                    int fd,
                                    Error **errp);
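
/*
 * For example (a sketch only; the memfd_create() call and error handling
 * are simplified, and the "guest-mem" name is invented for illustration):
 *
 *   int fd = memfd_create("guest-mem", 0);
 *   ftruncate(fd, size);
 *   memory_region_init_ram_from_fd(mr, owner, "pc.ram", size,
 *                                  true, fd, &error_fatal);
 */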
#endif

/**
 * memory_region_init_ram_ptr: Initialize RAM memory region from a
 *                             user-provided pointer. Accesses into the
 *                             region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                struct Object *owner,
                                const char *name,
                                uint64_t size,
                                void *ptr);

/**
 * memory_region_init_ram_device_ptr: Initialize RAM device memory region from
 *                                    a user-provided pointer.
 *
 * A RAM device represents a mapping to a physical device, such as to a PCI
 * MMIO BAR of a vfio-pci assigned device. The memory region may be mapped
 * into the VM address space and access to the region will modify memory
 * directly. However, the memory region should not be included in a memory
 * dump (device may not be enabled/mapped at the time of the dump), and
 * operations incompatible with manipulating MMIO should be avoided. Replaces
 * the skip_dump flag.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: the name of the region.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 */
void memory_region_init_ram_device_ptr(MemoryRegion *mr,
                                       struct Object *owner,
                                       const char *name,
                                       uint64_t size,
                                       void *ptr);

/**
 * memory_region_init_alias: Initialize a memory region that aliases all or a
 *                           part of another memory region.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @orig: the region to be referenced; @mr will be equivalent to
 *        @orig between @offset and @offset + @size - 1.
 * @offset: start of the section in @orig to be referenced.
 * @size: size of the region.
 */
void memory_region_init_alias(MemoryRegion *mr,
                              struct Object *owner,
                              const char *name,
                              MemoryRegion *orig,
                              hwaddr offset,
                              uint64_t size);
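
/*
 * For instance, to mirror the first megabyte of RAM at a second
 * guest-physical address (a sketch; "ram" is assumed to be an already
 * initialized RAM region and the addresses are illustrative):
 *
 *   MemoryRegion *alias = g_new(MemoryRegion, 1);
 *   memory_region_init_alias(alias, NULL, "ram-low-alias", ram,
 *                            0, 0x100000);
 *   memory_region_add_subregion(get_system_memory(), 0xe0000000, alias);
 */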

/**
 * memory_region_init_rom: Initialize a ROM memory region.
 *
 * This has the same effect as calling memory_region_init_ram()
 * and then marking the resulting region read-only with
 * memory_region_set_readonly().
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom(MemoryRegion *mr,
                            struct Object *owner,
                            const char *name,
                            uint64_t size,
                            Error **errp);

/**
 * memory_region_init_rom_device: Initialize a ROM memory region. Writes are
 *                                handled via callbacks.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @owner: the object that tracks the region's reference count
 * @ops: callbacks for write access handling (must not be NULL).
 * @opaque: passed to the callbacks in @ops.
 * @name: Region name, becomes part of RAMBlock name used in migration stream;
 *        must be unique within any device
 * @size: size of the region.
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_init_rom_device(MemoryRegion *mr,
                                   struct Object *owner,
                                   const MemoryRegionOps *ops,
                                   void *opaque,
                                   const char *name,
                                   uint64_t size,
                                   Error **errp);

/**
 * memory_region_init_reservation: Initialize a memory region that reserves
 *                                 I/O space.
 *
 * A reservation region primarily serves debugging purposes. It claims I/O
 * space that is not supposed to be handled by QEMU itself. Any access via
 * the memory API will cause an abort().
 * This function is deprecated. Use memory_region_init_io() with NULL
 * callbacks instead.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
static inline void memory_region_init_reservation(MemoryRegion *mr,
                                                  Object *owner,
                                                  const char *name,
                                                  uint64_t size)
{
    memory_region_init_io(mr, owner, NULL, mr, name, size);
}

/**
 * memory_region_init_iommu: Initialize a memory region that translates
 *                           addresses
 *
 * An IOMMU region translates addresses and forwards accesses to a target
 * memory region.
 *
 * @mr: the #MemoryRegion to be initialized
 * @owner: the object that tracks the region's reference count
 * @ops: a structure containing the callbacks that translate addresses
 *       into the target region
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_iommu(MemoryRegion *mr,
                              struct Object *owner,
                              const MemoryRegionIOMMUOps *ops,
                              const char *name,
                              uint64_t size);

/**
 * memory_region_owner: get a memory region's owner.
 *
 * @mr: the memory region being queried.
 */
struct Object *memory_region_owner(MemoryRegion *mr);

/**
 * memory_region_size: get a memory region's size.
 *
 * @mr: the memory region being queried.
 */
uint64_t memory_region_size(MemoryRegion *mr);

/**
 * memory_region_is_ram: check whether a memory region is random access
 *
 * Returns %true if a memory region is random access.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_ram(MemoryRegion *mr)
{
    return mr->ram;
}

/**
 * memory_region_is_ram_device: check whether a memory region is a ram device
 *
 * Returns %true if a memory region is a device backed ram region
 *
 * @mr: the memory region being queried
 */
bool memory_region_is_ram_device(MemoryRegion *mr);

/**
 * memory_region_is_romd: check whether a memory region is in ROMD mode
 *
 * Returns %true if a memory region is a ROM device and currently set to allow
 * direct reads.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_romd(MemoryRegion *mr)
{
    return mr->rom_device && mr->romd_mode;
}

/**
 * memory_region_is_iommu: check whether a memory region is an iommu
 *
 * Returns %true if a memory region is an iommu.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_iommu(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_is_iommu(mr->alias);
    }
    return mr->iommu_ops;
}

/**
 * memory_region_iommu_get_min_page_size: get minimum supported page size
 *                                        for an iommu
 *
 * Returns minimum supported page size for an iommu.
 *
 * @mr: the memory region being queried
 */
uint64_t memory_region_iommu_get_min_page_size(MemoryRegion *mr);

/**
 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
 *
 * The notification type will be decided by entry.perm bits:
 *
 * - For UNMAP (cache invalidation) notifies: set entry.perm to IOMMU_NONE.
 * - For MAP (newly added entry) notifies: set entry.perm to the
 *   permission of the page (which is definitely !IOMMU_NONE).
 *
 * Note: for any IOMMU implementation, an in-place mapping change
 * should be notified with an UNMAP followed by a MAP.
 *
 * @mr: the memory region that was changed
 * @entry: the new entry in the IOMMU translation table. The entry
 *         replaces all old entries for the same virtual I/O address range.
 *         Deleted entries have .@perm == 0.
 */
void memory_region_notify_iommu(MemoryRegion *mr,
                                IOMMUTLBEntry entry);

/**
 * memory_region_notify_one: notify a change in an IOMMU translation
 *                           entry to a single notifier
 *
 * This works just like memory_region_notify_iommu(), but it only
 * notifies a specific notifier, not all of them.
 *
 * @notifier: the notifier to be notified
 * @entry: the new entry in the IOMMU translation table. The entry
 *         replaces all old entries for the same virtual I/O address range.
 *         Deleted entries have .@perm == 0.
 */
void memory_region_notify_one(IOMMUNotifier *notifier,
                              IOMMUTLBEntry *entry);

/**
 * memory_region_register_iommu_notifier: register a notifier for changes to
 *                                        IOMMU translation entries.
 *
 * @mr: the memory region to observe
 * @n: the IOMMUNotifier to be added; the notify callback receives a
 *     pointer to an #IOMMUTLBEntry as the opaque value; the pointer
 *     ceases to be valid on exit from the notifier.
 */
void memory_region_register_iommu_notifier(MemoryRegion *mr,
                                           IOMMUNotifier *n);
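
/*
 * A registration sketch; the mydev_iommu_unmap callback name is invented
 * for illustration:
 *
 *   static void mydev_iommu_unmap(IOMMUNotifier *n, IOMMUTLBEntry *entry)
 *   {
 *       // react to the invalidated range [entry->iova,
 *       //                                 entry->iova + entry->addr_mask]
 *   }
 *
 *   iommu_notifier_init(&n, mydev_iommu_unmap, IOMMU_NOTIFIER_UNMAP,
 *                       0, HWADDR_MAX);
 *   memory_region_register_iommu_notifier(iommu_mr, &n);
 */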

/**
 * memory_region_iommu_replay: replay existing IOMMU translations to
 *                             a notifier with the minimum page granularity
 *                             returned by mr->iommu_ops->get_min_page_size().
 *
 * @mr: the memory region to observe
 * @n: the notifier to which to replay iommu mappings
 */
void memory_region_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n);

/**
 * memory_region_iommu_replay_all: replay existing IOMMU translations
 *                                 to all the notifiers registered.
 *
 * @mr: the memory region to observe
 */
void memory_region_iommu_replay_all(MemoryRegion *mr);

/**
 * memory_region_unregister_iommu_notifier: unregister a notifier for
 *                                          changes to IOMMU translation
 *                                          entries.
 *
 * @mr: the memory region which was observed and for which notify_stopped()
 *      needs to be called
 * @n: the notifier to be removed.
 */
void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
                                             IOMMUNotifier *n);

/**
 * memory_region_name: get a memory region's name
 *
 * Returns the string that was used to initialize the memory region.
 *
 * @mr: the memory region being queried
 */
const char *memory_region_name(const MemoryRegion *mr);

/**
 * memory_region_is_logging: return whether a memory region is logging writes
 *
 * Returns %true if the memory region is logging writes for the given client
 *
 * @mr: the memory region being queried
 * @client: the client being queried
 */
bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);

/**
 * memory_region_get_dirty_log_mask: return the clients for which a
 * memory region is logging writes.
 *
 * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
 * are the bit indices.
 *
 * @mr: the memory region being queried
 */
uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);

/**
 * memory_region_is_rom: check whether a memory region is ROM
 *
 * Returns %true if a memory region is read-only memory.
 *
 * @mr: the memory region being queried
 */
static inline bool memory_region_is_rom(MemoryRegion *mr)
{
    return mr->ram && mr->readonly;
}

/**
 * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
 *
 * Returns a file descriptor backing a file-based RAM memory region,
 * or -1 if the region is not a file-based RAM memory region.
 *
 * @mr: the RAM or alias memory region being queried.
 */
int memory_region_get_fd(MemoryRegion *mr);

/**
 * memory_region_set_fd: Mark a RAM memory region as backed by a
 *                       file descriptor.
 *
 * This function is typically used after memory_region_init_ram_ptr().
 *
 * @mr: the memory region being updated.
 * @fd: the file descriptor that backs @mr.
 */
void memory_region_set_fd(MemoryRegion *mr, int fd);

/**
 * memory_region_from_host: Convert a pointer into a RAM memory region
 *                          and an offset within it.
 *
 * Given a host pointer inside a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()), return
 * the MemoryRegion and the offset within it.
 *
 * Use with care; by the time this function returns, the returned pointer is
 * not protected by RCU anymore. If the caller is not within an RCU critical
 * section and does not hold the iothread lock, it must have other means of
 * protecting the pointer, such as a reference to the region that includes
 * the incoming ram_addr_t.
 *
 * @ptr: the host pointer to be converted.
 * @offset: set to the offset of @ptr within the returned region.
 */
MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);

/**
 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
 *
 * Returns a host pointer to a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()).
 *
 * Use with care; by the time this function returns, the returned pointer is
 * not protected by RCU anymore. If the caller is not within an RCU critical
 * section and does not hold the iothread lock, it must have other means of
 * protecting the pointer, such as a reference to the region that includes
 * the incoming ram_addr_t.
 *
 * @mr: the memory region being queried.
 */
void *memory_region_get_ram_ptr(MemoryRegion *mr);

/* memory_region_ram_resize: Resize a RAM region.
 *
 * Only legal before the guest might have detected the memory size: e.g. on
 * incoming migration, or right after reset.
 *
 * @mr: a memory region created with @memory_region_init_resizeable_ram.
 * @newsize: the new size of the region
 * @errp: pointer to Error*, to store an error if it happens.
 */
void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
                              Error **errp);

/**
 * memory_region_set_log: Turn dirty logging on or off for a region.
 *
 * Turns dirty logging on or off for a specified client (display, migration).
 * Only meaningful for RAM regions.
 *
 * @mr: the memory region being updated.
 * @log: whether dirty logging is to be enabled or disabled.
 * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
 */
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);

/**
 * memory_region_get_dirty: Check whether a range of bytes is dirty
 *                          for a specified client.
 *
 * Checks whether a range of bytes has been written to since the last
 * call to memory_region_reset_dirty() with the same @client. Dirty logging
 * must be enabled.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size, unsigned client);

/**
 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
 *
 * Marks a range of bytes as dirty, after it has been dirtied outside
 * guest code.
 *
 * @mr: the memory region being dirtied.
 * @addr: the address (relative to the start of the region) being dirtied.
 * @size: size of the range being dirtied.
 */
void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
                             hwaddr size);

/**
 * memory_region_test_and_clear_dirty: Check whether a range of bytes is dirty
 *                                     for a specified client, and clear it.
 *
 * Checks whether a range of bytes has been written to since the last
 * call to memory_region_reset_dirty() with the same @client. Dirty logging
 * must be enabled.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
                                        hwaddr size, unsigned client);

/**
 * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
 *                                         bitmap and clear it.
 *
 * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
 * returns the snapshot. The snapshot can then be used to query dirty
 * status, using memory_region_snapshot_get_dirty. Unlike
 * memory_region_test_and_clear_dirty this allows the same page to be
 * queried multiple times, which is especially useful for display updates
 * where the scanlines often are not page aligned.
 *
 * The dirty bitmap region which gets copied into the snapshot (and
 * cleared afterwards) can be larger than requested. The boundaries
 * are rounded up/down so complete bitmap longs (covering 64 pages on
 * 64bit hosts) can be copied over into the bitmap snapshot. This
 * isn't a problem for display updates as the extra pages are outside
 * the visible area, and in case the visible area changes a full
 * display redraw is due anyway. Should other use cases for this
 * function emerge we might have to revisit this implementation
 * detail.
 *
 * Use g_free to release DirtyBitmapSnapshot.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA.
 */
DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
                                                            hwaddr addr,
                                                            hwaddr size,
                                                            unsigned client);
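
/*
 * A display-update sketch (the vram, vram_size, stride and height names
 * are illustrative):
 *
 *   DirtyBitmapSnapshot *snap;
 *   snap = memory_region_snapshot_and_clear_dirty(vram, 0, vram_size,
 *                                                 DIRTY_MEMORY_VGA);
 *   for (y = 0; y < height; y++) {
 *       if (memory_region_snapshot_get_dirty(vram, snap,
 *                                            y * stride, stride)) {
 *           // redraw scanline y
 *       }
 *   }
 *   g_free(snap);
 */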

/**
 * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty
 *                                   in the specified dirty bitmap snapshot.
 *
 * @mr: the memory region being queried.
 * @snap: the dirty bitmap snapshot
 * @addr: the address (relative to the start of the region) being queried.
 * @size: the size of the range being queried.
 */
bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
                                      DirtyBitmapSnapshot *snap,
                                      hwaddr addr, hwaddr size);

/**
 * memory_region_sync_dirty_bitmap: Synchronize a region's dirty bitmap with
 *                                  any external TLBs (e.g. kvm)
 *
 * Flushes dirty information from accelerators such as kvm and vhost-net
 * and makes it available to users of the memory API.
 *
 * @mr: the region being flushed.
 */
void memory_region_sync_dirty_bitmap(MemoryRegion *mr);

/**
 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
 *                            client.
 *
 * Marks a range of pages as no longer dirty.
 *
 * @mr: the region being updated.
 * @addr: the start of the subrange being cleaned.
 * @size: the size of the subrange being cleaned.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
                               hwaddr size, unsigned client);

/**
 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
 *
 * Allows a memory region to be marked as read-only (turning it into a ROM).
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @readonly: whether the region is to be ROM or RAM.
 */
void memory_region_set_readonly(MemoryRegion *mr, bool readonly);

/**
 * memory_region_rom_device_set_romd: enable/disable ROMD mode
 *
 * Allows a ROM device (initialized with memory_region_init_rom_device()) to
 * be set to ROMD mode (default) or MMIO mode. When it is in ROMD mode, the
 * device is mapped to guest memory and satisfies read access directly.
 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
 * Writes are always handled by the #MemoryRegion.write function.
 *
 * @mr: the memory region to be updated
 * @romd_mode: %true to put the region into ROMD mode
 */
void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);

/**
 * memory_region_set_coalescing: Enable memory coalescing for the region.
 *
 * Enables writes to a region to be queued for later processing. MMIO ->write
 * callbacks may be delayed until a non-coalesced MMIO is issued.
 * Only useful for IO regions. Roughly similar to write-combining hardware.
 *
 * @mr: the memory region to be write coalesced
 */
void memory_region_set_coalescing(MemoryRegion *mr);

/**
 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
 *                               a region.
 *
 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
 * Multiple calls can be issued to coalesce disjoint ranges.
 *
 * @mr: the memory region to be updated.
 * @offset: the start of the range within the region to be coalesced.
 * @size: the size of the subrange to be coalesced.
 */
void memory_region_add_coalescing(MemoryRegion *mr,
                                  hwaddr offset,
                                  uint64_t size);

/**
 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
 *
 * Disables any coalescing caused by memory_region_set_coalescing() or
 * memory_region_add_coalescing(). Roughly equivalent to uncacheable memory
 * hardware.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_coalescing(MemoryRegion *mr);

/**
 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
 *                                    accesses.
 *
 * Ensure that pending coalesced MMIO requests are flushed before the memory
 * region is accessed. This property is automatically enabled for all regions
 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
 *
 * @mr: the memory region to be updated.
 */
void memory_region_set_flush_coalesced(MemoryRegion *mr);

/**
 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
 *                                      accesses.
 *
 * Clear the automatic coalesced MMIO flushing enabled via
 * memory_region_set_flush_coalesced. Note that this service has no effect on
 * memory regions that have MMIO coalescing enabled for themselves. For them,
 * automatic flushing will stop once coalescing is disabled.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_flush_coalesced(MemoryRegion *mr);

/**
 * memory_region_set_global_locking: Declares that access processing requires
 *                                   QEMU's global lock.
 *
 * When this is invoked, accesses to the memory region will be processed while
 * holding the global lock of QEMU. This is the default behavior of memory
 * regions.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_set_global_locking(MemoryRegion *mr);

/**
 * memory_region_clear_global_locking: Declares that access processing does
 *                                     not depend on the QEMU global lock.
 *
 * By clearing this property, accesses to the memory region will be processed
 * outside of QEMU's global lock (unless the lock is held when issuing the
 * access request). In this case, the device model implementing the access
 * handlers is responsible for synchronization of concurrency.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_global_locking(MemoryRegion *mr);

/**
 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
 *                            is written to a location.
 *
 * Marks a word in an IO region (initialized with memory_region_init_io())
 * as a trigger for an eventfd event. The I/O callback will not be called.
 * The caller must be prepared to handle failure (that is, take the required
 * action if the callback _is_ called).
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: the eventfd to be triggered when @addr, @size, and @data all match.
 */
void memory_region_add_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);
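
/*
 * For example, a virtio-style doorbell might be wired up roughly like this
 * (a sketch; QUEUE_NOTIFY_OFFSET and queue_index are placeholders, and the
 * notifier is assumed to be initialized elsewhere):
 *
 *   memory_region_add_eventfd(&s->iomem, QUEUE_NOTIFY_OFFSET, 2,
 *                             true, queue_index, &s->notifier);
 */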

/**
 * memory_region_del_eventfd: Cancel an eventfd.
 *
 * Cancels an eventfd trigger requested by a previous
 * memory_region_add_eventfd() call.
 *
 * @mr: the memory region being updated.
 * @addr: the address within @mr that is to be monitored
 * @size: the size of the access to trigger the eventfd
 * @match_data: whether to match against @data, instead of just @addr
 * @data: the data to match against the guest write
 * @e: the eventfd to be triggered when @addr, @size, and @data all match.
 */
void memory_region_del_eventfd(MemoryRegion *mr,
                               hwaddr addr,
                               unsigned size,
                               bool match_data,
                               uint64_t data,
                               EventNotifier *e);

/**
 * memory_region_add_subregion: Add a subregion to a container.
 *
 * Adds a subregion at @offset. The subregion may not overlap with other
 * subregions (except for those explicitly marked as overlapping). A region
 * may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 */
void memory_region_add_subregion(MemoryRegion *mr,
                                 hwaddr offset,
                                 MemoryRegion *subregion);
/**
 * memory_region_add_subregion_overlap: Add a subregion to a container
 *                                      with overlap.
 *
 * Adds a subregion at @offset. The subregion may overlap with other
 * subregions. Conflicts are resolved by having a higher @priority hide a
 * lower @priority. Subregions without priority are taken as @priority 0.
 * A region may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 * @priority: used for resolving overlaps; highest priority wins.
 */
void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         hwaddr offset,
                                         MemoryRegion *subregion,
                                         int priority);
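
/*
 * For instance, a ROM that shadows part of system RAM can be layered on
 * top of it with a higher priority (a sketch; "sysmem", "ram" and "bios"
 * are assumed to be initialized regions and the addresses illustrative):
 *
 *   memory_region_add_subregion(sysmem, 0, ram);
 *   memory_region_add_subregion_overlap(sysmem, 0xfffc0000, bios, 1);
 */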

/**
 * memory_region_get_ram_addr: Get the ram address associated with a memory
 *                             region
 */
ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);

uint64_t memory_region_get_alignment(const MemoryRegion *mr);
/**
 * memory_region_del_subregion: Remove a subregion.
 *
 * Removes a subregion from its container.
 *
 * @mr: the container to be updated.
 * @subregion: the region being removed; must be a current subregion of @mr.
 */
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion);

/*
 * memory_region_set_enabled: dynamically enable or disable a region
 *
 * Enables or disables a memory region. A disabled memory region
 * ignores all accesses to itself and its subregions. It does not
 * obscure sibling subregions with lower priority - it simply behaves as
 * if it was removed from the hierarchy.
 *
 * Regions default to being enabled.
 *
 * @mr: the region to be updated
 * @enabled: whether to enable or disable the region
 */
void memory_region_set_enabled(MemoryRegion *mr, bool enabled);

/*
 * memory_region_set_address: dynamically update the address of a region
 *
 * Dynamically updates the address of a region, relative to its container.
 * May be used on regions that are currently part of a memory hierarchy.
 *
 * @mr: the region to be updated
 * @addr: new address, relative to container region
 */
void memory_region_set_address(MemoryRegion *mr, hwaddr addr);

/*
 * memory_region_set_size: dynamically update the size of a region.
 *
 * Dynamically updates the size of a region.
 *
 * @mr: the region to be updated
 * @size: used size of the region.
 */
void memory_region_set_size(MemoryRegion *mr, uint64_t size);

/*
 * memory_region_set_alias_offset: dynamically update a memory alias's offset
 *
 * Dynamically updates the offset into the target region that an alias points
 * to, as if the fourth argument to memory_region_init_alias() had changed.
 *
 * @mr: the #MemoryRegion to be updated; should be an alias.
 * @offset: the new offset into the target memory region
 */
void memory_region_set_alias_offset(MemoryRegion *mr,
                                    hwaddr offset);

/**
 * memory_region_present: checks if an address relative to a @container
 *                        translates into #MemoryRegion within @container
 *
 * Answer whether a #MemoryRegion within @container covers the address
 * @addr.
 *
 * @container: a #MemoryRegion within which @addr is a relative address
 * @addr: the area within @container to be searched
 */
bool memory_region_present(MemoryRegion *container, hwaddr addr);

/**
 * memory_region_is_mapped: returns true if #MemoryRegion is mapped
 *                          into any address space.
 *
 * @mr: a #MemoryRegion which should be checked if it's mapped
 */
bool memory_region_is_mapped(MemoryRegion *mr);

/**
 * memory_region_find: translate an address/size relative to a
 *                     MemoryRegion into a #MemoryRegionSection.
 *
 * Locates the first #MemoryRegion within @mr that overlaps the range
 * given by @addr and @size.
 *
 * Returns a #MemoryRegionSection that describes a contiguous overlap.
 * It will have the following characteristics:
 *    .@size = 0 iff no overlap was found
 *    .@mr is non-%NULL iff an overlap was found
 *
 * Remember that in the return value the @offset_within_region is
 * relative to the returned region (in the .@mr field), not to the
 * @mr argument.
 *
 * Similarly, the .@offset_within_address_space is relative to the
 * address space that contains both regions, the passed and the
 * returned one. However, in the special case where the @mr argument
 * has no container (and thus is the root of the address space), the
 * following will hold:
 *    .@offset_within_address_space >= @addr
 *    .@offset_within_address_space + .@size <= @addr + @size
 *
 * @mr: a MemoryRegion within which @addr is a relative address
 * @addr: start of the area within @as to be searched
 * @size: size of the area to be searched
 */
MemoryRegionSection memory_region_find(MemoryRegion *mr,
                                       hwaddr addr, uint64_t size);

/**
 * memory_global_dirty_log_sync: synchronize the dirty log for all memory
 *
 * Synchronizes the dirty page log for all address spaces.
 */
void memory_global_dirty_log_sync(void);

/**
 * memory_region_transaction_begin: Start a transaction.
 *
 * During a transaction, changes will be accumulated and made visible
 * only when the transaction ends (is committed).
 */
void memory_region_transaction_begin(void);

/**
 * memory_region_transaction_commit: Commit a transaction and make changes
 *                                   visible to the guest.
 */
void memory_region_transaction_commit(void);
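
/*
 * Batching several topology changes in a transaction avoids repeated
 * updates to the flattened memory view, e.g. (a sketch; "bank0" and
 * "bank1" are illustrative regions):
 *
 *   memory_region_transaction_begin();
 *   memory_region_set_enabled(bank0, false);
 *   memory_region_set_enabled(bank1, true);
 *   memory_region_transaction_commit();
 */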

/**
 * memory_listener_register: register callbacks to be called when memory
 *                           sections are mapped or unmapped into an address
 *                           space
 *
 * @listener: an object containing the callbacks to be called
 * @filter: if non-%NULL, only regions in this address space will be observed
 */
void memory_listener_register(MemoryListener *listener, AddressSpace *filter);
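
/*
 * A listener that only cares about new RAM sections might look like this
 * sketch (the "my_" names are illustrative; address_space_memory is the
 * system memory address space declared in "exec/address-spaces.h"):
 *
 *   static void my_region_add(MemoryListener *l, MemoryRegionSection *sec)
 *   {
 *       if (memory_region_is_ram(sec->mr)) {
 *           // e.g. set up a host mapping for the section
 *       }
 *   }
 *
 *   static MemoryListener my_listener = {
 *       .region_add = my_region_add,
 *       .priority = 10,
 *   };
 *
 *   memory_listener_register(&my_listener, &address_space_memory);
 */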

/**
 * memory_listener_unregister: undo the effect of memory_listener_register()
 *
 * @listener: an object containing the callbacks to be removed
 */
void memory_listener_unregister(MemoryListener *listener);

/**
 * memory_global_dirty_log_start: begin dirty logging for all regions
 */
void memory_global_dirty_log_start(void);

/**
 * memory_global_dirty_log_stop: end dirty logging for all regions
 */
void memory_global_dirty_log_stop(void);

void mtree_info(fprintf_function mon_printf, void *f, bool flatview);

/**
 * memory_region_dispatch_read: perform a read directly to the specified
 *                              MemoryRegion.
 *
 * @mr: #MemoryRegion to access
 * @addr: address within that region
 * @pval: pointer to uint64_t which the data is written to
 * @size: size of the access in bytes
 * @attrs: memory transaction attributes to use for the access
 */
MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size,
                                        MemTxAttrs attrs);
/**
 * memory_region_dispatch_write: perform a write directly to the specified
 *                               MemoryRegion.
 *
 * @mr: #MemoryRegion to access
 * @addr: address within that region
 * @data: data to write
 * @size: size of the access in bytes
 * @attrs: memory transaction attributes to use for the access
 */
MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size,
                                         MemTxAttrs attrs);

/**
 * address_space_init: initializes an address space
 *
 * @as: an uninitialized #AddressSpace
 * @root: a #MemoryRegion that routes addresses for the address space
 * @name: an address space name. The name is only used for debugging
 *        output.
 */
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);

/**
 * address_space_init_shareable: return an address space for a memory region,
 *                               creating it if it does not already exist
 *
 * @root: a #MemoryRegion that routes addresses for the address space
 * @name: an address space name. The name is only used for debugging
 *        output.
 *
 * This function will return a pointer to an existing AddressSpace
 * which was initialized with the specified MemoryRegion, or it will
 * create and initialize one if it does not already exist. The ASes
 * are reference-counted, so the memory will be freed automatically
 * when the AddressSpace is destroyed via address_space_destroy.
 */
AddressSpace *address_space_init_shareable(MemoryRegion *root,
                                           const char *name);

/**
 * address_space_destroy: destroy an address space
 *
 * Releases all resources associated with an address space. After an address
 * space is destroyed, its root memory region (given by address_space_init())
 * may be destroyed as well.
 *
 * @as: address space to be destroyed
 */
void address_space_destroy(AddressSpace *as);

/**
 * address_space_rw: read from or write to an address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: the number of bytes to transfer
 * @is_write: indicates the transfer direction
 */
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
                             MemTxAttrs attrs, uint8_t *buf,
                             int len, bool is_write);

/**
 * address_space_write: write to address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: the number of bytes to transfer
 */
MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
                                MemTxAttrs attrs,
                                const uint8_t *buf, int len);
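
/*
 * A typical device DMA read might look like this sketch (dma_addr is a
 * placeholder guest-physical address; MEMTXATTRS_UNSPECIFIED is defined
 * in "exec/memattrs.h"):
 *
 *   uint8_t buf[64];
 *   MemTxResult r = address_space_rw(&address_space_memory, dma_addr,
 *                                    MEMTXATTRS_UNSPECIFIED, buf,
 *                                    sizeof(buf), false);
 *   if (r != MEMTX_OK) {
 *       // report a DMA error to the guest
 *   }
 */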

/* address_space_ld*: load from an address space
 * address_space_st*: store to an address space
 *
 * These functions perform a load or store of the byte, word,
 * longword or quad to the specified address within the AddressSpace.
 * The _le suffixed functions treat the data as little endian;
 * _be indicates big endian; no suffix indicates "same endianness
 * as guest CPU".
 *
 * The "guest CPU endianness" accessors are deprecated for use outside
 * target-* code; devices should be CPU-agnostic and use either the LE
 * or the BE accessors.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @val: data value, for stores
 * @attrs: memory transaction attributes
 * @result: location to write the success/failure of the transaction;
 *          if NULL, this information is discarded
 */
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result);
uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result);
uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result);
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result);
void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result);
void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result);
void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result);
void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result);
void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result);
void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result);
1526
1527 uint32_t ldub_phys(AddressSpace *as, hwaddr addr);
1528 uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr);
1529 uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr);
1530 uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr);
1531 uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr);
1532 uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr);
1533 uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr);
1534 void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val);
1535 void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
1536 void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val);
1537 void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
1538 void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val);
1539 void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val);
1540 void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val);
1541
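/* A #MemoryRegionCache records the result of translating a range once,
 * so that repeated accesses to it can avoid the full translation.  Fill
 * it with address_space_cache_init() before use.
 */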
1542 struct MemoryRegionCache {
1543 hwaddr xlat;
1544 hwaddr len;
1545 AddressSpace *as;
1546 };
1547
1548 #define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .as = NULL })
1549
1550 /* address_space_cache_init: prepare for repeated access to a physical
1551 * memory region
1552 *
1553 * @cache: #MemoryRegionCache to be filled
1554 * @as: #AddressSpace to be accessed
1555 * @addr: address within that address space
1556 * @len: length of the range to cache
1557 * @is_write: indicates the transfer direction
1558 *
1559 * Will only work with RAM, and may map a subset of the requested range by
1560 * returning a value that is less than @len. On failure, return a negative
1561 * errno value.
1562 *
1563 * Because it only works with RAM, this function can be used for
1564 * read-modify-write operations. In this case, @is_write should be %true.
1565 *
1566 * Note that addresses passed to the address_space_*_cached functions
1567 * are relative to @addr.
1568 */
1569 int64_t address_space_cache_init(MemoryRegionCache *cache,
1570 AddressSpace *as,
1571 hwaddr addr,
1572 hwaddr len,
1573 bool is_write);
1574
1575 /**
1576 * address_space_cache_invalidate: complete a write to a #MemoryRegionCache
1577 *
1578 * @cache: The #MemoryRegionCache to operate on.
1579 * @addr: The first physical address that was written, relative to the
1580 * address that was passed to address_space_cache_init().
1581 * @access_len: The number of bytes that were written starting at @addr.
1582 */
1583 void address_space_cache_invalidate(MemoryRegionCache *cache,
1584 hwaddr addr,
1585 hwaddr access_len);
1586
1587 /**
1588 * address_space_cache_destroy: free a #MemoryRegionCache
1589 *
1590 * @cache: The #MemoryRegionCache whose memory should be released.
1591 */
1592 void address_space_cache_destroy(MemoryRegionCache *cache);
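
/*
 * A lifecycle sketch (illustrative; ring_addr and ring_len are
 * hypothetical guest-physical values):
 *
 *     MemoryRegionCache cache = MEMORY_REGION_CACHE_INVALID;
 *     int64_t n = address_space_cache_init(&cache, as, ring_addr,
 *                                          ring_len, true);
 *     if (n < 0 || n < ring_len) {
 *         ... not RAM, or only partly mapped: fall back to the
 *             uncached address_space_* accessors ...
 *     } else {
 *         ... use the *_cached accessors, then call
 *             address_space_cache_invalidate() for the bytes written ...
 *     }
 *     address_space_cache_destroy(&cache);
 */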
1593
1594 /* address_space_ld*_cached: load from a cached #MemoryRegion
1595 * address_space_st*_cached: store into a cached #MemoryRegion
1596 *
1597 * These functions perform a load or store of the byte, word,
1598 * longword or quad to the specified address. The address is
1599 * relative to the base that was passed to address_space_cache_init(),
1600 * and the access must lie entirely within the cached range.
1601 *
1602 * The _le suffixed functions treat the data as little endian;
1603 * _be indicates big endian; no suffix indicates "same endianness
1604 * as guest CPU".
1605 *
1606 * The "guest CPU endianness" accessors are deprecated for use outside
1607 * target-* code; devices should be CPU-agnostic and use either the LE
1608 * or the BE accessors.
1609 *
1610 * @cache: previously initialized #MemoryRegionCache to be accessed
1611 * @addr: address relative to the base of the cached region
1612 * @val: data value, for stores
1613 * @attrs: memory transaction attributes
1614 * @result: location to write the success/failure of the transaction;
1615 * if NULL, this information is discarded
1616 */
1617 uint32_t address_space_ldub_cached(MemoryRegionCache *cache, hwaddr addr,
1618 MemTxAttrs attrs, MemTxResult *result);
1619 uint32_t address_space_lduw_le_cached(MemoryRegionCache *cache, hwaddr addr,
1620 MemTxAttrs attrs, MemTxResult *result);
1621 uint32_t address_space_lduw_be_cached(MemoryRegionCache *cache, hwaddr addr,
1622 MemTxAttrs attrs, MemTxResult *result);
1623 uint32_t address_space_ldl_le_cached(MemoryRegionCache *cache, hwaddr addr,
1624 MemTxAttrs attrs, MemTxResult *result);
1625 uint32_t address_space_ldl_be_cached(MemoryRegionCache *cache, hwaddr addr,
1626 MemTxAttrs attrs, MemTxResult *result);
1627 uint64_t address_space_ldq_le_cached(MemoryRegionCache *cache, hwaddr addr,
1628 MemTxAttrs attrs, MemTxResult *result);
1629 uint64_t address_space_ldq_be_cached(MemoryRegionCache *cache, hwaddr addr,
1630 MemTxAttrs attrs, MemTxResult *result);
1631 void address_space_stb_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
1632 MemTxAttrs attrs, MemTxResult *result);
1633 void address_space_stw_le_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
1634 MemTxAttrs attrs, MemTxResult *result);
1635 void address_space_stw_be_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
1636 MemTxAttrs attrs, MemTxResult *result);
1637 void address_space_stl_le_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
1638 MemTxAttrs attrs, MemTxResult *result);
1639 void address_space_stl_be_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
1640 MemTxAttrs attrs, MemTxResult *result);
1641 void address_space_stq_le_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val,
1642 MemTxAttrs attrs, MemTxResult *result);
1643 void address_space_stq_be_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val,
1644 MemTxAttrs attrs, MemTxResult *result);
1645
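/* Convenience wrappers: as with the *_phys functions above, these use
 * MEMTXATTRS_UNSPECIFIED and discard the MemTxResult.
 */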
1646 uint32_t ldub_phys_cached(MemoryRegionCache *cache, hwaddr addr);
1647 uint32_t lduw_le_phys_cached(MemoryRegionCache *cache, hwaddr addr);
1648 uint32_t lduw_be_phys_cached(MemoryRegionCache *cache, hwaddr addr);
1649 uint32_t ldl_le_phys_cached(MemoryRegionCache *cache, hwaddr addr);
1650 uint32_t ldl_be_phys_cached(MemoryRegionCache *cache, hwaddr addr);
1651 uint64_t ldq_le_phys_cached(MemoryRegionCache *cache, hwaddr addr);
1652 uint64_t ldq_be_phys_cached(MemoryRegionCache *cache, hwaddr addr);
1653 void stb_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
1654 void stw_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
1655 void stw_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
1656 void stl_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
1657 void stl_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
1658 void stq_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val);
1659 void stq_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val);
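
/*
 * Usage sketch (illustrative; offsets are relative to the base passed
 * to address_space_cache_init(), and the 16-bit field at offset 2 is a
 * hypothetical example layout):
 *
 *     uint16_t idx = address_space_lduw_le_cached(&cache, 2,
 *                                                 MEMTXATTRS_UNSPECIFIED,
 *                                                 NULL);
 *     address_space_stw_le_cached(&cache, 2, idx + 1,
 *                                 MEMTXATTRS_UNSPECIFIED, NULL);
 *     address_space_cache_invalidate(&cache, 2, sizeof(uint16_t));
 */
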
1660 /* address_space_get_iotlb_entry: translate an address into an IOTLB
1661 * entry. Should be called from an RCU critical section.
1662 */
1663 IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
1664 bool is_write);
1665
1666 /* address_space_translate: translate an address range within an address
1667 * space into a MemoryRegion and an offset range within that region. Must
1668 * be called from an RCU critical section, so that the last reference to
1669 * the returned region cannot disappear before the caller is done with it.
1670 *
1671 * @as: #AddressSpace to be accessed
1672 * @addr: address within that address space
1673 * @xlat: pointer used to return the translated address within the
1674 * returned #MemoryRegion
1675 * @len: pointer to length; shortened on return if needed to fit the region
1676 * @is_write: indicates the transfer direction
1677 */
1678 MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
1679 hwaddr *xlat, hwaddr *len,
1680 bool is_write);
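
/*
 * Usage sketch (illustrative): resolve a guest address and test whether
 * the resulting range is plain RAM.  "as" and "addr" stand in for real
 * values; the RCU critical section is mandatory:
 *
 *     hwaddr xlat, l = 4;
 *     MemoryRegion *mr;
 *
 *     rcu_read_lock();
 *     mr = address_space_translate(as, addr, &xlat, &l, false);
 *     if (l == 4 && memory_region_is_ram(mr)) {
 *         ... the whole 4-byte range is RAM at offset xlat in mr ...
 *     }
 *     rcu_read_unlock();
 */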
1681
1682 /* address_space_access_valid: check for validity of accessing an address
1683 * space range
1684 *
1685 * Check whether memory is assigned to the given address space range, and
1686 * access is permitted by any IOMMU regions that are active for the address
1687 * space.
1688 *
1689 * For now, @addr and @len should be page-aligned. This limitation
1690 * will be lifted in the future.
1691 *
1692 * @as: #AddressSpace to be accessed
1693 * @addr: address within that address space
1694 * @len: length of the area to be checked
1695 * @is_write: indicates the transfer direction
1696 */
1697 bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write);
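
/*
 * Sketch (illustrative; dma_addr and dma_len are hypothetical and, per
 * the note above, page-aligned): probe before committing to a transfer:
 *
 *     if (!address_space_access_valid(as, dma_addr, dma_len, true)) {
 *         ... fail the request instead of attempting the write ...
 *     }
 */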
1698
1699 /* address_space_map: map a physical memory region into a host virtual address
1700 *
1701 * May map a subset of the requested range, given by and returned in @plen.
1702 * May return %NULL if resources needed to perform the mapping are exhausted.
1703 * Use only for reads OR writes - not for read-modify-write operations.
1704 * Use cpu_register_map_client() to know when retrying the map operation is
1705 * likely to succeed.
1706 *
1707 * @as: #AddressSpace to be accessed
1708 * @addr: address within that address space
1709 * @plen: pointer to length of the range to map; on return, the length mapped
1710 * @is_write: indicates the transfer direction
1711 */
1712 void *address_space_map(AddressSpace *as, hwaddr addr,
1713 hwaddr *plen, bool is_write);
1714
1715 /* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
1716 *
1717 * Will also mark the memory as dirty if @is_write == %true. @access_len gives
1718 * the amount of memory that was actually read or written by the caller.
1719 *
1720 * @as: #AddressSpace used
1721 * @buffer: host pointer as returned by address_space_map()
1722 * @len: buffer length as returned by address_space_map()
1723 * @is_write: indicates the transfer direction
1724 * @access_len: amount of data actually transferred
1725 */
1726 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
1727 int is_write, hwaddr access_len);
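
/*
 * The canonical pairing, sketched for illustration (size and addr are
 * placeholders):
 *
 *     hwaddr plen = size;
 *     void *p = address_space_map(as, addr, &plen, true);
 *     if (!p) {
 *         ... resources exhausted; use cpu_register_map_client() to
 *             learn when a retry may succeed ...
 *     } else {
 *         ... write up to plen bytes at p (plen may be less than size) ...
 *         address_space_unmap(as, p, plen, true, plen);
 *     }
 */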
1728
1729
1730 /* Internal functions, part of the implementation of address_space_read. */
1731 MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
1732 MemTxAttrs attrs, uint8_t *buf,
1733 int len, hwaddr addr1, hwaddr l,
1734 MemoryRegion *mr);
1735 MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
1736 MemTxAttrs attrs, uint8_t *buf, int len);
1737 void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);
1738
1739 static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
1740 {
1741 if (is_write) {
1742 return memory_region_is_ram(mr) &&
1743 !mr->readonly && !memory_region_is_ram_device(mr);
1744 } else {
1745 return (memory_region_is_ram(mr) && !memory_region_is_ram_device(mr)) ||
1746 memory_region_is_romd(mr);
1747 }
1748 }
1749
1750 /**
1751 * address_space_read: read from an address space.
1752 *
1753 * Return a MemTxResult indicating whether the operation succeeded
1754 * or failed (e.g. unassigned memory, device rejected the transaction,
1755 * IOMMU fault).
1756 *
1757 * @as: #AddressSpace to be accessed
1758 * @addr: address within that address space
1759 * @attrs: memory transaction attributes
1760 * @buf: buffer to receive the data read
 * @len: length of the data to be read, in bytes
1761 */
1762 static inline __attribute__((__always_inline__))
1763 MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
1764 uint8_t *buf, int len)
1765 {
1766 MemTxResult result = MEMTX_OK;
1767 hwaddr l, addr1;
1768 void *ptr;
1769 MemoryRegion *mr;
1770
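    /* For compile-time-constant lengths, inline a fast path: if the
     * whole range translates to directly accessible RAM, copy it here;
     * otherwise fall back to the out-of-line helpers.
     */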
1771 if (__builtin_constant_p(len)) {
1772 if (len) {
1773 rcu_read_lock();
1774 l = len;
1775 mr = address_space_translate(as, addr, &addr1, &l, false);
1776 if (len == l && memory_access_is_direct(mr, false)) {
1777 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
1778 memcpy(buf, ptr, len);
1779 } else {
1780 result = address_space_read_continue(as, addr, attrs, buf, len,
1781 addr1, l, mr);
1782 }
1783 rcu_read_unlock();
1784 }
1785 } else {
1786 result = address_space_read_full(as, addr, attrs, buf, len);
1787 }
1788 return result;
1789 }
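
/*
 * Usage sketch (illustrative; desc_addr is a placeholder).  Because
 * sizeof(desc) is a compile-time constant, this takes the inlined fast
 * path above whenever the range is plain RAM:
 *
 *     uint8_t desc[16];
 *     MemTxResult r = address_space_read(as, desc_addr,
 *                                        MEMTXATTRS_UNSPECIFIED,
 *                                        desc, sizeof(desc));
 */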
1790
1791 /**
1792 * address_space_read_cached: read from a cached RAM region
1793 *
1794 * @cache: Cached region to be addressed
1795 * @addr: address relative to the base of the RAM region
1796 * @buf: buffer with the data transferred
1797 * @len: length of the data transferred
1798 */
1799 static inline void
1800 address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
1801 void *buf, int len)
1802 {
1803 assert(addr < cache->len && len <= cache->len - addr);
1804 address_space_read(cache->as, cache->xlat + addr, MEMTXATTRS_UNSPECIFIED, buf, len);
1805 }
1806
1807 /**
1808 * address_space_write_cached: write to a cached RAM region
1809 *
1810 * @cache: Cached region to be addressed
1811 * @addr: address relative to the base of the RAM region
1812 * @buf: buffer with the data transferred
1813 * @len: length of the data transferred
1814 */
1815 static inline void
1816 address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
1817 void *buf, int len)
1818 {
1819 assert(addr < cache->len && len <= cache->len - addr);
1820 address_space_write(cache->as, cache->xlat + addr, MEMTXATTRS_UNSPECIFIED, buf, len);
1821 }
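
/*
 * Read-modify-write sketch on a cached region (illustrative; struct
 * my_desc is a hypothetical guest-visible structure, and the cache must
 * have been initialized with @is_write == true):
 *
 *     struct my_desc d;
 *     address_space_read_cached(&cache, offset, &d, sizeof(d));
 *     d.flags |= 1;
 *     address_space_write_cached(&cache, offset, &d, sizeof(d));
 *     address_space_cache_invalidate(&cache, offset, sizeof(d));
 */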
1822
1823 #endif /* !CONFIG_USER_ONLY */
1824
1825 #endif /* MEMORY_H */