1 /*
2 * Physical memory management API
3 *
4 * Copyright 2011 Red Hat, Inc. and/or its affiliates
5 *
6 * Authors:
7 * Avi Kivity <avi@redhat.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
11 *
12 */
13
14 #ifndef MEMORY_H
15 #define MEMORY_H
16
17 #ifndef CONFIG_USER_ONLY
18
19 #include "exec/cpu-common.h"
20 #ifndef CONFIG_USER_ONLY
21 #include "exec/hwaddr.h"
22 #endif
23 #include "exec/memattrs.h"
24 #include "exec/ramlist.h"
25 #include "qemu/queue.h"
26 #include "qemu/int128.h"
27 #include "qemu/notify.h"
28 #include "qom/object.h"
29 #include "qemu/rcu.h"
30
31 #define RAM_ADDR_INVALID (~(ram_addr_t)0)
32
33 #define MAX_PHYS_ADDR_SPACE_BITS 62
34 #define MAX_PHYS_ADDR (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)
35
36 #define TYPE_MEMORY_REGION "qemu:memory-region"
37 #define MEMORY_REGION(obj) \
38 OBJECT_CHECK(MemoryRegion, (obj), TYPE_MEMORY_REGION)
39
40 typedef struct MemoryRegionOps MemoryRegionOps;
41 typedef struct MemoryRegionMmio MemoryRegionMmio;
42
43 struct MemoryRegionMmio {
44 CPUReadMemoryFunc *read[3];
45 CPUWriteMemoryFunc *write[3];
46 };
47
48 typedef struct IOMMUTLBEntry IOMMUTLBEntry;
49
50 /* See address_space_translate: bit 0 is read, bit 1 is write. */
51 typedef enum {
52 IOMMU_NONE = 0,
53 IOMMU_RO = 1,
54 IOMMU_WO = 2,
55 IOMMU_RW = 3,
56 } IOMMUAccessFlags;
57
58 #define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))
59
60 struct IOMMUTLBEntry {
61 AddressSpace *target_as;
62 hwaddr iova;
63 hwaddr translated_addr;
64 hwaddr addr_mask; /* 0xfff = 4k translation */
65 IOMMUAccessFlags perm;
66 };
67
68 /*
69 * Bitmap for different IOMMUNotifier capabilities. Each notifier can
70 * register with one or multiple IOMMU Notifier capability bit(s).
71 */
72 typedef enum {
73 IOMMU_NOTIFIER_NONE = 0,
74 /* Notify cache invalidations */
75 IOMMU_NOTIFIER_UNMAP = 0x1,
76 /* Notify entry changes (newly created entries) */
77 IOMMU_NOTIFIER_MAP = 0x2,
78 } IOMMUNotifierFlag;
79
80 #define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)
81
82 struct IOMMUNotifier;
83 typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
84 IOMMUTLBEntry *data);
85
86 struct IOMMUNotifier {
87 IOMMUNotify notify;
88 IOMMUNotifierFlag notifier_flags;
89 /* Notify for address space range start <= addr <= end */
90 hwaddr start;
91 hwaddr end;
92 QLIST_ENTRY(IOMMUNotifier) node;
93 };
94 typedef struct IOMMUNotifier IOMMUNotifier;
95
96 static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
97 IOMMUNotifierFlag flags,
98 hwaddr start, hwaddr end)
99 {
100 n->notify = fn;
101 n->notifier_flags = flags;
102 n->start = start;
103 n->end = end;
104 }
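/* Illustrative sketch (not part of this header): a device model that wants
 * MAP and UNMAP events for a whole IOMMU region could register a notifier
 * like this.  "my_iommu_notify" and "iommu_mr" are hypothetical names used
 * only for the example.
 *
 *     static void my_iommu_notify(IOMMUNotifier *n, IOMMUTLBEntry *entry)
 *     {
 *         // entry->perm == IOMMU_NONE indicates an UNMAP (invalidation)
 *     }
 *
 *     IOMMUNotifier n;
 *     iommu_notifier_init(&n, my_iommu_notify, IOMMU_NOTIFIER_ALL,
 *                         0, ~(hwaddr)0);
 *     memory_region_register_iommu_notifier(iommu_mr, &n);
 */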
105
106 /* New-style MMIO accessors can indicate that the transaction failed.
107 * A zero (MEMTX_OK) response means success; anything else is a failure
108 * of some kind. The memory subsystem will bitwise-OR together results
109 * if it is synthesizing an operation from multiple smaller accesses.
110 */
111 #define MEMTX_OK 0
112 #define MEMTX_ERROR (1U << 0) /* device returned an error */
113 #define MEMTX_DECODE_ERROR (1U << 1) /* nothing at that address */
114 typedef uint32_t MemTxResult;
115
116 /*
117 * Memory region callbacks
118 */
119 struct MemoryRegionOps {
120 /* Read from the memory region. @addr is relative to @mr; @size is
121 * in bytes. */
122 uint64_t (*read)(void *opaque,
123 hwaddr addr,
124 unsigned size);
125 /* Write to the memory region. @addr is relative to @mr; @size is
126 * in bytes. */
127 void (*write)(void *opaque,
128 hwaddr addr,
129 uint64_t data,
130 unsigned size);
131
132 MemTxResult (*read_with_attrs)(void *opaque,
133 hwaddr addr,
134 uint64_t *data,
135 unsigned size,
136 MemTxAttrs attrs);
137 MemTxResult (*write_with_attrs)(void *opaque,
138 hwaddr addr,
139 uint64_t data,
140 unsigned size,
141 MemTxAttrs attrs);
142
143 enum device_endian endianness;
144 /* Guest-visible constraints: */
145 struct {
146 /* If nonzero, specify bounds on access sizes beyond which a machine
147 * check is thrown.
148 */
149 unsigned min_access_size;
150 unsigned max_access_size;
151 /* If true, unaligned accesses are supported. Otherwise unaligned
152 * accesses throw machine checks.
153 */
154 bool unaligned;
155 /*
156 * If present, and returns #false, the transaction is not accepted
157 * by the device (and results in machine dependent behaviour such
158 * as a machine check exception).
159 */
160 bool (*accepts)(void *opaque, hwaddr addr,
161 unsigned size, bool is_write);
162 } valid;
163 /* Internal implementation constraints: */
164 struct {
165 /* If nonzero, specifies the minimum size implemented. Smaller sizes
166 * will be rounded upwards and a partial result will be returned.
167 */
168 unsigned min_access_size;
169 /* If nonzero, specifies the maximum size implemented. Larger sizes
170 * will be done as a series of accesses with smaller sizes.
171 */
172 unsigned max_access_size;
173 /* If true, unaligned accesses are supported. Otherwise all accesses
174 * are converted to (possibly multiple) naturally aligned accesses.
175 */
176 bool unaligned;
177 } impl;
178
179 /* If .read and .write are not present, old_mmio may be used for
180 * backwards compatibility with old mmio registration
181 */
182 const MemoryRegionMmio old_mmio;
183 };
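/* Illustrative sketch (assumption, not from this header): a minimal
 * MemoryRegionOps for a bank of 32-bit device registers.  The "mydev_*"
 * names and MyDevState are hypothetical.
 *
 *     static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *         return s->regs[addr >> 2];
 *     }
 *
 *     static void mydev_write(void *opaque, hwaddr addr,
 *                             uint64_t data, unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *         s->regs[addr >> 2] = data;
 *     }
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *         .valid = {
 *             .min_access_size = 4,
 *             .max_access_size = 4,
 *         },
 *     };
 */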
184
185 typedef struct MemoryRegionIOMMUOps MemoryRegionIOMMUOps;
186
187 struct MemoryRegionIOMMUOps {
188 /* Return a TLB entry that contains a given address. */
189 IOMMUTLBEntry (*translate)(MemoryRegion *iommu, hwaddr addr, bool is_write);
190 /* Returns minimum supported page size */
191 uint64_t (*get_min_page_size)(MemoryRegion *iommu);
192 /* Called when IOMMU Notifier flag changed */
193 void (*notify_flag_changed)(MemoryRegion *iommu,
194 IOMMUNotifierFlag old_flags,
195 IOMMUNotifierFlag new_flags);
196 /* Set this up to provide customized IOMMU replay function */
197 void (*replay)(MemoryRegion *iommu, IOMMUNotifier *notifier);
198 };
199
200 typedef struct CoalescedMemoryRange CoalescedMemoryRange;
201 typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;
202
203 struct MemoryRegion {
204 Object parent_obj;
205
206 /* All fields are private - violators will be prosecuted */
207
208 /* The following fields should fit in a cache line */
209 bool romd_mode;
210 bool ram;
211 bool subpage;
212 bool readonly; /* For RAM regions */
213 bool rom_device;
214 bool flush_coalesced_mmio;
215 bool global_locking;
216 uint8_t dirty_log_mask;
217 RAMBlock *ram_block;
218 Object *owner;
219 const MemoryRegionIOMMUOps *iommu_ops;
220
221 const MemoryRegionOps *ops;
222 void *opaque;
223 MemoryRegion *container;
224 Int128 size;
225 hwaddr addr;
226 void (*destructor)(MemoryRegion *mr);
227 uint64_t align;
228 bool terminates;
229 bool ram_device;
230 bool enabled;
231 bool warning_printed; /* For reservations */
232 uint8_t vga_logging_count;
233 MemoryRegion *alias;
234 hwaddr alias_offset;
235 int32_t priority;
236 QTAILQ_HEAD(subregions, MemoryRegion) subregions;
237 QTAILQ_ENTRY(MemoryRegion) subregions_link;
238 QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
239 const char *name;
240 unsigned ioeventfd_nb;
241 MemoryRegionIoeventfd *ioeventfds;
242 QLIST_HEAD(, IOMMUNotifier) iommu_notify;
243 IOMMUNotifierFlag iommu_notify_flags;
244 };
245
246 #define IOMMU_NOTIFIER_FOREACH(n, mr) \
247 QLIST_FOREACH((n), &(mr)->iommu_notify, node)
248
249 /**
250 * MemoryListener: callbacks structure for updates to the physical memory map
251 *
252 * Allows a component to adjust to changes in the guest-visible memory map.
253 * Use with memory_listener_register() and memory_listener_unregister().
254 */
255 struct MemoryListener {
256 void (*begin)(MemoryListener *listener);
257 void (*commit)(MemoryListener *listener);
258 void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
259 void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
260 void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
261 void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
262 int old, int new);
263 void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
264 int old, int new);
265 void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
266 void (*log_global_start)(MemoryListener *listener);
267 void (*log_global_stop)(MemoryListener *listener);
268 void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
269 bool match_data, uint64_t data, EventNotifier *e);
270 void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
271 bool match_data, uint64_t data, EventNotifier *e);
272 void (*coalesced_mmio_add)(MemoryListener *listener, MemoryRegionSection *section,
273 hwaddr addr, hwaddr len);
274 void (*coalesced_mmio_del)(MemoryListener *listener, MemoryRegionSection *section,
275 hwaddr addr, hwaddr len);
276 /* Lower = earlier (during add), later (during del) */
277 unsigned priority;
278 AddressSpace *address_space;
279 QTAILQ_ENTRY(MemoryListener) link;
280 QTAILQ_ENTRY(MemoryListener) link_as;
281 };
282
283 /**
284 * AddressSpace: describes a mapping of addresses to #MemoryRegion objects
285 */
286 struct AddressSpace {
287 /* All fields are private. */
288 struct rcu_head rcu;
289 char *name;
290 MemoryRegion *root;
291 int ref_count;
292 bool malloced;
293
294 /* Accessed via RCU. */
295 struct FlatView *current_map;
296
297 int ioeventfd_nb;
298 struct MemoryRegionIoeventfd *ioeventfds;
299 struct AddressSpaceDispatch *dispatch;
300 struct AddressSpaceDispatch *next_dispatch;
301 MemoryListener dispatch_listener;
302 QTAILQ_HEAD(memory_listeners_as, MemoryListener) listeners;
303 QTAILQ_ENTRY(AddressSpace) address_spaces_link;
304 };
305
306 /**
307 * MemoryRegionSection: describes a fragment of a #MemoryRegion
308 *
309 * @mr: the region, or %NULL if empty
310 * @address_space: the address space the region is mapped in
311 * @offset_within_region: the beginning of the section, relative to @mr's start
312 * @size: the size of the section; will not exceed @mr's boundaries
313 * @offset_within_address_space: the address of the first byte of the section
314 * relative to the region's address space
315 * @readonly: writes to this section are ignored
316 */
317 struct MemoryRegionSection {
318 MemoryRegion *mr;
319 AddressSpace *address_space;
320 hwaddr offset_within_region;
321 Int128 size;
322 hwaddr offset_within_address_space;
323 bool readonly;
324 };
325
326 /**
327 * memory_region_init: Initialize a memory region
328 *
329 * The region typically acts as a container for other memory regions. Use
330 * memory_region_add_subregion() to add subregions.
331 *
332 * @mr: the #MemoryRegion to be initialized
333 * @owner: the object that tracks the region's reference count
334 * @name: used for debugging; not visible to the user or ABI
335 * @size: size of the region; any subregions beyond this size will be clipped
336 */
337 void memory_region_init(MemoryRegion *mr,
338 struct Object *owner,
339 const char *name,
340 uint64_t size);
341
342 /**
343 * memory_region_ref: Add 1 to a memory region's reference count
344 *
345 * Whenever memory regions are accessed outside the BQL, they need to be
346 * preserved against hot-unplug. MemoryRegions actually do not have their
347 * own reference count; they piggyback on a QOM object, their "owner".
348 * This function adds a reference to the owner.
349 *
350 * All MemoryRegions must have an owner if they can disappear, even if the
351 * device they belong to operates exclusively under the BQL. This is because
352 * the region could be returned at any time by memory_region_find, and this
353 * is usually under guest control.
354 *
355 * @mr: the #MemoryRegion
356 */
357 void memory_region_ref(MemoryRegion *mr);
358
359 /**
360  * memory_region_unref: Remove 1 from a memory region's reference count
361 *
362 * Whenever memory regions are accessed outside the BQL, they need to be
363 * preserved against hot-unplug. MemoryRegions actually do not have their
364 * own reference count; they piggyback on a QOM object, their "owner".
365 * This function removes a reference to the owner and possibly destroys it.
366 *
367 * @mr: the #MemoryRegion
368 */
369 void memory_region_unref(MemoryRegion *mr);
370
371 /**
372 * memory_region_init_io: Initialize an I/O memory region.
373 *
374 * Accesses into the region will cause the callbacks in @ops to be called.
375  * If @size is nonzero, subregions will be clipped to @size.
376 *
377 * @mr: the #MemoryRegion to be initialized.
378 * @owner: the object that tracks the region's reference count
379 * @ops: a structure containing read and write callbacks to be used when
380 * I/O is performed on the region.
381 * @opaque: passed to the read and write callbacks of the @ops structure.
382 * @name: used for debugging; not visible to the user or ABI
383 * @size: size of the region.
384 */
385 void memory_region_init_io(MemoryRegion *mr,
386 struct Object *owner,
387 const MemoryRegionOps *ops,
388 void *opaque,
389 const char *name,
390 uint64_t size);
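/* Illustrative sketch (assumption): creating a 4 KiB MMIO window from the
 * "mydev_ops" structure sketched above, typically from a device's realize
 * function.  get_system_memory() comes from "exec/address-spaces.h"; the
 * address 0xfe000000 is made up, and most devices would instead hand the
 * region to sysbus or PCI BAR code rather than map it themselves.
 *
 *     memory_region_init_io(&s->mmio, OBJECT(s), &mydev_ops, s,
 *                           "mydev-mmio", 0x1000);
 *     memory_region_add_subregion(get_system_memory(), 0xfe000000, &s->mmio);
 */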
391
392 /**
393 * memory_region_init_ram: Initialize RAM memory region. Accesses into the
394 * region will modify memory directly.
395 *
396 * @mr: the #MemoryRegion to be initialized.
397 * @owner: the object that tracks the region's reference count
398  * @name: Region name, becomes part of RAMBlock name used in migration stream;
399  *        must be unique within any device
400 * @size: size of the region.
401 * @errp: pointer to Error*, to store an error if it happens.
402 */
403 void memory_region_init_ram(MemoryRegion *mr,
404 struct Object *owner,
405 const char *name,
406 uint64_t size,
407 Error **errp);
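/* Illustrative sketch (assumption): allocating 16 MiB of guest RAM and
 * mapping it at address 0.  "sysmem" is a hypothetical pointer to the
 * system memory container and &error_fatal comes from "qapi/error.h".
 *
 *     MemoryRegion *ram = g_new0(MemoryRegion, 1);
 *     memory_region_init_ram(ram, NULL, "board.ram", 16 * 1024 * 1024,
 *                            &error_fatal);
 *     memory_region_add_subregion(sysmem, 0, ram);
 */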
408
409 /**
410 * memory_region_init_resizeable_ram: Initialize memory region with resizeable
411 * RAM. Accesses into the region will
412 * modify memory directly. Only an initial
413 * portion of this RAM is actually used.
414 * The used size can change across reboots.
415 *
416 * @mr: the #MemoryRegion to be initialized.
417 * @owner: the object that tracks the region's reference count
418  * @name: Region name, becomes part of RAMBlock name used in migration stream;
419  *        must be unique within any device
420 * @size: used size of the region.
421 * @max_size: max size of the region.
422 * @resized: callback to notify owner about used size change.
423 * @errp: pointer to Error*, to store an error if it happens.
424 */
425 void memory_region_init_resizeable_ram(MemoryRegion *mr,
426 struct Object *owner,
427 const char *name,
428 uint64_t size,
429 uint64_t max_size,
430 void (*resized)(const char*,
431 uint64_t length,
432 void *host),
433 Error **errp);
434 #ifdef __linux__
435 /**
436 * memory_region_init_ram_from_file: Initialize RAM memory region with a
437 * mmap-ed backend.
438 *
439 * @mr: the #MemoryRegion to be initialized.
440 * @owner: the object that tracks the region's reference count
441  * @name: Region name, becomes part of RAMBlock name used in migration stream;
442  *        must be unique within any device
443 * @size: size of the region.
444 * @share: %true if memory must be mmaped with the MAP_SHARED flag
445 * @path: the path in which to allocate the RAM.
446 * @errp: pointer to Error*, to store an error if it happens.
447 */
448 void memory_region_init_ram_from_file(MemoryRegion *mr,
449 struct Object *owner,
450 const char *name,
451 uint64_t size,
452 bool share,
453 const char *path,
454 Error **errp);
455 #endif
456
457 /**
458 * memory_region_init_ram_ptr: Initialize RAM memory region from a
459 * user-provided pointer. Accesses into the
460 * region will modify memory directly.
461 *
462 * @mr: the #MemoryRegion to be initialized.
463 * @owner: the object that tracks the region's reference count
464  * @name: Region name, becomes part of RAMBlock name used in migration stream;
465  *        must be unique within any device
466 * @size: size of the region.
467 * @ptr: memory to be mapped; must contain at least @size bytes.
468 */
469 void memory_region_init_ram_ptr(MemoryRegion *mr,
470 struct Object *owner,
471 const char *name,
472 uint64_t size,
473 void *ptr);
474
475 /**
476 * memory_region_init_ram_device_ptr: Initialize RAM device memory region from
477 * a user-provided pointer.
478 *
479 * A RAM device represents a mapping to a physical device, such as to a PCI
480  * MMIO BAR of a vfio-pci assigned device.  The memory region may be mapped
481 * into the VM address space and access to the region will modify memory
482 * directly. However, the memory region should not be included in a memory
483 * dump (device may not be enabled/mapped at the time of the dump), and
484 * operations incompatible with manipulating MMIO should be avoided. Replaces
485  * the skip_dump flag.
486 *
487 * @mr: the #MemoryRegion to be initialized.
488 * @owner: the object that tracks the region's reference count
489 * @name: the name of the region.
490 * @size: size of the region.
491 * @ptr: memory to be mapped; must contain at least @size bytes.
492 */
493 void memory_region_init_ram_device_ptr(MemoryRegion *mr,
494 struct Object *owner,
495 const char *name,
496 uint64_t size,
497 void *ptr);
498
499 /**
500 * memory_region_init_alias: Initialize a memory region that aliases all or a
501 * part of another memory region.
502 *
503 * @mr: the #MemoryRegion to be initialized.
504 * @owner: the object that tracks the region's reference count
505 * @name: used for debugging; not visible to the user or ABI
506 * @orig: the region to be referenced; @mr will be equivalent to
507 * @orig between @offset and @offset + @size - 1.
508 * @offset: start of the section in @orig to be referenced.
509 * @size: size of the region.
510 */
511 void memory_region_init_alias(MemoryRegion *mr,
512 struct Object *owner,
513 const char *name,
514 MemoryRegion *orig,
515 hwaddr offset,
516 uint64_t size);
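/* Illustrative sketch (assumption): exposing the first 1 MiB of "ram" a
 * second time near the top of the 32-bit address space, as some boards do
 * for a shadowed low-memory window.  Names and addresses are made up.
 *
 *     memory_region_init_alias(&alias, NULL, "ram-low-alias", ram,
 *                              0, 0x100000);
 *     memory_region_add_subregion(sysmem, 0xfff00000, &alias);
 */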
517
518 /**
519 * memory_region_init_rom: Initialize a ROM memory region.
520 *
521 * This has the same effect as calling memory_region_init_ram()
522 * and then marking the resulting region read-only with
523 * memory_region_set_readonly().
524 *
525 * @mr: the #MemoryRegion to be initialized.
526 * @owner: the object that tracks the region's reference count
527  * @name: Region name, becomes part of RAMBlock name used in migration stream;
528  *        must be unique within any device
529 * @size: size of the region.
530 * @errp: pointer to Error*, to store an error if it happens.
531 */
532 void memory_region_init_rom(MemoryRegion *mr,
533 struct Object *owner,
534 const char *name,
535 uint64_t size,
536 Error **errp);
537
538 /**
539 * memory_region_init_rom_device: Initialize a ROM memory region. Writes are
540 * handled via callbacks.
541 *
542 * @mr: the #MemoryRegion to be initialized.
543 * @owner: the object that tracks the region's reference count
544 * @ops: callbacks for write access handling (must not be NULL).
545  * @name: Region name, becomes part of RAMBlock name used in migration stream;
546  *        must be unique within any device
547 * @size: size of the region.
548 * @errp: pointer to Error*, to store an error if it happens.
549 */
550 void memory_region_init_rom_device(MemoryRegion *mr,
551 struct Object *owner,
552 const MemoryRegionOps *ops,
553 void *opaque,
554 const char *name,
555 uint64_t size,
556 Error **errp);
557
558 /**
559 * memory_region_init_reservation: Initialize a memory region that reserves
560 * I/O space.
561 *
562  * A reservation region primarily serves debugging purposes.  It claims I/O
563 * space that is not supposed to be handled by QEMU itself. Any access via
564 * the memory API will cause an abort().
565 * This function is deprecated. Use memory_region_init_io() with NULL
566 * callbacks instead.
567 *
568 * @mr: the #MemoryRegion to be initialized
569 * @owner: the object that tracks the region's reference count
570 * @name: used for debugging; not visible to the user or ABI
571 * @size: size of the region.
572 */
573 static inline void memory_region_init_reservation(MemoryRegion *mr,
574 Object *owner,
575 const char *name,
576 uint64_t size)
577 {
578 memory_region_init_io(mr, owner, NULL, mr, name, size);
579 }
580
581 /**
582 * memory_region_init_iommu: Initialize a memory region that translates
583 * addresses
584 *
585 * An IOMMU region translates addresses and forwards accesses to a target
586 * memory region.
587 *
588 * @mr: the #MemoryRegion to be initialized
589 * @owner: the object that tracks the region's reference count
590 * @ops: a function that translates addresses into the @target region
591 * @name: used for debugging; not visible to the user or ABI
592 * @size: size of the region.
593 */
594 void memory_region_init_iommu(MemoryRegion *mr,
595 struct Object *owner,
596 const MemoryRegionIOMMUOps *ops,
597 const char *name,
598 uint64_t size);
599
600 /**
601 * memory_region_owner: get a memory region's owner.
602 *
603 * @mr: the memory region being queried.
604 */
605 struct Object *memory_region_owner(MemoryRegion *mr);
606
607 /**
608 * memory_region_size: get a memory region's size.
609 *
610 * @mr: the memory region being queried.
611 */
612 uint64_t memory_region_size(MemoryRegion *mr);
613
614 /**
615 * memory_region_is_ram: check whether a memory region is random access
616 *
617  * Returns %true if a memory region is random access.
618 *
619 * @mr: the memory region being queried
620 */
621 static inline bool memory_region_is_ram(MemoryRegion *mr)
622 {
623 return mr->ram;
624 }
625
626 /**
627 * memory_region_is_ram_device: check whether a memory region is a ram device
628 *
629  * Returns %true if a memory region is a device-backed RAM region
630 *
631 * @mr: the memory region being queried
632 */
633 bool memory_region_is_ram_device(MemoryRegion *mr);
634
635 /**
636 * memory_region_is_romd: check whether a memory region is in ROMD mode
637 *
638 * Returns %true if a memory region is a ROM device and currently set to allow
639 * direct reads.
640 *
641 * @mr: the memory region being queried
642 */
643 static inline bool memory_region_is_romd(MemoryRegion *mr)
644 {
645 return mr->rom_device && mr->romd_mode;
646 }
647
648 /**
649 * memory_region_is_iommu: check whether a memory region is an iommu
650 *
651  * Returns %true if a memory region is an iommu.
652 *
653 * @mr: the memory region being queried
654 */
655 static inline bool memory_region_is_iommu(MemoryRegion *mr)
656 {
657 if (mr->alias) {
658 return memory_region_is_iommu(mr->alias);
659 }
660 return mr->iommu_ops;
661 }
662
663
664 /**
665 * memory_region_iommu_get_min_page_size: get minimum supported page size
666 * for an iommu
667 *
668 * Returns minimum supported page size for an iommu.
669 *
670 * @mr: the memory region being queried
671 */
672 uint64_t memory_region_iommu_get_min_page_size(MemoryRegion *mr);
673
674 /**
675 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
676 *
677 * The notification type will be decided by entry.perm bits:
678 *
679 * - For UNMAP (cache invalidation) notifies: set entry.perm to IOMMU_NONE.
680 * - For MAP (newly added entry) notifies: set entry.perm to the
681 * permission of the page (which is definitely !IOMMU_NONE).
682 *
683 * Note: for any IOMMU implementation, an in-place mapping change
684 * should be notified with an UNMAP followed by a MAP.
685 *
686 * @mr: the memory region that was changed
687 * @entry: the new entry in the IOMMU translation table. The entry
688 * replaces all old entries for the same virtual I/O address range.
689 * Deleted entries have .@perm == 0.
690 */
691 void memory_region_notify_iommu(MemoryRegion *mr,
692 IOMMUTLBEntry entry);
693
694 /**
695 * memory_region_notify_one: notify a change in an IOMMU translation
696 * entry to a single notifier
697 *
698 * This works just like memory_region_notify_iommu(), but it only
699 * notifies a specific notifier, not all of them.
700 *
701 * @notifier: the notifier to be notified
702 * @entry: the new entry in the IOMMU translation table. The entry
703 * replaces all old entries for the same virtual I/O address range.
704 * Deleted entries have .@perm == 0.
705 */
706 void memory_region_notify_one(IOMMUNotifier *notifier,
707 IOMMUTLBEntry *entry);
708
709 /**
710 * memory_region_register_iommu_notifier: register a notifier for changes to
711 * IOMMU translation entries.
712 *
713 * @mr: the memory region to observe
714 * @n: the IOMMUNotifier to be added; the notify callback receives a
715 * pointer to an #IOMMUTLBEntry as the opaque value; the pointer
716 * ceases to be valid on exit from the notifier.
717 */
718 void memory_region_register_iommu_notifier(MemoryRegion *mr,
719 IOMMUNotifier *n);
720
721 /**
722 * memory_region_iommu_replay: replay existing IOMMU translations to
723 * a notifier with the minimum page granularity returned by
724  * mr->iommu_ops->get_min_page_size().
725 *
726 * @mr: the memory region to observe
727 * @n: the notifier to which to replay iommu mappings
728 * @is_write: Whether to treat the replay as a translate "write"
729 * through the iommu
730 */
731 void memory_region_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n,
732 bool is_write);
733
734 /**
735 * memory_region_iommu_replay_all: replay existing IOMMU translations
736 * to all the notifiers registered.
737 *
738 * @mr: the memory region to observe
739 */
740 void memory_region_iommu_replay_all(MemoryRegion *mr);
741
742 /**
743 * memory_region_unregister_iommu_notifier: unregister a notifier for
744 * changes to IOMMU translation entries.
745 *
746  * @mr: the memory region which was observed and for which notify_stopped()
747 * needs to be called
748 * @n: the notifier to be removed.
749 */
750 void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
751 IOMMUNotifier *n);
752
753 /**
754 * memory_region_name: get a memory region's name
755 *
756 * Returns the string that was used to initialize the memory region.
757 *
758 * @mr: the memory region being queried
759 */
760 const char *memory_region_name(const MemoryRegion *mr);
761
762 /**
763 * memory_region_is_logging: return whether a memory region is logging writes
764 *
765 * Returns %true if the memory region is logging writes for the given client
766 *
767 * @mr: the memory region being queried
768 * @client: the client being queried
769 */
770 bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);
771
772 /**
773 * memory_region_get_dirty_log_mask: return the clients for which a
774 * memory region is logging writes.
775 *
776 * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
777 * are the bit indices.
778 *
779 * @mr: the memory region being queried
780 */
781 uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);
782
783 /**
784 * memory_region_is_rom: check whether a memory region is ROM
785 *
786  * Returns %true if a memory region is read-only memory.
787 *
788 * @mr: the memory region being queried
789 */
790 static inline bool memory_region_is_rom(MemoryRegion *mr)
791 {
792 return mr->ram && mr->readonly;
793 }
794
795
796 /**
797 * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
798 *
799 * Returns a file descriptor backing a file-based RAM memory region,
800 * or -1 if the region is not a file-based RAM memory region.
801 *
802 * @mr: the RAM or alias memory region being queried.
803 */
804 int memory_region_get_fd(MemoryRegion *mr);
805
806 /**
807 * memory_region_set_fd: Mark a RAM memory region as backed by a
808 * file descriptor.
809 *
810 * This function is typically used after memory_region_init_ram_ptr().
811 *
812 * @mr: the memory region being queried.
813 * @fd: the file descriptor that backs @mr.
814 */
815 void memory_region_set_fd(MemoryRegion *mr, int fd);
816
817 /**
818 * memory_region_from_host: Convert a pointer into a RAM memory region
819 * and an offset within it.
820 *
821 * Given a host pointer inside a RAM memory region (created with
822 * memory_region_init_ram() or memory_region_init_ram_ptr()), return
823 * the MemoryRegion and the offset within it.
824 *
825 * Use with care; by the time this function returns, the returned pointer is
826 * not protected by RCU anymore. If the caller is not within an RCU critical
827 * section and does not hold the iothread lock, it must have other means of
828 * protecting the pointer, such as a reference to the region that includes
829 * the incoming ram_addr_t.
830 *
831 * @mr: the memory region being queried.
832 */
833 MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);
834
835 /**
836 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
837 *
838 * Returns a host pointer to a RAM memory region (created with
839 * memory_region_init_ram() or memory_region_init_ram_ptr()).
840 *
841 * Use with care; by the time this function returns, the returned pointer is
842 * not protected by RCU anymore. If the caller is not within an RCU critical
843 * section and does not hold the iothread lock, it must have other means of
844 * protecting the pointer, such as a reference to the region that includes
845 * the incoming ram_addr_t.
846 *
847 * @mr: the memory region being queried.
848 */
849 void *memory_region_get_ram_ptr(MemoryRegion *mr);
850
851 /* memory_region_ram_resize: Resize a RAM region.
852 *
853 * Only legal before guest might have detected the memory size: e.g. on
854 * incoming migration, or right after reset.
855 *
856 * @mr: a memory region created with @memory_region_init_resizeable_ram.
857  * @newsize: the new size of the region
858 * @errp: pointer to Error*, to store an error if it happens.
859 */
860 void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
861 Error **errp);
862
863 /**
864 * memory_region_set_log: Turn dirty logging on or off for a region.
865 *
866 * Turns dirty logging on or off for a specified client (display, migration).
867 * Only meaningful for RAM regions.
868 *
869 * @mr: the memory region being updated.
870 * @log: whether dirty logging is to be enabled or disabled.
871 * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
872 */
873 void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
874
875 /**
876 * memory_region_get_dirty: Check whether a range of bytes is dirty
877 * for a specified client.
878 *
879 * Checks whether a range of bytes has been written to since the last
880 * call to memory_region_reset_dirty() with the same @client. Dirty logging
881 * must be enabled.
882 *
883 * @mr: the memory region being queried.
884 * @addr: the address (relative to the start of the region) being queried.
885 * @size: the size of the range being queried.
886 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
887 * %DIRTY_MEMORY_VGA.
888 */
889 bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
890 hwaddr size, unsigned client);
891
892 /**
893 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
894 *
895 * Marks a range of bytes as dirty, after it has been dirtied outside
896 * guest code.
897 *
898 * @mr: the memory region being dirtied.
899 * @addr: the address (relative to the start of the region) being dirtied.
900 * @size: size of the range being dirtied.
901 */
902 void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
903 hwaddr size);
904
905 /**
906 * memory_region_test_and_clear_dirty: Check whether a range of bytes is dirty
907  *                                     for a specified client, and clear the
908  *                                     corresponding dirty bits.
908 *
909 * Checks whether a range of bytes has been written to since the last
910 * call to memory_region_reset_dirty() with the same @client. Dirty logging
911 * must be enabled.
912 *
913 * @mr: the memory region being queried.
914 * @addr: the address (relative to the start of the region) being queried.
915 * @size: the size of the range being queried.
916 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
917 * %DIRTY_MEMORY_VGA.
918 */
919 bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
920 hwaddr size, unsigned client);
921
922 /**
923 * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
924 * bitmap and clear it.
925 *
926 * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
927 * returns the snapshot. The snapshot can then be used to query dirty
928 * status, using memory_region_snapshot_get_dirty. Unlike
929  * memory_region_test_and_clear_dirty, this allows querying the same
930  * page multiple times, which is especially useful for display updates
931  * where the scanlines often are not page aligned.
932  *
933  * The dirty bitmap region which gets copied into the snapshot (and
934  * cleared afterwards) can be larger than requested.  The boundaries
935  * are rounded up/down so complete bitmap longs (covering 64 pages on
936  * 64-bit hosts) can be copied over into the bitmap snapshot.  This
937  * isn't a problem for display updates, as the extra pages are outside
938  * the visible area, and in case the visible area changes a full
939  * display redraw is due anyway.  Should other use cases for this
940  * function emerge, we might have to revisit this implementation
941  * detail.
942 *
943 * Use g_free to release DirtyBitmapSnapshot.
944 *
945 * @mr: the memory region being queried.
946 * @addr: the address (relative to the start of the region) being queried.
947 * @size: the size of the range being queried.
948 * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA.
949 */
950 DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
951 hwaddr addr,
952 hwaddr size,
953 unsigned client);
954
955 /**
956 * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty
957 * in the specified dirty bitmap snapshot.
958 *
959 * @mr: the memory region being queried.
960 * @snap: the dirty bitmap snapshot
961 * @addr: the address (relative to the start of the region) being queried.
962 * @size: the size of the range being queried.
963 */
964 bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
965 DirtyBitmapSnapshot *snap,
966 hwaddr addr, hwaddr size);
967
968 /**
969 * memory_region_sync_dirty_bitmap: Synchronize a region's dirty bitmap with
970 * any external TLBs (e.g. kvm)
971 *
972 * Flushes dirty information from accelerators such as kvm and vhost-net
973 * and makes it available to users of the memory API.
974 *
975 * @mr: the region being flushed.
976 */
977 void memory_region_sync_dirty_bitmap(MemoryRegion *mr);
978
979 /**
980 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
981 * client.
982 *
983 * Marks a range of pages as no longer dirty.
984 *
985 * @mr: the region being updated.
986 * @addr: the start of the subrange being cleaned.
987 * @size: the size of the subrange being cleaned.
988 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
989 * %DIRTY_MEMORY_VGA.
990 */
991 void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
992 hwaddr size, unsigned client);
993
994 /**
995 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
996 *
997 * Allows a memory region to be marked as read-only (turning it into a ROM).
998  * Only useful on RAM regions.
999 *
1000 * @mr: the region being updated.
1001  * @readonly: whether the region is to be ROM or RAM.
1002 */
1003 void memory_region_set_readonly(MemoryRegion *mr, bool readonly);
1004
1005 /**
1006 * memory_region_rom_device_set_romd: enable/disable ROMD mode
1007 *
1008  * Allows a ROM device (initialized with memory_region_init_rom_device()) to
1009  * be set to ROMD mode (default) or MMIO mode.  When it is in ROMD mode, the
1010 * device is mapped to guest memory and satisfies read access directly.
1011 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
1012 * Writes are always handled by the #MemoryRegion.write function.
1013 *
1014 * @mr: the memory region to be updated
1015 * @romd_mode: %true to put the region into ROMD mode
1016 */
1017 void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);
1018
1019 /**
1020 * memory_region_set_coalescing: Enable memory coalescing for the region.
1021 *
1022  * Enables writes to a region to be queued for later processing. MMIO ->write
1023 * callbacks may be delayed until a non-coalesced MMIO is issued.
1024 * Only useful for IO regions. Roughly similar to write-combining hardware.
1025 *
1026 * @mr: the memory region to be write coalesced
1027 */
1028 void memory_region_set_coalescing(MemoryRegion *mr);
1029
1030 /**
1031 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
1032 * a region.
1033 *
1034 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
1035  * Multiple calls can be issued to coalesce disjoint ranges.
1036 *
1037 * @mr: the memory region to be updated.
1038 * @offset: the start of the range within the region to be coalesced.
1039 * @size: the size of the subrange to be coalesced.
1040 */
1041 void memory_region_add_coalescing(MemoryRegion *mr,
1042 hwaddr offset,
1043 uint64_t size);
1044
1045 /**
1046 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
1047 *
1048 * Disables any coalescing caused by memory_region_set_coalescing() or
1049  * memory_region_add_coalescing().  Roughly equivalent to uncacheable memory
1050 * hardware.
1051 *
1052 * @mr: the memory region to be updated.
1053 */
1054 void memory_region_clear_coalescing(MemoryRegion *mr);
1055
1056 /**
1057 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
1058 * accesses.
1059 *
1060  * Ensure that pending coalesced MMIO requests are flushed before the memory
1061 * region is accessed. This property is automatically enabled for all regions
1062 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
1063 *
1064 * @mr: the memory region to be updated.
1065 */
1066 void memory_region_set_flush_coalesced(MemoryRegion *mr);
1067
1068 /**
1069 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
1070 * accesses.
1071 *
1072 * Clear the automatic coalesced MMIO flushing enabled via
1073 * memory_region_set_flush_coalesced. Note that this service has no effect on
1074 * memory regions that have MMIO coalescing enabled for themselves. For them,
1075 * automatic flushing will stop once coalescing is disabled.
1076 *
1077 * @mr: the memory region to be updated.
1078 */
1079 void memory_region_clear_flush_coalesced(MemoryRegion *mr);
1080
1081 /**
1082  * memory_region_set_global_locking: Declares that access processing requires
1083 * QEMU's global lock.
1084 *
1085 * When this is invoked, accesses to the memory region will be processed while
1086 * holding the global lock of QEMU. This is the default behavior of memory
1087 * regions.
1088 *
1089 * @mr: the memory region to be updated.
1090 */
1091 void memory_region_set_global_locking(MemoryRegion *mr);
1092
1093 /**
1094 * memory_region_clear_global_locking: Declares that access processing does
1095 * not depend on the QEMU global lock.
1096 *
1097 * By clearing this property, accesses to the memory region will be processed
1098  * outside of QEMU's global lock (unless the lock is held when issuing the
1099 * access request). In this case, the device model implementing the access
1100 * handlers is responsible for synchronization of concurrency.
1101 *
1102 * @mr: the memory region to be updated.
1103 */
1104 void memory_region_clear_global_locking(MemoryRegion *mr);
1105
1106 /**
1107 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
1108 * is written to a location.
1109 *
1110 * Marks a word in an IO region (initialized with memory_region_init_io())
1111 * as a trigger for an eventfd event. The I/O callback will not be called.
1112 * The caller must be prepared to handle failure (that is, take the required
1113 * action if the callback _is_ called).
1114 *
1115 * @mr: the memory region being updated.
1116 * @addr: the address within @mr that is to be monitored
1117 * @size: the size of the access to trigger the eventfd
1118 * @match_data: whether to match against @data, instead of just @addr
1119 * @data: the data to match against the guest write
1120  * @e: the #EventNotifier to be triggered when @addr, @size, and @data all match.
1121  */
1122 void memory_region_add_eventfd(MemoryRegion *mr,
1123 hwaddr addr,
1124 unsigned size,
1125 bool match_data,
1126 uint64_t data,
1127 EventNotifier *e);
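/* Illustrative sketch (assumption): have a 4-byte guest write of the value 1
 * to offset 0x10 of a device's MMIO region signal an EventNotifier instead
 * of invoking the ->write callback (the usual ioeventfd pattern).  "s->mmio"
 * is hypothetical.
 *
 *     EventNotifier notifier;
 *     event_notifier_init(&notifier, 0);
 *     memory_region_add_eventfd(&s->mmio, 0x10, 4, true, 1, &notifier);
 */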
1128
1129 /**
1130 * memory_region_del_eventfd: Cancel an eventfd.
1131 *
1132 * Cancels an eventfd trigger requested by a previous
1133 * memory_region_add_eventfd() call.
1134 *
1135 * @mr: the memory region being updated.
1136 * @addr: the address within @mr that is to be monitored
1137 * @size: the size of the access to trigger the eventfd
1138 * @match_data: whether to match against @data, instead of just @addr
1139 * @data: the data to match against the guest write
1140  * @e: the #EventNotifier to be triggered when @addr, @size, and @data all match.
1141 */
1142 void memory_region_del_eventfd(MemoryRegion *mr,
1143 hwaddr addr,
1144 unsigned size,
1145 bool match_data,
1146 uint64_t data,
1147 EventNotifier *e);
1148
1149 /**
1150 * memory_region_add_subregion: Add a subregion to a container.
1151 *
1152 * Adds a subregion at @offset. The subregion may not overlap with other
1153 * subregions (except for those explicitly marked as overlapping). A region
1154 * may only be added once as a subregion (unless removed with
1155 * memory_region_del_subregion()); use memory_region_init_alias() if you
1156 * want a region to be a subregion in multiple locations.
1157 *
1158 * @mr: the region to contain the new subregion; must be a container
1159 * initialized with memory_region_init().
1160 * @offset: the offset relative to @mr where @subregion is added.
1161 * @subregion: the subregion to be added.
1162 */
1163 void memory_region_add_subregion(MemoryRegion *mr,
1164 hwaddr offset,
1165 MemoryRegion *subregion);
1166 /**
1167 * memory_region_add_subregion_overlap: Add a subregion to a container
1168 * with overlap.
1169 *
1170 * Adds a subregion at @offset. The subregion may overlap with other
1171 * subregions. Conflicts are resolved by having a higher @priority hide a
1172 * lower @priority. Subregions without priority are taken as @priority 0.
1173 * A region may only be added once as a subregion (unless removed with
1174 * memory_region_del_subregion()); use memory_region_init_alias() if you
1175 * want a region to be a subregion in multiple locations.
1176 *
1177 * @mr: the region to contain the new subregion; must be a container
1178 * initialized with memory_region_init().
1179 * @offset: the offset relative to @mr where @subregion is added.
1180 * @subregion: the subregion to be added.
1181 * @priority: used for resolving overlaps; highest priority wins.
1182 */
1183 void memory_region_add_subregion_overlap(MemoryRegion *mr,
1184 hwaddr offset,
1185 MemoryRegion *subregion,
1186 int priority);
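/* Illustrative sketch (assumption): layering two regions in one container.
 * The higher-priority "vram" window hides the part of "ram" underneath it;
 * names, addresses and priorities are made up.
 *
 *     memory_region_add_subregion_overlap(sysmem, 0, ram, 0);
 *     memory_region_add_subregion_overlap(sysmem, 0xa0000, vram, 1);
 */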
1187
1188 /**
1189 * memory_region_get_ram_addr: Get the ram address associated with a memory
1190 * region
1191 */
1192 ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
1193
1194 uint64_t memory_region_get_alignment(const MemoryRegion *mr);
1195 /**
1196 * memory_region_del_subregion: Remove a subregion.
1197 *
1198 * Removes a subregion from its container.
1199 *
1200 * @mr: the container to be updated.
1201 * @subregion: the region being removed; must be a current subregion of @mr.
1202 */
1203 void memory_region_del_subregion(MemoryRegion *mr,
1204 MemoryRegion *subregion);
1205
1206 /*
1207 * memory_region_set_enabled: dynamically enable or disable a region
1208 *
1209 * Enables or disables a memory region. A disabled memory region
1210 * ignores all accesses to itself and its subregions. It does not
1211 * obscure sibling subregions with lower priority - it simply behaves as
1212 * if it was removed from the hierarchy.
1213 *
1214 * Regions default to being enabled.
1215 *
1216 * @mr: the region to be updated
1217 * @enabled: whether to enable or disable the region
1218 */
1219 void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
1220
1221 /*
1222 * memory_region_set_address: dynamically update the address of a region
1223 *
1224 * Dynamically updates the address of a region, relative to its container.
1225  * May be used on regions that are currently part of a memory hierarchy.
1226 *
1227 * @mr: the region to be updated
1228 * @addr: new address, relative to container region
1229 */
1230 void memory_region_set_address(MemoryRegion *mr, hwaddr addr);
1231
1232 /*
1233 * memory_region_set_size: dynamically update the size of a region.
1234 *
1235 * Dynamically updates the size of a region.
1236 *
1237 * @mr: the region to be updated
1238 * @size: used size of the region.
1239 */
1240 void memory_region_set_size(MemoryRegion *mr, uint64_t size);
1241
1242 /*
1243 * memory_region_set_alias_offset: dynamically update a memory alias's offset
1244 *
1245 * Dynamically updates the offset into the target region that an alias points
1246  * to, as if the @offset argument to memory_region_init_alias() had changed.
1247 *
1248 * @mr: the #MemoryRegion to be updated; should be an alias.
1249 * @offset: the new offset into the target memory region
1250 */
1251 void memory_region_set_alias_offset(MemoryRegion *mr,
1252 hwaddr offset);
1253
1254 /**
1255 * memory_region_present: checks if an address relative to a @container
1256 * translates into #MemoryRegion within @container
1257 *
1258 * Answer whether a #MemoryRegion within @container covers the address
1259 * @addr.
1260 *
1261 * @container: a #MemoryRegion within which @addr is a relative address
1262 * @addr: the area within @container to be searched
1263 */
1264 bool memory_region_present(MemoryRegion *container, hwaddr addr);
1265
1266 /**
1267 * memory_region_is_mapped: returns true if #MemoryRegion is mapped
1268 * into any address space.
1269 *
1270 * @mr: a #MemoryRegion which should be checked if it's mapped
1271 */
1272 bool memory_region_is_mapped(MemoryRegion *mr);
1273
1274 /**
1275 * memory_region_find: translate an address/size relative to a
1276 * MemoryRegion into a #MemoryRegionSection.
1277 *
1278 * Locates the first #MemoryRegion within @mr that overlaps the range
1279 * given by @addr and @size.
1280 *
1281 * Returns a #MemoryRegionSection that describes a contiguous overlap.
1282 * It will have the following characteristics:
1283 * .@size = 0 iff no overlap was found
1284 * .@mr is non-%NULL iff an overlap was found
1285 *
1286 * Remember that in the return value the @offset_within_region is
1287 * relative to the returned region (in the .@mr field), not to the
1288 * @mr argument.
1289 *
1290 * Similarly, the .@offset_within_address_space is relative to the
1291 * address space that contains both regions, the passed and the
1292 * returned one. However, in the special case where the @mr argument
1293 * has no container (and thus is the root of the address space), the
1294 * following will hold:
1295 * .@offset_within_address_space >= @addr
1296 * .@offset_within_address_space + .@size <= @addr + @size
1297 *
1298 * @mr: a MemoryRegion within which @addr is a relative address
1299  * @addr: start of the area within @mr to be searched
1300 * @size: size of the area to be searched
1301 */
1302 MemoryRegionSection memory_region_find(MemoryRegion *mr,
1303 hwaddr addr, uint64_t size);
1304
1305 /**
1306 * memory_global_dirty_log_sync: synchronize the dirty log for all memory
1307 *
1308 * Synchronizes the dirty page log for all address spaces.
1309 */
1310 void memory_global_dirty_log_sync(void);
1311
1312 /**
1313 * memory_region_transaction_begin: Start a transaction.
1314 *
1315 * During a transaction, changes will be accumulated and made visible
1316 * only when the transaction ends (is committed).
1317 */
1318 void memory_region_transaction_begin(void);
1319
1320 /**
1321 * memory_region_transaction_commit: Commit a transaction and make changes
1322 * visible to the guest.
1323 */
1324 void memory_region_transaction_commit(void);
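/* Illustrative sketch (assumption): batching several topology changes so
 * that listeners (for example the KVM memory slot code) see one combined
 * update rather than one per call.  "bank0" and "bank1" are hypothetical.
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(bank0, false);
 *     memory_region_set_address(bank1, 0x40000000);
 *     memory_region_transaction_commit();
 */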
1325
1326 /**
1327 * memory_listener_register: register callbacks to be called when memory
1328 * sections are mapped or unmapped into an address
1329 * space
1330 *
1331 * @listener: an object containing the callbacks to be called
1332 * @filter: if non-%NULL, only regions in this address space will be observed
1333 */
1334 void memory_listener_register(MemoryListener *listener, AddressSpace *filter);
1335
1336 /**
1337 * memory_listener_unregister: undo the effect of memory_listener_register()
1338 *
1339 * @listener: an object containing the callbacks to be removed
1340 */
1341 void memory_listener_unregister(MemoryListener *listener);
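/* Illustrative sketch (assumption): a listener that reacts to new sections
 * in the system address space.  Only the callbacks of interest need to be
 * filled in; "my_region_add" is hypothetical and &address_space_memory is
 * declared in "exec/address-spaces.h".
 *
 *     static void my_region_add(MemoryListener *l, MemoryRegionSection *sec)
 *     {
 *         // called for each section mapped into the observed address space
 *     }
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *         .priority = 10,
 *     };
 *     memory_listener_register(&my_listener, &address_space_memory);
 */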
1342
1343 /**
1344 * memory_global_dirty_log_start: begin dirty logging for all regions
1345 */
1346 void memory_global_dirty_log_start(void);
1347
1348 /**
1349 * memory_global_dirty_log_stop: end dirty logging for all regions
1350 */
1351 void memory_global_dirty_log_stop(void);
1352
1353 void mtree_info(fprintf_function mon_printf, void *f, bool flatview);
1354
1355 /**
1356 * memory_region_dispatch_read: perform a read directly to the specified
1357 * MemoryRegion.
1358 *
1359 * @mr: #MemoryRegion to access
1360 * @addr: address within that region
1361 * @pval: pointer to uint64_t which the data is written to
1362 * @size: size of the access in bytes
1363 * @attrs: memory transaction attributes to use for the access
1364 */
1365 MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
1366 hwaddr addr,
1367 uint64_t *pval,
1368 unsigned size,
1369 MemTxAttrs attrs);
1370 /**
1371 * memory_region_dispatch_write: perform a write directly to the specified
1372 * MemoryRegion.
1373 *
1374 * @mr: #MemoryRegion to access
1375 * @addr: address within that region
1376 * @data: data to write
1377 * @size: size of the access in bytes
1378 * @attrs: memory transaction attributes to use for the access
1379 */
1380 MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
1381 hwaddr addr,
1382 uint64_t data,
1383 unsigned size,
1384 MemTxAttrs attrs);
1385
1386 /**
1387 * address_space_init: initializes an address space
1388 *
1389 * @as: an uninitialized #AddressSpace
1390 * @root: a #MemoryRegion that routes addresses for the address space
1391 * @name: an address space name. The name is only used for debugging
1392 * output.
1393 */
1394 void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);
1395
1396 /**
1397 * address_space_init_shareable: return an address space for a memory region,
1398 * creating it if it does not already exist
1399 *
1400 * @root: a #MemoryRegion that routes addresses for the address space
1401 * @name: an address space name. The name is only used for debugging
1402 * output.
1403 *
1404 * This function will return a pointer to an existing AddressSpace
1405 * which was initialized with the specified MemoryRegion, or it will
1406 * create and initialize one if it does not already exist. The ASes
1407 * are reference-counted, so the memory will be freed automatically
1408 * when the AddressSpace is destroyed via address_space_destroy.
1409 */
1410 AddressSpace *address_space_init_shareable(MemoryRegion *root,
1411 const char *name);
1412
1413 /**
1414 * address_space_destroy: destroy an address space
1415 *
1416 * Releases all resources associated with an address space. After an address space
1417 * is destroyed, its root memory region (given by address_space_init()) may be destroyed
1418 * as well.
1419 *
1420 * @as: address space to be destroyed
1421 */
1422 void address_space_destroy(AddressSpace *as);
1423
1424 /**
1425 * address_space_rw: read from or write to an address space.
1426 *
1427 * Return a MemTxResult indicating whether the operation succeeded
1428 * or failed (eg unassigned memory, device rejected the transaction,
1429 * IOMMU fault).
1430 *
1431 * @as: #AddressSpace to be accessed
1432 * @addr: address within that address space
1433 * @attrs: memory transaction attributes
1434 * @buf: buffer with the data transferred
1435 * @is_write: indicates the transfer direction
1436 */
1437 MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
1438 MemTxAttrs attrs, uint8_t *buf,
1439 int len, bool is_write);
1440
1441 /**
1442 * address_space_write: write to address space.
1443 *
1444 * Return a MemTxResult indicating whether the operation succeeded
1445 * or failed (eg unassigned memory, device rejected the transaction,
1446 * IOMMU fault).
1447 *
1448 * @as: #AddressSpace to be accessed
1449 * @addr: address within that address space
1450 * @attrs: memory transaction attributes
1451 * @buf: buffer with the data transferred
1452 */
1453 MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
1454 MemTxAttrs attrs,
1455 const uint8_t *buf, int len);
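/* Illustrative sketch (assumption): a DMA-style write with error checking.
 * MEMTXATTRS_UNSPECIFIED comes from "exec/memattrs.h"; "as" and the target
 * address are hypothetical.
 *
 *     uint8_t buf[4] = { 0xde, 0xad, 0xbe, 0xef };
 *     if (address_space_write(as, 0x1000, MEMTXATTRS_UNSPECIFIED,
 *                             buf, sizeof(buf)) != MEMTX_OK) {
 *         // the device returned an error or nothing decodes that address
 *     }
 */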
1456
1457 /* address_space_ld*: load from an address space
1458 * address_space_st*: store to an address space
1459 *
1460 * These functions perform a load or store of the byte, word,
1461 * longword or quad to the specified address within the AddressSpace.
1462 * The _le suffixed functions treat the data as little endian;
1463 * _be indicates big endian; no suffix indicates "same endianness
1464 * as guest CPU".
1465 *
1466 * The "guest CPU endianness" accessors are deprecated for use outside
1467 * target-* code; devices should be CPU-agnostic and use either the LE
1468 * or the BE accessors.
1469 *
1470  * @as: #AddressSpace to be accessed
1471 * @addr: address within that address space
1472 * @val: data value, for stores
1473 * @attrs: memory transaction attributes
1474 * @result: location to write the success/failure of the transaction;
1475 * if NULL, this information is discarded
1476 */
1477 uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
1478 MemTxAttrs attrs, MemTxResult *result);
1479 uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
1480 MemTxAttrs attrs, MemTxResult *result);
1481 uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
1482 MemTxAttrs attrs, MemTxResult *result);
1483 uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
1484 MemTxAttrs attrs, MemTxResult *result);
1485 uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
1486 MemTxAttrs attrs, MemTxResult *result);
1487 uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
1488 MemTxAttrs attrs, MemTxResult *result);
1489 uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
1490 MemTxAttrs attrs, MemTxResult *result);
1491 void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
1492 MemTxAttrs attrs, MemTxResult *result);
1493 void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
1494 MemTxAttrs attrs, MemTxResult *result);
1495 void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
1496 MemTxAttrs attrs, MemTxResult *result);
1497 void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
1498 MemTxAttrs attrs, MemTxResult *result);
1499 void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
1500 MemTxAttrs attrs, MemTxResult *result);
1501 void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
1502 MemTxAttrs attrs, MemTxResult *result);
1503 void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
1504 MemTxAttrs attrs, MemTxResult *result);
1505
1506 uint32_t ldub_phys(AddressSpace *as, hwaddr addr);
1507 uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr);
1508 uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr);
1509 uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr);
1510 uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr);
1511 uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr);
1512 uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr);
1513 void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val);
1514 void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
1515 void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val);
1516 void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
1517 void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val);
1518 void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val);
1519 void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val);
1520
1521 struct MemoryRegionCache {
1522 hwaddr xlat;
1523 hwaddr len;
1524 AddressSpace *as;
1525 };
1526
1527 #define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .as = NULL })
1528
1529 /* address_space_cache_init: prepare for repeated access to a physical
1530 * memory region
1531 *
1532 * @cache: #MemoryRegionCache to be filled
1533 * @as: #AddressSpace to be accessed
1534 * @addr: address within that address space
1535 * @len: length of buffer
1536 * @is_write: indicates the transfer direction
1537 *
1538 * This only works with RAM, and may map a subset of the requested range by
1539 * returning a value that is less than @len. On failure, a negative errno
1540 * value is returned.
1541 *
1542 * Because it only works with RAM, this function can be used for
1543 * read-modify-write operations. In this case, is_write should be %true.
1544 *
1545 * Note that addresses passed to the address_space_*_cached functions
1546 * are relative to @addr.
1547 */
1548 int64_t address_space_cache_init(MemoryRegionCache *cache,
1549 AddressSpace *as,
1550 hwaddr addr,
1551 hwaddr len,
1552 bool is_write);
1553
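/* Usage sketch (illustrative only): prepare a cache over a guest buffer
 * before issuing many small accesses to it. @as, @desc_addr and @desc_len
 * are hypothetical placeholders.
 *
 *     MemoryRegionCache cache = MEMORY_REGION_CACHE_INVALID;
 *     int64_t mapped = address_space_cache_init(&cache, as,
 *                                               desc_addr, desc_len, false);
 *     if (mapped < 0) {
 *         ... the range is not RAM, or cannot be mapped: handle the error ...
 *     } else if (mapped < desc_len) {
 *         ... only the first @mapped bytes are covered by the cache ...
 *     }
 */
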
1554 /**
1555 * address_space_cache_invalidate: complete a write to a #MemoryRegionCache
1556 *
1557 * @cache: The #MemoryRegionCache to operate on.
1558 * @addr: The first physical address that was written, relative to the
1559 * address that was passed to @address_space_cache_init.
1560 * @access_len: The number of bytes that were written starting at @addr.
1561 */
1562 void address_space_cache_invalidate(MemoryRegionCache *cache,
1563 hwaddr addr,
1564 hwaddr access_len);
1565
1566 /**
1567 * address_space_cache_destroy: free a #MemoryRegionCache
1568 *
1569 * @cache: The #MemoryRegionCache whose memory should be released.
1570 */
1571 void address_space_cache_destroy(MemoryRegionCache *cache);
1572
1573 /* address_space_ld*_cached: load from a cached #MemoryRegion
1574 * address_space_st*_cached: store into a cached #MemoryRegion
1575 *
1576 * These functions perform a load or store of the byte, word,
1577 * longword or quad to the specified address. The address is
1578 * relative to the base passed to address_space_cache_init, and must
1579 * lie within the range that was mapped by that call.
1580 *
1581 * The _le suffixed functions treat the data as little endian;
1582 * _be indicates big endian; no suffix indicates "same endianness
1583 * as guest CPU".
1584 *
1585 * The "guest CPU endianness" accessors are deprecated for use outside
1586 * target-* code; devices should be CPU-agnostic and use either the LE
1587 * or the BE accessors.
1588 *
1589 * @cache: previously initialized #MemoryRegionCache to be accessed
1590 * @addr: address relative to the base of the cached region
1591 * @val: data value, for stores
1592 * @attrs: memory transaction attributes
1593 * @result: location to write the success/failure of the transaction;
1594 * if NULL, this information is discarded
1595 */
1596 uint32_t address_space_ldub_cached(MemoryRegionCache *cache, hwaddr addr,
1597 MemTxAttrs attrs, MemTxResult *result);
1598 uint32_t address_space_lduw_le_cached(MemoryRegionCache *cache, hwaddr addr,
1599 MemTxAttrs attrs, MemTxResult *result);
1600 uint32_t address_space_lduw_be_cached(MemoryRegionCache *cache, hwaddr addr,
1601 MemTxAttrs attrs, MemTxResult *result);
1602 uint32_t address_space_ldl_le_cached(MemoryRegionCache *cache, hwaddr addr,
1603 MemTxAttrs attrs, MemTxResult *result);
1604 uint32_t address_space_ldl_be_cached(MemoryRegionCache *cache, hwaddr addr,
1605 MemTxAttrs attrs, MemTxResult *result);
1606 uint64_t address_space_ldq_le_cached(MemoryRegionCache *cache, hwaddr addr,
1607 MemTxAttrs attrs, MemTxResult *result);
1608 uint64_t address_space_ldq_be_cached(MemoryRegionCache *cache, hwaddr addr,
1609 MemTxAttrs attrs, MemTxResult *result);
1610 void address_space_stb_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
1611 MemTxAttrs attrs, MemTxResult *result);
1612 void address_space_stw_le_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
1613 MemTxAttrs attrs, MemTxResult *result);
1614 void address_space_stw_be_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
1615 MemTxAttrs attrs, MemTxResult *result);
1616 void address_space_stl_le_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
1617 MemTxAttrs attrs, MemTxResult *result);
1618 void address_space_stl_be_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
1619 MemTxAttrs attrs, MemTxResult *result);
1620 void address_space_stq_le_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val,
1621 MemTxAttrs attrs, MemTxResult *result);
1622 void address_space_stq_be_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val,
1623 MemTxAttrs attrs, MemTxResult *result);
1624
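/* Usage sketch (illustrative only): read-modify-write a 32-bit field at
 * @offset inside a cache that was initialized with is_write = %true.
 * @cache and @offset are hypothetical placeholders.
 *
 *     uint32_t flags = address_space_ldl_le_cached(&cache, offset,
 *                                                  MEMTXATTRS_UNSPECIFIED, NULL);
 *     address_space_stl_le_cached(&cache, offset, flags | 1,
 *                                 MEMTXATTRS_UNSPECIFIED, NULL);
 *     address_space_cache_invalidate(&cache, offset, sizeof(uint32_t));
 *
 * When the region is no longer needed, release it:
 *
 *     address_space_cache_destroy(&cache);
 */
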
1625 uint32_t ldub_phys_cached(MemoryRegionCache *cache, hwaddr addr);
1626 uint32_t lduw_le_phys_cached(MemoryRegionCache *cache, hwaddr addr);
1627 uint32_t lduw_be_phys_cached(MemoryRegionCache *cache, hwaddr addr);
1628 uint32_t ldl_le_phys_cached(MemoryRegionCache *cache, hwaddr addr);
1629 uint32_t ldl_be_phys_cached(MemoryRegionCache *cache, hwaddr addr);
1630 uint64_t ldq_le_phys_cached(MemoryRegionCache *cache, hwaddr addr);
1631 uint64_t ldq_be_phys_cached(MemoryRegionCache *cache, hwaddr addr);
1632 void stb_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
1633 void stw_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
1634 void stw_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
1635 void stl_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
1636 void stl_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
1637 void stq_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val);
1638 void stq_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val);

1639 /* address_space_get_iotlb_entry: translate an address into an IOTLB
1640 * entry. Should be called from an RCU critical section.
1641 */
1642 IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
1643 bool is_write);
1644
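/* Usage sketch (illustrative only): query a translation for @iova under RCU
 * protection. @as and @iova are hypothetical placeholders.
 *
 *     rcu_read_lock();
 *     IOMMUTLBEntry entry = address_space_get_iotlb_entry(as, iova, false);
 *     if (entry.perm != IOMMU_NONE) {
 *         ... entry.translated_addr and entry.addr_mask describe the mapping ...
 *     }
 *     rcu_read_unlock();
 */
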
1645 /* address_space_translate: translate an address range within an address
1646 * space into a #MemoryRegion and an address range within that region. Should
1647 * be called from an RCU critical section, to prevent the last reference to
1648 * the returned region from disappearing while the result is still in use.
1649 *
1650 * @as: #AddressSpace to be accessed
1651 * @addr: address within that address space
1652 * @xlat: pointer to the address within the returned #MemoryRegion to
1653 * which @addr translates.
1654 * @len: pointer to length
1655 * @is_write: indicates the transfer direction
1656 */
1657 MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
1658 hwaddr *xlat, hwaddr *len,
1659 bool is_write);
1660
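/* Usage sketch (illustrative only): translate @addr and access the result
 * directly when it is plain RAM, as the inline address_space_read below does.
 * @as, @addr and @len are hypothetical placeholders.
 *
 *     hwaddr xlat, l = len;
 *     rcu_read_lock();
 *     MemoryRegion *mr = address_space_translate(as, addr, &xlat, &l, false);
 *     if (memory_access_is_direct(mr, false)) {
 *         void *host = qemu_map_ram_ptr(mr->ram_block, xlat);
 *         ... up to l bytes can be read from host ...
 *     }
 *     rcu_read_unlock();
 */
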
1661 /* address_space_access_valid: check for validity of accessing an address
1662 * space range
1663 *
1664 * Check whether memory is assigned to the given address space range, and
1665 * access is permitted by any IOMMU regions that are active for the address
1666 * space.
1667 *
1668 * For now, addr and len should be aligned to a page size. This limitation
1669 * will be lifted in the future.
1670 *
1671 * @as: #AddressSpace to be accessed
1672 * @addr: address within that address space
1673 * @len: length of the area to be checked
1674 * @is_write: indicates the transfer direction
1675 */
1676 bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write);
1677
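/* Usage sketch (illustrative only): validate a page-aligned range before
 * starting a DMA transfer into it. @as, @dma_addr and @dma_len are
 * hypothetical placeholders and are assumed to be page aligned.
 *
 *     if (!address_space_access_valid(as, dma_addr, dma_len, true)) {
 *         ... reject the request: nothing is mapped there, or an IOMMU
 *             forbids the access ...
 *     }
 */
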
1678 /* address_space_map: map a physical memory region into a host virtual address
1679 *
1680 * May map a subset of the requested range, given by and returned in @plen.
1681 * May return %NULL if resources needed to perform the mapping are exhausted.
1682 * Use only for reads OR writes - not for read-modify-write operations.
1683 * Use cpu_register_map_client() to know when retrying the map operation is
1684 * likely to succeed.
1685 *
1686 * @as: #AddressSpace to be accessed
1687 * @addr: address within that address space
1688 * @plen: pointer to length of buffer; updated on return
1689 * @is_write: indicates the transfer direction
1690 */
1691 void *address_space_map(AddressSpace *as, hwaddr addr,
1692 hwaddr *plen, bool is_write);
1693
1694 /* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
1695 *
1696 * Will also mark the memory as dirty if @is_write == %true. @access_len gives
1697 * the amount of memory that was actually read or written by the caller.
1698 *
1699 * @as: #AddressSpace used
1700 * @buffer: host pointer as returned by address_space_map()
1701 * @len: buffer length as returned by address_space_map()
1702 * @access_len: amount of data actually transferred
1703 * @is_write: indicates the transfer direction
1704 */
1705 void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
1706 int is_write, hwaddr access_len);
1707
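/* Usage sketch (illustrative only): map guest memory for a write-only
 * transfer and release it again. @as, @addr and @len are hypothetical
 * placeholders; note that @plen may come back smaller than @len.
 *
 *     hwaddr plen = len;
 *     void *host = address_space_map(as, addr, &plen, true);
 *     if (host) {
 *         memset(host, 0, plen);
 *         address_space_unmap(as, host, plen, true, plen);
 *     } else {
 *         ... resources exhausted; cpu_register_map_client() reports when
 *             a retry is likely to succeed ...
 *     }
 */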
1708
1709 /* Internal functions, part of the implementation of address_space_read. */
1710 MemTxResult address_space_read_continue(AddressSpace *as, hwaddr addr,
1711 MemTxAttrs attrs, uint8_t *buf,
1712 int len, hwaddr addr1, hwaddr l,
1713 MemoryRegion *mr);
1714 MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr,
1715 MemTxAttrs attrs, uint8_t *buf, int len);
1716 void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);
1717
1718 static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
1719 {
1720 if (is_write) {
1721 return memory_region_is_ram(mr) &&
1722 !mr->readonly && !memory_region_is_ram_device(mr);
1723 } else {
1724 return (memory_region_is_ram(mr) && !memory_region_is_ram_device(mr)) ||
1725 memory_region_is_romd(mr);
1726 }
1727 }
1728
1729 /**
1730 * address_space_read: read from an address space.
1731 *
1732 * Return a MemTxResult indicating whether the operation succeeded
1733 * or failed (e.g. unassigned memory, device rejected the transaction,
1734 * IOMMU fault).
1735 *
1736 * @as: #AddressSpace to be accessed
1737 * @addr: address within that address space
1738 * @attrs: memory transaction attributes
1739 * @buf: buffer with the data transferred
1740 */
1741 static inline __attribute__((__always_inline__))
1742 MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
1743 uint8_t *buf, int len)
1744 {
1745 MemTxResult result = MEMTX_OK;
1746 hwaddr l, addr1;
1747 void *ptr;
1748 MemoryRegion *mr;
1749
1750 if (__builtin_constant_p(len)) {
1751 if (len) {
1752 rcu_read_lock();
1753 l = len;
1754 mr = address_space_translate(as, addr, &addr1, &l, false);
1755 if (len == l && memory_access_is_direct(mr, false)) {
1756 ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
1757 memcpy(buf, ptr, len);
1758 } else {
1759 result = address_space_read_continue(as, addr, attrs, buf, len,
1760 addr1, l, mr);
1761 }
1762 rcu_read_unlock();
1763 }
1764 } else {
1765 result = address_space_read_full(as, addr, attrs, buf, len);
1766 }
1767 return result;
1768 }
1769
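/* Usage sketch (illustrative only): read a fixed-size structure from guest
 * memory. @as, @addr and struct desc are hypothetical placeholders; because
 * sizeof(d) is a compile-time constant, the fast path above applies.
 *
 *     struct desc d;
 *     MemTxResult res = address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
 *                                          (uint8_t *)&d, sizeof(d));
 *     if (res != MEMTX_OK) {
 *         ... handle the failed read ...
 *     }
 */
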
1770 /**
1771 * address_space_read_cached: read from a cached RAM region
1772 *
1773 * @cache: Cached region to be addressed
1774 * @addr: address relative to the base of the RAM region
1775 * @buf: buffer with the data transferred
1776 * @len: length of the data transferred
1777 */
1778 static inline void
1779 address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
1780 void *buf, int len)
1781 {
1782 assert(addr < cache->len && len <= cache->len - addr);
1783 address_space_read(cache->as, cache->xlat + addr, MEMTXATTRS_UNSPECIFIED, buf, len);
1784 }
1785
1786 /**
1787 * address_space_write_cached: write to a cached RAM region
1788 *
1789 * @cache: Cached region to be addressed
1790 * @addr: address relative to the base of the RAM region
1791 * @buf: buffer with the data transferred
1792 * @len: length of the data transferred
1793 */
1794 static inline void
1795 address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
1796 void *buf, int len)
1797 {
1798 assert(addr < cache->len && len <= cache->len - addr);
1799 address_space_write(cache->as, cache->xlat + addr, MEMTXATTRS_UNSPECIFIED, buf, len);
1800 }
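
/* Usage sketch (illustrative only): read-modify-write a small structure
 * through a cache that was initialized with is_write = %true. @cache,
 * @offset and struct elem are hypothetical placeholders; the cache must
 * cover the whole structure.
 *
 *     struct elem e;
 *     address_space_read_cached(&cache, offset, &e, sizeof(e));
 *     e.flags |= 1;
 *     address_space_write_cached(&cache, offset, &e, sizeof(e));
 *     address_space_cache_invalidate(&cache, offset, sizeof(e));
 */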
1801
1802 #endif
1803
1804 #endif