1 /*
2 * Physical memory management API
3 *
4 * Copyright 2011 Red Hat, Inc. and/or its affiliates
5 *
6 * Authors:
7 * Avi Kivity <avi@redhat.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
11 *
12 */
13
14 #ifndef MEMORY_H
15 #define MEMORY_H
16
17 #ifndef CONFIG_USER_ONLY
18
19 #include "exec/cpu-common.h"
20 #include "exec/hwaddr.h"
21 #include "exec/memattrs.h"
22 #include "exec/ramlist.h"
23 #include "qemu/queue.h"
24 #include "qemu/int128.h"
25 #include "qemu/notify.h"
26 #include "qom/object.h"
27 #include "qemu/rcu.h"
28 #include "hw/qdev-core.h"
29
30 #define RAM_ADDR_INVALID (~(ram_addr_t)0)
31
32 #define MAX_PHYS_ADDR_SPACE_BITS 62
33 #define MAX_PHYS_ADDR (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)
34
35 #define TYPE_MEMORY_REGION "qemu:memory-region"
36 #define MEMORY_REGION(obj) \
37 OBJECT_CHECK(MemoryRegion, (obj), TYPE_MEMORY_REGION)
38
39 #define TYPE_IOMMU_MEMORY_REGION "qemu:iommu-memory-region"
40 #define IOMMU_MEMORY_REGION(obj) \
41 OBJECT_CHECK(IOMMUMemoryRegion, (obj), TYPE_IOMMU_MEMORY_REGION)
42 #define IOMMU_MEMORY_REGION_CLASS(klass) \
43 OBJECT_CLASS_CHECK(IOMMUMemoryRegionClass, (klass), \
44 TYPE_IOMMU_MEMORY_REGION)
45 #define IOMMU_MEMORY_REGION_GET_CLASS(obj) \
46 OBJECT_GET_CLASS(IOMMUMemoryRegionClass, (obj), \
47 TYPE_IOMMU_MEMORY_REGION)
48
49 typedef struct MemoryRegionOps MemoryRegionOps;
50 typedef struct MemoryRegionMmio MemoryRegionMmio;
51 typedef struct FlatView FlatView;
52
53 struct MemoryRegionMmio {
54 CPUReadMemoryFunc *read[3];
55 CPUWriteMemoryFunc *write[3];
56 };
57
58 typedef struct IOMMUTLBEntry IOMMUTLBEntry;
59
60 /* See address_space_translate: bit 0 is read, bit 1 is write. */
61 typedef enum {
62 IOMMU_NONE = 0,
63 IOMMU_RO = 1,
64 IOMMU_WO = 2,
65 IOMMU_RW = 3,
66 } IOMMUAccessFlags;
67
68 #define IOMMU_ACCESS_FLAG(r, w) (((r) ? IOMMU_RO : 0) | ((w) ? IOMMU_WO : 0))
69
70 struct IOMMUTLBEntry {
71 AddressSpace *target_as;
72 hwaddr iova;
73 hwaddr translated_addr;
74 hwaddr addr_mask; /* 0xfff = 4k translation */
75 IOMMUAccessFlags perm;
76 };
77
78 /*
79 * Bitmap for different IOMMUNotifier capabilities. Each notifier can
80 * register with one or multiple IOMMU Notifier capability bit(s).
81 */
82 typedef enum {
83 IOMMU_NOTIFIER_NONE = 0,
84 /* Notify cache invalidations */
85 IOMMU_NOTIFIER_UNMAP = 0x1,
86 /* Notify entry changes (newly created entries) */
87 IOMMU_NOTIFIER_MAP = 0x2,
88 } IOMMUNotifierFlag;
89
90 #define IOMMU_NOTIFIER_ALL (IOMMU_NOTIFIER_MAP | IOMMU_NOTIFIER_UNMAP)
91
92 struct IOMMUNotifier;
93 typedef void (*IOMMUNotify)(struct IOMMUNotifier *notifier,
94 IOMMUTLBEntry *data);
95
96 struct IOMMUNotifier {
97 IOMMUNotify notify;
98 IOMMUNotifierFlag notifier_flags;
99 /* Notify for address space range start <= addr <= end */
100 hwaddr start;
101 hwaddr end;
102 QLIST_ENTRY(IOMMUNotifier) node;
103 };
104 typedef struct IOMMUNotifier IOMMUNotifier;
105
106 static inline void iommu_notifier_init(IOMMUNotifier *n, IOMMUNotify fn,
107 IOMMUNotifierFlag flags,
108 hwaddr start, hwaddr end)
109 {
110 n->notify = fn;
111 n->notifier_flags = flags;
112 n->start = start;
113 n->end = end;
114 }
115
116 /*
117 * Memory region callbacks
118 */
119 struct MemoryRegionOps {
120 /* Read from the memory region. @addr is relative to @mr; @size is
121 * in bytes. */
122 uint64_t (*read)(void *opaque,
123 hwaddr addr,
124 unsigned size);
125 /* Write to the memory region. @addr is relative to @mr; @size is
126 * in bytes. */
127 void (*write)(void *opaque,
128 hwaddr addr,
129 uint64_t data,
130 unsigned size);
131
132 MemTxResult (*read_with_attrs)(void *opaque,
133 hwaddr addr,
134 uint64_t *data,
135 unsigned size,
136 MemTxAttrs attrs);
137 MemTxResult (*write_with_attrs)(void *opaque,
138 hwaddr addr,
139 uint64_t data,
140 unsigned size,
141 MemTxAttrs attrs);
142 /* Instruction execution pre-callback:
143 * @addr is the address of the access relative to the @mr.
144 * @size is the size of the area returned by the callback.
145 * @offset is the location of the pointer inside @mr.
146 *
147 * Returns a pointer to a location which contains guest code.
148 */
149 void *(*request_ptr)(void *opaque, hwaddr addr, unsigned *size,
150 unsigned *offset);
151
152 enum device_endian endianness;
153 /* Guest-visible constraints: */
154 struct {
155 /* If nonzero, specify bounds on access sizes beyond which a machine
156 * check is thrown.
157 */
158 unsigned min_access_size;
159 unsigned max_access_size;
160 /* If true, unaligned accesses are supported. Otherwise unaligned
161 * accesses throw machine checks.
162 */
163 bool unaligned;
164 /*
165 * If present, and returns #false, the transaction is not accepted
166 * by the device (and results in machine dependent behaviour such
167 * as a machine check exception).
168 */
169 bool (*accepts)(void *opaque, hwaddr addr,
170 unsigned size, bool is_write);
171 } valid;
172 /* Internal implementation constraints: */
173 struct {
174 /* If nonzero, specifies the minimum size implemented. Smaller sizes
175 * will be rounded upwards and a partial result will be returned.
176 */
177 unsigned min_access_size;
178 /* If nonzero, specifies the maximum size implemented. Larger sizes
179 * will be done as a series of accesses with smaller sizes.
180 */
181 unsigned max_access_size;
182 /* If true, unaligned accesses are supported. Otherwise all accesses
183 * are converted to (possibly multiple) naturally aligned accesses.
184 */
185 bool unaligned;
186 } impl;
187
188 /* If .read and .write are not present, old_mmio may be used for
189 * backwards compatibility with old mmio registration
190 */
191 const MemoryRegionMmio old_mmio;
192 };
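
/*
 * Example: a minimal MemoryRegionOps for a bank of 32-bit registers.
 * This is a sketch; "MyDevState" and its "regs" array are hypothetical:
 *
 *     static uint64_t mydev_read(void *opaque, hwaddr addr, unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *         return s->regs[addr >> 2];
 *     }
 *
 *     static void mydev_write(void *opaque, hwaddr addr,
 *                             uint64_t data, unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *         s->regs[addr >> 2] = data;
 *     }
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *         .valid = { .min_access_size = 4, .max_access_size = 4 },
 *     };
 */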
193
194 typedef struct IOMMUMemoryRegionClass {
195 /* private */
196 struct DeviceClass parent_class;
197
198 /*
199      * Return a TLB entry that contains a given address. @flag should
200      * be the access permission of this translation operation. It can
201      * be set to IOMMU_NONE to mean that no read/write permission checks
202      * are needed, for example during region replay.
203 */
204 IOMMUTLBEntry (*translate)(IOMMUMemoryRegion *iommu, hwaddr addr,
205 IOMMUAccessFlags flag);
206 /* Returns minimum supported page size */
207 uint64_t (*get_min_page_size)(IOMMUMemoryRegion *iommu);
208 /* Called when IOMMU Notifier flag changed */
209 void (*notify_flag_changed)(IOMMUMemoryRegion *iommu,
210 IOMMUNotifierFlag old_flags,
211 IOMMUNotifierFlag new_flags);
212 /* Set this up to provide customized IOMMU replay function */
213 void (*replay)(IOMMUMemoryRegion *iommu, IOMMUNotifier *notifier);
214 } IOMMUMemoryRegionClass;
215
216 typedef struct CoalescedMemoryRange CoalescedMemoryRange;
217 typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;
218
219 struct MemoryRegion {
220 Object parent_obj;
221
222 /* All fields are private - violators will be prosecuted */
223
224 /* The following fields should fit in a cache line */
225 bool romd_mode;
226 bool ram;
227 bool subpage;
228 bool readonly; /* For RAM regions */
229 bool rom_device;
230 bool flush_coalesced_mmio;
231 bool global_locking;
232 uint8_t dirty_log_mask;
233 bool is_iommu;
234 RAMBlock *ram_block;
235 Object *owner;
236
237 const MemoryRegionOps *ops;
238 void *opaque;
239 MemoryRegion *container;
240 Int128 size;
241 hwaddr addr;
242 void (*destructor)(MemoryRegion *mr);
243 uint64_t align;
244 bool terminates;
245 bool ram_device;
246 bool enabled;
247 bool warning_printed; /* For reservations */
248 uint8_t vga_logging_count;
249 MemoryRegion *alias;
250 hwaddr alias_offset;
251 int32_t priority;
252 QTAILQ_HEAD(subregions, MemoryRegion) subregions;
253 QTAILQ_ENTRY(MemoryRegion) subregions_link;
254 QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
255 const char *name;
256 unsigned ioeventfd_nb;
257 MemoryRegionIoeventfd *ioeventfds;
258 };
259
260 struct IOMMUMemoryRegion {
261 MemoryRegion parent_obj;
262
263 QLIST_HEAD(, IOMMUNotifier) iommu_notify;
264 IOMMUNotifierFlag iommu_notify_flags;
265 };
266
267 #define IOMMU_NOTIFIER_FOREACH(n, mr) \
268 QLIST_FOREACH((n), &(mr)->iommu_notify, node)
269
270 /**
271 * MemoryListener: callbacks structure for updates to the physical memory map
272 *
273 * Allows a component to adjust to changes in the guest-visible memory map.
274 * Use with memory_listener_register() and memory_listener_unregister().
275 */
276 struct MemoryListener {
277 void (*begin)(MemoryListener *listener);
278 void (*commit)(MemoryListener *listener);
279 void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
280 void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
281 void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
282 void (*log_start)(MemoryListener *listener, MemoryRegionSection *section,
283 int old, int new);
284 void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
285 int old, int new);
286 void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
287 void (*log_global_start)(MemoryListener *listener);
288 void (*log_global_stop)(MemoryListener *listener);
289 void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
290 bool match_data, uint64_t data, EventNotifier *e);
291 void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
292 bool match_data, uint64_t data, EventNotifier *e);
293 void (*coalesced_mmio_add)(MemoryListener *listener, MemoryRegionSection *section,
294 hwaddr addr, hwaddr len);
295 void (*coalesced_mmio_del)(MemoryListener *listener, MemoryRegionSection *section,
296 hwaddr addr, hwaddr len);
297 /* Lower = earlier (during add), later (during del) */
298 unsigned priority;
299 AddressSpace *address_space;
300 QTAILQ_ENTRY(MemoryListener) link;
301 QTAILQ_ENTRY(MemoryListener) link_as;
302 };
303
304 /**
305 * AddressSpace: describes a mapping of addresses to #MemoryRegion objects
306 */
307 struct AddressSpace {
308 /* All fields are private. */
309 struct rcu_head rcu;
310 char *name;
311 MemoryRegion *root;
312
313 /* Accessed via RCU. */
314 struct FlatView *current_map;
315
316 int ioeventfd_nb;
317 struct MemoryRegionIoeventfd *ioeventfds;
318 QTAILQ_HEAD(memory_listeners_as, MemoryListener) listeners;
319 QTAILQ_ENTRY(AddressSpace) address_spaces_link;
320 };
321
322 FlatView *address_space_to_flatview(AddressSpace *as);
323
324 /**
325 * MemoryRegionSection: describes a fragment of a #MemoryRegion
326 *
327 * @mr: the region, or %NULL if empty
328  * @fv: the flat view of the address space the region is mapped in
329 * @offset_within_region: the beginning of the section, relative to @mr's start
330 * @size: the size of the section; will not exceed @mr's boundaries
331 * @offset_within_address_space: the address of the first byte of the section
332 * relative to the region's address space
333 * @readonly: writes to this section are ignored
334 */
335 struct MemoryRegionSection {
336 MemoryRegion *mr;
337 FlatView *fv;
338 hwaddr offset_within_region;
339 Int128 size;
340 hwaddr offset_within_address_space;
341 bool readonly;
342 };
343
344 /**
345 * memory_region_init: Initialize a memory region
346 *
347 * The region typically acts as a container for other memory regions. Use
348 * memory_region_add_subregion() to add subregions.
349 *
350 * @mr: the #MemoryRegion to be initialized
351 * @owner: the object that tracks the region's reference count
352 * @name: used for debugging; not visible to the user or ABI
353 * @size: size of the region; any subregions beyond this size will be clipped
354 */
355 void memory_region_init(MemoryRegion *mr,
356 struct Object *owner,
357 const char *name,
358 uint64_t size);
359
360 /**
361 * memory_region_ref: Add 1 to a memory region's reference count
362 *
363 * Whenever memory regions are accessed outside the BQL, they need to be
364 * preserved against hot-unplug. MemoryRegions actually do not have their
365 * own reference count; they piggyback on a QOM object, their "owner".
366 * This function adds a reference to the owner.
367 *
368 * All MemoryRegions must have an owner if they can disappear, even if the
369 * device they belong to operates exclusively under the BQL. This is because
370 * the region could be returned at any time by memory_region_find, and this
371 * is usually under guest control.
372 *
373 * @mr: the #MemoryRegion
374 */
375 void memory_region_ref(MemoryRegion *mr);
376
377 /**
378  * memory_region_unref: Subtract 1 from a memory region's reference count
379 *
380 * Whenever memory regions are accessed outside the BQL, they need to be
381 * preserved against hot-unplug. MemoryRegions actually do not have their
382 * own reference count; they piggyback on a QOM object, their "owner".
383 * This function removes a reference to the owner and possibly destroys it.
384 *
385 * @mr: the #MemoryRegion
386 */
387 void memory_region_unref(MemoryRegion *mr);
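
/*
 * Example (sketch): pinning a region while it is used outside the BQL:
 *
 *     memory_region_ref(mr);
 *     ... access mr from another thread ...
 *     memory_region_unref(mr);
 */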
388
389 /**
390 * memory_region_init_io: Initialize an I/O memory region.
391 *
392 * Accesses into the region will cause the callbacks in @ops to be called.
393  * If @size is nonzero, subregions will be clipped to @size.
394 *
395 * @mr: the #MemoryRegion to be initialized.
396 * @owner: the object that tracks the region's reference count
397 * @ops: a structure containing read and write callbacks to be used when
398 * I/O is performed on the region.
399 * @opaque: passed to the read and write callbacks of the @ops structure.
400 * @name: used for debugging; not visible to the user or ABI
401 * @size: size of the region.
402 */
403 void memory_region_init_io(MemoryRegion *mr,
404 struct Object *owner,
405 const MemoryRegionOps *ops,
406 void *opaque,
407 const char *name,
408 uint64_t size);
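
/*
 * Example (sketch, reusing the hypothetical mydev_ops above; assumes
 * sysbus_init_mmio() from "hw/sysbus.h"):
 *
 *     memory_region_init_io(&s->iomem, OBJECT(s), &mydev_ops, s,
 *                           "mydev-regs", 0x1000);
 *     sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->iomem);
 */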
409
410 /**
411 * memory_region_init_ram_nomigrate: Initialize RAM memory region. Accesses
412 * into the region will modify memory
413 * directly.
414 *
415 * @mr: the #MemoryRegion to be initialized.
416 * @owner: the object that tracks the region's reference count
417  * @name: Region name, becomes part of RAMBlock name used in migration stream;
418  *        must be unique within any device
419 * @size: size of the region.
420 * @errp: pointer to Error*, to store an error if it happens.
421 *
422 * Note that this function does not do anything to cause the data in the
423 * RAM memory region to be migrated; that is the responsibility of the caller.
424 */
425 void memory_region_init_ram_nomigrate(MemoryRegion *mr,
426 struct Object *owner,
427 const char *name,
428 uint64_t size,
429 Error **errp);
430
431 /**
432 * memory_region_init_resizeable_ram: Initialize memory region with resizeable
433 * RAM. Accesses into the region will
434 * modify memory directly. Only an initial
435 * portion of this RAM is actually used.
436 * The used size can change across reboots.
437 *
438 * @mr: the #MemoryRegion to be initialized.
439 * @owner: the object that tracks the region's reference count
440  * @name: Region name, becomes part of RAMBlock name used in migration stream;
441  *        must be unique within any device
442 * @size: used size of the region.
443 * @max_size: max size of the region.
444 * @resized: callback to notify owner about used size change.
445 * @errp: pointer to Error*, to store an error if it happens.
446 *
447 * Note that this function does not do anything to cause the data in the
448 * RAM memory region to be migrated; that is the responsibility of the caller.
449 */
450 void memory_region_init_resizeable_ram(MemoryRegion *mr,
451 struct Object *owner,
452 const char *name,
453 uint64_t size,
454 uint64_t max_size,
455 void (*resized)(const char*,
456 uint64_t length,
457 void *host),
458 Error **errp);
459 #ifdef __linux__
460 /**
461 * memory_region_init_ram_from_file: Initialize RAM memory region with a
462 * mmap-ed backend.
463 *
464 * @mr: the #MemoryRegion to be initialized.
465 * @owner: the object that tracks the region's reference count
466  * @name: Region name, becomes part of RAMBlock name used in migration stream;
467  *        must be unique within any device
468 * @size: size of the region.
469  * @share: %true if memory must be mmapped with the MAP_SHARED flag
470 * @path: the path in which to allocate the RAM.
471 * @errp: pointer to Error*, to store an error if it happens.
472 *
473 * Note that this function does not do anything to cause the data in the
474 * RAM memory region to be migrated; that is the responsibility of the caller.
475 */
476 void memory_region_init_ram_from_file(MemoryRegion *mr,
477 struct Object *owner,
478 const char *name,
479 uint64_t size,
480 bool share,
481 const char *path,
482 Error **errp);
483
484 /**
485 * memory_region_init_ram_from_fd: Initialize RAM memory region with a
486 * mmap-ed backend.
487 *
488 * @mr: the #MemoryRegion to be initialized.
489 * @owner: the object that tracks the region's reference count
490 * @name: the name of the region.
491 * @size: size of the region.
492  * @share: %true if memory must be mmapped with the MAP_SHARED flag
493 * @fd: the fd to mmap.
494 * @errp: pointer to Error*, to store an error if it happens.
495 *
496 * Note that this function does not do anything to cause the data in the
497 * RAM memory region to be migrated; that is the responsibility of the caller.
498 */
499 void memory_region_init_ram_from_fd(MemoryRegion *mr,
500 struct Object *owner,
501 const char *name,
502 uint64_t size,
503 bool share,
504 int fd,
505 Error **errp);
506 #endif
507
508 /**
509 * memory_region_init_ram_ptr: Initialize RAM memory region from a
510 * user-provided pointer. Accesses into the
511 * region will modify memory directly.
512 *
513 * @mr: the #MemoryRegion to be initialized.
514 * @owner: the object that tracks the region's reference count
515  * @name: Region name, becomes part of RAMBlock name used in migration stream;
516  *        must be unique within any device
517 * @size: size of the region.
518 * @ptr: memory to be mapped; must contain at least @size bytes.
519 *
520 * Note that this function does not do anything to cause the data in the
521 * RAM memory region to be migrated; that is the responsibility of the caller.
522 */
523 void memory_region_init_ram_ptr(MemoryRegion *mr,
524 struct Object *owner,
525 const char *name,
526 uint64_t size,
527 void *ptr);
528
529 /**
530 * memory_region_init_ram_device_ptr: Initialize RAM device memory region from
531 * a user-provided pointer.
532 *
533 * A RAM device represents a mapping to a physical device, such as to a PCI
534  * MMIO BAR of a vfio-pci assigned device.  The memory region may be mapped
535 * into the VM address space and access to the region will modify memory
536 * directly. However, the memory region should not be included in a memory
537 * dump (device may not be enabled/mapped at the time of the dump), and
538 * operations incompatible with manipulating MMIO should be avoided. Replaces
539 * skip_dump flag.
540 *
541 * @mr: the #MemoryRegion to be initialized.
542 * @owner: the object that tracks the region's reference count
543 * @name: the name of the region.
544 * @size: size of the region.
545 * @ptr: memory to be mapped; must contain at least @size bytes.
546 *
547 * Note that this function does not do anything to cause the data in the
548 * RAM memory region to be migrated; that is the responsibility of the caller.
549 * (For RAM device memory regions, migrating the contents rarely makes sense.)
550 */
551 void memory_region_init_ram_device_ptr(MemoryRegion *mr,
552 struct Object *owner,
553 const char *name,
554 uint64_t size,
555 void *ptr);
556
557 /**
558 * memory_region_init_alias: Initialize a memory region that aliases all or a
559 * part of another memory region.
560 *
561 * @mr: the #MemoryRegion to be initialized.
562 * @owner: the object that tracks the region's reference count
563 * @name: used for debugging; not visible to the user or ABI
564 * @orig: the region to be referenced; @mr will be equivalent to
565 * @orig between @offset and @offset + @size - 1.
566 * @offset: start of the section in @orig to be referenced.
567 * @size: size of the region.
568 */
569 void memory_region_init_alias(MemoryRegion *mr,
570 struct Object *owner,
571 const char *name,
572 MemoryRegion *orig,
573 hwaddr offset,
574 uint64_t size);
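
/*
 * Example (sketch): mirroring the first megabyte of "ram" at a second
 * guest-physical address inside the "sysmem" container (both regions
 * are assumed to exist):
 *
 *     memory_region_init_alias(&s->ram_alias, OBJECT(s), "ram-alias",
 *                              ram, 0, 0x100000);
 *     memory_region_add_subregion(sysmem, 0x80000000, &s->ram_alias);
 */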
575
576 /**
577 * memory_region_init_rom_nomigrate: Initialize a ROM memory region.
578 *
579 * This has the same effect as calling memory_region_init_ram_nomigrate()
580 * and then marking the resulting region read-only with
581 * memory_region_set_readonly().
582 *
583 * Note that this function does not do anything to cause the data in the
584 * RAM side of the memory region to be migrated; that is the responsibility
585 * of the caller.
586 *
587 * @mr: the #MemoryRegion to be initialized.
588 * @owner: the object that tracks the region's reference count
589  * @name: Region name, becomes part of RAMBlock name used in migration stream;
590  *        must be unique within any device
591 * @size: size of the region.
592 * @errp: pointer to Error*, to store an error if it happens.
593 */
594 void memory_region_init_rom_nomigrate(MemoryRegion *mr,
595 struct Object *owner,
596 const char *name,
597 uint64_t size,
598 Error **errp);
599
600 /**
601 * memory_region_init_rom_device_nomigrate: Initialize a ROM memory region.
602 * Writes are handled via callbacks.
603 *
604 * Note that this function does not do anything to cause the data in the
605 * RAM side of the memory region to be migrated; that is the responsibility
606 * of the caller.
607 *
608 * @mr: the #MemoryRegion to be initialized.
609 * @owner: the object that tracks the region's reference count
610 * @ops: callbacks for write access handling (must not be NULL).
611  * @name: Region name, becomes part of RAMBlock name used in migration stream;
612  *        must be unique within any device
613 * @size: size of the region.
614 * @errp: pointer to Error*, to store an error if it happens.
615 */
616 void memory_region_init_rom_device_nomigrate(MemoryRegion *mr,
617 struct Object *owner,
618 const MemoryRegionOps *ops,
619 void *opaque,
620 const char *name,
621 uint64_t size,
622 Error **errp);
623
624 /**
625 * memory_region_init_reservation: Initialize a memory region that reserves
626 * I/O space.
627 *
628  * A reservation region primarily serves debugging purposes.  It claims I/O
629 * space that is not supposed to be handled by QEMU itself. Any access via
630 * the memory API will cause an abort().
631 * This function is deprecated. Use memory_region_init_io() with NULL
632 * callbacks instead.
633 *
634 * @mr: the #MemoryRegion to be initialized
635 * @owner: the object that tracks the region's reference count
636 * @name: used for debugging; not visible to the user or ABI
637 * @size: size of the region.
638 */
639 static inline void memory_region_init_reservation(MemoryRegion *mr,
640 Object *owner,
641 const char *name,
642 uint64_t size)
643 {
644 memory_region_init_io(mr, owner, NULL, mr, name, size);
645 }
646
647 /**
648 * memory_region_init_iommu: Initialize a memory region of a custom type
649 * that translates addresses
650 *
651 * An IOMMU region translates addresses and forwards accesses to a target
652 * memory region.
653 *
654  * @_iommu_mr: the #IOMMUMemoryRegion to be initialized
655  * @instance_size: the IOMMUMemoryRegion subclass instance size
656  * @mrtypename: the QOM type name of the #IOMMUMemoryRegion
657  *              subclass
658  * @owner: the object that tracks the region's reference count
659  * @name: used for debugging; not visible to the user or ABI
660  * @size: size of the region.
661 */
662 void memory_region_init_iommu(void *_iommu_mr,
663 size_t instance_size,
664 const char *mrtypename,
665 Object *owner,
666 const char *name,
667 uint64_t size);
668
669 /**
670 * memory_region_init_ram - Initialize RAM memory region. Accesses into the
671 * region will modify memory directly.
672 *
673 * @mr: the #MemoryRegion to be initialized
674 * @owner: the object that tracks the region's reference count (must be
675 * TYPE_DEVICE or a subclass of TYPE_DEVICE, or NULL)
676 * @name: name of the memory region
677 * @size: size of the region in bytes
678 * @errp: pointer to Error*, to store an error if it happens.
679 *
680 * This function allocates RAM for a board model or device, and
681 * arranges for it to be migrated (by calling vmstate_register_ram()
682 * if @owner is a DeviceState, or vmstate_register_ram_global() if
683 * @owner is NULL).
684 *
685 * TODO: Currently we restrict @owner to being either NULL (for
686 * global RAM regions with no owner) or devices, so that we can
687 * give the RAM block a unique name for migration purposes.
688 * We should lift this restriction and allow arbitrary Objects.
689 * If you pass a non-NULL non-device @owner then we will assert.
690 */
691 void memory_region_init_ram(MemoryRegion *mr,
692 struct Object *owner,
693 const char *name,
694 uint64_t size,
695 Error **errp);
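
/*
 * Example (sketch): a board allocating migratable RAM and mapping it
 * at guest address 0 (assumes &error_fatal from "qapi/error.h" and
 * get_system_memory() from "exec/address-spaces.h"):
 *
 *     memory_region_init_ram(ram, NULL, "board.ram", ram_size,
 *                            &error_fatal);
 *     memory_region_add_subregion(get_system_memory(), 0, ram);
 */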
696
697 /**
698 * memory_region_init_rom: Initialize a ROM memory region.
699 *
700 * This has the same effect as calling memory_region_init_ram()
701 * and then marking the resulting region read-only with
702 * memory_region_set_readonly(). This includes arranging for the
703 * contents to be migrated.
704 *
705 * TODO: Currently we restrict @owner to being either NULL (for
706 * global RAM regions with no owner) or devices, so that we can
707 * give the RAM block a unique name for migration purposes.
708 * We should lift this restriction and allow arbitrary Objects.
709 * If you pass a non-NULL non-device @owner then we will assert.
710 *
711 * @mr: the #MemoryRegion to be initialized.
712 * @owner: the object that tracks the region's reference count
713  * @name: Region name, becomes part of RAMBlock name used in migration stream;
714  *        must be unique within any device
715 * @size: size of the region.
716 * @errp: pointer to Error*, to store an error if it happens.
717 */
718 void memory_region_init_rom(MemoryRegion *mr,
719 struct Object *owner,
720 const char *name,
721 uint64_t size,
722 Error **errp);
723
724 /**
725 * memory_region_init_rom_device: Initialize a ROM memory region.
726 * Writes are handled via callbacks.
727 *
728 * This function initializes a memory region backed by RAM for reads
729 * and callbacks for writes, and arranges for the RAM backing to
730 * be migrated (by calling vmstate_register_ram()
731 * if @owner is a DeviceState, or vmstate_register_ram_global() if
732 * @owner is NULL).
733 *
734 * TODO: Currently we restrict @owner to being either NULL (for
735 * global RAM regions with no owner) or devices, so that we can
736 * give the RAM block a unique name for migration purposes.
737 * We should lift this restriction and allow arbitrary Objects.
738 * If you pass a non-NULL non-device @owner then we will assert.
739 *
740 * @mr: the #MemoryRegion to be initialized.
741 * @owner: the object that tracks the region's reference count
742 * @ops: callbacks for write access handling (must not be NULL).
743  * @name: Region name, becomes part of RAMBlock name used in migration stream;
744  *        must be unique within any device
745 * @size: size of the region.
746 * @errp: pointer to Error*, to store an error if it happens.
747 */
748 void memory_region_init_rom_device(MemoryRegion *mr,
749 struct Object *owner,
750 const MemoryRegionOps *ops,
751 void *opaque,
752 const char *name,
753 uint64_t size,
754 Error **errp);
755
756
757 /**
758 * memory_region_owner: get a memory region's owner.
759 *
760 * @mr: the memory region being queried.
761 */
762 struct Object *memory_region_owner(MemoryRegion *mr);
763
764 /**
765 * memory_region_size: get a memory region's size.
766 *
767 * @mr: the memory region being queried.
768 */
769 uint64_t memory_region_size(MemoryRegion *mr);
770
771 /**
772 * memory_region_is_ram: check whether a memory region is random access
773 *
774  * Returns %true if the memory region is random access.
775 *
776 * @mr: the memory region being queried
777 */
778 static inline bool memory_region_is_ram(MemoryRegion *mr)
779 {
780 return mr->ram;
781 }
782
783 /**
784 * memory_region_is_ram_device: check whether a memory region is a ram device
785 *
786  * Returns %true if the memory region is a device-backed RAM region
787 *
788 * @mr: the memory region being queried
789 */
790 bool memory_region_is_ram_device(MemoryRegion *mr);
791
792 /**
793 * memory_region_is_romd: check whether a memory region is in ROMD mode
794 *
795 * Returns %true if a memory region is a ROM device and currently set to allow
796 * direct reads.
797 *
798 * @mr: the memory region being queried
799 */
800 static inline bool memory_region_is_romd(MemoryRegion *mr)
801 {
802 return mr->rom_device && mr->romd_mode;
803 }
804
805 /**
806 * memory_region_get_iommu: check whether a memory region is an iommu
807 *
808 * Returns pointer to IOMMUMemoryRegion if a memory region is an iommu,
809 * otherwise NULL.
810 *
811 * @mr: the memory region being queried
812 */
813 static inline IOMMUMemoryRegion *memory_region_get_iommu(MemoryRegion *mr)
814 {
815 if (mr->alias) {
816 return memory_region_get_iommu(mr->alias);
817 }
818 if (mr->is_iommu) {
819 return (IOMMUMemoryRegion *) mr;
820 }
821 return NULL;
822 }
823
824 /**
825 * memory_region_get_iommu_class_nocheck: returns iommu memory region class
826 * if an iommu or NULL if not
827 *
828  * Returns a pointer to the IOMMUMemoryRegionClass if the memory region is
829  * an iommu, otherwise NULL. This is a fast path avoiding QOM checks; use with caution.
830 *
831  * @iommu_mr: the memory region being queried
832 */
833 static inline IOMMUMemoryRegionClass *memory_region_get_iommu_class_nocheck(
834 IOMMUMemoryRegion *iommu_mr)
835 {
836 return (IOMMUMemoryRegionClass *) (((Object *)iommu_mr)->class);
837 }
838
839 #define memory_region_is_iommu(mr) (memory_region_get_iommu(mr) != NULL)
840
841 /**
842 * memory_region_iommu_get_min_page_size: get minimum supported page size
843 * for an iommu
844 *
845 * Returns minimum supported page size for an iommu.
846 *
847 * @iommu_mr: the memory region being queried
848 */
849 uint64_t memory_region_iommu_get_min_page_size(IOMMUMemoryRegion *iommu_mr);
850
851 /**
852 * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
853 *
854 * The notification type will be decided by entry.perm bits:
855 *
856 * - For UNMAP (cache invalidation) notifies: set entry.perm to IOMMU_NONE.
857 * - For MAP (newly added entry) notifies: set entry.perm to the
858 * permission of the page (which is definitely !IOMMU_NONE).
859 *
860 * Note: for any IOMMU implementation, an in-place mapping change
861 * should be notified with an UNMAP followed by a MAP.
862 *
863 * @iommu_mr: the memory region that was changed
864 * @entry: the new entry in the IOMMU translation table. The entry
865 * replaces all old entries for the same virtual I/O address range.
866 * Deleted entries have .@perm == 0.
867 */
868 void memory_region_notify_iommu(IOMMUMemoryRegion *iommu_mr,
869 IOMMUTLBEntry entry);
870
871 /**
872 * memory_region_notify_one: notify a change in an IOMMU translation
873 * entry to a single notifier
874 *
875 * This works just like memory_region_notify_iommu(), but it only
876 * notifies a specific notifier, not all of them.
877 *
878 * @notifier: the notifier to be notified
879 * @entry: the new entry in the IOMMU translation table. The entry
880 * replaces all old entries for the same virtual I/O address range.
881 * Deleted entries have .@perm == 0.
882 */
883 void memory_region_notify_one(IOMMUNotifier *notifier,
884 IOMMUTLBEntry *entry);
885
886 /**
887 * memory_region_register_iommu_notifier: register a notifier for changes to
888 * IOMMU translation entries.
889 *
890 * @mr: the memory region to observe
891 * @n: the IOMMUNotifier to be added; the notify callback receives a
892 * pointer to an #IOMMUTLBEntry as the opaque value; the pointer
893 * ceases to be valid on exit from the notifier.
894 */
895 void memory_region_register_iommu_notifier(MemoryRegion *mr,
896 IOMMUNotifier *n);
897
898 /**
899 * memory_region_iommu_replay: replay existing IOMMU translations to
900 * a notifier with the minimum page granularity returned by
901  * the IOMMUMemoryRegionClass get_min_page_size() callback.
902 *
903 * @iommu_mr: the memory region to observe
904 * @n: the notifier to which to replay iommu mappings
905 */
906 void memory_region_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n);
907
908 /**
909 * memory_region_iommu_replay_all: replay existing IOMMU translations
910 * to all the notifiers registered.
911 *
912 * @iommu_mr: the memory region to observe
913 */
914 void memory_region_iommu_replay_all(IOMMUMemoryRegion *iommu_mr);
915
916 /**
917 * memory_region_unregister_iommu_notifier: unregister a notifier for
918 * changes to IOMMU translation entries.
919 *
920  * @mr: the memory region which was observed and for which notify_stopped()
921 * needs to be called
922 * @n: the notifier to be removed.
923 */
924 void memory_region_unregister_iommu_notifier(MemoryRegion *mr,
925 IOMMUNotifier *n);
926
927 /**
928 * memory_region_name: get a memory region's name
929 *
930 * Returns the string that was used to initialize the memory region.
931 *
932 * @mr: the memory region being queried
933 */
934 const char *memory_region_name(const MemoryRegion *mr);
935
936 /**
937 * memory_region_is_logging: return whether a memory region is logging writes
938 *
939 * Returns %true if the memory region is logging writes for the given client
940 *
941 * @mr: the memory region being queried
942 * @client: the client being queried
943 */
944 bool memory_region_is_logging(MemoryRegion *mr, uint8_t client);
945
946 /**
947 * memory_region_get_dirty_log_mask: return the clients for which a
948 * memory region is logging writes.
949 *
950 * Returns a bitmap of clients, in which the DIRTY_MEMORY_* constants
951 * are the bit indices.
952 *
953 * @mr: the memory region being queried
954 */
955 uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr);
956
957 /**
958 * memory_region_is_rom: check whether a memory region is ROM
959 *
960  * Returns %true if the memory region is read-only memory.
961 *
962 * @mr: the memory region being queried
963 */
964 static inline bool memory_region_is_rom(MemoryRegion *mr)
965 {
966 return mr->ram && mr->readonly;
967 }
968
969
970 /**
971 * memory_region_get_fd: Get a file descriptor backing a RAM memory region.
972 *
973 * Returns a file descriptor backing a file-based RAM memory region,
974 * or -1 if the region is not a file-based RAM memory region.
975 *
976 * @mr: the RAM or alias memory region being queried.
977 */
978 int memory_region_get_fd(MemoryRegion *mr);
979
980 /**
981 * memory_region_from_host: Convert a pointer into a RAM memory region
982 * and an offset within it.
983 *
984 * Given a host pointer inside a RAM memory region (created with
985 * memory_region_init_ram() or memory_region_init_ram_ptr()), return
986 * the MemoryRegion and the offset within it.
987 *
988 * Use with care; by the time this function returns, the returned pointer is
989 * not protected by RCU anymore. If the caller is not within an RCU critical
990 * section and does not hold the iothread lock, it must have other means of
991 * protecting the pointer, such as a reference to the region that includes
992 * the incoming ram_addr_t.
993 *
994 * @mr: the memory region being queried.
995 */
996 MemoryRegion *memory_region_from_host(void *ptr, ram_addr_t *offset);
997
998 /**
999 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
1000 *
1001 * Returns a host pointer to a RAM memory region (created with
1002 * memory_region_init_ram() or memory_region_init_ram_ptr()).
1003 *
1004 * Use with care; by the time this function returns, the returned pointer is
1005 * not protected by RCU anymore. If the caller is not within an RCU critical
1006 * section and does not hold the iothread lock, it must have other means of
1007 * protecting the pointer, such as a reference to the region that includes
1008 * the incoming ram_addr_t.
1009 *
1010 * @mr: the memory region being queried.
1011 */
1012 void *memory_region_get_ram_ptr(MemoryRegion *mr);
1013
1014 /* memory_region_ram_resize: Resize a RAM region.
1015 *
1016  * Only legal before the guest might have detected the memory size: e.g. on
1017 * incoming migration, or right after reset.
1018 *
1019 * @mr: a memory region created with @memory_region_init_resizeable_ram.
1020  * @newsize: the new size of the region
1021 * @errp: pointer to Error*, to store an error if it happens.
1022 */
1023 void memory_region_ram_resize(MemoryRegion *mr, ram_addr_t newsize,
1024 Error **errp);
1025
1026 /**
1027 * memory_region_set_log: Turn dirty logging on or off for a region.
1028 *
1029 * Turns dirty logging on or off for a specified client (display, migration).
1030 * Only meaningful for RAM regions.
1031 *
1032 * @mr: the memory region being updated.
1033 * @log: whether dirty logging is to be enabled or disabled.
1034 * @client: the user of the logging information; %DIRTY_MEMORY_VGA only.
1035 */
1036 void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
1037
1038 /**
1039 * memory_region_get_dirty: Check whether a range of bytes is dirty
1040 * for a specified client.
1041 *
1042 * Checks whether a range of bytes has been written to since the last
1043 * call to memory_region_reset_dirty() with the same @client. Dirty logging
1044 * must be enabled.
1045 *
1046 * @mr: the memory region being queried.
1047 * @addr: the address (relative to the start of the region) being queried.
1048 * @size: the size of the range being queried.
1049 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
1050 * %DIRTY_MEMORY_VGA.
1051 */
1052 bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
1053 hwaddr size, unsigned client);
1054
1055 /**
1056 * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
1057 *
1058 * Marks a range of bytes as dirty, after it has been dirtied outside
1059 * guest code.
1060 *
1061 * @mr: the memory region being dirtied.
1062 * @addr: the address (relative to the start of the region) being dirtied.
1063 * @size: size of the range being dirtied.
1064 */
1065 void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
1066 hwaddr size);
1067
1068 /**
1069 * memory_region_test_and_clear_dirty: Check whether a range of bytes is dirty
1070 * for a specified client. It clears them.
1071 *
1072 * Checks whether a range of bytes has been written to since the last
1073 * call to memory_region_reset_dirty() with the same @client. Dirty logging
1074 * must be enabled.
1075 *
1076 * @mr: the memory region being queried.
1077 * @addr: the address (relative to the start of the region) being queried.
1078 * @size: the size of the range being queried.
1079 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
1080 * %DIRTY_MEMORY_VGA.
1081 */
1082 bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
1083 hwaddr size, unsigned client);
1084
1085 /**
1086 * memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
1087 * bitmap and clear it.
1088 *
1089 * Creates a snapshot of the dirty bitmap, clears the dirty bitmap and
1090 * returns the snapshot. The snapshot can then be used to query dirty
1091 * status, using memory_region_snapshot_get_dirty. Unlike
1092  * memory_region_test_and_clear_dirty() this allows querying the same
1093 * page multiple times, which is especially useful for display updates
1094 * where the scanlines often are not page aligned.
1095 *
1096  * The dirty bitmap region which gets copied into the snapshot (and
1097  * cleared afterwards) can be larger than requested.  The boundaries
1098  * are rounded up/down so complete bitmap longs (covering 64 pages on
1099  * 64-bit hosts) can be copied over into the bitmap snapshot.  This
1100 * isn't a problem for display updates as the extra pages are outside
1101 * the visible area, and in case the visible area changes a full
1102 * display redraw is due anyway. Should other use cases for this
1103 * function emerge we might have to revisit this implementation
1104 * detail.
1105 *
1106  * Use g_free() to release the DirtyBitmapSnapshot.
1107 *
1108 * @mr: the memory region being queried.
1109 * @addr: the address (relative to the start of the region) being queried.
1110 * @size: the size of the range being queried.
1111 * @client: the user of the logging information; typically %DIRTY_MEMORY_VGA.
1112 */
1113 DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
1114 hwaddr addr,
1115 hwaddr size,
1116 unsigned client);
1117
1118 /**
1119 * memory_region_snapshot_get_dirty: Check whether a range of bytes is dirty
1120 * in the specified dirty bitmap snapshot.
1121 *
1122 * @mr: the memory region being queried.
1123 * @snap: the dirty bitmap snapshot
1124 * @addr: the address (relative to the start of the region) being queried.
1125 * @size: the size of the range being queried.
1126 */
1127 bool memory_region_snapshot_get_dirty(MemoryRegion *mr,
1128 DirtyBitmapSnapshot *snap,
1129 hwaddr addr, hwaddr size);
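
/*
 * Example (sketch): a display device redrawing only scanlines that were
 * dirtied since the last refresh, following the pattern described above
 * ("vram", "stride" and the loop bounds are assumed):
 *
 *     DirtyBitmapSnapshot *snap;
 *     snap = memory_region_snapshot_and_clear_dirty(vram, 0, vram_size,
 *                                                   DIRTY_MEMORY_VGA);
 *     for (y = 0; y < height; y++) {
 *         if (memory_region_snapshot_get_dirty(vram, snap,
 *                                              y * stride, stride)) {
 *             ... redraw scanline y ...
 *         }
 *     }
 *     g_free(snap);
 */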
1130
1131 /**
1132 * memory_region_sync_dirty_bitmap: Synchronize a region's dirty bitmap with
1133 * any external TLBs (e.g. kvm)
1134 *
1135 * Flushes dirty information from accelerators such as kvm and vhost-net
1136 * and makes it available to users of the memory API.
1137 *
1138 * @mr: the region being flushed.
1139 */
1140 void memory_region_sync_dirty_bitmap(MemoryRegion *mr);
1141
1142 /**
1143 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
1144 * client.
1145 *
1146 * Marks a range of pages as no longer dirty.
1147 *
1148 * @mr: the region being updated.
1149 * @addr: the start of the subrange being cleaned.
1150 * @size: the size of the subrange being cleaned.
1151 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
1152 * %DIRTY_MEMORY_VGA.
1153 */
1154 void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
1155 hwaddr size, unsigned client);
1156
1157 /**
1158 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
1159 *
1160 * Allows a memory region to be marked as read-only (turning it into a ROM).
1161  * Only useful on RAM regions.
1162 *
1163 * @mr: the region being updated.
1164  * @readonly: whether the region is to be ROM or RAM.
1165 */
1166 void memory_region_set_readonly(MemoryRegion *mr, bool readonly);
1167
1168 /**
1169 * memory_region_rom_device_set_romd: enable/disable ROMD mode
1170 *
1171  * Allows a ROM device (initialized with memory_region_init_rom_device()) to
1172  * be set to ROMD mode (default) or MMIO mode.  When it is in ROMD mode, the
1173 * device is mapped to guest memory and satisfies read access directly.
1174 * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
1175 * Writes are always handled by the #MemoryRegion.write function.
1176 *
1177 * @mr: the memory region to be updated
1178 * @romd_mode: %true to put the region into ROMD mode
1179 */
1180 void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);
1181
1182 /**
1183 * memory_region_set_coalescing: Enable memory coalescing for the region.
1184 *
1185  * Enables writes to a region to be queued for later processing. MMIO ->write
1186 * callbacks may be delayed until a non-coalesced MMIO is issued.
1187 * Only useful for IO regions. Roughly similar to write-combining hardware.
1188 *
1189 * @mr: the memory region to be write coalesced
1190 */
1191 void memory_region_set_coalescing(MemoryRegion *mr);
1192
1193 /**
1194 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
1195 * a region.
1196 *
1197 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
1198  * Multiple calls can be issued to coalesce disjoint ranges.
1199 *
1200 * @mr: the memory region to be updated.
1201 * @offset: the start of the range within the region to be coalesced.
1202 * @size: the size of the subrange to be coalesced.
1203 */
1204 void memory_region_add_coalescing(MemoryRegion *mr,
1205 hwaddr offset,
1206 uint64_t size);
1207
1208 /**
1209 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
1210 *
1211 * Disables any coalescing caused by memory_region_set_coalescing() or
1212  * memory_region_add_coalescing(). Roughly equivalent to uncacheable memory
1213 * hardware.
1214 *
1215 * @mr: the memory region to be updated.
1216 */
1217 void memory_region_clear_coalescing(MemoryRegion *mr);
1218
1219 /**
1220 * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
1221 * accesses.
1222 *
1223  * Ensures that pending coalesced MMIO requests are flushed before the memory
1224 * region is accessed. This property is automatically enabled for all regions
1225 * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
1226 *
1227 * @mr: the memory region to be updated.
1228 */
1229 void memory_region_set_flush_coalesced(MemoryRegion *mr);
1230
1231 /**
1232 * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
1233 * accesses.
1234 *
1235 * Clear the automatic coalesced MMIO flushing enabled via
1236 * memory_region_set_flush_coalesced. Note that this service has no effect on
1237 * memory regions that have MMIO coalescing enabled for themselves. For them,
1238 * automatic flushing will stop once coalescing is disabled.
1239 *
1240 * @mr: the memory region to be updated.
1241 */
1242 void memory_region_clear_flush_coalesced(MemoryRegion *mr);
1243
1244 /**
1245 * memory_region_set_global_locking: Declares the access processing requires
1246 * QEMU's global lock.
1247 *
1248 * When this is invoked, accesses to the memory region will be processed while
1249 * holding the global lock of QEMU. This is the default behavior of memory
1250 * regions.
1251 *
1252 * @mr: the memory region to be updated.
1253 */
1254 void memory_region_set_global_locking(MemoryRegion *mr);
1255
1256 /**
1257 * memory_region_clear_global_locking: Declares that access processing does
1258 * not depend on the QEMU global lock.
1259 *
1260 * By clearing this property, accesses to the memory region will be processed
1261  * outside of QEMU's global lock (unless the lock is already held when issuing the
1262 * access request). In this case, the device model implementing the access
1263 * handlers is responsible for synchronization of concurrency.
1264 *
1265 * @mr: the memory region to be updated.
1266 */
1267 void memory_region_clear_global_locking(MemoryRegion *mr);
1268
1269 /**
1270 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
1271 * is written to a location.
1272 *
1273 * Marks a word in an IO region (initialized with memory_region_init_io())
1274 * as a trigger for an eventfd event. The I/O callback will not be called.
1275 * The caller must be prepared to handle failure (that is, take the required
1276 * action if the callback _is_ called).
1277 *
1278 * @mr: the memory region being updated.
1279 * @addr: the address within @mr that is to be monitored
1280 * @size: the size of the access to trigger the eventfd
1281 * @match_data: whether to match against @data, instead of just @addr
1282 * @data: the data to match against the guest write
1283  * @e: the #EventNotifier to be triggered when @addr, @size, and @data all match.
1284 **/
1285 void memory_region_add_eventfd(MemoryRegion *mr,
1286 hwaddr addr,
1287 unsigned size,
1288 bool match_data,
1289 uint64_t data,
1290 EventNotifier *e);
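
/*
 * Example (sketch): a doorbell register that kicks an iothread via an
 * EventNotifier instead of taking a heavyweight exit into the device
 * model in the VCPU thread ("DOORBELL_OFFSET" and "s->doorbell" are
 * hypothetical):
 *
 *     memory_region_add_eventfd(&s->iomem, DOORBELL_OFFSET, 4,
 *                               false, 0, &s->doorbell);
 */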
1291
1292 /**
1293 * memory_region_del_eventfd: Cancel an eventfd.
1294 *
1295 * Cancels an eventfd trigger requested by a previous
1296 * memory_region_add_eventfd() call.
1297 *
1298 * @mr: the memory region being updated.
1299 * @addr: the address within @mr that is to be monitored
1300 * @size: the size of the access to trigger the eventfd
1301 * @match_data: whether to match against @data, instead of just @addr
1302 * @data: the data to match against the guest write
1303  * @e: the #EventNotifier to be triggered when @addr, @size, and @data all match.
1304 */
1305 void memory_region_del_eventfd(MemoryRegion *mr,
1306 hwaddr addr,
1307 unsigned size,
1308 bool match_data,
1309 uint64_t data,
1310 EventNotifier *e);
1311
1312 /**
1313 * memory_region_add_subregion: Add a subregion to a container.
1314 *
1315 * Adds a subregion at @offset. The subregion may not overlap with other
1316 * subregions (except for those explicitly marked as overlapping). A region
1317 * may only be added once as a subregion (unless removed with
1318 * memory_region_del_subregion()); use memory_region_init_alias() if you
1319 * want a region to be a subregion in multiple locations.
1320 *
1321 * @mr: the region to contain the new subregion; must be a container
1322 * initialized with memory_region_init().
1323 * @offset: the offset relative to @mr where @subregion is added.
1324 * @subregion: the subregion to be added.
1325 */
1326 void memory_region_add_subregion(MemoryRegion *mr,
1327 hwaddr offset,
1328 MemoryRegion *subregion);
1329 /**
1330 * memory_region_add_subregion_overlap: Add a subregion to a container
1331 * with overlap.
1332 *
1333 * Adds a subregion at @offset. The subregion may overlap with other
1334 * subregions. Conflicts are resolved by having a higher @priority hide a
1335 * lower @priority. Subregions without priority are taken as @priority 0.
1336 * A region may only be added once as a subregion (unless removed with
1337 * memory_region_del_subregion()); use memory_region_init_alias() if you
1338 * want a region to be a subregion in multiple locations.
1339 *
1340 * @mr: the region to contain the new subregion; must be a container
1341 * initialized with memory_region_init().
1342 * @offset: the offset relative to @mr where @subregion is added.
1343 * @subregion: the subregion to be added.
1344 * @priority: used for resolving overlaps; highest priority wins.
1345 */
1346 void memory_region_add_subregion_overlap(MemoryRegion *mr,
1347 hwaddr offset,
1348 MemoryRegion *subregion,
1349 int priority);
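
/*
 * Example (sketch): layering a higher-priority MMIO window over RAM;
 * while both are mapped, the VGA window hides the RAM it overlaps
 * ("sysmem", "ram" and "vga_window" are assumed):
 *
 *     memory_region_add_subregion_overlap(sysmem, 0, ram, 0);
 *     memory_region_add_subregion_overlap(sysmem, 0xa0000, vga_window, 1);
 */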
1350
1351 /**
1352 * memory_region_get_ram_addr: Get the ram address associated with a memory
1353 * region
1354 */
1355 ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
1356
1357 uint64_t memory_region_get_alignment(const MemoryRegion *mr);
1358 /**
1359 * memory_region_del_subregion: Remove a subregion.
1360 *
1361 * Removes a subregion from its container.
1362 *
1363 * @mr: the container to be updated.
1364 * @subregion: the region being removed; must be a current subregion of @mr.
1365 */
1366 void memory_region_del_subregion(MemoryRegion *mr,
1367 MemoryRegion *subregion);
1368
1369 /*
1370 * memory_region_set_enabled: dynamically enable or disable a region
1371 *
1372 * Enables or disables a memory region. A disabled memory region
1373 * ignores all accesses to itself and its subregions. It does not
1374 * obscure sibling subregions with lower priority - it simply behaves as
1375 * if it was removed from the hierarchy.
1376 *
1377 * Regions default to being enabled.
1378 *
1379 * @mr: the region to be updated
1380 * @enabled: whether to enable or disable the region
1381 */
1382 void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
1383
1384 /*
1385 * memory_region_set_address: dynamically update the address of a region
1386 *
1387 * Dynamically updates the address of a region, relative to its container.
1388  * May be used on regions that are currently part of a memory hierarchy.
1389 *
1390 * @mr: the region to be updated
1391 * @addr: new address, relative to container region
1392 */
1393 void memory_region_set_address(MemoryRegion *mr, hwaddr addr);
1394
1395 /*
1396 * memory_region_set_size: dynamically update the size of a region.
1397 *
1398 * Dynamically updates the size of a region.
1399 *
1400 * @mr: the region to be updated
1401 * @size: used size of the region.
1402 */
1403 void memory_region_set_size(MemoryRegion *mr, uint64_t size);
1404
1405 /*
1406 * memory_region_set_alias_offset: dynamically update a memory alias's offset
1407 *
1408 * Dynamically updates the offset into the target region that an alias points
1409  * to, as if the @offset argument to memory_region_init_alias() had changed.
1410 *
1411 * @mr: the #MemoryRegion to be updated; should be an alias.
1412 * @offset: the new offset into the target memory region
1413 */
1414 void memory_region_set_alias_offset(MemoryRegion *mr,
1415 hwaddr offset);
1416
1417 /**
1418 * memory_region_present: checks if an address relative to a @container
1419  * translates into a #MemoryRegion within @container
1420 *
1421 * Answer whether a #MemoryRegion within @container covers the address
1422 * @addr.
1423 *
1424 * @container: a #MemoryRegion within which @addr is a relative address
1425 * @addr: the area within @container to be searched
1426 */
1427 bool memory_region_present(MemoryRegion *container, hwaddr addr);
1428
1429 /**
1430 * memory_region_is_mapped: returns true if #MemoryRegion is mapped
1431 * into any address space.
1432 *
1433 * @mr: a #MemoryRegion which should be checked if it's mapped
1434 */
1435 bool memory_region_is_mapped(MemoryRegion *mr);
1436
1437 /**
1438 * memory_region_find: translate an address/size relative to a
1439 * MemoryRegion into a #MemoryRegionSection.
1440 *
1441 * Locates the first #MemoryRegion within @mr that overlaps the range
1442 * given by @addr and @size.
1443 *
1444 * Returns a #MemoryRegionSection that describes a contiguous overlap.
1445 * It will have the following characteristics:
1446 * .@size = 0 iff no overlap was found
1447 * .@mr is non-%NULL iff an overlap was found
1448 *
1449 * Remember that in the return value the @offset_within_region is
1450 * relative to the returned region (in the .@mr field), not to the
1451 * @mr argument.
1452 *
1453 * Similarly, the .@offset_within_address_space is relative to the
1454 * address space that contains both regions, the passed and the
1455 * returned one. However, in the special case where the @mr argument
1456 * has no container (and thus is the root of the address space), the
1457 * following will hold:
1458 * .@offset_within_address_space >= @addr
1459 * .@offset_within_address_space + .@size <= @addr + @size
1460 *
1461 * @mr: a MemoryRegion within which @addr is a relative address
1462 * @addr: start of the area within @as to be searched
1463 * @size: size of the area to be searched
1464 */
1465 MemoryRegionSection memory_region_find(MemoryRegion *mr,
1466 hwaddr addr, uint64_t size);
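
/*
 * Example (sketch): callers are expected to drop the reference they
 * receive on the returned region when done (cf. memory_region_ref()
 * above); "sysmem" is assumed:
 *
 *     MemoryRegionSection sec = memory_region_find(sysmem, addr, 4);
 *     if (sec.mr) {
 *         ... use sec.offset_within_region and sec.size ...
 *         memory_region_unref(sec.mr);
 *     }
 */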
1467
1468 /**
1469 * memory_global_dirty_log_sync: synchronize the dirty log for all memory
1470 *
1471 * Synchronizes the dirty page log for all address spaces.
1472 */
1473 void memory_global_dirty_log_sync(void);
1474
1475 /**
1476 * memory_region_transaction_begin: Start a transaction.
1477 *
1478 * During a transaction, changes will be accumulated and made visible
1479 * only when the transaction ends (is committed).
1480 */
1481 void memory_region_transaction_begin(void);
1482
1483 /**
1484 * memory_region_transaction_commit: Commit a transaction and make changes
1485 * visible to the guest.
1486 */
1487 void memory_region_transaction_commit(void);
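
/*
 * Example (sketch): batching topology updates so the guest observes a
 * single atomic change ("bank0" and "bank1" are hypothetical regions):
 *
 *     memory_region_transaction_begin();
 *     memory_region_set_enabled(bank0, false);
 *     memory_region_set_address(bank1, 0x4000);
 *     memory_region_transaction_commit();
 */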
1488
1489 /**
1490 * memory_listener_register: register callbacks to be called when memory
1491 * sections are mapped or unmapped into an address
1492 * space
1493 *
1494 * @listener: an object containing the callbacks to be called
1495 * @filter: if non-%NULL, only regions in this address space will be observed
1496 */
1497 void memory_listener_register(MemoryListener *listener, AddressSpace *filter);
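
/*
 * Example (sketch): a minimal listener that is told about every section
 * mapped into the system address space (assumes address_space_memory
 * from "exec/address-spaces.h"; "my_region_add" is hypothetical):
 *
 *     static void my_region_add(MemoryListener *l,
 *                               MemoryRegionSection *sec)
 *     {
 *         ... record sec->offset_within_address_space and sec->size ...
 *     }
 *
 *     static MemoryListener my_listener = {
 *         .region_add = my_region_add,
 *         .priority = 10,
 *     };
 *
 *     memory_listener_register(&my_listener, &address_space_memory);
 */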
1498
1499 /**
1500 * memory_listener_unregister: undo the effect of memory_listener_register()
1501 *
1502 * @listener: an object containing the callbacks to be removed
1503 */
1504 void memory_listener_unregister(MemoryListener *listener);
1505
1506 /**
1507 * memory_global_dirty_log_start: begin dirty logging for all regions
1508 */
1509 void memory_global_dirty_log_start(void);
1510
1511 /**
1512 * memory_global_dirty_log_stop: end dirty logging for all regions
1513 */
1514 void memory_global_dirty_log_stop(void);
1515
1516 void mtree_info(fprintf_function mon_printf, void *f, bool flatview,
1517 bool dispatch_tree);
1518
1519 /**
1520 * memory_region_request_mmio_ptr: request a pointer to an mmio
1521  * MemoryRegion. If possible, map a RAM MemoryRegion with this pointer.
1522 * When the device wants to invalidate the pointer it will call
1523 * memory_region_invalidate_mmio_ptr.
1524 *
1525 * @mr: #MemoryRegion to check
1526 * @addr: address within that region
1527 *
1528 * Returns true on success, false otherwise.
1529 */
1530 bool memory_region_request_mmio_ptr(MemoryRegion *mr, hwaddr addr);
1531
1532 /**
1533 * memory_region_invalidate_mmio_ptr: invalidate the pointer to an mmio
1534 * previously requested.
1535 * In the end that means that if something wants to execute from this area it
1536 * will need to request the pointer again.
1537 *
1538 * @mr: #MemoryRegion associated to the pointer.
1539 * @addr: address within that region
1540 * @size: size of that area.
1541 */
1542 void memory_region_invalidate_mmio_ptr(MemoryRegion *mr, hwaddr offset,
1543 unsigned size);
1544

/**
 * memory_region_dispatch_read: perform a read directly to the specified
 * MemoryRegion.
 *
 * @mr: #MemoryRegion to access
 * @addr: address within that region
 * @pval: pointer to a uint64_t to which the data is written
 * @size: size of the access in bytes
 * @attrs: memory transaction attributes to use for the access
 */
MemTxResult memory_region_dispatch_read(MemoryRegion *mr,
                                        hwaddr addr,
                                        uint64_t *pval,
                                        unsigned size,
                                        MemTxAttrs attrs);

/**
 * memory_region_dispatch_write: perform a write directly to the specified
 * MemoryRegion.
 *
 * @mr: #MemoryRegion to access
 * @addr: address within that region
 * @data: data to write
 * @size: size of the access in bytes
 * @attrs: memory transaction attributes to use for the access
 */
MemTxResult memory_region_dispatch_write(MemoryRegion *mr,
                                         hwaddr addr,
                                         uint64_t data,
                                         unsigned size,
                                         MemTxAttrs attrs);
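
/*
 * Example: a minimal sketch of a 4-byte read issued straight at a
 * region, bypassing address space translation ("mr" is a hypothetical,
 * already-initialized region):
 *
 *     uint64_t val;
 *     MemTxResult r;
 *
 *     r = memory_region_dispatch_read(mr, 0x10, &val, 4,
 *                                     MEMTXATTRS_UNSPECIFIED);
 *     if (r != MEMTX_OK) {
 *         ... handle the failed transaction ...
 *     }
 */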

/**
 * address_space_init: initializes an address space
 *
 * @as: an uninitialized #AddressSpace
 * @root: a #MemoryRegion that routes addresses for the address space
 * @name: an address space name.  The name is only used for debugging
 *        output.
 */
void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);

/**
 * address_space_destroy: destroy an address space
 *
 * Releases all resources associated with an address space.  After an
 * address space is destroyed, its root memory region (given by
 * address_space_init()) may be destroyed as well.
 *
 * @as: address space to be destroyed
 */
void address_space_destroy(AddressSpace *as);
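
/*
 * Example: a minimal sketch of creating and tearing down an address
 * space rooted at a freshly initialized container region ("owner" and
 * the names are hypothetical):
 *
 *     MemoryRegion root;
 *     AddressSpace as;
 *
 *     memory_region_init(&root, owner, "my-root", UINT64_MAX);
 *     address_space_init(&as, &root, "my-as");
 *     ...
 *     address_space_destroy(&as);
 */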

/**
 * address_space_rw: read from or write to an address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 * @is_write: indicates the transfer direction
 */
MemTxResult address_space_rw(AddressSpace *as, hwaddr addr,
                             MemTxAttrs attrs, uint8_t *buf,
                             int len, bool is_write);

/**
 * address_space_write: write to address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
MemTxResult address_space_write(AddressSpace *as, hwaddr addr,
                                MemTxAttrs attrs,
                                const uint8_t *buf, int len);
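
/*
 * Example: a minimal sketch of a write followed by a read-back through
 * the same address space ("as" and the guest address are hypothetical):
 *
 *     uint8_t buf[16];
 *     MemTxResult r;
 *
 *     r = address_space_write(as, 0x1000, MEMTXATTRS_UNSPECIFIED,
 *                             buf, sizeof(buf));
 *     if (r == MEMTX_OK) {
 *         r = address_space_rw(as, 0x1000, MEMTXATTRS_UNSPECIFIED,
 *                              buf, sizeof(buf), false);
 *     }
 */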

/* address_space_ld*: load from an address space
 * address_space_st*: store to an address space
 *
 * These functions perform a load or store of the byte, word,
 * longword or quad to the specified address within the AddressSpace.
 * The _le suffixed functions treat the data as little endian;
 * _be indicates big endian; no suffix indicates "same endianness
 * as guest CPU".
 *
 * The "guest CPU endianness" accessors are deprecated for use outside
 * target-* code; devices should be CPU-agnostic and use either the LE
 * or the BE accessors.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @val: data value, for stores
 * @attrs: memory transaction attributes
 * @result: location to write the success/failure of the transaction;
 *          if NULL, this information is discarded
 */
uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
                            MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
                               MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result);
uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result);
uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
                              MemTxAttrs attrs, MemTxResult *result);
void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
                       MemTxAttrs attrs, MemTxResult *result);
void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result);
void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result);
void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result);
void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
                          MemTxAttrs attrs, MemTxResult *result);
void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result);
void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
                          MemTxAttrs attrs, MemTxResult *result);

uint32_t ldub_phys(AddressSpace *as, hwaddr addr);
uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr);
uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr);
uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr);
uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr);
uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr);
uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr);
void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val);
void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val);
void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val);
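
/*
 * Example: a minimal sketch of a checked little-endian 32-bit store and
 * the equivalent *_phys shorthand, which uses MEMTXATTRS_UNSPECIFIED
 * and discards the transaction result ("as" and the address are
 * hypothetical):
 *
 *     MemTxResult r;
 *
 *     address_space_stl_le(as, 0x2000, 0xdeadbeef,
 *                          MEMTXATTRS_UNSPECIFIED, &r);
 *     ...
 *     stl_le_phys(as, 0x2000, 0xdeadbeef);
 */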

struct MemoryRegionCache {
    hwaddr xlat;
    hwaddr len;
    AddressSpace *as;
};

#define MEMORY_REGION_CACHE_INVALID ((MemoryRegionCache) { .as = NULL })

/* address_space_cache_init: prepare for repeated access to a physical
 * memory region
 *
 * @cache: #MemoryRegionCache to be filled
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @len: length of buffer
 * @is_write: indicates the transfer direction
 *
 * This will only work with RAM, and may map a subset of the requested
 * range by returning a value that is less than @len.  On failure, it
 * returns a negative errno value.
 *
 * Because it only works with RAM, this function can be used for
 * read-modify-write operations.  In this case, @is_write should be %true.
 *
 * Note that addresses passed to the address_space_*_cached functions
 * are relative to @addr.
 */
int64_t address_space_cache_init(MemoryRegionCache *cache,
                                 AddressSpace *as,
                                 hwaddr addr,
                                 hwaddr len,
                                 bool is_write);

/**
 * address_space_cache_invalidate: complete a write to a #MemoryRegionCache
 *
 * @cache: The #MemoryRegionCache to operate on.
 * @addr: The first physical address that was written, relative to the
 *        address that was passed to @address_space_cache_init.
 * @access_len: The number of bytes that were written starting at @addr.
 */
void address_space_cache_invalidate(MemoryRegionCache *cache,
                                    hwaddr addr,
                                    hwaddr access_len);

/**
 * address_space_cache_destroy: free a #MemoryRegionCache
 *
 * @cache: The #MemoryRegionCache whose memory should be released.
 */
void address_space_cache_destroy(MemoryRegionCache *cache);
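
/*
 * Example: a minimal sketch of the cache lifecycle for a small
 * read-modify-write window ("as" and the guest address are
 * hypothetical; note that the addresses passed to the cached accessors
 * are relative to the cached base address):
 *
 *     MemoryRegionCache cache = MEMORY_REGION_CACHE_INVALID;
 *     int64_t mapped;
 *     uint32_t v;
 *
 *     mapped = address_space_cache_init(&cache, as, 0x3000, 64, true);
 *     if (mapped >= 4) {
 *         v = ldl_le_phys_cached(&cache, 0);
 *         stl_le_phys_cached(&cache, 0, v | 1);
 *         address_space_cache_invalidate(&cache, 0, 4);
 *     }
 *     address_space_cache_destroy(&cache);
 */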

/* address_space_ld*_cached: load from a cached #MemoryRegion
 * address_space_st*_cached: store into a cached #MemoryRegion
 *
 * These functions perform a load or store of the byte, word,
 * longword or quad to the specified address.  The address is
 * a physical address in the AddressSpace, but it must lie within
 * a #MemoryRegion that was mapped with address_space_cache_init.
 *
 * The _le suffixed functions treat the data as little endian;
 * _be indicates big endian; no suffix indicates "same endianness
 * as guest CPU".
 *
 * The "guest CPU endianness" accessors are deprecated for use outside
 * target-* code; devices should be CPU-agnostic and use either the LE
 * or the BE accessors.
 *
 * @cache: previously initialized #MemoryRegionCache to be accessed
 * @addr: address within the address space
 * @val: data value, for stores
 * @attrs: memory transaction attributes
 * @result: location to write the success/failure of the transaction;
 *          if NULL, this information is discarded
 */
uint32_t address_space_ldub_cached(MemoryRegionCache *cache, hwaddr addr,
                                   MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_lduw_le_cached(MemoryRegionCache *cache, hwaddr addr,
                                      MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_lduw_be_cached(MemoryRegionCache *cache, hwaddr addr,
                                      MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_ldl_le_cached(MemoryRegionCache *cache, hwaddr addr,
                                     MemTxAttrs attrs, MemTxResult *result);
uint32_t address_space_ldl_be_cached(MemoryRegionCache *cache, hwaddr addr,
                                     MemTxAttrs attrs, MemTxResult *result);
uint64_t address_space_ldq_le_cached(MemoryRegionCache *cache, hwaddr addr,
                                     MemTxAttrs attrs, MemTxResult *result);
uint64_t address_space_ldq_be_cached(MemoryRegionCache *cache, hwaddr addr,
                                     MemTxAttrs attrs, MemTxResult *result);
void address_space_stb_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
                              MemTxAttrs attrs, MemTxResult *result);
void address_space_stw_le_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
                                 MemTxAttrs attrs, MemTxResult *result);
void address_space_stw_be_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
                                 MemTxAttrs attrs, MemTxResult *result);
void address_space_stl_le_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
                                 MemTxAttrs attrs, MemTxResult *result);
void address_space_stl_be_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
                                 MemTxAttrs attrs, MemTxResult *result);
void address_space_stq_le_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val,
                                 MemTxAttrs attrs, MemTxResult *result);
void address_space_stq_be_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val,
                                 MemTxAttrs attrs, MemTxResult *result);

uint32_t ldub_phys_cached(MemoryRegionCache *cache, hwaddr addr);
uint32_t lduw_le_phys_cached(MemoryRegionCache *cache, hwaddr addr);
uint32_t lduw_be_phys_cached(MemoryRegionCache *cache, hwaddr addr);
uint32_t ldl_le_phys_cached(MemoryRegionCache *cache, hwaddr addr);
uint32_t ldl_be_phys_cached(MemoryRegionCache *cache, hwaddr addr);
uint64_t ldq_le_phys_cached(MemoryRegionCache *cache, hwaddr addr);
uint64_t ldq_be_phys_cached(MemoryRegionCache *cache, hwaddr addr);
void stb_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
void stw_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
void stw_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
void stl_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
void stl_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
void stq_le_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val);
void stq_be_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val);

/* address_space_get_iotlb_entry: translate an address into an IOTLB
 * entry.  Should be called from an RCU critical section.
 */
IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
                                            bool is_write);
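
/*
 * Example: a minimal sketch of an IOTLB lookup for a device write
 * ("as" and the I/O virtual address "iova" are hypothetical):
 *
 *     IOMMUTLBEntry entry;
 *
 *     rcu_read_lock();
 *     entry = address_space_get_iotlb_entry(as, iova, true);
 *     if (entry.perm & IOMMU_WO) {
 *         ... entry.translated_addr is valid within entry.target_as ...
 *     }
 *     rcu_read_unlock();
 */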

/* address_space_translate: translate an address, within an address
 * space, into a MemoryRegion and an offset within that region.  Should
 * be called from an RCU critical section, so that the last reference
 * to the returned region does not disappear after address_space_translate
 * returns.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @xlat: pointer to the address within the returned #MemoryRegion
 * @len: pointer to length
 * @is_write: indicates the transfer direction
 */
MemoryRegion *flatview_translate(FlatView *fv,
                                 hwaddr addr, hwaddr *xlat,
                                 hwaddr *len, bool is_write);

static inline MemoryRegion *address_space_translate(AddressSpace *as,
                                                    hwaddr addr, hwaddr *xlat,
                                                    hwaddr *len, bool is_write)
{
    return flatview_translate(address_space_to_flatview(as),
                              addr, xlat, len, is_write);
}
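
/*
 * Example: a minimal sketch of translating a guest physical address and
 * clamping an access to the contiguous range that was resolved ("as"
 * and the address are hypothetical):
 *
 *     hwaddr xlat, len = 4096;
 *     MemoryRegion *mr;
 *
 *     rcu_read_lock();
 *     mr = address_space_translate(as, 0x5000, &xlat, &len, false);
 *     ... at most "len" bytes starting at offset "xlat" lie in "mr" ...
 *     rcu_read_unlock();
 */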

/* address_space_access_valid: check for validity of accessing an address
 * space range
 *
 * Check whether memory is assigned to the given address space range, and
 * access is permitted by any IOMMU regions that are active for the address
 * space.
 *
 * For now, @addr and @len should be aligned to a page size.  This
 * limitation will be lifted in the future.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @len: length of the area to be checked
 * @is_write: indicates the transfer direction
 */
bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len,
                                bool is_write);
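
/*
 * Example: probing an address space range before issuing a page-sized
 * DMA write ("as" and the page-aligned address are hypothetical):
 *
 *     if (address_space_access_valid(as, 0x6000, 4096, true)) {
 *         ... safe to issue the write ...
 *     }
 */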

/* address_space_map: map a physical memory region into a host virtual address
 *
 * May map a subset of the requested range, given by and returned in @plen.
 * May return %NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @plen: pointer to length of buffer; updated on return
 * @is_write: indicates the transfer direction
 */
void *address_space_map(AddressSpace *as, hwaddr addr,
                        hwaddr *plen, bool is_write);

/* address_space_unmap: Unmaps a memory region previously mapped by
 * address_space_map()
 *
 * Will also mark the memory as dirty if @is_write == %true.  @access_len
 * gives the amount of memory that was actually read or written by the
 * caller.
 *
 * @as: #AddressSpace used
 * @buffer: host pointer as returned by address_space_map()
 * @len: buffer length as returned by address_space_map()
 * @access_len: amount of data actually transferred
 * @is_write: indicates the transfer direction
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len);
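
/*
 * Example: a minimal sketch of the map/use/unmap pattern for a direct
 * read ("as" and the address are hypothetical; note that the mapping
 * may come back shorter than requested):
 *
 *     hwaddr plen = 4096;
 *     void *p;
 *
 *     p = address_space_map(as, 0x7000, &plen, false);
 *     if (p) {
 *         ... read up to "plen" bytes from p ...
 *         address_space_unmap(as, p, plen, false, plen);
 *     }
 */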

/* Internal functions, part of the implementation of address_space_read.  */
MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr,
                                   MemTxAttrs attrs, uint8_t *buf,
                                   int len, hwaddr addr1, hwaddr l,
                                   MemoryRegion *mr);

MemTxResult flatview_read_full(FlatView *fv, hwaddr addr,
                               MemTxAttrs attrs, uint8_t *buf, int len);
void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (is_write) {
        return memory_region_is_ram(mr) &&
               !mr->readonly && !memory_region_is_ram_device(mr);
    } else {
        return (memory_region_is_ram(mr) && !memory_region_is_ram_device(mr)) ||
               memory_region_is_romd(mr);
    }
}

/* Internal function, also part of the implementation of
 * address_space_read; the compile-time fast path below handles small,
 * fixed-size accesses to directly accessible RAM.
 */
static inline __attribute__((__always_inline__))
MemTxResult flatview_read(FlatView *fv, hwaddr addr, MemTxAttrs attrs,
                          uint8_t *buf, int len)
{
    MemTxResult result = MEMTX_OK;
    hwaddr l, addr1;
    void *ptr;
    MemoryRegion *mr;

    if (__builtin_constant_p(len)) {
        if (len) {
            rcu_read_lock();
            l = len;
            mr = flatview_translate(fv, addr, &addr1, &l, false);
            if (len == l && memory_access_is_direct(mr, false)) {
                /* Fast path: copy straight out of guest RAM.  */
                ptr = qemu_map_ram_ptr(mr->ram_block, addr1);
                memcpy(buf, ptr, len);
            } else {
                /* Slow path: MMIO, or a range not covered by one region.  */
                result = flatview_read_continue(fv, addr, attrs, buf, len,
                                                addr1, l, mr);
            }
            rcu_read_unlock();
        }
    } else {
        result = flatview_read_full(fv, addr, attrs, buf, len);
    }
    return result;
}

/**
 * address_space_read: read from an address space.
 *
 * Return a MemTxResult indicating whether the operation succeeded
 * or failed (eg unassigned memory, device rejected the transaction,
 * IOMMU fault).
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @attrs: memory transaction attributes
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline MemTxResult address_space_read(AddressSpace *as, hwaddr addr,
                                             MemTxAttrs attrs, uint8_t *buf,
                                             int len)
{
    return flatview_read(address_space_to_flatview(as), addr, attrs, buf, len);
}

/**
 * address_space_read_cached: read from a cached RAM region
 *
 * @cache: Cached region to be addressed
 * @addr: address relative to the base of the RAM region
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline void
address_space_read_cached(MemoryRegionCache *cache, hwaddr addr,
                          void *buf, int len)
{
    assert(addr < cache->len && len <= cache->len - addr);
    address_space_read(cache->as, cache->xlat + addr, MEMTXATTRS_UNSPECIFIED,
                       buf, len);
}

/**
 * address_space_write_cached: write to a cached RAM region
 *
 * @cache: Cached region to be addressed
 * @addr: address relative to the base of the RAM region
 * @buf: buffer with the data transferred
 * @len: length of the data transferred
 */
static inline void
address_space_write_cached(MemoryRegionCache *cache, hwaddr addr,
                           void *buf, int len)
{
    assert(addr < cache->len && len <= cache->len - addr);
    address_space_write(cache->as, cache->xlat + addr, MEMTXATTRS_UNSPECIFIED,
                        buf, len);
}
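
/*
 * Example: a minimal sketch of a read-modify-write of a structure held
 * in a cached region ("cache" has been set up with
 * address_space_cache_init() and "struct desc" is a hypothetical guest
 * layout):
 *
 *     struct desc d;
 *
 *     address_space_read_cached(cache, 0, &d, sizeof(d));
 *     d.flags |= 1;
 *     address_space_write_cached(cache, 0, &d, sizeof(d));
 *     address_space_cache_invalidate(cache, 0, sizeof(d));
 */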

#endif /* !CONFIG_USER_ONLY */

#endif /* MEMORY_H */