1 /*
2 * Physical memory management API
3 *
4 * Copyright 2011 Red Hat, Inc. and/or its affiliates
5 *
6 * Authors:
7 * Avi Kivity <avi@redhat.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
11 *
12 */
13
14 #ifndef MEMORY_H
15 #define MEMORY_H
16
17 #ifndef CONFIG_USER_ONLY
18
19 #include <stdint.h>
20 #include <stdbool.h>
21 #include "qemu-common.h"
22 #include "cpu-common.h"
23 #include "targphys.h"
24 #include "qemu-queue.h"
25 #include "iorange.h"
26 #include "ioport.h"
27 #include "int128.h"
28
29 typedef struct MemoryRegionOps MemoryRegionOps;
30 typedef struct MemoryRegion MemoryRegion;
31 typedef struct MemoryRegionPortio MemoryRegionPortio;
32 typedef struct MemoryRegionMmio MemoryRegionMmio;
33
34 /* Must match *_DIRTY_FLAGS in cpu-all.h. To be replaced with dynamic
35 * registration.
36 */
37 #define DIRTY_MEMORY_VGA 0
38 #define DIRTY_MEMORY_CODE 1
39 #define DIRTY_MEMORY_MIGRATION 3
40
41 struct MemoryRegionMmio {
42 CPUReadMemoryFunc *read[3];
43 CPUWriteMemoryFunc *write[3];
44 };
45
46 /*
47 * Memory region callbacks
48 */
49 struct MemoryRegionOps {
50 /* Read from the memory region. @addr is relative to @mr; @size is
51 * in bytes. */
52 uint64_t (*read)(void *opaque,
53 target_phys_addr_t addr,
54 unsigned size);
55 /* Write to the memory region. @addr is relative to @mr; @size is
56 * in bytes. */
57 void (*write)(void *opaque,
58 target_phys_addr_t addr,
59 uint64_t data,
60 unsigned size);
61
62 enum device_endian endianness;
63 /* Guest-visible constraints: */
64 struct {
65          * If nonzero, specifies bounds on access sizes beyond which a machine
66 * check is thrown.
67 */
68 unsigned min_access_size;
69 unsigned max_access_size;
70 /* If true, unaligned accesses are supported. Otherwise unaligned
71 * accesses throw machine checks.
72 */
73 bool unaligned;
74 /*
75 * If present, and returns #false, the transaction is not accepted
76 * by the device (and results in machine dependent behaviour such
77 * as a machine check exception).
78 */
79 bool (*accepts)(void *opaque, target_phys_addr_t addr,
80 unsigned size, bool is_write);
81 } valid;
82 /* Internal implementation constraints: */
83 struct {
84 /* If nonzero, specifies the minimum size implemented. Smaller sizes
85 * will be rounded upwards and a partial result will be returned.
86 */
87 unsigned min_access_size;
88 /* If nonzero, specifies the maximum size implemented. Larger sizes
89 * will be done as a series of accesses with smaller sizes.
90 */
91 unsigned max_access_size;
92 /* If true, unaligned accesses are supported. Otherwise all accesses
93 * are converted to (possibly multiple) naturally aligned accesses.
94 */
95 bool unaligned;
96 } impl;
97
98 /* If .read and .write are not present, old_portio may be used for
99 * backwards compatibility with old portio registration
100 */
101 const MemoryRegionPortio *old_portio;
102 /* If .read and .write are not present, old_mmio may be used for
103 * backwards compatibility with old mmio registration
104 */
105 const MemoryRegionMmio old_mmio;
106 };
107
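/* A minimal sketch of how a device might fill in a MemoryRegionOps for its
 * register window.  "FooState", the handler names and the 32-bit register
 * layout are illustrative, not part of this API:
 *
 *     static uint64_t foo_mmio_read(void *opaque, target_phys_addr_t addr,
 *                                   unsigned size)
 *     {
 *         FooState *s = opaque;
 *
 *         return s->regs[addr >> 2];
 *     }
 *
 *     static void foo_mmio_write(void *opaque, target_phys_addr_t addr,
 *                                uint64_t data, unsigned size)
 *     {
 *         FooState *s = opaque;
 *
 *         s->regs[addr >> 2] = data;
 *     }
 *
 *     static const MemoryRegionOps foo_mmio_ops = {
 *         .read = foo_mmio_read,
 *         .write = foo_mmio_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *         .valid.min_access_size = 4,
 *         .valid.max_access_size = 4,
 *     };
 */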
108 typedef struct CoalescedMemoryRange CoalescedMemoryRange;
109 typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;
110
111 struct MemoryRegion {
112 /* All fields are private - violators will be prosecuted */
113 const MemoryRegionOps *ops;
114 void *opaque;
115 MemoryRegion *parent;
116 Int128 size;
117 target_phys_addr_t addr;
118 target_phys_addr_t offset;
119 bool backend_registered;
120 void (*destructor)(MemoryRegion *mr);
121 ram_addr_t ram_addr;
122 IORange iorange;
123 bool terminates;
124 bool readable;
125 bool ram;
126 bool readonly; /* For RAM regions */
127 bool enabled;
128 MemoryRegion *alias;
129 target_phys_addr_t alias_offset;
130 unsigned priority;
131 bool may_overlap;
132 QTAILQ_HEAD(subregions, MemoryRegion) subregions;
133 QTAILQ_ENTRY(MemoryRegion) subregions_link;
134 QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
135 const char *name;
136 uint8_t dirty_log_mask;
137 unsigned ioeventfd_nb;
138 MemoryRegionIoeventfd *ioeventfds;
139 };
140
141 struct MemoryRegionPortio {
142 uint32_t offset;
143 uint32_t len;
144 unsigned size;
145 IOPortReadFunc *read;
146 IOPortWriteFunc *write;
147 };
148
149 #define PORTIO_END_OF_LIST() { }
150
151 typedef struct MemoryRegionSection MemoryRegionSection;
152
153 /**
154 * MemoryRegionSection: describes a fragment of a #MemoryRegion
155 *
156 * @mr: the region, or %NULL if empty
157 * @address_space: the address space the region is mapped in
158 * @offset_within_region: the beginning of the section, relative to @mr's start
159 * @size: the size of the section; will not exceed @mr's boundaries
160 * @offset_within_address_space: the address of the first byte of the section
161 * relative to the region's address space
162 */
163 struct MemoryRegionSection {
164 MemoryRegion *mr;
165 MemoryRegion *address_space;
166 target_phys_addr_t offset_within_region;
167 uint64_t size;
168 target_phys_addr_t offset_within_address_space;
169 };
170
171 typedef struct MemoryListener MemoryListener;
172
173 /**
174 * MemoryListener: callbacks structure for updates to the physical memory map
175 *
176 * Allows a component to adjust to changes in the guest-visible memory map.
177 * Use with memory_listener_register() and memory_listener_unregister().
178 */
179 struct MemoryListener {
180 void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
181 void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
182 void (*log_start)(MemoryListener *listener, MemoryRegionSection *section);
183 void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section);
184 void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
185 void (*log_global_start)(MemoryListener *listener);
186 void (*log_global_stop)(MemoryListener *listener);
187 QLIST_ENTRY(MemoryListener) link;
188 };
189
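/* A minimal sketch of a listener that reacts to sections being mapped into
 * the address space ("foo_region_add" and the listener name are illustrative;
 * the remaining hooks would be filled in the same way):
 *
 *     static void foo_region_add(MemoryListener *listener,
 *                                MemoryRegionSection *section)
 *     {
 *         // react to [section->offset_within_address_space,
 *         //           section->offset_within_address_space + section->size)
 *     }
 *
 *     static MemoryListener foo_listener = {
 *         .region_add = foo_region_add,
 *     };
 *
 *     memory_listener_register(&foo_listener);
 */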
190 /**
191 * memory_region_init: Initialize a memory region
192 *
193 * The region typically acts as a container for other memory regions. Use
194 * memory_region_add_subregion() to add subregions.
195 *
196 * @mr: the #MemoryRegion to be initialized
197 * @name: used for debugging; not visible to the user or ABI
198 * @size: size of the region; any subregions beyond this size will be clipped
199 */
200 void memory_region_init(MemoryRegion *mr,
201 const char *name,
202 uint64_t size);
203 /**
204 * memory_region_init_io: Initialize an I/O memory region.
205 *
206 * Accesses into the region will cause the callbacks in @ops to be called.
207  * If @size is nonzero, subregions will be clipped to @size.
208 *
209 * @mr: the #MemoryRegion to be initialized.
210 * @ops: a structure containing read and write callbacks to be used when
211 * I/O is performed on the region.
212  * @opaque: passed to the read and write callbacks of the @ops structure.
213 * @name: used for debugging; not visible to the user or ABI
214 * @size: size of the region.
215 */
216 void memory_region_init_io(MemoryRegion *mr,
217 const MemoryRegionOps *ops,
218 void *opaque,
219 const char *name,
220 uint64_t size);
221
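/* A minimal usage sketch combining the two initializers above; the device
 * state, names and sizes are illustrative, and foo_mmio_ops is assumed to be
 * a MemoryRegionOps like the one sketched earlier:
 *
 *     memory_region_init(&s->container, "foo", 0x2000);
 *     memory_region_init_io(&s->mmio, &foo_mmio_ops, s, "foo-regs", 0x1000);
 *     memory_region_add_subregion(&s->container, 0, &s->mmio);
 */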
222 /**
223 * memory_region_init_ram: Initialize RAM memory region. Accesses into the
224 * region will modify memory directly.
225 *
226 * @mr: the #MemoryRegion to be initialized.
227 * @name: the name of the region.
228 * @size: size of the region.
229 */
230 void memory_region_init_ram(MemoryRegion *mr,
231 const char *name,
232 uint64_t size);
233
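/* For instance (a sketch; the field, name and size are illustrative):
 *
 *     memory_region_init_ram(&s->vram, "vga.vram", 8 * 1024 * 1024);
 */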
234 /**
235  * memory_region_init_ram_ptr: Initialize RAM memory region from a
236  *                             user-provided pointer. Accesses into the
237  *                             region will modify memory directly.
238 *
239 * @mr: the #MemoryRegion to be initialized.
240 * @name: the name of the region.
241 * @size: size of the region.
242 * @ptr: memory to be mapped; must contain at least @size bytes.
243 */
244 void memory_region_init_ram_ptr(MemoryRegion *mr,
245 const char *name,
246 uint64_t size,
247 void *ptr);
248
249 /**
250 * memory_region_init_alias: Initialize a memory region that aliases all or a
251 * part of another memory region.
252 *
253 * @mr: the #MemoryRegion to be initialized.
254 * @name: used for debugging; not visible to the user or ABI
255 * @orig: the region to be referenced; @mr will be equivalent to
256 * @orig between @offset and @offset + @size - 1.
257 * @offset: start of the section in @orig to be referenced.
258 * @size: size of the region.
259 */
260 void memory_region_init_alias(MemoryRegion *mr,
261 const char *name,
262 MemoryRegion *orig,
263 target_phys_addr_t offset,
264 uint64_t size);
265
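/* For instance, exposing the first megabyte of a hypothetical RAM region at
 * a second location (names and sizes are illustrative):
 *
 *     memory_region_init_alias(&s->ram_below_1m, "ram-below-1m",
 *                              &s->ram, 0, 0x100000);
 */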
266 /**
267 * memory_region_init_rom_device: Initialize a ROM memory region. Writes are
268 * handled via callbacks.
269 *
270 * @mr: the #MemoryRegion to be initialized.
271  * @ops: callbacks for write access handling; @opaque is passed to them.
272 * @name: the name of the region.
273 * @size: size of the region.
274 */
275 void memory_region_init_rom_device(MemoryRegion *mr,
276 const MemoryRegionOps *ops,
277 void *opaque,
278 const char *name,
279 uint64_t size);
280
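/* For instance, a flash-like device whose reads are served from memory while
 * writes go to programming callbacks (names are illustrative; foo_flash_ops
 * is a MemoryRegionOps providing at least .write):
 *
 *     memory_region_init_rom_device(&s->flash, &foo_flash_ops, s,
 *                                   "foo.flash", s->flash_size);
 */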
281 /**
282 * memory_region_destroy: Destroy a memory region and reclaim all resources.
283 *
284 * @mr: the region to be destroyed. May not currently be a subregion
285 * (see memory_region_add_subregion()) or referenced in an alias
286 * (see memory_region_init_alias()).
287 */
288 void memory_region_destroy(MemoryRegion *mr);
289
290 /**
291 * memory_region_size: get a memory region's size.
292 *
293 * @mr: the memory region being queried.
294 */
295 uint64_t memory_region_size(MemoryRegion *mr);
296
297 /**
298 * memory_region_is_ram: check whether a memory region is random access
299 *
300  * Returns %true if the memory region is random access.
301 *
302 * @mr: the memory region being queried
303 */
304 bool memory_region_is_ram(MemoryRegion *mr);
305
306 /**
307 * memory_region_name: get a memory region's name
308 *
309 * Returns the string that was used to initialize the memory region.
310 *
311 * @mr: the memory region being queried
312 */
313 const char *memory_region_name(MemoryRegion *mr);
314
315 /**
316 * memory_region_is_logging: return whether a memory region is logging writes
317 *
318 * Returns %true if the memory region is logging writes
319 *
320 * @mr: the memory region being queried
321 */
322 bool memory_region_is_logging(MemoryRegion *mr);
323
324 /**
325 * memory_region_is_rom: check whether a memory region is ROM
326 *
327  * Returns %true if the memory region is read-only memory.
328 *
329 * @mr: the memory region being queried
330 */
331 bool memory_region_is_rom(MemoryRegion *mr);
332
333 /**
334 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
335 *
336 * Returns a host pointer to a RAM memory region (created with
337 * memory_region_init_ram() or memory_region_init_ram_ptr()). Use with
338 * care.
339 *
340 * @mr: the memory region being queried.
341 */
342 void *memory_region_get_ram_ptr(MemoryRegion *mr);
343
344 /**
345 * memory_region_set_offset: Sets an offset to be added to MemoryRegionOps
346 * callbacks.
347 *
348 * This function is deprecated and should not be used in new code.
349 */
350 void memory_region_set_offset(MemoryRegion *mr, target_phys_addr_t offset);
351
352 /**
353 * memory_region_set_log: Turn dirty logging on or off for a region.
354 *
355 * Turns dirty logging on or off for a specified client (display, migration).
356 * Only meaningful for RAM regions.
357 *
358 * @mr: the memory region being updated.
359 * @log: whether dirty logging is to be enabled or disabled.
360 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
361 * %DIRTY_MEMORY_VGA.
362 */
363 void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
364
365 /**
366 * memory_region_get_dirty: Check whether a page is dirty for a specified
367 * client.
368 *
369 * Checks whether a page has been written to since the last
370 * call to memory_region_reset_dirty() with the same @client. Dirty logging
371 * must be enabled.
372 *
373 * @mr: the memory region being queried.
374 * @addr: the address (relative to the start of the region) being queried.
375 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
376 * %DIRTY_MEMORY_VGA.
377 */
378 bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
379 unsigned client);
380
381 /**
382 * memory_region_set_dirty: Mark a page as dirty in a memory region.
383 *
384 * Marks a page as dirty, after it has been dirtied outside guest code.
385 *
386  * @mr: the memory region being updated.
387 * @addr: the address (relative to the start of the region) being dirtied.
388 */
389 void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr);
390
391 /**
392 * memory_region_sync_dirty_bitmap: Synchronize a region's dirty bitmap with
393 * any external TLBs (e.g. kvm)
394 *
395 * Flushes dirty information from accelerators such as kvm and vhost-net
396 * and makes it available to users of the memory API.
397 *
398 * @mr: the region being flushed.
399 */
400 void memory_region_sync_dirty_bitmap(MemoryRegion *mr);
401
402 /**
403 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
404 * client.
405 *
406 * Marks a range of pages as no longer dirty.
407 *
408 * @mr: the region being updated.
409 * @addr: the start of the subrange being cleaned.
410 * @size: the size of the subrange being cleaned.
411 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
412 * %DIRTY_MEMORY_VGA.
413 */
414 void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr,
415 target_phys_addr_t size, unsigned client);
416
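/* A rough sketch of a display-update loop built on the dirty-logging calls
 * above (the region, the redraw helper and the use of TARGET_PAGE_SIZE, which
 * is defined elsewhere in QEMU, are illustrative):
 *
 *     memory_region_set_log(&s->vram, true, DIRTY_MEMORY_VGA);
 *     ...
 *     memory_region_sync_dirty_bitmap(&s->vram);
 *     for (addr = 0; addr < vram_size; addr += TARGET_PAGE_SIZE) {
 *         if (memory_region_get_dirty(&s->vram, addr, DIRTY_MEMORY_VGA)) {
 *             redraw_page(s, addr);
 *         }
 *     }
 *     memory_region_reset_dirty(&s->vram, 0, vram_size, DIRTY_MEMORY_VGA);
 */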
417 /**
418 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
419 *
420 * Allows a memory region to be marked as read-only (turning it into a ROM).
421  * Only useful on RAM regions.
422 *
423 * @mr: the region being updated.
424  * @readonly: whether the region is to be ROM or RAM.
425 */
426 void memory_region_set_readonly(MemoryRegion *mr, bool readonly);
427
428 /**
429 * memory_region_rom_device_set_readable: enable/disable ROM readability
430 *
431  * Allows a ROM device (initialized with memory_region_init_rom_device()) to
432  * be marked as readable (default) or not readable. When it is readable,
433 * the device is mapped to guest memory. When not readable, reads are
434 * forwarded to the #MemoryRegion.read function.
435 *
436 * @mr: the memory region to be updated
437  * @readable: whether reads are satisfied directly (%true) or via callbacks
438 * (%false)
439 */
440 void memory_region_rom_device_set_readable(MemoryRegion *mr, bool readable);
441
442 /**
443 * memory_region_set_coalescing: Enable memory coalescing for the region.
444 *
445  * Enables writes to a region to be queued for later processing. MMIO ->write
446 * callbacks may be delayed until a non-coalesced MMIO is issued.
447 * Only useful for IO regions. Roughly similar to write-combining hardware.
448 *
449 * @mr: the memory region to be write coalesced
450 */
451 void memory_region_set_coalescing(MemoryRegion *mr);
452
453 /**
454 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
455 * a region.
456 *
457 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
458  * Multiple calls can be issued to coalesce disjoint ranges.
459 *
460 * @mr: the memory region to be updated.
461 * @offset: the start of the range within the region to be coalesced.
462 * @size: the size of the subrange to be coalesced.
463 */
464 void memory_region_add_coalescing(MemoryRegion *mr,
465 target_phys_addr_t offset,
466 uint64_t size);
467
468 /**
469 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
470 *
471 * Disables any coalescing caused by memory_region_set_coalescing() or
472  * memory_region_add_coalescing(). Roughly equivalent to uncacheable memory
473 * hardware.
474 *
475 * @mr: the memory region to be updated.
476 */
477 void memory_region_clear_coalescing(MemoryRegion *mr);
478
479 /**
480 * memory_region_add_eventfd: Request an eventfd to be triggered when a word
481 * is written to a location.
482 *
483 * Marks a word in an IO region (initialized with memory_region_init_io())
484 * as a trigger for an eventfd event. The I/O callback will not be called.
485 * The caller must be prepared to handle failure (that is, take the required
486 * action if the callback _is_ called).
487 *
488 * @mr: the memory region being updated.
489 * @addr: the address within @mr that is to be monitored
490 * @size: the size of the access to trigger the eventfd
491 * @match_data: whether to match against @data, instead of just @addr
492 * @data: the data to match against the guest write
493 * @fd: the eventfd to be triggered when @addr, @size, and @data all match.
494 **/
495 void memory_region_add_eventfd(MemoryRegion *mr,
496 target_phys_addr_t addr,
497 unsigned size,
498 bool match_data,
499 uint64_t data,
500 int fd);
501
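/* For instance, asking for an eventfd to be signalled when the guest writes
 * the value 1 to a hypothetical 4-byte doorbell register, bypassing the MMIO
 * callbacks (FOO_DOORBELL and kick_fd are illustrative):
 *
 *     memory_region_add_eventfd(&s->mmio, FOO_DOORBELL, 4, true, 1,
 *                               s->kick_fd);
 */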
502 /**
503 * memory_region_del_eventfd: Cancel an eventfd.
504 *
505 * Cancels an eventfd trigger requested by a previous
506 * memory_region_add_eventfd() call.
507 *
508 * @mr: the memory region being updated.
509 * @addr: the address within @mr that is to be monitored
510 * @size: the size of the access to trigger the eventfd
511 * @match_data: whether to match against @data, instead of just @addr
512 * @data: the data to match against the guest write
513 * @fd: the eventfd to be triggered when @addr, @size, and @data all match.
514 */
515 void memory_region_del_eventfd(MemoryRegion *mr,
516 target_phys_addr_t addr,
517 unsigned size,
518 bool match_data,
519 uint64_t data,
520 int fd);
521 /**
522 * memory_region_add_subregion: Add a subregion to a container.
523 *
524 * Adds a subregion at @offset. The subregion may not overlap with other
525 * subregions (except for those explicitly marked as overlapping). A region
526 * may only be added once as a subregion (unless removed with
527 * memory_region_del_subregion()); use memory_region_init_alias() if you
528 * want a region to be a subregion in multiple locations.
529 *
530 * @mr: the region to contain the new subregion; must be a container
531 * initialized with memory_region_init().
532 * @offset: the offset relative to @mr where @subregion is added.
533 * @subregion: the subregion to be added.
534 */
535 void memory_region_add_subregion(MemoryRegion *mr,
536 target_phys_addr_t offset,
537 MemoryRegion *subregion);
538 /**
539  * memory_region_add_subregion_overlap: Add a subregion to a container, with overlap.
540 *
541 * Adds a subregion at @offset. The subregion may overlap with other
542 * subregions. Conflicts are resolved by having a higher @priority hide a
543 * lower @priority. Subregions without priority are taken as @priority 0.
544 * A region may only be added once as a subregion (unless removed with
545 * memory_region_del_subregion()); use memory_region_init_alias() if you
546 * want a region to be a subregion in multiple locations.
547 *
548 * @mr: the region to contain the new subregion; must be a container
549 * initialized with memory_region_init().
550 * @offset: the offset relative to @mr where @subregion is added.
551 * @subregion: the subregion to be added.
552 * @priority: used for resolving overlaps; highest priority wins.
553 */
554 void memory_region_add_subregion_overlap(MemoryRegion *mr,
555 target_phys_addr_t offset,
556 MemoryRegion *subregion,
557 unsigned priority);
558
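/* For instance, layering a hypothetical PCI window over RAM so that the
 * window wins where the two overlap (names and addresses are illustrative;
 * sysmem stands for the top-level region of the address space):
 *
 *     memory_region_add_subregion_overlap(sysmem, 0, &s->ram, 0);
 *     memory_region_add_subregion_overlap(sysmem, 0xe0000000,
 *                                         &s->pci_window, 1);
 */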
559 /**
560 * memory_region_get_ram_addr: Get the ram address associated with a memory
561 * region
562 *
563  * DO NOT USE THIS FUNCTION. This is a temporary workaround while the Xen
564 * code is being reworked.
565 */
566 ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
567
568 /**
569 * memory_region_del_subregion: Remove a subregion.
570 *
571 * Removes a subregion from its container.
572 *
573 * @mr: the container to be updated.
574 * @subregion: the region being removed; must be a current subregion of @mr.
575 */
576 void memory_region_del_subregion(MemoryRegion *mr,
577 MemoryRegion *subregion);
578
579 /*
580 * memory_region_set_enabled: dynamically enable or disable a region
581 *
582 * Enables or disables a memory region. A disabled memory region
583 * ignores all accesses to itself and its subregions. It does not
584 * obscure sibling subregions with lower priority - it simply behaves as
585  * if it were removed from the hierarchy.
586 *
587 * Regions default to being enabled.
588 *
589 * @mr: the region to be updated
590 * @enabled: whether to enable or disable the region
591 */
592 void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
593
594 /*
595 * memory_region_set_address: dynamically update the address of a region
596 *
597 * Dynamically updates the address of a region, relative to its parent.
598  * May be used on regions that are currently part of a memory hierarchy.
599 *
600 * @mr: the region to be updated
601 * @addr: new address, relative to parent region
602 */
603 void memory_region_set_address(MemoryRegion *mr, target_phys_addr_t addr);
604
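/* For instance, a hypothetical bank-switch register handler could move and
 * enable or disable a banked region at run time (field names are
 * illustrative):
 *
 *     memory_region_set_address(&s->bank, s->bank_base);
 *     memory_region_set_enabled(&s->bank, s->bank_mapped);
 */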
605 /*
606 * memory_region_set_alias_offset: dynamically update a memory alias's offset
607 *
608 * Dynamically updates the offset into the target region that an alias points
609 * to, as if the fourth argument to memory_region_init_alias() has changed.
610 *
611 * @mr: the #MemoryRegion to be updated; should be an alias.
612 * @offset: the new offset into the target memory region
613 */
614 void memory_region_set_alias_offset(MemoryRegion *mr,
615 target_phys_addr_t offset);
616
617 /**
618 * memory_region_find: locate a MemoryRegion in an address space
619 *
620 * Locates the first #MemoryRegion within an address space given by
621 * @address_space that overlaps the range given by @addr and @size.
622 *
623 * Returns a #MemoryRegionSection that describes a contiguous overlap.
624 * It will have the following characteristics:
625 * .@offset_within_address_space >= @addr
626 * .@offset_within_address_space + .@size <= @addr + @size
627 * .@size = 0 iff no overlap was found
628 * .@mr is non-%NULL iff an overlap was found
629 *
630 * @address_space: a top-level (i.e. parentless) region that contains
631 * the region to be found
632 * @addr: start of the area within @address_space to be searched
633 * @size: size of the area to be searched
634 */
635 MemoryRegionSection memory_region_find(MemoryRegion *address_space,
636 target_phys_addr_t addr, uint64_t size);
637
638
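/* For instance, checking whether anything is mapped at a given address
 * (system_memory stands for the top-level region, obtained elsewhere, e.g.
 * via get_system_memory()):
 *
 *     MemoryRegionSection section;
 *
 *     section = memory_region_find(system_memory, addr, 4);
 *     if (section.mr) {
 *         ... the range [addr, addr + 4) overlaps section.mr ...
 *     }
 */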
639 /**
640 * memory_global_sync_dirty_bitmap: synchronize the dirty log for all memory
641 *
642 * Synchronizes the dirty page log for an entire address space.
643 * @address_space: a top-level (i.e. parentless) region that contains the
644 * memory being synchronized
645 */
646 void memory_global_sync_dirty_bitmap(MemoryRegion *address_space);
647
648 /**
649 * memory_region_transaction_begin: Start a transaction.
650 *
651 * During a transaction, changes will be accumulated and made visible
652  * only when the transaction ends (is committed).
653 */
654 void memory_region_transaction_begin(void);
655
656 /**
657 * memory_region_transaction_commit: Commit a transaction and make changes
658 * visible to the guest.
659 */
660 void memory_region_transaction_commit(void);
661
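/* A minimal sketch of batching topology updates so that listeners and the
 * guest observe them as a single change (region and offset names are
 * illustrative):
 *
 *     memory_region_transaction_begin();
 *     memory_region_del_subregion(sysmem, &s->bank);
 *     memory_region_add_subregion(sysmem, new_base, &s->bank);
 *     memory_region_transaction_commit();
 */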
662 /**
663 * memory_listener_register: register callbacks to be called when memory
664 * sections are mapped or unmapped into an address
665 * space
666 *
667 * @listener: an object containing the callbacks to be called
668 */
669 void memory_listener_register(MemoryListener *listener);
670
671 /**
672 * memory_listener_unregister: undo the effect of memory_listener_register()
673 *
674 * @listener: an object containing the callbacks to be removed
675 */
676 void memory_listener_unregister(MemoryListener *listener);
677
678 /**
679 * memory_global_dirty_log_start: begin dirty logging for all regions
680 */
681 void memory_global_dirty_log_start(void);
682
683 /**
684  * memory_global_dirty_log_stop: end dirty logging for all regions
685 */
686 void memory_global_dirty_log_stop(void);
687
688 void mtree_info(fprintf_function mon_printf, void *f);
689
690 #endif
691
692 #endif