/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef MEMORY_H
#define MEMORY_H

#ifndef CONFIG_USER_ONLY

#include <stdint.h>
#include <stdbool.h>
#include "qemu-common.h"
#include "cpu-common.h"
#include "targphys.h"
#include "qemu-queue.h"

typedef struct MemoryRegionOps MemoryRegionOps;
typedef struct MemoryRegion MemoryRegion;

/* Must match *_DIRTY_FLAGS in cpu-all.h.  To be replaced with dynamic
 * registration.
 */
#define DIRTY_MEMORY_VGA       0
#define DIRTY_MEMORY_CODE      1
#define DIRTY_MEMORY_MIGRATION 3

/*
 * Memory region callbacks
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     target_phys_addr_t addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  target_phys_addr_t addr,
                  uint64_t data,
                  unsigned size);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specify bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented.  Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;
};
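
/*
 * Example: a minimal MemoryRegionOps for a hypothetical device exposing a
 * small 32-bit register file.  Illustrative sketch only; MyDevState,
 * mydev_read() and mydev_write() are made-up names, not part of this API.
 *
 *     typedef struct MyDevState {
 *         uint32_t regs[4];
 *     } MyDevState;
 *
 *     static uint64_t mydev_read(void *opaque, target_phys_addr_t addr,
 *                                unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *         return s->regs[addr >> 2];    // addr is relative to the region
 *     }
 *
 *     static void mydev_write(void *opaque, target_phys_addr_t addr,
 *                             uint64_t data, unsigned size)
 *     {
 *         MyDevState *s = opaque;
 *         s->regs[addr >> 2] = data;
 *     }
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *         .valid = {
 *             .min_access_size = 4,     // reject sub-word guest accesses
 *             .max_access_size = 4,
 *         },
 *     };
 */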

typedef struct CoalescedMemoryRange CoalescedMemoryRange;

struct MemoryRegion {
    /* All fields are private - violators will be prosecuted */
    const MemoryRegionOps *ops;
    void *opaque;
    MemoryRegion *parent;
    uint64_t size;
    target_phys_addr_t addr;
    target_phys_addr_t offset;
    ram_addr_t ram_addr;
    bool terminates;
    MemoryRegion *alias;
    target_phys_addr_t alias_offset;
    unsigned priority;
    bool may_overlap;
    QTAILQ_HEAD(subregions, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
    const char *name;
    uint8_t dirty_log_mask;
};

/**
 * memory_region_init: Initialize a memory region
 *
 * The region typically acts as a container for other memory regions.  Use
 * memory_region_add_subregion() to add subregions.
 *
 * @mr: the #MemoryRegion to be initialized
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region; any subregions beyond this size will be clipped
 */
void memory_region_init(MemoryRegion *mr,
                        const char *name,
                        uint64_t size);
/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(MemoryRegion *mr,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size);
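
/*
 * Example: creating a 16-byte MMIO region backed by the hypothetical
 * mydev_ops sketched above and placing it inside a container region
 * (illustrative only; "s" is a pointer to the device state passed back
 * to the callbacks as @opaque):
 *
 *     MemoryRegion container, mmio;
 *
 *     memory_region_init(&container, "mydev", 0x1000);
 *     memory_region_init_io(&mmio, &mydev_ops, s, "mydev-regs", 16);
 *     memory_region_add_subregion(&container, 0, &mmio);
 */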

/**
 * memory_region_init_ram:  Initialize RAM memory region.  Accesses into the
 *                          region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @dev: a device associated with the region; may be %NULL.
 * @name: the name of the region; the pair (@dev, @name) must be globally
 *        unique.  The name is part of the save/restore ABI and so cannot be
 *        changed.
 * @size: size of the region.
 */
void memory_region_init_ram(MemoryRegion *mr,
                            DeviceState *dev, /* FIXME: layering violation */
                            const char *name,
                            uint64_t size);
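
/*
 * Example: a 64 MB RAM region for a hypothetical board (sketch only; the
 * region and the "myboard.ram" name are illustrative):
 *
 *     MemoryRegion ram;
 *
 *     memory_region_init_ram(&ram, NULL, "myboard.ram", 64 * 1024 * 1024);
 */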

/**
 * memory_region_init_ram_ptr:  Initialize RAM memory region from a
 *                              user-provided pointer.  Accesses into the
 *                              region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @dev: a device associated with the region; may be %NULL.
 * @name: the name of the region; the pair (@dev, @name) must be globally
 *        unique.  The name is part of the save/restore ABI and so cannot be
 *        changed.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                DeviceState *dev, /* FIXME: layering violation */
                                const char *name,
                                uint64_t size,
                                void *ptr);
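
/*
 * Example: wrapping an existing host buffer (here obtained with mmap(); any
 * allocation of at least @size bytes would do) as guest RAM.  Sketch only;
 * error handling is omitted and the names are illustrative:
 *
 *     void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *     MemoryRegion shared;
 *
 *     memory_region_init_ram_ptr(&shared, NULL, "myboard.shared", 4096, buf);
 */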

/**
 * memory_region_init_alias: Initialize a memory region that aliases all or a
 *                           part of another memory region.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @name: used for debugging; not visible to the user or ABI
 * @orig: the region to be referenced; @mr will be equivalent to
 *        @orig between @offset and @offset + @size - 1.
 * @offset: start of the section in @orig to be referenced.
 * @size: size of the region.
 */
void memory_region_init_alias(MemoryRegion *mr,
                              const char *name,
                              MemoryRegion *orig,
                              target_phys_addr_t offset,
                              uint64_t size);
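
/*
 * Example: exposing the first megabyte of an existing RAM region at a second
 * guest-physical address by mapping an alias of it.  Sketch only; "ram" and
 * "system_memory" are assumed to have been initialized elsewhere, and the
 * target address is arbitrary:
 *
 *     MemoryRegion ram_below_1m;
 *
 *     memory_region_init_alias(&ram_below_1m, "ram-below-1m", &ram,
 *                              0, 0x100000);
 *     memory_region_add_subregion(&system_memory, 0x80000000, &ram_below_1m);
 */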
/**
 * memory_region_destroy: Destroy a memory region and reclaim all resources.
 *
 * @mr: the region to be destroyed.  May not currently be a subregion
 *      (see memory_region_add_subregion()) or referenced in an alias
 *      (see memory_region_init_alias()).
 */
void memory_region_destroy(MemoryRegion *mr);

/**
 * memory_region_size: get a memory region's size.
 *
 * @mr: the memory region being queried.
 */
uint64_t memory_region_size(MemoryRegion *mr);

/**
 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
 *
 * Returns a host pointer to a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()).  Use with
 * care.
 *
 * @mr: the memory region being queried.
 */
void *memory_region_get_ram_ptr(MemoryRegion *mr);
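
/*
 * Example: filling a RAM region directly from the host, e.g. when loading a
 * firmware image at board setup.  Sketch only; "bios", "bios_blob" and
 * "bios_size" are illustrative names:
 *
 *     void *host = memory_region_get_ram_ptr(&bios);
 *
 *     memcpy(host, bios_blob, bios_size);
 *
 * If dirty logging is active for the region, the pages written this way
 * should also be marked dirty; see memory_region_set_dirty() below.
 */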

/**
 * memory_region_set_offset: Sets an offset to be added to MemoryRegionOps
 *                           callbacks.
 *
 * This function is deprecated and should not be used in new code.
 */
void memory_region_set_offset(MemoryRegion *mr, target_phys_addr_t offset);

/**
 * memory_region_set_log: Turn dirty logging on or off for a region.
 *
 * Turns dirty logging on or off for a specified client (display, migration).
 * Only meaningful for RAM regions.
 *
 * @mr: the memory region being updated.
 * @log: whether dirty logging is to be enabled or disabled.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);

/**
 * memory_region_get_dirty: Check whether a page is dirty for a specified
 *                          client.
 *
 * Checks whether a page has been written to since the last
 * call to memory_region_reset_dirty() with the same @client.  Dirty logging
 * must be enabled.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                             unsigned client);

/**
 * memory_region_set_dirty: Mark a page as dirty in a memory region.
 *
 * Marks a page as dirty, after it has been dirtied outside guest code.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being dirtied.
 */
void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr);

/**
 * memory_region_sync_dirty_bitmap: Synchronize a region's dirty bitmap with
 *                                  any external TLBs (e.g. kvm)
 *
 * Flushes dirty information from accelerators such as kvm and vhost-net
 * and makes it available to users of the memory API.
 *
 * @mr: the region being flushed.
 */
void memory_region_sync_dirty_bitmap(MemoryRegion *mr);

/**
 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
 *                            client.
 *
 * Marks a range of pages as no longer dirty.
 *
 * @mr: the region being updated.
 * @addr: the start of the subrange being cleaned.
 * @size: the size of the subrange being cleaned.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                               target_phys_addr_t size, unsigned client);
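
/*
 * Example: a display-style scanout loop that uses dirty logging to redraw
 * only the framebuffer pages the guest has touched.  Sketch only; "vram",
 * "fb_size" and redraw_page() are illustrative:
 *
 *     target_phys_addr_t addr;
 *
 *     memory_region_set_log(&vram, true, DIRTY_MEMORY_VGA);
 *     ...
 *     memory_region_sync_dirty_bitmap(&vram);   // pull state from e.g. kvm
 *     for (addr = 0; addr < fb_size; addr += TARGET_PAGE_SIZE) {
 *         if (memory_region_get_dirty(&vram, addr, DIRTY_MEMORY_VGA)) {
 *             redraw_page(addr);
 *         }
 *     }
 *     memory_region_reset_dirty(&vram, 0, fb_size, DIRTY_MEMORY_VGA);
 */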

/**
 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
 *
 * Allows a memory region to be marked as read-only (turning it into a ROM).
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @readonly: whether the region is to be ROM or RAM.
 */
void memory_region_set_readonly(MemoryRegion *mr, bool readonly);
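
/*
 * Example: a RAM region that is loaded with firmware and then turned into a
 * ROM so the guest can no longer modify it (sketch only; names illustrative):
 *
 *     memory_region_init_ram(&bios, NULL, "myboard.bios", bios_size);
 *     ...copy the firmware image into the region...
 *     memory_region_set_readonly(&bios, true);
 */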

/**
 * memory_region_set_coalescing: Enable memory coalescing for the region.
 *
 * Enables writes to a region to be queued for later processing.  MMIO ->write
 * callbacks may be delayed until a non-coalesced MMIO is issued.
 * Only useful for IO regions.  Roughly similar to write-combining hardware.
 *
 * @mr: the memory region to be write coalesced
 */
void memory_region_set_coalescing(MemoryRegion *mr);

/**
 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
 *                               a region.
 *
 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
 * Multiple calls can be issued to coalesce disjoint ranges.
 *
 * @mr: the memory region to be updated.
 * @offset: the start of the range within the region to be coalesced.
 * @size: the size of the subrange to be coalesced.
 */
void memory_region_add_coalescing(MemoryRegion *mr,
                                  target_phys_addr_t offset,
                                  uint64_t size);
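
/*
 * Example: enabling write coalescing for a 256-byte command window inside a
 * larger MMIO region, so that bursts of guest writes can be delivered in one
 * batch (sketch only; the offset and size are illustrative):
 *
 *     memory_region_add_coalescing(&mmio, 0x100, 0x100);
 *
 * Coalescing for the whole region can be removed again with
 * memory_region_clear_coalescing().
 */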

/**
 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
 *
 * Disables any coalescing caused by memory_region_set_coalescing() or
 * memory_region_add_coalescing().  Roughly equivalent to uncacheable memory
 * hardware.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_coalescing(MemoryRegion *mr);

/**
 * memory_region_add_subregion: Add a sub-region to a container.
 *
 * Adds a sub-region at @offset.  The sub-region may not overlap with other
 * subregions (except for those explicitly marked as overlapping).  A region
 * may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 */
void memory_region_add_subregion(MemoryRegion *mr,
                                 target_phys_addr_t offset,
                                 MemoryRegion *subregion);
/**
 * memory_region_add_subregion_overlap: Add a sub-region to a container,
 *                                      with overlap.
 *
 * Adds a sub-region at @offset.  The sub-region may overlap with other
 * subregions.  Conflicts are resolved by having a higher @priority hide a
 * lower @priority.  Subregions without priority are taken as @priority 0.
 * A region may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 * @priority: used for resolving overlaps; highest priority wins.
 */
void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         target_phys_addr_t offset,
                                         MemoryRegion *subregion,
                                         unsigned priority);
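
/*
 * Example: layering a higher-priority option ROM window on top of system RAM;
 * while both are mapped, the ROM hides the RAM underneath it (sketch only;
 * the regions and addresses are illustrative):
 *
 *     memory_region_add_subregion(&system_memory, 0, &ram);
 *     memory_region_add_subregion_overlap(&system_memory, 0xc0000,
 *                                         &option_rom, 1);
 */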
/**
 * memory_region_del_subregion: Remove a subregion.
 *
 * Removes a subregion from its container.
 *
 * @mr: the container to be updated.
 * @subregion: the region being removed; must be a current subregion of @mr.
 */
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion);

#endif

#endif