/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef MEMORY_H
#define MEMORY_H

#ifndef CONFIG_USER_ONLY

#include <stdint.h>
#include <stdbool.h>
#include "qemu-common.h"
#include "cpu-common.h"
#include "targphys.h"
#include "qemu-queue.h"
#include "iorange.h"

typedef struct MemoryRegionOps MemoryRegionOps;
typedef struct MemoryRegion MemoryRegion;

/* Must match *_DIRTY_FLAGS in cpu-all.h.  To be replaced with dynamic
 * registration.
 */
#define DIRTY_MEMORY_VGA       0
#define DIRTY_MEMORY_CODE      1
#define DIRTY_MEMORY_MIGRATION 3

/*
 * Memory region callbacks
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     target_phys_addr_t addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  target_phys_addr_t addr,
                  uint64_t data,
                  unsigned size);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specify bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented.  Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;
};
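
/*
 * Example (illustrative sketch only, not part of this header; the device
 * type "MyDevice" and its register layout are hypothetical):
 *
 *   static uint64_t my_dev_read(void *opaque, target_phys_addr_t addr,
 *                               unsigned size)
 *   {
 *       MyDevice *s = opaque;
 *
 *       // @addr is already relative to the region's start
 *       switch (addr) {
 *       case 0x0:
 *           return s->status;
 *       default:
 *           return 0;
 *       }
 *   }
 *
 *   static void my_dev_write(void *opaque, target_phys_addr_t addr,
 *                            uint64_t data, unsigned size)
 *   {
 *       MyDevice *s = opaque;
 *
 *       if (addr == 0x4) {
 *           s->control = data;
 *       }
 *   }
 *
 *   static const MemoryRegionOps my_dev_ops = {
 *       .read = my_dev_read,
 *       .write = my_dev_write,
 *       .endianness = DEVICE_NATIVE_ENDIAN,
 *       .valid.min_access_size = 4,
 *       .valid.max_access_size = 4,
 *   };
 */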

typedef struct CoalescedMemoryRange CoalescedMemoryRange;

struct MemoryRegion {
    /* All fields are private - violators will be prosecuted */
    const MemoryRegionOps *ops;
    void *opaque;
    MemoryRegion *parent;
    uint64_t size;
    target_phys_addr_t addr;
    target_phys_addr_t offset;
    bool backend_registered;
    ram_addr_t ram_addr;
    IORange iorange;
    bool terminates;
    MemoryRegion *alias;
    target_phys_addr_t alias_offset;
    unsigned priority;
    bool may_overlap;
    QTAILQ_HEAD(subregions, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
    const char *name;
    uint8_t dirty_log_mask;
};

/**
 * memory_region_init: Initialize a memory region
 *
 * The region typically acts as a container for other memory regions.  Use
 * memory_region_add_subregion() to add subregions.
 *
 * @mr: the #MemoryRegion to be initialized
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region; any subregions beyond this size will be clipped
 */
void memory_region_init(MemoryRegion *mr,
                        const char *name,
                        uint64_t size);
/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(MemoryRegion *mr,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size);
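
/*
 * Example (continuing the hypothetical sketch above; "s" points to a
 * hypothetical MyDevice instance with a MemoryRegion field named "mmio",
 * and "my_dev_ops" is the MemoryRegionOps defined earlier):
 *
 *   memory_region_init_io(&s->mmio, &my_dev_ops, s, "my-dev-mmio", 0x1000);
 */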

/**
 * memory_region_init_ram: Initialize RAM memory region.  Accesses into the
 *                         region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @dev: a device associated with the region; may be %NULL.
 * @name: the name of the region; the pair (@dev, @name) must be globally
 *        unique.  The name is part of the save/restore ABI and so cannot be
 *        changed.
 * @size: size of the region.
 */
void memory_region_init_ram(MemoryRegion *mr,
                            DeviceState *dev, /* FIXME: layering violation */
                            const char *name,
                            uint64_t size);
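
/*
 * Example (illustrative sketch; "ram_mr" is a caller-provided MemoryRegion,
 * and the 64 MB size and "pc.ram" name are arbitrary choices):
 *
 *   static MemoryRegion ram_mr;
 *
 *   memory_region_init_ram(&ram_mr, NULL, "pc.ram", 64 * 1024 * 1024);
 */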

/**
 * memory_region_init_ram_ptr: Initialize RAM memory region from a
 *                             user-provided pointer.  Accesses into the
 *                             region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @dev: a device associated with the region; may be %NULL.
 * @name: the name of the region; the pair (@dev, @name) must be globally
 *        unique.  The name is part of the save/restore ABI and so cannot be
 *        changed.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                DeviceState *dev, /* FIXME: layering violation */
                                const char *name,
                                uint64_t size,
                                void *ptr);
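
/*
 * Example (illustrative sketch; backing the region with a host buffer the
 * caller already owns.  "vram" and "vram_mr" are hypothetical):
 *
 *   static uint8_t vram[0x100000];
 *   static MemoryRegion vram_mr;
 *
 *   memory_region_init_ram_ptr(&vram_mr, NULL, "my-vram", sizeof(vram), vram);
 */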

/**
 * memory_region_init_alias: Initialize a memory region that aliases all or a
 *                           part of another memory region.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @name: used for debugging; not visible to the user or ABI
 * @orig: the region to be referenced; @mr will be equivalent to
 *        @orig between @offset and @offset + @size - 1.
 * @offset: start of the section in @orig to be referenced.
 * @size: size of the region.
 */
void memory_region_init_alias(MemoryRegion *mr,
                              const char *name,
                              MemoryRegion *orig,
                              target_phys_addr_t offset,
                              uint64_t size);
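
/*
 * Example (illustrative sketch; aliasing the first 1 MB of the hypothetical
 * "ram_mr" region above so the same bytes can also be mapped at a second
 * location.  "ram_below_1m" is a hypothetical MemoryRegion variable):
 *
 *   memory_region_init_alias(&ram_below_1m, "ram-below-1m",
 *                            &ram_mr, 0, 0x100000);
 */
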
/**
 * memory_region_destroy: Destroy a memory region and reclaim all resources.
 *
 * @mr: the region to be destroyed.  May not currently be a subregion
 *      (see memory_region_add_subregion()) or referenced in an alias
 *      (see memory_region_init_alias()).
 */
void memory_region_destroy(MemoryRegion *mr);

/**
 * memory_region_size: get a memory region's size.
 *
 * @mr: the memory region being queried.
 */
uint64_t memory_region_size(MemoryRegion *mr);

/**
 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
 *
 * Returns a host pointer to a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()).  Use with
 * care.
 *
 * @mr: the memory region being queried.
 */
void *memory_region_get_ram_ptr(MemoryRegion *mr);
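
/*
 * Example (illustrative sketch; copying firmware bytes directly into a RAM
 * region.  "rom_mr", "blob" and "blob_size" are hypothetical):
 *
 *   void *host = memory_region_get_ram_ptr(&rom_mr);
 *
 *   memcpy(host, blob, blob_size);
 */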

/**
 * memory_region_set_offset: Sets an offset to be added to MemoryRegionOps
 *                           callbacks.
 *
 * This function is deprecated and should not be used in new code.
 */
void memory_region_set_offset(MemoryRegion *mr, target_phys_addr_t offset);

/**
 * memory_region_set_log: Turn dirty logging on or off for a region.
 *
 * Turns dirty logging on or off for a specified client (display, migration).
 * Only meaningful for RAM regions.
 *
 * @mr: the memory region being updated.
 * @log: whether dirty logging is to be enabled or disabled.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);

/**
 * memory_region_get_dirty: Check whether a page is dirty for a specified
 *                          client.
 *
 * Checks whether a page has been written to since the last
 * call to memory_region_reset_dirty() with the same @client.  Dirty logging
 * must be enabled.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                             unsigned client);

/**
 * memory_region_set_dirty: Mark a page as dirty in a memory region.
 *
 * Marks a page as dirty, after it has been dirtied outside guest code.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being dirtied.
 */
void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr);

/**
 * memory_region_sync_dirty_bitmap: Synchronize a region's dirty bitmap with
 *                                  any external TLBs (e.g. kvm)
 *
 * Flushes dirty information from accelerators such as kvm and vhost-net
 * and makes it available to users of the memory API.
 *
 * @mr: the region being flushed.
 */
void memory_region_sync_dirty_bitmap(MemoryRegion *mr);

/**
 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
 *                            client.
 *
 * Marks a range of pages as no longer dirty.
 *
 * @mr: the region being updated.
 * @addr: the start of the subrange being cleaned.
 * @size: the size of the subrange being cleaned.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                               target_phys_addr_t size, unsigned client);
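
/*
 * Example (illustrative sketch of the dirty-logging workflow for a display
 * client; "vram_mr" is the hypothetical region from above and the 4096-byte
 * page size is an arbitrary choice for the example):
 *
 *   target_phys_addr_t addr;
 *
 *   memory_region_set_log(&vram_mr, true, DIRTY_MEMORY_VGA);
 *   ...
 *   memory_region_sync_dirty_bitmap(&vram_mr);
 *   for (addr = 0; addr < memory_region_size(&vram_mr); addr += 4096) {
 *       if (memory_region_get_dirty(&vram_mr, addr, DIRTY_MEMORY_VGA)) {
 *           // redraw the page at @addr, then mark it clean again
 *           memory_region_reset_dirty(&vram_mr, addr, 4096, DIRTY_MEMORY_VGA);
 *       }
 *   }
 */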

/**
 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
 *
 * Allows a memory region to be marked as read-only (turning it into a ROM).
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @readonly: whether the region is to be ROM or RAM.
 */
void memory_region_set_readonly(MemoryRegion *mr, bool readonly);

/**
 * memory_region_set_coalescing: Enable memory coalescing for the region.
 *
 * Allows writes to the region to be queued for later processing.  MMIO ->write
 * callbacks may be delayed until a non-coalesced MMIO is issued.
 * Only useful for IO regions.  Roughly similar to write-combining hardware.
 *
 * @mr: the memory region to be write coalesced
 */
void memory_region_set_coalescing(MemoryRegion *mr);

/**
 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
 *                               a region.
 *
 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
 * Multiple calls can be issued to coalesce disjoint ranges.
 *
 * @mr: the memory region to be updated.
 * @offset: the start of the range within the region to be coalesced.
 * @size: the size of the subrange to be coalesced.
 */
void memory_region_add_coalescing(MemoryRegion *mr,
                                  target_phys_addr_t offset,
                                  uint64_t size);

/**
 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
 *
 * Disables any coalescing caused by memory_region_set_coalescing() or
 * memory_region_add_coalescing().  Roughly equivalent to uncacheable memory
 * hardware.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_coalescing(MemoryRegion *mr);
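
/*
 * Example (illustrative sketch; coalescing writes to the hypothetical
 * "s->mmio" region from the sketches above, except for a 4-byte doorbell
 * register at offset 0 that must reach the ->write callback immediately):
 *
 *   memory_region_add_coalescing(&s->mmio, 4,
 *                                memory_region_size(&s->mmio) - 4);
 */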

/**
 * memory_region_add_subregion: Add a sub-region to a container.
 *
 * Adds a sub-region at @offset.  The sub-region may not overlap with other
 * subregions (except for those explicitly marked as overlapping).  A region
 * may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 */
void memory_region_add_subregion(MemoryRegion *mr,
                                 target_phys_addr_t offset,
                                 MemoryRegion *subregion);
/**
 * memory_region_add_subregion_overlap: Add a sub-region to a container,
 *                                      with overlap.
 *
 * Adds a sub-region at @offset.  The sub-region may overlap with other
 * subregions.  Conflicts are resolved by having a higher @priority hide a
 * lower @priority.  Subregions without priority are taken as @priority 0.
 * A region may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 * @priority: used for resolving overlaps; highest priority wins.
 */
void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         target_phys_addr_t offset,
                                         MemoryRegion *subregion,
                                         unsigned priority);
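
/*
 * Example (illustrative sketch of composing an address space; "sysmem",
 * "ram_mr" and "s->mmio" are the hypothetical regions from the sketches
 * above):
 *
 *   static MemoryRegion sysmem;
 *
 *   memory_region_init(&sysmem, "system", UINT64_MAX);
 *   memory_region_add_subregion(&sysmem, 0, &ram_mr);
 *   // priority 1 makes the MMIO window win over anything mapped at
 *   // priority 0 in the same address range
 *   memory_region_add_subregion_overlap(&sysmem, 0xfe000000, &s->mmio, 1);
 */
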
/**
 * memory_region_del_subregion: Remove a subregion.
 *
 * Removes a subregion from its container.
 *
 * @mr: the container to be updated.
 * @subregion: the region being removed; must be a current subregion of @mr.
 */
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion);

#endif

#endif