/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef MEMORY_H
#define MEMORY_H

#ifndef CONFIG_USER_ONLY

#include <stdint.h>
#include <stdbool.h>
#include "qemu-common.h"
#include "cpu-common.h"
#include "targphys.h"
#include "qemu-queue.h"

typedef struct MemoryRegionOps MemoryRegionOps;
typedef struct MemoryRegion MemoryRegion;

/* Must match *_DIRTY_FLAGS in cpu-all.h. To be replaced with dynamic
 * registration.
 */
#define DIRTY_MEMORY_VGA       0
#define DIRTY_MEMORY_CODE      1
#define DIRTY_MEMORY_MIGRATION 3

/*
 * Memory region callbacks
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     target_phys_addr_t addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  target_phys_addr_t addr,
                  uint64_t data,
                  unsigned size);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specify bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported. Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented. Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented. Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported. Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;
};

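/*
 * Example usage (illustrative sketch only; MyDeviceState and the mydev_*
 * callbacks are hypothetical names, not part of this API): a device with a
 * bank of 32-bit registers might describe its MMIO behaviour like this.
 *
 *     static uint64_t mydev_read(void *opaque, target_phys_addr_t addr,
 *                                unsigned size)
 *     {
 *         MyDeviceState *s = opaque;
 *
 *         return s->regs[addr >> 2];
 *     }
 *
 *     static void mydev_write(void *opaque, target_phys_addr_t addr,
 *                             uint64_t data, unsigned size)
 *     {
 *         MyDeviceState *s = opaque;
 *
 *         s->regs[addr >> 2] = data;
 *     }
 *
 *     static const MemoryRegionOps mydev_ops = {
 *         .read = mydev_read,
 *         .write = mydev_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *         .valid = {
 *             .min_access_size = 4,
 *             .max_access_size = 4,
 *         },
 *     };
 */
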
typedef struct CoalescedMemoryRange CoalescedMemoryRange;

struct MemoryRegion {
    /* All fields are private - violators will be prosecuted */
    const MemoryRegionOps *ops;
    void *opaque;
    MemoryRegion *parent;
    uint64_t size;
    target_phys_addr_t addr;
    target_phys_addr_t offset;
    bool backend_registered;
    ram_addr_t ram_addr;
    bool terminates;
    MemoryRegion *alias;
    target_phys_addr_t alias_offset;
    unsigned priority;
    bool may_overlap;
    QTAILQ_HEAD(subregions, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
    const char *name;
    uint8_t dirty_log_mask;
};

/**
 * memory_region_init: Initialize a memory region
 *
 * The region typically acts as a container for other memory regions. Use
 * memory_region_add_subregion() to add subregions.
 *
 * @mr: the #MemoryRegion to be initialized
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region; any subregions beyond this size will be clipped
 */
void memory_region_init(MemoryRegion *mr,
                        const char *name,
                        uint64_t size);
/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(MemoryRegion *mr,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size);

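/*
 * Example (illustrative; mydev_ops and the opaque pointer s are the
 * hypothetical names from the sketch above):
 *
 *     MemoryRegion mmio;
 *
 *     memory_region_init_io(&mmio, &mydev_ops, s, "mydev-mmio", 0x1000);
 *
 * Accesses anywhere in the 4KB window are then dispatched to the read and
 * write callbacks of mydev_ops, with @addr relative to the start of the
 * region.
 */
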
/**
 * memory_region_init_ram: Initialize RAM memory region. Accesses into the
 *                         region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @dev: a device associated with the region; may be %NULL.
 * @name: the name of the region; the pair (@dev, @name) must be globally
 *        unique. The name is part of the save/restore ABI and so cannot be
 *        changed.
 * @size: size of the region.
 */
void memory_region_init_ram(MemoryRegion *mr,
                            DeviceState *dev, /* FIXME: layering violation */
                            const char *name,
                            uint64_t size);

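/*
 * Example (illustrative; the name and size are arbitrary): a board might
 * allocate its main memory as
 *
 *     MemoryRegion ram;
 *
 *     memory_region_init_ram(&ram, NULL, "board.ram", 0x08000000);
 *
 * and later map it into the address space with
 * memory_region_add_subregion(), declared further below.
 */
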
/**
 * memory_region_init_ram_ptr: Initialize RAM memory region from a
 *                             user-provided pointer. Accesses into the
 *                             region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @dev: a device associated with the region; may be %NULL.
 * @name: the name of the region; the pair (@dev, @name) must be globally
 *        unique. The name is part of the save/restore ABI and so cannot be
 *        changed.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                DeviceState *dev, /* FIXME: layering violation */
                                const char *name,
                                uint64_t size,
                                void *ptr);

/**
 * memory_region_init_alias: Initialize a memory region that aliases all or a
 *                           part of another memory region.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @name: used for debugging; not visible to the user or ABI
 * @orig: the region to be referenced; @mr will be equivalent to
 *        @orig between @offset and @offset + @size - 1.
 * @offset: start of the section in @orig to be referenced.
 * @size: size of the region.
 */
void memory_region_init_alias(MemoryRegion *mr,
                              const char *name,
                              MemoryRegion *orig,
                              target_phys_addr_t offset,
                              uint64_t size);
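
/*
 * Example (illustrative; ram and ram_hi are hypothetical regions): to make
 * the last 128KB of a RAM region also visible at a second address, an alias
 * can be created and mapped there instead of the original:
 *
 *     MemoryRegion ram_hi;
 *
 *     memory_region_init_alias(&ram_hi, "ram-hi", &ram,
 *                              0x08000000 - 0x20000, 0x20000);
 *
 * Guest accesses to the alias are forwarded to the corresponding offsets of
 * @orig; no memory is duplicated.
 */
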
/**
 * memory_region_destroy: Destroy a memory region and reclaim all resources.
 *
 * @mr: the region to be destroyed. May not currently be a subregion
 *      (see memory_region_add_subregion()) or referenced in an alias
 *      (see memory_region_init_alias()).
 */
void memory_region_destroy(MemoryRegion *mr);

/**
 * memory_region_size: get a memory region's size.
 *
 * @mr: the memory region being queried.
 */
uint64_t memory_region_size(MemoryRegion *mr);

/**
 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
 *
 * Returns a host pointer to a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()). Use with
 * care.
 *
 * @mr: the memory region being queried.
 */
void *memory_region_get_ram_ptr(MemoryRegion *mr);

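/*
 * Example (illustrative; blob and blob_size are hypothetical): firmware can
 * be copied into a RAM-backed region through the host pointer:
 *
 *     void *p = memory_region_get_ram_ptr(&ram);
 *
 *     memcpy(p, blob, blob_size);
 *
 * The pointer is only meaningful for regions created with
 * memory_region_init_ram() or memory_region_init_ram_ptr(), and writes must
 * stay within the region's size.
 */
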
/**
 * memory_region_set_offset: Sets an offset to be added to MemoryRegionOps
 *                           callbacks.
 *
 * This function is deprecated and should not be used in new code.
 */
void memory_region_set_offset(MemoryRegion *mr, target_phys_addr_t offset);

/**
 * memory_region_set_log: Turn dirty logging on or off for a region.
 *
 * Turns dirty logging on or off for a specified client (display, migration).
 * Only meaningful for RAM regions.
 *
 * @mr: the memory region being updated.
 * @log: whether dirty logging is to be enabled or disabled.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);

/**
 * memory_region_get_dirty: Check whether a page is dirty for a specified
 *                          client.
 *
 * Checks whether a page has been written to since the last
 * call to memory_region_reset_dirty() with the same @client. Dirty logging
 * must be enabled.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                             unsigned client);

/**
 * memory_region_set_dirty: Mark a page as dirty in a memory region.
 *
 * Marks a page as dirty, after it has been dirtied outside guest code.
 *
 * @mr: the memory region being updated.
 * @addr: the address (relative to the start of the region) being dirtied.
 */
void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr);

/**
 * memory_region_sync_dirty_bitmap: Synchronize a region's dirty bitmap with
 *                                  any external TLBs (e.g. kvm)
 *
 * Flushes dirty information from accelerators such as kvm and vhost-net
 * and makes it available to users of the memory API.
 *
 * @mr: the region being flushed.
 */
void memory_region_sync_dirty_bitmap(MemoryRegion *mr);

/**
 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
 *                            client.
 *
 * Marks a range of pages as no longer dirty.
 *
 * @mr: the region being updated.
 * @addr: the start of the subrange being cleaned.
 * @size: the size of the subrange being cleaned.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                               target_phys_addr_t size, unsigned client);

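/*
 * Example (illustrative sketch of a dirty-logging cycle for a display
 * client; vram and update_display_page() are hypothetical):
 *
 *     uint64_t size = memory_region_size(&vram);
 *     target_phys_addr_t addr;
 *
 *     memory_region_set_log(&vram, true, DIRTY_MEMORY_VGA);
 *     ...
 *     memory_region_sync_dirty_bitmap(&vram);
 *     for (addr = 0; addr < size; addr += TARGET_PAGE_SIZE) {
 *         if (memory_region_get_dirty(&vram, addr, DIRTY_MEMORY_VGA)) {
 *             update_display_page(addr);
 *         }
 *     }
 *     memory_region_reset_dirty(&vram, 0, size, DIRTY_MEMORY_VGA);
 */
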
/**
 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
 *
 * Allows a memory region to be marked as read-only (turning it into a ROM).
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @readonly: whether the region is to be ROM or RAM.
 */
void memory_region_set_readonly(MemoryRegion *mr, bool readonly);

/**
 * memory_region_set_coalescing: Enable memory coalescing for the region.
 *
 * Allows writes to the region to be queued for later processing. MMIO ->write
 * callbacks may be delayed until a non-coalesced MMIO is issued.
 * Only useful for IO regions. Roughly similar to write-combining hardware.
 *
 * @mr: the memory region to be write coalesced
 */
void memory_region_set_coalescing(MemoryRegion *mr);

/**
 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
 *                               a region.
 *
 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
 * Multiple calls can be issued to coalesce disjoint ranges.
 *
 * @mr: the memory region to be updated.
 * @offset: the start of the range within the region to be coalesced.
 * @size: the size of the subrange to be coalesced.
 */
void memory_region_add_coalescing(MemoryRegion *mr,
                                  target_phys_addr_t offset,
                                  uint64_t size);

/**
 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
 *
 * Disables any coalescing caused by memory_region_set_coalescing() or
 * memory_region_add_coalescing(). Roughly equivalent to uncacheable memory
 * in hardware.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_coalescing(MemoryRegion *mr);

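/*
 * Example (illustrative; fb_mmio is a hypothetical framebuffer-style I/O
 * region whose writes need not take effect immediately):
 *
 *     memory_region_set_coalescing(&fb_mmio);
 *
 * or, for just part of the region,
 *
 *     memory_region_add_coalescing(&fb_mmio, 0x0, 0x10000);
 *
 * Either form can later be undone with
 * memory_region_clear_coalescing(&fb_mmio).
 */
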
/**
 * memory_region_add_subregion: Add a sub-region to a container.
 *
 * Adds a sub-region at @offset. The sub-region may not overlap with other
 * subregions (except for those explicitly marked as overlapping). A region
 * may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 */
void memory_region_add_subregion(MemoryRegion *mr,
                                 target_phys_addr_t offset,
                                 MemoryRegion *subregion);
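
/*
 * Example (illustrative; the sizes, offsets and the mydev_ops/s names are
 * assumptions carried over from the earlier sketches): building a simple
 * system memory map out of a container, RAM and an MMIO window:
 *
 *     MemoryRegion sysmem, ram, mmio;
 *
 *     memory_region_init(&sysmem, "system", UINT64_MAX);
 *     memory_region_init_ram(&ram, NULL, "board.ram", 0x08000000);
 *     memory_region_init_io(&mmio, &mydev_ops, s, "mydev-mmio", 0x1000);
 *     memory_region_add_subregion(&sysmem, 0x00000000, &ram);
 *     memory_region_add_subregion(&sysmem, 0x10000000, &mmio);
 */
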
/**
 * memory_region_add_subregion_overlap: Add a sub-region to a container,
 *                                      with overlap.
 *
 * Adds a sub-region at @offset. The sub-region may overlap with other
 * subregions. Conflicts are resolved by having a higher @priority hide a
 * lower @priority. Subregions without priority are taken as @priority 0.
 * A region may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 * @priority: used for resolving overlaps; highest priority wins.
 */
void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         target_phys_addr_t offset,
                                         MemoryRegion *subregion,
                                         unsigned priority);
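
/*
 * Example (illustrative; sysmem, ram and vga_window are hypothetical
 * regions): a VGA window can be layered over part of RAM by giving it a
 * higher priority:
 *
 *     memory_region_add_subregion_overlap(&sysmem, 0x00000000, &ram, 0);
 *     memory_region_add_subregion_overlap(&sysmem, 0x000a0000, &vga_window, 1);
 *
 * Accesses that fall inside the VGA window hit @vga_window; everywhere else
 * the underlying @ram remains visible.
 */
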
/**
 * memory_region_del_subregion: Remove a subregion.
 *
 * Removes a subregion from its container.
 *
 * @mr: the container to be updated.
 * @subregion: the region being removed; must be a current subregion of @mr.
 */
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion);

#endif

#endif