/*
 * Physical memory management API
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef MEMORY_H
#define MEMORY_H

#ifndef CONFIG_USER_ONLY

#include <stdint.h>
#include <stdbool.h>
#include "qemu-common.h"
#include "cpu-common.h"
#include "targphys.h"
#include "qemu-queue.h"
#include "iorange.h"
#include "ioport.h"

typedef struct MemoryRegionOps MemoryRegionOps;
typedef struct MemoryRegion MemoryRegion;
typedef struct MemoryRegionPortio MemoryRegionPortio;

/* Must match *_DIRTY_FLAGS in cpu-all.h.  To be replaced with dynamic
 * registration.
 */
#define DIRTY_MEMORY_VGA       0
#define DIRTY_MEMORY_CODE      1
#define DIRTY_MEMORY_MIGRATION 3

/*
 * Memory region callbacks
 */
struct MemoryRegionOps {
    /* Read from the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    uint64_t (*read)(void *opaque,
                     target_phys_addr_t addr,
                     unsigned size);
    /* Write to the memory region. @addr is relative to @mr; @size is
     * in bytes. */
    void (*write)(void *opaque,
                  target_phys_addr_t addr,
                  uint64_t data,
                  unsigned size);

    enum device_endian endianness;
    /* Guest-visible constraints: */
    struct {
        /* If nonzero, specify bounds on access sizes beyond which a machine
         * check is thrown.
         */
        unsigned min_access_size;
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise unaligned
         * accesses throw machine checks.
         */
        bool unaligned;
    } valid;
    /* Internal implementation constraints: */
    struct {
        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
         * will be rounded upwards and a partial result will be returned.
         */
        unsigned min_access_size;
        /* If nonzero, specifies the maximum size implemented.  Larger sizes
         * will be done as a series of accesses with smaller sizes.
         */
        unsigned max_access_size;
        /* If true, unaligned accesses are supported.  Otherwise all accesses
         * are converted to (possibly multiple) naturally aligned accesses.
         */
        bool unaligned;
    } impl;

    /* If .read and .write are not present, old_portio may be used for
     * backwards compatibility with old portio registration.
     */
    const MemoryRegionPortio *old_portio;
};

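/*
 * Illustrative sketch (not part of this header): a device might describe its
 * MMIO callbacks and access constraints as below.  "MyDevState", mydev_read()
 * and mydev_write() are hypothetical names, not defined anywhere in QEMU.
 *
 *   static uint64_t mydev_read(void *opaque, target_phys_addr_t addr,
 *                              unsigned size)
 *   {
 *       MyDevState *s = opaque;
 *       return s->regs[addr >> 2];
 *   }
 *
 *   static void mydev_write(void *opaque, target_phys_addr_t addr,
 *                           uint64_t data, unsigned size)
 *   {
 *       MyDevState *s = opaque;
 *       s->regs[addr >> 2] = data;
 *   }
 *
 *   static const MemoryRegionOps mydev_ops = {
 *       .read = mydev_read,
 *       .write = mydev_write,
 *       .endianness = DEVICE_NATIVE_ENDIAN,
 *       .valid.min_access_size = 4,
 *       .valid.max_access_size = 4,
 *   };
 */
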
typedef struct CoalescedMemoryRange CoalescedMemoryRange;

struct MemoryRegion {
    /* All fields are private - violators will be prosecuted */
    const MemoryRegionOps *ops;
    void *opaque;
    MemoryRegion *parent;
    uint64_t size;
    target_phys_addr_t addr;
    target_phys_addr_t offset;
    bool backend_registered;
    ram_addr_t ram_addr;
    IORange iorange;
    bool terminates;
    MemoryRegion *alias;
    target_phys_addr_t alias_offset;
    unsigned priority;
    bool may_overlap;
    QTAILQ_HEAD(subregions, MemoryRegion) subregions;
    QTAILQ_ENTRY(MemoryRegion) subregions_link;
    QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
    const char *name;
    uint8_t dirty_log_mask;
};

struct MemoryRegionPortio {
    uint32_t offset;
    uint32_t len;
    unsigned size;
    IOPortReadFunc *read;
    IOPortWriteFunc *write;
};

#define PORTIO_END { }

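/*
 * Illustrative sketch (not part of this header): legacy port I/O handlers can
 * be attached through @old_portio instead of .read/.write.  The my_ioport_*
 * callbacks and the port layout below are hypothetical.
 *
 *   static const MemoryRegionPortio mydev_portio[] = {
 *       { 0, 2, 1, .read = my_ioport_readb, .write = my_ioport_writeb },
 *       { 0, 1, 2, .read = my_ioport_readw, .write = my_ioport_writew },
 *       PORTIO_END,
 *   };
 *
 *   static const MemoryRegionOps mydev_port_ops = {
 *       .old_portio = mydev_portio,
 *   };
 */
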
/**
 * memory_region_init: Initialize a memory region
 *
 * The region typically acts as a container for other memory regions.  Use
 * memory_region_add_subregion() to add subregions.
 *
 * @mr: the #MemoryRegion to be initialized
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region; any subregions beyond this size will be clipped
 */
void memory_region_init(MemoryRegion *mr,
                        const char *name,
                        uint64_t size);
/**
 * memory_region_init_io: Initialize an I/O memory region.
 *
 * Accesses into the region will cause the callbacks in @ops to be called.
 * If @size is nonzero, subregions will be clipped to @size.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @ops: a structure containing read and write callbacks to be used when
 *       I/O is performed on the region.
 * @opaque: passed to the read and write callbacks of the @ops structure.
 * @name: used for debugging; not visible to the user or ABI
 * @size: size of the region.
 */
void memory_region_init_io(MemoryRegion *mr,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size);

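/*
 * Illustrative sketch (not part of this header): a device wrapping its MMIO
 * registers with the hypothetical mydev_ops callbacks sketched earlier.  "s"
 * stands in for the device's opaque state pointer; names and sizes are made up.
 *
 *   MemoryRegion container;
 *   MemoryRegion mmio;
 *
 *   memory_region_init(&container, "mydev", 0x4000);
 *   memory_region_init_io(&mmio, &mydev_ops, s, "mydev-regs", 0x1000);
 */
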
/**
 * memory_region_init_ram: Initialize RAM memory region.  Accesses into the
 *                         region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @dev: a device associated with the region; may be %NULL.
 * @name: the name of the region; the pair (@dev, @name) must be globally
 *        unique.  The name is part of the save/restore ABI and so cannot be
 *        changed.
 * @size: size of the region.
 */
void memory_region_init_ram(MemoryRegion *mr,
                            DeviceState *dev, /* FIXME: layering violation */
                            const char *name,
                            uint64_t size);

/**
 * memory_region_init_ram_ptr: Initialize RAM memory region from a
 *                             user-provided pointer.  Accesses into the
 *                             region will modify memory directly.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @dev: a device associated with the region; may be %NULL.
 * @name: the name of the region; the pair (@dev, @name) must be globally
 *        unique.  The name is part of the save/restore ABI and so cannot be
 *        changed.
 * @size: size of the region.
 * @ptr: memory to be mapped; must contain at least @size bytes.
 */
void memory_region_init_ram_ptr(MemoryRegion *mr,
                                DeviceState *dev, /* FIXME: layering violation */
                                const char *name,
                                uint64_t size,
                                void *ptr);

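/*
 * Illustrative sketch (not part of this header): allocating guest RAM, and
 * wrapping an existing host buffer as RAM.  "dev", "ram_size", "vram_size"
 * and "vram_ptr" are hypothetical; the names are only examples.
 *
 *   MemoryRegion ram;
 *   MemoryRegion vram;
 *
 *   memory_region_init_ram(&ram, NULL, "pc.ram", ram_size);
 *   memory_region_init_ram_ptr(&vram, &dev->qdev, "mydev.vram",
 *                              vram_size, vram_ptr);
 */
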
/**
 * memory_region_init_alias: Initialize a memory region that aliases all or a
 *                           part of another memory region.
 *
 * @mr: the #MemoryRegion to be initialized.
 * @name: used for debugging; not visible to the user or ABI
 * @orig: the region to be referenced; @mr will be equivalent to
 *        @orig between @offset and @offset + @size - 1.
 * @offset: start of the section in @orig to be referenced.
 * @size: size of the region.
 */
void memory_region_init_alias(MemoryRegion *mr,
                              const char *name,
                              MemoryRegion *orig,
                              target_phys_addr_t offset,
                              uint64_t size);
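
/*
 * Illustrative sketch (not part of this header): exposing the first 1MB of a
 * hypothetical "ram" region a second time, e.g. as a low-memory window.
 *
 *   MemoryRegion ram_below_1m;
 *
 *   memory_region_init_alias(&ram_below_1m, "ram-below-1m", &ram,
 *                            0, 0x100000);
 */
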
/**
 * memory_region_destroy: Destroy a memory region and reclaim all resources.
 *
 * @mr: the region to be destroyed.  May not currently be a subregion
 *      (see memory_region_add_subregion()) or referenced in an alias
 *      (see memory_region_init_alias()).
 */
void memory_region_destroy(MemoryRegion *mr);

/**
 * memory_region_size: get a memory region's size.
 *
 * @mr: the memory region being queried.
 */
uint64_t memory_region_size(MemoryRegion *mr);

/**
 * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
 *
 * Returns a host pointer to a RAM memory region (created with
 * memory_region_init_ram() or memory_region_init_ram_ptr()).  Use with
 * care.
 *
 * @mr: the memory region being queried.
 */
void *memory_region_get_ram_ptr(MemoryRegion *mr);

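/*
 * Illustrative sketch (not part of this header): filling a RAM-backed region
 * directly from host code.  "rom", "blob" and "blob_size" are hypothetical.
 *
 *   void *p = memory_region_get_ram_ptr(&rom);
 *   memcpy(p, blob, blob_size);
 */
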
/**
 * memory_region_set_offset: Sets an offset to be added to MemoryRegionOps
 *                           callbacks.
 *
 * This function is deprecated and should not be used in new code.
 */
void memory_region_set_offset(MemoryRegion *mr, target_phys_addr_t offset);

/**
 * memory_region_set_log: Turn dirty logging on or off for a region.
 *
 * Turns dirty logging on or off for a specified client (display, migration).
 * Only meaningful for RAM regions.
 *
 * @mr: the memory region being updated.
 * @log: whether dirty logging is to be enabled or disabled.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);

/**
 * memory_region_get_dirty: Check whether a page is dirty for a specified
 *                          client.
 *
 * Checks whether a page has been written to since the last
 * call to memory_region_reset_dirty() with the same @client.  Dirty logging
 * must be enabled.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being queried.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                             unsigned client);

/**
 * memory_region_set_dirty: Mark a page as dirty in a memory region.
 *
 * Marks a page as dirty, after it has been dirtied outside guest code.
 *
 * @mr: the memory region being queried.
 * @addr: the address (relative to the start of the region) being dirtied.
 */
void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr);

/**
 * memory_region_sync_dirty_bitmap: Synchronize a region's dirty bitmap with
 *                                  any external TLBs (e.g. kvm)
 *
 * Flushes dirty information from accelerators such as kvm and vhost-net
 * and makes it available to users of the memory API.
 *
 * @mr: the region being flushed.
 */
void memory_region_sync_dirty_bitmap(MemoryRegion *mr);

/**
 * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
 *                            client.
 *
 * Marks a range of pages as no longer dirty.
 *
 * @mr: the region being updated.
 * @addr: the start of the subrange being cleaned.
 * @size: the size of the subrange being cleaned.
 * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
 *          %DIRTY_MEMORY_VGA.
 */
void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                               target_phys_addr_t size, unsigned client);

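/*
 * Illustrative sketch (not part of this header): a display device scanning a
 * hypothetical "vram" region for pages the guest has touched.  redraw_page()
 * is a hypothetical helper.
 *
 *   target_phys_addr_t addr;
 *
 *   memory_region_set_log(&vram, true, DIRTY_MEMORY_VGA);
 *   ...
 *   memory_region_sync_dirty_bitmap(&vram);
 *   for (addr = 0; addr < memory_region_size(&vram);
 *        addr += TARGET_PAGE_SIZE) {
 *       if (memory_region_get_dirty(&vram, addr, DIRTY_MEMORY_VGA)) {
 *           redraw_page(addr);
 *       }
 *   }
 *   memory_region_reset_dirty(&vram, 0, memory_region_size(&vram),
 *                             DIRTY_MEMORY_VGA);
 */
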
/**
 * memory_region_set_readonly: Turn a memory region read-only (or read-write)
 *
 * Allows a memory region to be marked as read-only (turning it into a ROM).
 * Only useful on RAM regions.
 *
 * @mr: the region being updated.
 * @readonly: whether the region is to be ROM or RAM.
 */
void memory_region_set_readonly(MemoryRegion *mr, bool readonly);

/**
 * memory_region_set_coalescing: Enable memory coalescing for the region.
 *
 * Enables writes to a region to be queued for later processing.  MMIO ->write
 * callbacks may be delayed until a non-coalesced MMIO is issued.
 * Only useful for IO regions.  Roughly similar to write-combining hardware.
 *
 * @mr: the memory region to be write coalesced
 */
void memory_region_set_coalescing(MemoryRegion *mr);

/**
 * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
 *                               a region.
 *
 * Like memory_region_set_coalescing(), but works on a sub-range of a region.
 * Multiple calls can be issued to coalesce disjoint ranges.
 *
 * @mr: the memory region to be updated.
 * @offset: the start of the range within the region to be coalesced.
 * @size: the size of the subrange to be coalesced.
 */
void memory_region_add_coalescing(MemoryRegion *mr,
                                  target_phys_addr_t offset,
                                  uint64_t size);

/**
 * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
 *
 * Disables any coalescing caused by memory_region_set_coalescing() or
 * memory_region_add_coalescing().  Roughly equivalent to uncacheable memory
 * in hardware.
 *
 * @mr: the memory region to be updated.
 */
void memory_region_clear_coalescing(MemoryRegion *mr);

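/*
 * Illustrative sketch (not part of this header): batching guest writes to a
 * hypothetical framebuffer MMIO region, then dropping the optimization when
 * precise ordering is needed again.
 *
 *   memory_region_set_coalescing(&fb_mmio);
 *   ...
 *   memory_region_clear_coalescing(&fb_mmio);
 *
 * or, for just one register window inside the region:
 *
 *   memory_region_add_coalescing(&fb_mmio, 0x100, 0x80);
 */
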
/**
 * memory_region_add_subregion: Add a sub-region to a container.
 *
 * Adds a sub-region at @offset.  The sub-region may not overlap with other
 * subregions (except for those explicitly marked as overlapping).  A region
 * may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 */
void memory_region_add_subregion(MemoryRegion *mr,
                                 target_phys_addr_t offset,
                                 MemoryRegion *subregion);
/**
 * memory_region_add_subregion_overlap: Add a sub-region to a container,
 *                                      with overlap.
 *
 * Adds a sub-region at @offset.  The sub-region may overlap with other
 * subregions.  Conflicts are resolved by having a higher @priority hide a
 * lower @priority.  Subregions without priority are taken as @priority 0.
 * A region may only be added once as a subregion (unless removed with
 * memory_region_del_subregion()); use memory_region_init_alias() if you
 * want a region to be a subregion in multiple locations.
 *
 * @mr: the region to contain the new subregion; must be a container
 *      initialized with memory_region_init().
 * @offset: the offset relative to @mr where @subregion is added.
 * @subregion: the subregion to be added.
 * @priority: used for resolving overlaps; highest priority wins.
 */
void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         target_phys_addr_t offset,
                                         MemoryRegion *subregion,
                                         unsigned priority);
/**
 * memory_region_del_subregion: Remove a subregion.
 *
 * Removes a subregion from its container.
 *
 * @mr: the container to be updated.
 * @subregion: the region being removed; must be a current subregion of @mr.
 */
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion);

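/*
 * Illustrative sketch (not part of this header): composing an address space
 * out of RAM, MMIO, and an overlapping higher-priority option ROM window.
 * All regions and offsets are hypothetical.
 *
 *   memory_region_add_subregion(&sysmem, 0x00000000, &ram);
 *   memory_region_add_subregion(&sysmem, 0x10000000, &mmio);
 *   memory_region_add_subregion_overlap(&sysmem, 0x000c0000, &option_rom, 1);
 *   ...
 *   memory_region_del_subregion(&sysmem, &option_rom);
 */
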
#endif

#endif