]> git.proxmox.com Git - qemu.git/blob - include/exec/memory-internal.h
Merge remote-tracking branch 'aneesh/for-upstream' into staging
[qemu.git] / include / exec / memory-internal.h
1 /*
2 * Declarations for obsolete exec.c functions
3 *
4 * Copyright 2011 Red Hat, Inc. and/or its affiliates
5 *
6 * Authors:
7 * Avi Kivity <avi@redhat.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2 or
10 * later. See the COPYING file in the top-level directory.
11 *
12 */
13
14 /*
15 * This header is for use by exec.c and memory.c ONLY. Do not include it.
16 * The functions declared here will be removed soon.
17 */
18
19 #ifndef MEMORY_INTERNAL_H
20 #define MEMORY_INTERNAL_H
21
22 #ifndef CONFIG_USER_ONLY
23 #include "hw/xen/xen.h"
24
/* One node of the multi-level physical-memory map.  Packed into 16 bits
 * so the node tables stay compact; a node is either an interior node
 * (pointing at the next-level table) or a leaf (pointing at a section).
 */
typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};
32
typedef struct AddressSpaceDispatch AddressSpaceDispatch;

/* Per-AddressSpace dispatch state.  The embedded MemoryListener keeps
 * phys_map in sync with memory-API topology changes.
 */
struct AddressSpaceDispatch {
    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    MemoryListener listener;
};
42
/* Set up / tear down the dispatch structures of @as (defined in exec.c). */
void address_space_init_dispatch(AddressSpace *as);
void address_space_destroy_dispatch(AddressSpace *as);

/* RAM block management.  The _from_ptr variant presumably registers
 * caller-provided backing memory @host rather than allocating --
 * NOTE(review): confirm against the exec.c definitions.
 */
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr);
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr);
/* Translate a ram_addr_t offset to a host virtual pointer. */
void *qemu_get_ram_ptr(ram_addr_t addr);
void qemu_ram_free(ram_addr_t addr);
void qemu_ram_free_from_ptr(ram_addr_t addr);
52
/* Per-page dirty-tracking bits stored in ram_list.phys_dirty[]; each
 * client (VGA refresh, TB invalidation, migration) owns one bit.
 */
#define VGA_DIRTY_FLAG 0x01
#define CODE_DIRTY_FLAG 0x02
#define MIGRATION_DIRTY_FLAG 0x08
56
57 static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr)
58 {
59 return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS];
60 }
61
62 /* read dirty bit (return 0 or 1) */
63 static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
64 {
65 return cpu_physical_memory_get_dirty_flags(addr) == 0xff;
66 }
67
68 static inline int cpu_physical_memory_get_dirty(ram_addr_t start,
69 ram_addr_t length,
70 int dirty_flags)
71 {
72 int ret = 0;
73 ram_addr_t addr, end;
74
75 end = TARGET_PAGE_ALIGN(start + length);
76 start &= TARGET_PAGE_MASK;
77 for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
78 ret |= cpu_physical_memory_get_dirty_flags(addr) & dirty_flags;
79 }
80 return ret;
81 }
82
83 static inline int cpu_physical_memory_set_dirty_flags(ram_addr_t addr,
84 int dirty_flags)
85 {
86 return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] |= dirty_flags;
87 }
88
89 static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
90 {
91 cpu_physical_memory_set_dirty_flags(addr, 0xff);
92 }
93
94 static inline int cpu_physical_memory_clear_dirty_flags(ram_addr_t addr,
95 int dirty_flags)
96 {
97 int mask = ~dirty_flags;
98
99 return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] &= mask;
100 }
101
102 static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
103 ram_addr_t length,
104 int dirty_flags)
105 {
106 ram_addr_t addr, end;
107
108 end = TARGET_PAGE_ALIGN(start + length);
109 start &= TARGET_PAGE_MASK;
110 for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
111 cpu_physical_memory_set_dirty_flags(addr, dirty_flags);
112 }
113 xen_modified_memory(addr, length);
114 }
115
116 static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start,
117 ram_addr_t length,
118 int dirty_flags)
119 {
120 ram_addr_t addr, end;
121
122 end = TARGET_PAGE_ALIGN(start + length);
123 start &= TARGET_PAGE_MASK;
124 for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
125 cpu_physical_memory_clear_dirty_flags(addr, dirty_flags);
126 }
127 }
128
/* Clear @dirty_flags for all pages in [start, end); defined in exec.c. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags);

/* Ops vtable for ioport-backed memory regions -- NOTE(review): defined
 * in memory.c; confirm semantics there.
 */
extern const IORangeOps memory_region_iorange_ops;
133
134 #endif
135
136 #endif