/*
 * HAX memory mapping operations
 *
 * Copyright (c) 2015-16 Intel Corporation
 * Copyright 2016 Google, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/address-spaces.h"
#include "exec/exec-all.h"

#include "target/i386/hax-i386.h"
#include "qemu/queue.h"

#define DEBUG_HAX_MEM 0

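/*
 * Note: guarding the body with a plain `if' (instead of compiling it out
 * with #ifdef) keeps the format string and arguments type-checked even when
 * DEBUG_HAX_MEM is 0; the compiler then eliminates the dead call.
 */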
#define DPRINTF(fmt, ...) \
    do { \
        if (DEBUG_HAX_MEM) { \
            fprintf(stdout, fmt, ## __VA_ARGS__); \
        } \
    } while (0)

/**
 * HAXMapping: describes a pending guest physical memory mapping
 *
 * @start_pa: a guest physical address marking the start of the region; must be
 *            page-aligned
 * @size: size of the region in bytes; must be page-aligned
 * @host_va: the host virtual address of the start of the mapping
 * @flags: mapping parameters e.g. HAX_RAM_INFO_ROM or HAX_RAM_INFO_INVALID
 * @entry: additional fields for linking #HAXMapping instances together
 */
typedef struct HAXMapping {
    uint64_t start_pa;
    uint32_t size;
    uint64_t host_va;
    int flags;
    QTAILQ_ENTRY(HAXMapping) entry;
} HAXMapping;

/*
 * A doubly-linked list (actually a tail queue) of the pending page mappings
 * for the ongoing memory transaction.
 *
 * It is used to optimize the number of page mapping updates done through the
 * kernel module. For example, it is effective when a driver is digging an
 * MMIO hole inside an existing memory mapping: the listener sees a deletion
 * of the whole region, then the addition of the two remaining RAM areas
 * around the hole, and finally the memory transaction commit. During the
 * commit, only the removal of the pages from the MMIO hole is sent to the
 * kernel, the result of the deletion and additions having been computed
 * locally beforehand.
 */
static QTAILQ_HEAD(HAXMappingListHead, HAXMapping) mappings =
    QTAILQ_HEAD_INITIALIZER(mappings);
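
/*
 * Illustration (hypothetical addresses): digging a one-page MMIO hole at
 * 0xb000 into a RAM region mapped at 0x0..0x10000 queues, within a single
 * transaction:
 *
 *   - 0x00000..0x10000   deletion of the whole region
 *   + 0x00000..0x0b000   re-addition of the RAM below the hole
 *   + 0x0c000..0x10000   re-addition of the RAM above the hole
 *
 * hax_update_mapping() cancels overlapping opposite updates as they are
 * queued, so hax_transaction_commit() only sends the unmapping of
 * 0xb000..0xc000 to the kernel.
 */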

/**
 * hax_mapping_dump_list: dumps @mappings to stdout (for debugging)
 */
static void hax_mapping_dump_list(void)
{
    HAXMapping *entry;

    DPRINTF("%s updates:\n", __func__);
    QTAILQ_FOREACH(entry, &mappings, entry) {
        DPRINTF("\t%c 0x%016" PRIx64 "->0x%016" PRIx64 " VA 0x%016" PRIx64
                "%s\n", entry->flags & HAX_RAM_INFO_INVALID ? '-' : '+',
                entry->start_pa, entry->start_pa + entry->size, entry->host_va,
                entry->flags & HAX_RAM_INFO_ROM ? " ROM" : "");
    }
}

static void hax_insert_mapping_before(HAXMapping *next, uint64_t start_pa,
                                      uint32_t size, uint64_t host_va,
                                      uint8_t flags)
{
    HAXMapping *entry;

    entry = g_malloc0(sizeof(*entry));
    entry->start_pa = start_pa;
    entry->size = size;
    entry->host_va = host_va;
    entry->flags = flags;
    if (!next) {
        QTAILQ_INSERT_TAIL(&mappings, entry, entry);
    } else {
        QTAILQ_INSERT_BEFORE(next, entry, entry);
    }
}

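/*
 * A pending entry and an incoming update cancel out when they map the same
 * host memory and their flags differ only in HAX_RAM_INFO_INVALID, i.e. one
 * is a removal and the other re-adds the identical mapping. A difference in
 * HAX_RAM_INFO_ROM makes the XOR differ from HAX_RAM_INFO_INVALID, so a
 * RAM<->ROM change is (correctly) not treated as a no-op.
 */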
static bool hax_mapping_is_opposite(HAXMapping *entry, uint64_t host_va,
                                    uint8_t flags)
{
    /* removed then added without change for the read-only flag */
    bool nop_flags = (entry->flags ^ flags) == HAX_RAM_INFO_INVALID;

    return (entry->host_va == host_va) && nop_flags;
}

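/*
 * Folds the update [start_pa, start_pa + size) into the pending list. The
 * list is kept sorted by start_pa with non-overlapping entries: parts of the
 * update that do not overlap an existing entry are inserted as new entries,
 * while overlapping parts either cancel out against an opposite entry or
 * overwrite the older pending state.
 */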
static void hax_update_mapping(uint64_t start_pa, uint32_t size,
                               uint64_t host_va, uint8_t flags)
{
    uint64_t end_pa = start_pa + size;
    uint32_t chunk_sz;
    HAXMapping *entry, *next;

    QTAILQ_FOREACH_SAFE(entry, &mappings, entry, next) {
        if (start_pa >= entry->start_pa + entry->size) {
            continue;
        }
        if (start_pa < entry->start_pa) {
            chunk_sz = end_pa <= entry->start_pa ? size
                                                 : entry->start_pa - start_pa;
            hax_insert_mapping_before(entry, start_pa, chunk_sz,
                                      host_va, flags);
            start_pa += chunk_sz;
            host_va += chunk_sz;
            size -= chunk_sz;
        } else if (start_pa > entry->start_pa) {
            /* split the existing chunk at start_pa */
            chunk_sz = start_pa - entry->start_pa;
            hax_insert_mapping_before(entry, entry->start_pa, chunk_sz,
                                      entry->host_va, entry->flags);
            entry->start_pa += chunk_sz;
            entry->host_va += chunk_sz;
            entry->size -= chunk_sz;
        }
        /* now start_pa == entry->start_pa */
        chunk_sz = MIN(size, entry->size);
        if (chunk_sz) {
            bool nop = hax_mapping_is_opposite(entry, host_va, flags);
            bool partial = chunk_sz < entry->size;
            if (partial) {
                /* remove the beginning of the existing chunk */
                entry->start_pa += chunk_sz;
                entry->host_va += chunk_sz;
                entry->size -= chunk_sz;
                if (!nop) {
                    hax_insert_mapping_before(entry, start_pa, chunk_sz,
                                              host_va, flags);
                }
            } else { /* affects the full mapping entry */
                if (nop) { /* no change to this mapping, remove it */
                    QTAILQ_REMOVE(&mappings, entry, entry);
                    g_free(entry);
                } else { /* update mapping properties */
                    entry->host_va = host_va;
                    entry->flags = flags;
                }
            }
            start_pa += chunk_sz;
            host_va += chunk_sz;
            size -= chunk_sz;
        }
        if (!size) { /* we are done */
            break;
        }
    }
    if (size) { /* add the leftover */
        hax_insert_mapping_before(NULL, start_pa, size, host_va, flags);
    }
}

static void hax_process_section(MemoryRegionSection *section, uint8_t flags)
{
    MemoryRegion *mr = section->mr;
    hwaddr start_pa = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    unsigned int delta;
    uint64_t host_va;

    /* We only care about RAM pages */
    if (!memory_region_is_ram(mr)) {
        return;
    }

    /* Adjust start_pa and size so that they are page-aligned. (Cf
     * kvm_set_phys_mem() in kvm-all.c).
     */
    delta = qemu_real_host_page_size - (start_pa & ~qemu_real_host_page_mask);
    delta &= ~qemu_real_host_page_mask;
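    /*
     * E.g. with 4 KiB host pages, a section starting at guest address 0x1234
     * yields delta = (0x1000 - 0x234) & 0xfff = 0xdcc, so start_pa below is
     * rounded up to 0x2000 and the unaligned head of the section is skipped;
     * an already-aligned start_pa yields delta = 0.
     */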
    if (delta > size) {
        return;
    }
    start_pa += delta;
    size -= delta;
    size &= qemu_real_host_page_mask;
    if (!size || (start_pa & ~qemu_real_host_page_mask)) {
        return;
    }

    host_va = (uintptr_t)memory_region_get_ram_ptr(mr)
              + section->offset_within_region + delta;
    if (memory_region_is_rom(section->mr)) {
        flags |= HAX_RAM_INFO_ROM;
    }

    /* the kernel module interface uses 32-bit sizes (but we could split...) */
    g_assert(size <= UINT32_MAX);

    hax_update_mapping(start_pa, size, host_va, flags);
}

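/*
 * The listener callbacks below only queue updates in @mappings; nothing is
 * sent to the kernel until hax_transaction_commit() flushes the list.
 */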
static void hax_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    memory_region_ref(section->mr);
    hax_process_section(section, 0);
}

static void hax_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    hax_process_section(section, HAX_RAM_INFO_INVALID);
    memory_region_unref(section->mr);
}

static void hax_transaction_begin(MemoryListener *listener)
{
    g_assert(QTAILQ_EMPTY(&mappings));
}

static void hax_transaction_commit(MemoryListener *listener)
{
    if (!QTAILQ_EMPTY(&mappings)) {
        HAXMapping *entry, *next;

        if (DEBUG_HAX_MEM) {
            hax_mapping_dump_list();
        }
        QTAILQ_FOREACH_SAFE(entry, &mappings, entry, next) {
            if (entry->flags & HAX_RAM_INFO_INVALID) {
                /* for unmapping, put the values expected by the kernel */
                entry->flags = HAX_RAM_INFO_INVALID;
                entry->host_va = 0;
            }
            if (hax_set_ram(entry->start_pa, entry->size,
                            entry->host_va, entry->flags)) {
                fprintf(stderr, "%s: Failed mapping @0x%016" PRIx64 "+0x%"
                        PRIx32 " flags %02x\n", __func__, entry->start_pa,
                        entry->size, entry->flags);
            }
            QTAILQ_REMOVE(&mappings, entry, entry);
            g_free(entry);
        }
    }
}

/* currently we fake the dirty bitmap sync, always dirty */
static void hax_log_sync(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    MemoryRegion *mr = section->mr;

    if (!memory_region_is_ram(mr)) {
        /* Skip MMIO regions */
        return;
    }

    memory_region_set_dirty(mr, 0, int128_get64(section->size));
}
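
/*
 * Always-dirty is correct but pessimistic: consumers of the dirty log (e.g.
 * the VGA framebuffer code, live migration) will re-process the whole
 * region on every sync.
 */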
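/* Priority 10 matches the KVM memory listener (cf. kvm-all.c). */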
static MemoryListener hax_memory_listener = {
    .begin = hax_transaction_begin,
    .commit = hax_transaction_commit,
    .region_add = hax_region_add,
    .region_del = hax_region_del,
    .log_sync = hax_log_sync,
    .priority = 10,
};

static void hax_ram_block_added(RAMBlockNotifier *n, void *host, size_t size)
{
    /*
     * With HAX, QEMU allocates the virtual address, and the HAX kernel
     * module populates it with physical memory. There is currently no
     * paging, so the user must ensure enough free memory is available in
     * advance.
     */
    if (hax_populate_ram((uint64_t)(uintptr_t)host, size) < 0) {
        fprintf(stderr, "HAX failed to populate RAM\n");
        abort();
    }
}

static struct RAMBlockNotifier hax_ram_notifier = {
    .ram_block_added = hax_ram_block_added,
};

void hax_memory_init(void)
{
    ram_block_notifier_add(&hax_ram_notifier);
    memory_listener_register(&hax_memory_listener, &address_space_memory);
}
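
/*
 * Note: hax_memory_init() is expected to run once, early in accelerator
 * setup, before guest RAM is allocated: the RAM block notifier must be in
 * place so that every new RAM block gets populated, and
 * memory_listener_register() replays region_add for regions that already
 * exist in the address space.
 */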