/*
 * Copyright 2008 IBM Corporation
 *           2008 Red Hat, Inc.
 * Copyright 2011 Intel Corporation
 * Copyright 2016 Veertu, Inc.
 * Copyright 2017 The Android Open Source Project
 *
 * QEMU Hypervisor.framework support
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * This file contains code in the public domain from the hvdos project:
 * https://github.com/mist64/hvdos
 *
 * Parts Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "exec/address-spaces.h"
#include "exec/exec-all.h"
#include "sysemu/cpus.h"
#include "sysemu/hvf.h"
#include "sysemu/hvf_int.h"
#include "sysemu/runstate.h"
#include "qemu/guest-random.h"

#include "hvf-accel-ops.h"

HVFState *hvf_state;

/* Memory slots */

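/*
 * Return the first slot that overlaps the guest-physical range
 * [start, start + size), or NULL if no populated slot does.  A slot
 * with size == 0 is unused and never matches.
 */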
hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t size)
{
    hvf_slot *slot;
    int x;
    for (x = 0; x < hvf_state->num_slots; ++x) {
        slot = &hvf_state->slots[x];
        if (slot->size && start < (slot->start + slot->size) &&
            (start + size) > slot->start) {
            return slot;
        }
    }
    return NULL;
}

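/*
 * Shadow of the hvf slot table, tracking what is currently mapped into
 * the hypervisor so that a stale mapping can be unmapped before a slot
 * is remapped.
 */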
struct mac_slot {
    int present;
    uint64_t size;
    uint64_t gpa_start;
    uint64_t gva;
};

struct mac_slot mac_slots[32];

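/*
 * Push one slot's state into Hypervisor.framework: unmap a previous
 * mapping whose size changed, then, unless the slot is empty
 * (size == 0), map slot->mem at guest-physical slot->start with the
 * given protection flags.
 */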
static int do_hvf_set_memory(hvf_slot *slot, hv_memory_flags_t flags)
{
    struct mac_slot *macslot;
    hv_return_t ret;

    macslot = &mac_slots[slot->slot_id];

    if (macslot->present) {
        if (macslot->size != slot->size) {
            macslot->present = 0;
            ret = hv_vm_unmap(macslot->gpa_start, macslot->size);
            assert_hvf_ok(ret);
        }
    }

    if (!slot->size) {
        return 0;
    }

    macslot->present = 1;
    macslot->gpa_start = slot->start;
    macslot->size = slot->size;
    ret = hv_vm_map((hv_uvaddr_t)slot->mem, slot->start, slot->size, flags);
    assert_hvf_ok(ret);
    return 0;
}

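/*
 * Add or remove the hvf slot backing a MemoryRegionSection.  Any existing
 * overlapping slot is torn down first; on add, a free slot is claimed and
 * mapped read/execute-only for ROM and romd sections, read-write-execute
 * otherwise.
 */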
static void hvf_set_phys_mem(MemoryRegionSection *section, bool add)
{
    hvf_slot *mem;
    MemoryRegion *area = section->mr;
    bool writable = !area->readonly && !area->rom_device;
    hv_memory_flags_t flags;

    if (!memory_region_is_ram(area)) {
        if (writable) {
            return;
        } else if (!memory_region_is_romd(area)) {
            /*
             * If the memory device is not in romd_mode, then we actually want
             * to remove the hvf memory slot so all accesses will trap.
             */
            add = false;
        }
    }

    mem = hvf_find_overlap_slot(
            section->offset_within_address_space,
            int128_get64(section->size));

    if (mem && add) {
        if (mem->size == int128_get64(section->size) &&
            mem->start == section->offset_within_address_space &&
            mem->mem == (memory_region_get_ram_ptr(area) +
            section->offset_within_region)) {
            return; /* Same region is being re-registered; nothing to do. */
        }
    }

    /* Region needs to be reset: set the size to 0 and remap it. */
    if (mem) {
        mem->size = 0;
        if (do_hvf_set_memory(mem, 0)) {
            error_report("Failed to reset overlapping slot");
            abort();
        }
    }

    if (!add) {
        return;
    }

    if (area->readonly ||
        (!memory_region_is_ram(area) && memory_region_is_romd(area))) {
        flags = HV_MEMORY_READ | HV_MEMORY_EXEC;
    } else {
        flags = HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC;
    }

    /* Now make a new slot. */
    int x;

    for (x = 0; x < hvf_state->num_slots; ++x) {
        mem = &hvf_state->slots[x];
        if (!mem->size) {
            break;
        }
    }

    if (x == hvf_state->num_slots) {
        error_report("No free slots");
        abort();
    }

    mem->size = int128_get64(section->size);
    mem->mem = memory_region_get_ram_ptr(area) + section->offset_within_region;
    mem->start = section->offset_within_address_space;
    mem->region = area;

    if (do_hvf_set_memory(mem, flags)) {
        error_report("Error registering new memory slot");
        abort();
    }
}

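/*
 * State synchronization: cpu->vcpu_dirty set means the QEMU-side register
 * copy is authoritative and must be pushed back to the vcpu before it
 * runs again.  All of the helpers below execute on the vCPU's own thread
 * via run_on_cpu().
 */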
static void do_hvf_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
{
    if (!cpu->vcpu_dirty) {
        hvf_get_registers(cpu);
        cpu->vcpu_dirty = true;
    }
}

void hvf_cpu_synchronize_state(CPUState *cpu)
{
    if (!cpu->vcpu_dirty) {
        run_on_cpu(cpu, do_hvf_cpu_synchronize_state, RUN_ON_CPU_NULL);
    }
}

static void do_hvf_cpu_synchronize_post_reset(CPUState *cpu,
                                              run_on_cpu_data arg)
{
    hvf_put_registers(cpu);
    cpu->vcpu_dirty = false;
}

void hvf_cpu_synchronize_post_reset(CPUState *cpu)
{
    run_on_cpu(cpu, do_hvf_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
}

static void do_hvf_cpu_synchronize_post_init(CPUState *cpu,
                                             run_on_cpu_data arg)
{
    hvf_put_registers(cpu);
    cpu->vcpu_dirty = false;
}

void hvf_cpu_synchronize_post_init(CPUState *cpu)
{
    run_on_cpu(cpu, do_hvf_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
}

static void do_hvf_cpu_synchronize_pre_loadvm(CPUState *cpu,
                                              run_on_cpu_data arg)
{
    cpu->vcpu_dirty = true;
}

void hvf_cpu_synchronize_pre_loadvm(CPUState *cpu)
{
    run_on_cpu(cpu, do_hvf_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
}

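/*
 * Dirty logging works by write-protecting a slot: guest writes then fault
 * and can be recorded before write access is restored.  The actual sync
 * of dirty pages happens elsewhere; this helper only toggles the
 * protection and the HVF_SLOT_LOG flag.
 */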
static void hvf_set_dirty_tracking(MemoryRegionSection *section, bool on)
{
    hvf_slot *slot;

    slot = hvf_find_overlap_slot(
            section->offset_within_address_space,
            int128_get64(section->size));

    /* protect region against writes; begin tracking it */
    if (on) {
        slot->flags |= HVF_SLOT_LOG;
        hv_vm_protect((hv_gpaddr_t)slot->start, (size_t)slot->size,
                      HV_MEMORY_READ);
    } else {
        /* stop tracking the region */
        slot->flags &= ~HVF_SLOT_LOG;
        hv_vm_protect((hv_gpaddr_t)slot->start, (size_t)slot->size,
                      HV_MEMORY_READ | HV_MEMORY_WRITE);
    }
}

static void hvf_log_start(MemoryListener *listener,
                          MemoryRegionSection *section, int old, int new)
{
    if (old != 0) {
        return;
    }

    hvf_set_dirty_tracking(section, 1);
}

static void hvf_log_stop(MemoryListener *listener,
                         MemoryRegionSection *section, int old, int new)
{
    if (new != 0) {
        return;
    }

    hvf_set_dirty_tracking(section, 0);
}

static void hvf_log_sync(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    /*
     * sync of dirty pages is handled elsewhere; just make sure we keep
     * tracking the region.
     */
    hvf_set_dirty_tracking(section, 1);
}

static void hvf_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    hvf_set_phys_mem(section, true);
}

static void hvf_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    hvf_set_phys_mem(section, false);
}

static MemoryListener hvf_memory_listener = {
    .priority = 10,
    .region_add = hvf_region_add,
    .region_del = hvf_region_del,
    .log_start = hvf_log_start,
    .log_stop = hvf_log_stop,
    .log_sync = hvf_log_sync,
};

static void dummy_signal(int sig)
{
}

bool hvf_allowed;

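/*
 * Create the Hypervisor.framework VM, initialize the fixed pool of 32
 * memory slots and register the memory listener that keeps them in sync
 * with the guest physical memory map.
 */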
static int hvf_accel_init(MachineState *ms)
{
    int x;
    hv_return_t ret;
    HVFState *s;

    ret = hv_vm_create(HV_VM_DEFAULT);
    assert_hvf_ok(ret);

    s = g_new0(HVFState, 1);

    s->num_slots = 32;
    for (x = 0; x < s->num_slots; ++x) {
        s->slots[x].size = 0;
        s->slots[x].slot_id = x;
    }

    hvf_state = s;
    memory_listener_register(&hvf_memory_listener, &address_space_memory);
    return 0;
}

static void hvf_accel_class_init(ObjectClass *oc, void *data)
{
    AccelClass *ac = ACCEL_CLASS(oc);
    ac->name = "HVF";
    ac->init_machine = hvf_accel_init;
    ac->allowed = &hvf_allowed;
}

static const TypeInfo hvf_accel_type = {
    .name = TYPE_HVF_ACCEL,
    .parent = TYPE_ACCEL,
    .class_init = hvf_accel_class_init,
};

static void hvf_type_init(void)
{
    type_register_static(&hvf_accel_type);
}

type_init(hvf_type_init);

/*
 * The HVF-specific vCPU thread function. This one should only run when the host
 * CPU supports the VMX "unrestricted guest" feature.
 */
static void *hvf_cpu_thread_fn(void *arg)
{
    CPUState *cpu = arg;

    int r;

    assert(hvf_enabled());

    rcu_register_thread();

    qemu_mutex_lock_iothread();
    qemu_thread_get_self(cpu->thread);

    cpu->thread_id = qemu_get_thread_id();
    cpu->can_do_io = 1;
    current_cpu = cpu;

    hvf_init_vcpu(cpu);

    /* signal CPU creation */
    cpu_thread_signal_created(cpu);
    qemu_guest_random_seed_thread_part2(cpu->random_seed);

    do {
        if (cpu_can_run(cpu)) {
            r = hvf_vcpu_exec(cpu);
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(cpu);
            }
        }
        qemu_wait_io_event(cpu);
    } while (!cpu->unplug || cpu_can_run(cpu));

    hvf_vcpu_destroy(cpu);
    cpu_thread_signal_destroyed(cpu);
    qemu_mutex_unlock_iothread();
    rcu_unregister_thread();
    return NULL;
}

static void hvf_start_vcpu_thread(CPUState *cpu)
{
    char thread_name[VCPU_THREAD_NAME_SIZE];

    /*
     * HVF currently does not support TCG, and only runs in
     * unrestricted-guest mode.
     */
    assert(hvf_enabled());

    cpu->thread = g_malloc0(sizeof(QemuThread));
    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
    qemu_cond_init(cpu->halt_cond);

    snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HVF",
             cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, hvf_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
}

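/* Hook the HVF vCPU thread and state-sync helpers into AccelOpsClass. */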
static void hvf_accel_ops_class_init(ObjectClass *oc, void *data)
{
    AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);

    ops->create_vcpu_thread = hvf_start_vcpu_thread;

    ops->synchronize_post_reset = hvf_cpu_synchronize_post_reset;
    ops->synchronize_post_init = hvf_cpu_synchronize_post_init;
    ops->synchronize_state = hvf_cpu_synchronize_state;
    ops->synchronize_pre_loadvm = hvf_cpu_synchronize_pre_loadvm;
}

static const TypeInfo hvf_accel_ops_type = {
    .name = ACCEL_OPS_NAME("hvf"),
    .parent = TYPE_ACCEL_OPS,
    .class_init = hvf_accel_ops_class_init,
    .abstract = true,
};

static void hvf_accel_ops_register_types(void)
{
    type_register_static(&hvf_accel_ops_type);
}

type_init(hvf_accel_ops_register_types);