2 * Copyright 2008 IBM Corporation
4 * Copyright 2011 Intel Corporation
5 * Copyright 2016 Veertu, Inc.
6 * Copyright 2017 The Android Open Source Project
8 * QEMU Hypervisor.framework support
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of version 2 of the GNU General Public
12 * License as published by the Free Software Foundation.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, see <http://www.gnu.org/licenses/>.
22 * This file contain code under public domain from the hvdos project:
23 * https://github.com/mist64/hvdos
25 * Parts Copyright (c) 2011 NetApp, Inc.
26 * All rights reserved.
28 * Redistribution and use in source and binary forms, with or without
29 * modification, are permitted provided that the following conditions
31 * 1. Redistributions of source code must retain the above copyright
32 * notice, this list of conditions and the following disclaimer.
33 * 2. Redistributions in binary form must reproduce the above copyright
34 * notice, this list of conditions and the following disclaimer in the
35 * documentation and/or other materials provided with the distribution.
37 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
38 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
39 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
40 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
41 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
42 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
43 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
44 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
45 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
50 #include "qemu/osdep.h"
51 #include "qemu/error-report.h"
52 #include "qemu/main-loop.h"
53 #include "exec/address-spaces.h"
54 #include "exec/exec-all.h"
55 #include "sysemu/cpus.h"
56 #include "sysemu/hvf.h"
57 #include "sysemu/hvf_int.h"
58 #include "sysemu/runstate.h"
59 #include "qemu/guest-random.h"
61 #include "hvf-accel-ops.h"
67 hvf_slot
*hvf_find_overlap_slot(uint64_t start
, uint64_t size
)
71 for (x
= 0; x
< hvf_state
->num_slots
; ++x
) {
72 slot
= &hvf_state
->slots
[x
];
73 if (slot
->size
&& start
< (slot
->start
+ slot
->size
) &&
74 (start
+ size
) > slot
->start
) {
88 struct mac_slot mac_slots
[32];
90 static int do_hvf_set_memory(hvf_slot
*slot
, hv_memory_flags_t flags
)
92 struct mac_slot
*macslot
;
95 macslot
= &mac_slots
[slot
->slot_id
];
97 if (macslot
->present
) {
98 if (macslot
->size
!= slot
->size
) {
100 ret
= hv_vm_unmap(macslot
->gpa_start
, macslot
->size
);
109 macslot
->present
= 1;
110 macslot
->gpa_start
= slot
->start
;
111 macslot
->size
= slot
->size
;
112 ret
= hv_vm_map((hv_uvaddr_t
)slot
->mem
, slot
->start
, slot
->size
, flags
);
117 void hvf_set_phys_mem(MemoryRegionSection
*section
, bool add
)
120 MemoryRegion
*area
= section
->mr
;
121 bool writeable
= !area
->readonly
&& !area
->rom_device
;
122 hv_memory_flags_t flags
;
124 if (!memory_region_is_ram(area
)) {
127 } else if (!memory_region_is_romd(area
)) {
129 * If the memory device is not in romd_mode, then we actually want
130 * to remove the hvf memory slot so all accesses will trap.
136 mem
= hvf_find_overlap_slot(
137 section
->offset_within_address_space
,
138 int128_get64(section
->size
));
141 if (mem
->size
== int128_get64(section
->size
) &&
142 mem
->start
== section
->offset_within_address_space
&&
143 mem
->mem
== (memory_region_get_ram_ptr(area
) +
144 section
->offset_within_region
)) {
145 return; /* Same region was attempted to register, go away. */
149 /* Region needs to be reset. set the size to 0 and remap it. */
152 if (do_hvf_set_memory(mem
, 0)) {
153 error_report("Failed to reset overlapping slot");
162 if (area
->readonly
||
163 (!memory_region_is_ram(area
) && memory_region_is_romd(area
))) {
164 flags
= HV_MEMORY_READ
| HV_MEMORY_EXEC
;
166 flags
= HV_MEMORY_READ
| HV_MEMORY_WRITE
| HV_MEMORY_EXEC
;
169 /* Now make a new slot. */
172 for (x
= 0; x
< hvf_state
->num_slots
; ++x
) {
173 mem
= &hvf_state
->slots
[x
];
179 if (x
== hvf_state
->num_slots
) {
180 error_report("No free slots");
184 mem
->size
= int128_get64(section
->size
);
185 mem
->mem
= memory_region_get_ram_ptr(area
) + section
->offset_within_region
;
186 mem
->start
= section
->offset_within_address_space
;
189 if (do_hvf_set_memory(mem
, flags
)) {
190 error_report("Error registering new memory slot");
195 static void do_hvf_cpu_synchronize_state(CPUState
*cpu
, run_on_cpu_data arg
)
197 if (!cpu
->vcpu_dirty
) {
198 hvf_get_registers(cpu
);
199 cpu
->vcpu_dirty
= true;
203 void hvf_cpu_synchronize_state(CPUState
*cpu
)
205 if (!cpu
->vcpu_dirty
) {
206 run_on_cpu(cpu
, do_hvf_cpu_synchronize_state
, RUN_ON_CPU_NULL
);
210 static void do_hvf_cpu_synchronize_post_reset(CPUState
*cpu
,
213 hvf_put_registers(cpu
);
214 cpu
->vcpu_dirty
= false;
217 void hvf_cpu_synchronize_post_reset(CPUState
*cpu
)
219 run_on_cpu(cpu
, do_hvf_cpu_synchronize_post_reset
, RUN_ON_CPU_NULL
);
222 static void do_hvf_cpu_synchronize_post_init(CPUState
*cpu
,
225 hvf_put_registers(cpu
);
226 cpu
->vcpu_dirty
= false;
229 void hvf_cpu_synchronize_post_init(CPUState
*cpu
)
231 run_on_cpu(cpu
, do_hvf_cpu_synchronize_post_init
, RUN_ON_CPU_NULL
);
234 static void do_hvf_cpu_synchronize_pre_loadvm(CPUState
*cpu
,
237 cpu
->vcpu_dirty
= true;
240 void hvf_cpu_synchronize_pre_loadvm(CPUState
*cpu
)
242 run_on_cpu(cpu
, do_hvf_cpu_synchronize_pre_loadvm
, RUN_ON_CPU_NULL
);
245 static void hvf_set_dirty_tracking(MemoryRegionSection
*section
, bool on
)
249 slot
= hvf_find_overlap_slot(
250 section
->offset_within_address_space
,
251 int128_get64(section
->size
));
253 /* protect region against writes; begin tracking it */
255 slot
->flags
|= HVF_SLOT_LOG
;
256 hv_vm_protect((hv_gpaddr_t
)slot
->start
, (size_t)slot
->size
,
258 /* stop tracking region*/
260 slot
->flags
&= ~HVF_SLOT_LOG
;
261 hv_vm_protect((hv_gpaddr_t
)slot
->start
, (size_t)slot
->size
,
262 HV_MEMORY_READ
| HV_MEMORY_WRITE
);
266 static void hvf_log_start(MemoryListener
*listener
,
267 MemoryRegionSection
*section
, int old
, int new)
273 hvf_set_dirty_tracking(section
, 1);
276 static void hvf_log_stop(MemoryListener
*listener
,
277 MemoryRegionSection
*section
, int old
, int new)
283 hvf_set_dirty_tracking(section
, 0);
286 static void hvf_log_sync(MemoryListener
*listener
,
287 MemoryRegionSection
*section
)
290 * sync of dirty pages is handled elsewhere; just make sure we keep
291 * tracking the region.
293 hvf_set_dirty_tracking(section
, 1);
296 static void hvf_region_add(MemoryListener
*listener
,
297 MemoryRegionSection
*section
)
299 hvf_set_phys_mem(section
, true);
302 static void hvf_region_del(MemoryListener
*listener
,
303 MemoryRegionSection
*section
)
305 hvf_set_phys_mem(section
, false);
308 static MemoryListener hvf_memory_listener
= {
310 .region_add
= hvf_region_add
,
311 .region_del
= hvf_region_del
,
312 .log_start
= hvf_log_start
,
313 .log_stop
= hvf_log_stop
,
314 .log_sync
= hvf_log_sync
,
/*
 * Deliberately empty signal handler: lets a signal interrupt blocking calls
 * without running any user code.
 * NOTE(review): body not visible in this extraction; reconstructed as empty
 * per upstream QEMU — confirm.
 */
static void dummy_signal(int sig)
{
}
323 static int hvf_accel_init(MachineState
*ms
)
329 ret
= hv_vm_create(HV_VM_DEFAULT
);
332 s
= g_new0(HVFState
, 1);
335 for (x
= 0; x
< s
->num_slots
; ++x
) {
336 s
->slots
[x
].size
= 0;
337 s
->slots
[x
].slot_id
= x
;
341 memory_listener_register(&hvf_memory_listener
, &address_space_memory
);
345 static void hvf_accel_class_init(ObjectClass
*oc
, void *data
)
347 AccelClass
*ac
= ACCEL_CLASS(oc
);
349 ac
->init_machine
= hvf_accel_init
;
350 ac
->allowed
= &hvf_allowed
;
353 static const TypeInfo hvf_accel_type
= {
354 .name
= TYPE_HVF_ACCEL
,
355 .parent
= TYPE_ACCEL
,
356 .class_init
= hvf_accel_class_init
,
359 static void hvf_type_init(void)
361 type_register_static(&hvf_accel_type
);
364 type_init(hvf_type_init
);
367 * The HVF-specific vCPU thread function. This one should only run when the host
368 * CPU supports the VMX "unrestricted guest" feature.
370 static void *hvf_cpu_thread_fn(void *arg
)
376 assert(hvf_enabled());
378 rcu_register_thread();
380 qemu_mutex_lock_iothread();
381 qemu_thread_get_self(cpu
->thread
);
383 cpu
->thread_id
= qemu_get_thread_id();
389 /* signal CPU creation */
390 cpu_thread_signal_created(cpu
);
391 qemu_guest_random_seed_thread_part2(cpu
->random_seed
);
394 if (cpu_can_run(cpu
)) {
395 r
= hvf_vcpu_exec(cpu
);
396 if (r
== EXCP_DEBUG
) {
397 cpu_handle_guest_debug(cpu
);
400 qemu_wait_io_event(cpu
);
401 } while (!cpu
->unplug
|| cpu_can_run(cpu
));
403 hvf_vcpu_destroy(cpu
);
404 cpu_thread_signal_destroyed(cpu
);
405 qemu_mutex_unlock_iothread();
406 rcu_unregister_thread();
410 static void hvf_start_vcpu_thread(CPUState
*cpu
)
412 char thread_name
[VCPU_THREAD_NAME_SIZE
];
415 * HVF currently does not support TCG, and only runs in
416 * unrestricted-guest mode.
418 assert(hvf_enabled());
420 cpu
->thread
= g_malloc0(sizeof(QemuThread
));
421 cpu
->halt_cond
= g_malloc0(sizeof(QemuCond
));
422 qemu_cond_init(cpu
->halt_cond
);
424 snprintf(thread_name
, VCPU_THREAD_NAME_SIZE
, "CPU %d/HVF",
426 qemu_thread_create(cpu
->thread
, thread_name
, hvf_cpu_thread_fn
,
427 cpu
, QEMU_THREAD_JOINABLE
);
430 static void hvf_accel_ops_class_init(ObjectClass
*oc
, void *data
)
432 AccelOpsClass
*ops
= ACCEL_OPS_CLASS(oc
);
434 ops
->create_vcpu_thread
= hvf_start_vcpu_thread
;
436 ops
->synchronize_post_reset
= hvf_cpu_synchronize_post_reset
;
437 ops
->synchronize_post_init
= hvf_cpu_synchronize_post_init
;
438 ops
->synchronize_state
= hvf_cpu_synchronize_state
;
439 ops
->synchronize_pre_loadvm
= hvf_cpu_synchronize_pre_loadvm
;
441 static const TypeInfo hvf_accel_ops_type
= {
442 .name
= ACCEL_OPS_NAME("hvf"),
444 .parent
= TYPE_ACCEL_OPS
,
445 .class_init
= hvf_accel_ops_class_init
,
448 static void hvf_accel_ops_register_types(void)
450 type_register_static(&hvf_accel_ops_type
);
452 type_init(hvf_accel_ops_register_types
);