/*
 * QEMU HAXM support
 *
 * Copyright (c) 2011 Intel Corporation
 * Written by:
 * Jiang Yunhong<yunhong.jiang@intel.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

/* HAX module interface - POSIX version */
#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include "target/i386/hax-i386.h"

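/* Open the HAX module device node; returns the module fd, or -1 on failure. */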
hax_fd hax_mod_open(void)
{
    int fd = open("/dev/HAX", O_RDWR);
    if (fd == -1) {
        fprintf(stderr, "Failed to open the hax module\n");
        return fd;
    }

    fcntl(fd, F_SETFD, FD_CLOEXEC);

    return fd;
}

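/*
 * Register a RAM block with the HAX module, using the 64-bit RAMBLOCK ioctl
 * when the module supports it and the legacy 32-bit ALLOC_RAM ioctl otherwise.
 */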
int hax_populate_ram(uint64_t va, uint64_t size)
{
    int ret;

    if (!hax_global.vm || !hax_global.vm->fd) {
        fprintf(stderr, "Cannot populate RAM before the VM is created\n");
        return -EINVAL;
    }

    if (hax_global.supports_64bit_ramblock) {
        struct hax_ramblock_info ramblock = {
            .start_va = va,
            .size = size,
            .reserved = 0
        };

        ret = ioctl(hax_global.vm->fd, HAX_VM_IOCTL_ADD_RAMBLOCK, &ramblock);
    } else {
        struct hax_alloc_ram_info info = {
            .size = (uint32_t)size,
            .pad = 0,
            .va = va
        };

        ret = ioctl(hax_global.vm->fd, HAX_VM_IOCTL_ALLOC_RAM, &info);
    }
    if (ret < 0) {
        fprintf(stderr, "Failed to register RAM block: ret=%d, va=0x%" PRIx64
                ", size=0x%" PRIx64 ", method=%s\n", ret, va, size,
                hax_global.supports_64bit_ramblock ? "new" : "legacy");
        return ret;
    }
    return 0;
}

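/* Map (or remap) a guest physical range onto host virtual memory. */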
int hax_set_ram(uint64_t start_pa, uint32_t size, uint64_t host_va, int flags)
{
    struct hax_set_ram_info info;
    int ret;

    info.pa_start = start_pa;
    info.size = size;
    info.va = host_va;
    info.flags = (uint8_t) flags;

    ret = ioctl(hax_global.vm->fd, HAX_VM_IOCTL_SET_RAM, &info);
    if (ret < 0) {
        return -errno;
    }
    return 0;
}

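/* Query the capability flags advertised by the HAX module. */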
int hax_capability(struct hax_state *hax, struct hax_capabilityinfo *cap)
{
    int ret;

    ret = ioctl(hax->fd, HAX_IOCTL_CAPABILITY, cap);
    if (ret == -1) {
        fprintf(stderr, "Failed to get HAX capability\n");
        return -errno;
    }

    return 0;
}

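/* Query the version of the loaded HAX module. */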
int hax_mod_version(struct hax_state *hax, struct hax_module_version *version)
{
    int ret;

    ret = ioctl(hax->fd, HAX_IOCTL_VERSION, version);
    if (ret == -1) {
        fprintf(stderr, "Failed to get HAX version\n");
        return -errno;
    }

    return 0;
}

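/* Build the devfs paths of the per-VM and per-vcpu device nodes. */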
static char *hax_vm_devfs_string(int vm_id)
{
    return g_strdup_printf("/dev/hax_vm/vm%02d", vm_id);
}

static char *hax_vcpu_devfs_string(int vm_id, int vcpu_id)
{
    return g_strdup_printf("/dev/hax_vm%02d/vcpu%02d", vm_id, vcpu_id);
}

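/* Ask the HAX module to create a VM; the new VM id is returned in *vmid. */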
int hax_host_create_vm(struct hax_state *hax, int *vmid)
{
    int ret;
    int vm_id = 0;

    if (hax_invalid_fd(hax->fd)) {
        return -EINVAL;
    }

    if (hax->vm) {
        return 0;
    }

    ret = ioctl(hax->fd, HAX_IOCTL_CREATE_VM, &vm_id);
    *vmid = vm_id;
    return ret;
}

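/* Open the device node of an already created VM and return its fd. */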
hax_fd hax_host_open_vm(struct hax_state *hax, int vm_id)
{
    hax_fd fd;
    char *vm_name = NULL;

    vm_name = hax_vm_devfs_string(vm_id);
    if (!vm_name) {
        return -1;
    }

    fd = open(vm_name, O_RDWR);
    g_free(vm_name);
    if (fd < 0) {
        return fd;
    }

    fcntl(fd, F_SETFD, FD_CLOEXEC);

    return fd;
}

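/* Notify the HAX module of the QEMU API version through the VM fd. */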
int hax_notify_qemu_version(hax_fd vm_fd, struct hax_qemu_version *qversion)
{
    int ret;

    if (hax_invalid_fd(vm_fd)) {
        return -EINVAL;
    }

    ret = ioctl(vm_fd, HAX_VM_IOCTL_NOTIFY_QEMU_VERSION, qversion);

    if (ret < 0) {
        fprintf(stderr, "Failed to notify the HAX module of the QEMU API version\n");
        return ret;
    }
    return 0;
}

/*
 * Assume any tunnel at least as large as struct hax_tunnel is valid, since
 * hax_tunnel may be extended later in a backward-compatible way; see
 * valid_hax_tunnel_size() in hax_host_setup_vcpu_channel().
 */
int hax_host_create_vcpu(hax_fd vm_fd, int vcpuid)
{
    int ret;

    ret = ioctl(vm_fd, HAX_VM_IOCTL_VCPU_CREATE, &vcpuid);
    if (ret < 0) {
        fprintf(stderr, "Failed to create vcpu %x\n", vcpuid);
    }

    return ret;
}

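/* Open the device node of a vcpu that has already been created. */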
hax_fd hax_host_open_vcpu(int vmid, int vcpuid)
{
    char *devfs_path = NULL;
    hax_fd fd;

    devfs_path = hax_vcpu_devfs_string(vmid, vcpuid);
    if (!devfs_path) {
        fprintf(stderr, "Failed to get the devfs path of the vcpu\n");
        return -EINVAL;
    }

    fd = open(devfs_path, O_RDWR);
    g_free(devfs_path);
    if (fd < 0) {
        fprintf(stderr, "Failed to open the vcpu devfs node\n");
        return fd;
    }
    fcntl(fd, F_SETFD, FD_CLOEXEC);
    return fd;
}

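/*
 * Set up the communication channel with the HAX module for this vcpu: the
 * module reports the addresses of the shared hax_tunnel structure and of the
 * I/O buffer, which are cached in the vcpu state.
 */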
int hax_host_setup_vcpu_channel(struct hax_vcpu_state *vcpu)
{
    int ret;
    struct hax_tunnel_info info;

    ret = ioctl(vcpu->fd, HAX_VCPU_IOCTL_SETUP_TUNNEL, &info);
    if (ret) {
        fprintf(stderr, "Failed to setup the hax tunnel\n");
        return ret;
    }

    if (!valid_hax_tunnel_size(info.size)) {
        fprintf(stderr, "Invalid hax tunnel size %x\n", info.size);
        return -EINVAL;
    }

    vcpu->tunnel = (struct hax_tunnel *) (intptr_t) (info.va);
    vcpu->iobuf = (unsigned char *) (intptr_t) (info.io_va);
    return 0;
}

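/* Run the vcpu in guest mode until it exits back to QEMU. */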
int hax_vcpu_run(struct hax_vcpu_state *vcpu)
{
    return ioctl(vcpu->fd, HAX_VCPU_IOCTL_RUN, NULL);
}

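/* Get (set == 0) or set (set != 0) the vcpu's FPU state. */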
int hax_sync_fpu(CPUArchState *env, struct fx_layout *fl, int set)
{
    int ret, fd;

    fd = hax_vcpu_get_fd(env);
    if (fd <= 0) {
        return -1;
    }

    if (set) {
        ret = ioctl(fd, HAX_VCPU_IOCTL_SET_FPU, fl);
    } else {
        ret = ioctl(fd, HAX_VCPU_IOCTL_GET_FPU, fl);
    }
    return ret;
}

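/* Get or set the vcpu's MSRs, depending on 'set'. */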
int hax_sync_msr(CPUArchState *env, struct hax_msr_data *msrs, int set)
{
    int ret, fd;

    fd = hax_vcpu_get_fd(env);
    if (fd <= 0) {
        return -1;
    }
    if (set) {
        ret = ioctl(fd, HAX_VCPU_IOCTL_SET_MSRS, msrs);
    } else {
        ret = ioctl(fd, HAX_VCPU_IOCTL_GET_MSRS, msrs);
    }
    return ret;
}

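/* Get or set the vcpu's register state, depending on 'set'. */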
int hax_sync_vcpu_state(CPUArchState *env, struct vcpu_state_t *state, int set)
{
    int ret, fd;

    fd = hax_vcpu_get_fd(env);
    if (fd <= 0) {
        return -1;
    }

    if (set) {
        ret = ioctl(fd, HAX_VCPU_SET_REGS, state);
    } else {
        ret = ioctl(fd, HAX_VCPU_GET_REGS, state);
    }
    return ret;
}

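/* Inject an interrupt with the given vector into the vcpu. */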
int hax_inject_interrupt(CPUArchState *env, int vector)
{
    int fd;

    fd = hax_vcpu_get_fd(env);
    if (fd <= 0) {
        return -1;
    }

    return ioctl(fd, HAX_VCPU_IOCTL_INTERRUPT, &vector);
}