/******************************************************************************
 * hypercall.h
 *
 * Linux-specific hypervisor handling.
 *
 * Copyright (c) 2002-2004, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef _ASM_X86_XEN_HYPERCALL_H
#define _ASM_X86_XEN_HYPERCALL_H

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

#include <trace/events/xen.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/smap.h>
#include <asm/nospec-branch.h>

#include <xen/interface/xen.h>
#include <xen/interface/sched.h>
#include <xen/interface/physdev.h>
#include <xen/interface/platform.h>
#include <xen/interface/xen-mca.h>

struct xen_dm_op_buf;

/*
 * The hypercall asms have to meet several constraints:
 * - Work on 32- and 64-bit.
 *    The two architectures put their arguments in different sets of
 *    registers.
 *
 * - Work around asm syntax quirks
 *    It isn't possible to specify one of the rNN registers in a
 *    constraint, so we use explicit register variables to get the
 *    args into the right place.
 *
 * - Mark all registers as potentially clobbered
 *    Even unused parameters can be clobbered by the hypervisor, so we
 *    need to make sure gcc knows it.
 *
 * - Avoid compiler bugs.
 *    This is the tricky part.  Because x86_32 has such a constrained
 *    register set, gcc versions below 4.3 have trouble generating
 *    code when all the arg registers and memory are trashed by the
 *    asm.  There are syntactically simpler ways of achieving the
 *    semantics below, but they cause the compiler to crash.
 *
 *    The only combination I found which works is:
 *     - assign the __argX variables first
 *     - list all actually used parameters as "+r" (__argX)
 *     - clobber the rest
 *
 * The result certainly isn't pretty, and it really shows up cpp's
 * weakness as a macro language.  Sorry.  (But let's just give thanks
 * there aren't more than 5 arguments...)
 */

extern struct { char _entry[32]; } hypercall_page[];

#define __HYPERCALL		"call hypercall_page+%c[offset]"
#define __HYPERCALL_ENTRY(x)						\
	[offset] "i" (__HYPERVISOR_##x * sizeof(hypercall_page[0]))
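/*
 * Each hypercall owns a 32-byte slot in hypercall_page, so the "i"
 * (immediate) operand above is simply hypercall-number * 32.  For
 * illustration, __HYPERCALL_ENTRY(xen_version) yields the offset
 * __HYPERVISOR_xen_version * 32, and __HYPERCALL then emits
 * "call hypercall_page+<that offset>".
 */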

#ifdef CONFIG_X86_32
#define __HYPERCALL_RETREG	"eax"
#define __HYPERCALL_ARG1REG	"ebx"
#define __HYPERCALL_ARG2REG	"ecx"
#define __HYPERCALL_ARG3REG	"edx"
#define __HYPERCALL_ARG4REG	"esi"
#define __HYPERCALL_ARG5REG	"edi"
#else
#define __HYPERCALL_RETREG	"rax"
#define __HYPERCALL_ARG1REG	"rdi"
#define __HYPERCALL_ARG2REG	"rsi"
#define __HYPERCALL_ARG3REG	"rdx"
#define __HYPERCALL_ARG4REG	"r10"
#define __HYPERCALL_ARG5REG	"r8"
#endif

#define __HYPERCALL_DECLS						\
	register unsigned long __res  asm(__HYPERCALL_RETREG);		\
	register unsigned long __arg1 asm(__HYPERCALL_ARG1REG) = __arg1; \
	register unsigned long __arg2 asm(__HYPERCALL_ARG2REG) = __arg2; \
	register unsigned long __arg3 asm(__HYPERCALL_ARG3REG) = __arg3; \
	register unsigned long __arg4 asm(__HYPERCALL_ARG4REG) = __arg4; \
	register unsigned long __arg5 asm(__HYPERCALL_ARG5REG) = __arg5;

#define __HYPERCALL_0PARAM	"=r" (__res), ASM_CALL_CONSTRAINT
#define __HYPERCALL_1PARAM	__HYPERCALL_0PARAM, "+r" (__arg1)
#define __HYPERCALL_2PARAM	__HYPERCALL_1PARAM, "+r" (__arg2)
#define __HYPERCALL_3PARAM	__HYPERCALL_2PARAM, "+r" (__arg3)
#define __HYPERCALL_4PARAM	__HYPERCALL_3PARAM, "+r" (__arg4)
#define __HYPERCALL_5PARAM	__HYPERCALL_4PARAM, "+r" (__arg5)

#define __HYPERCALL_0ARG()
#define __HYPERCALL_1ARG(a1)						\
	__HYPERCALL_0ARG()		__arg1 = (unsigned long)(a1);
#define __HYPERCALL_2ARG(a1,a2)						\
	__HYPERCALL_1ARG(a1)		__arg2 = (unsigned long)(a2);
#define __HYPERCALL_3ARG(a1,a2,a3)					\
	__HYPERCALL_2ARG(a1,a2)		__arg3 = (unsigned long)(a3);
#define __HYPERCALL_4ARG(a1,a2,a3,a4)					\
	__HYPERCALL_3ARG(a1,a2,a3)	__arg4 = (unsigned long)(a4);
#define __HYPERCALL_5ARG(a1,a2,a3,a4,a5)				\
	__HYPERCALL_4ARG(a1,a2,a3,a4)	__arg5 = (unsigned long)(a5);

#define __HYPERCALL_CLOBBER5	"memory"
#define __HYPERCALL_CLOBBER4	__HYPERCALL_CLOBBER5, __HYPERCALL_ARG5REG
#define __HYPERCALL_CLOBBER3	__HYPERCALL_CLOBBER4, __HYPERCALL_ARG4REG
#define __HYPERCALL_CLOBBER2	__HYPERCALL_CLOBBER3, __HYPERCALL_ARG3REG
#define __HYPERCALL_CLOBBER1	__HYPERCALL_CLOBBER2, __HYPERCALL_ARG2REG
#define __HYPERCALL_CLOBBER0	__HYPERCALL_CLOBBER1, __HYPERCALL_ARG1REG
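/*
 * The clobber lists nest so that "memory" plus every argument register a
 * given call does NOT use ends up clobbered.  For illustration,
 * __HYPERCALL_CLOBBER2 expands to
 * "memory", __HYPERCALL_ARG5REG, __HYPERCALL_ARG4REG, __HYPERCALL_ARG3REG.
 * The _hypercallN() macros below combine the DECLS/ARG/PARAM/CLOBBER pieces
 * into one asm statement per call.
 */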

#define _hypercall0(type, name)						\
({									\
	__HYPERCALL_DECLS;						\
	__HYPERCALL_0ARG();						\
	asm volatile (__HYPERCALL					\
		      : __HYPERCALL_0PARAM				\
		      : __HYPERCALL_ENTRY(name)				\
		      : __HYPERCALL_CLOBBER0);				\
	(type)__res;							\
})

#define _hypercall1(type, name, a1)					\
({									\
	__HYPERCALL_DECLS;						\
	__HYPERCALL_1ARG(a1);						\
	asm volatile (__HYPERCALL					\
		      : __HYPERCALL_1PARAM				\
		      : __HYPERCALL_ENTRY(name)				\
		      : __HYPERCALL_CLOBBER1);				\
	(type)__res;							\
})

#define _hypercall2(type, name, a1, a2)					\
({									\
	__HYPERCALL_DECLS;						\
	__HYPERCALL_2ARG(a1, a2);					\
	asm volatile (__HYPERCALL					\
		      : __HYPERCALL_2PARAM				\
		      : __HYPERCALL_ENTRY(name)				\
		      : __HYPERCALL_CLOBBER2);				\
	(type)__res;							\
})

#define _hypercall3(type, name, a1, a2, a3)				\
({									\
	__HYPERCALL_DECLS;						\
	__HYPERCALL_3ARG(a1, a2, a3);					\
	asm volatile (__HYPERCALL					\
		      : __HYPERCALL_3PARAM				\
		      : __HYPERCALL_ENTRY(name)				\
		      : __HYPERCALL_CLOBBER3);				\
	(type)__res;							\
})

#define _hypercall4(type, name, a1, a2, a3, a4)				\
({									\
	__HYPERCALL_DECLS;						\
	__HYPERCALL_4ARG(a1, a2, a3, a4);				\
	asm volatile (__HYPERCALL					\
		      : __HYPERCALL_4PARAM				\
		      : __HYPERCALL_ENTRY(name)				\
		      : __HYPERCALL_CLOBBER4);				\
	(type)__res;							\
})

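/*
 * Unlike the _hypercallN() macros above, which bake the hypercall number
 * into the instruction as an immediate offset, xen_single_call() takes the
 * call number at run time and therefore has to make an indirect call into
 * hypercall_page.  CALL_NOSPEC (from <asm/nospec-branch.h>) is the kernel's
 * retpoline-aware indirect-call helper, keeping that call speculation-safe.
 */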
static inline long
xen_single_call(unsigned int call,
		unsigned long a1, unsigned long a2,
		unsigned long a3, unsigned long a4,
		unsigned long a5)
{
	__HYPERCALL_DECLS;
	__HYPERCALL_5ARG(a1, a2, a3, a4, a5);

	asm volatile(CALL_NOSPEC
		     : __HYPERCALL_5PARAM
		     : [thunk_target] "a" (&hypercall_page[call])
		     : __HYPERCALL_CLOBBER5);

	return (long)__res;
}

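/*
 * Privcmd hypercalls are issued on behalf of privileged user-space daemons
 * (the Xen toolstack), so the buffers they reference may live in user
 * memory.  stac()/clac() open and close the SMAP window that allows those
 * user-space accesses for the duration of the call.
 */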
static inline long
privcmd_call(unsigned int call,
	     unsigned long a1, unsigned long a2,
	     unsigned long a3, unsigned long a4,
	     unsigned long a5)
{
	long res;

	stac();
	res = xen_single_call(call, a1, a2, a3, a4, a5);
	clac();

	return res;
}

static inline int
HYPERVISOR_set_trap_table(struct trap_info *table)
{
	return _hypercall1(int, set_trap_table, table);
}

static inline int
HYPERVISOR_mmu_update(struct mmu_update *req, int count,
		      int *success_count, domid_t domid)
{
	return _hypercall4(int, mmu_update, req, count, success_count, domid);
}

static inline int
HYPERVISOR_mmuext_op(struct mmuext_op *op, int count,
		     int *success_count, domid_t domid)
{
	return _hypercall4(int, mmuext_op, op, count, success_count, domid);
}

static inline int
HYPERVISOR_set_gdt(unsigned long *frame_list, int entries)
{
	return _hypercall2(int, set_gdt, frame_list, entries);
}

static inline int
HYPERVISOR_callback_op(int cmd, void *arg)
{
	return _hypercall2(int, callback_op, cmd, arg);
}

static inline int
HYPERVISOR_sched_op(int cmd, void *arg)
{
	return _hypercall2(int, sched_op, cmd, arg);
}

static inline long
HYPERVISOR_set_timer_op(u64 timeout)
{
	unsigned long timeout_hi = (unsigned long)(timeout>>32);
	unsigned long timeout_lo = (unsigned long)timeout;
	return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
}

static inline int
HYPERVISOR_mca(struct xen_mc *mc_op)
{
	mc_op->interface_version = XEN_MCA_INTERFACE_VERSION;
	return _hypercall1(int, mca, mc_op);
}

static inline int
HYPERVISOR_platform_op(struct xen_platform_op *op)
{
	op->interface_version = XENPF_INTERFACE_VERSION;
	return _hypercall1(int, platform_op, op);
}

static inline int
HYPERVISOR_set_debugreg(int reg, unsigned long value)
{
	return _hypercall2(int, set_debugreg, reg, value);
}

static inline unsigned long
HYPERVISOR_get_debugreg(int reg)
{
	return _hypercall1(unsigned long, get_debugreg, reg);
}

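/*
 * sizeof(u64) == sizeof(long) is a compile-time constant, so only one branch
 * survives: on 64-bit the descriptor update is a two-argument hypercall,
 * while on 32-bit each 64-bit value is split into two 32-bit arguments.
 */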
static inline int
HYPERVISOR_update_descriptor(u64 ma, u64 desc)
{
	if (sizeof(u64) == sizeof(long))
		return _hypercall2(int, update_descriptor, ma, desc);
	return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
}

static inline long
HYPERVISOR_memory_op(unsigned int cmd, void *arg)
{
	return _hypercall2(long, memory_op, cmd, arg);
}

static inline int
HYPERVISOR_multicall(void *call_list, uint32_t nr_calls)
{
	return _hypercall2(int, multicall, call_list, nr_calls);
}

static inline int
HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val,
			     unsigned long flags)
{
	if (sizeof(new_val) == sizeof(long))
		return _hypercall3(int, update_va_mapping, va,
				   new_val.pte, flags);
	else
		return _hypercall4(int, update_va_mapping, va,
				   new_val.pte, new_val.pte >> 32, flags);
}

static inline int
HYPERVISOR_event_channel_op(int cmd, void *arg)
{
	return _hypercall2(int, event_channel_op, cmd, arg);
}

static inline int
HYPERVISOR_xen_version(int cmd, void *arg)
{
	return _hypercall2(int, xen_version, cmd, arg);
}

static inline int
HYPERVISOR_console_io(int cmd, int count, char *str)
{
	return _hypercall3(int, console_io, cmd, count, str);
}

static inline int
HYPERVISOR_physdev_op(int cmd, void *arg)
{
	return _hypercall2(int, physdev_op, cmd, arg);
}

static inline int
HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
{
	return _hypercall3(int, grant_table_op, cmd, uop, count);
}

static inline int
HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type)
{
	return _hypercall2(int, vm_assist, cmd, type);
}

static inline int
HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args)
{
	return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
}

#ifdef CONFIG_X86_64
static inline int
HYPERVISOR_set_segment_base(int reg, unsigned long value)
{
	return _hypercall2(int, set_segment_base, reg, value);
}
#endif

static inline int
HYPERVISOR_suspend(unsigned long start_info_mfn)
{
	struct sched_shutdown r = { .reason = SHUTDOWN_suspend };

	/*
	 * For a PV guest the tools require that the start_info mfn be
	 * present in rdx/edx when the hypercall is made. Per the
	 * hypercall calling convention this is the third hypercall
	 * argument, which is start_info_mfn here.
	 */
	return _hypercall3(int, sched_op, SCHEDOP_shutdown, &r, start_info_mfn);
}

static inline unsigned long __must_check
HYPERVISOR_hvm_op(int op, void *arg)
{
	return _hypercall2(unsigned long, hvm_op, op, arg);
}

static inline int
HYPERVISOR_tmem_op(
	struct tmem_op *op)
{
	return _hypercall1(int, tmem_op, op);
}

static inline int
HYPERVISOR_xenpmu_op(unsigned int op, void *arg)
{
	return _hypercall2(int, xenpmu_op, op, arg);
}

static inline int
HYPERVISOR_dm_op(
	domid_t dom, unsigned int nr_bufs, struct xen_dm_op_buf *bufs)
{
	int ret;
	stac();
	ret = _hypercall3(int, dm_op, dom, nr_bufs, bufs);
	clac();
	return ret;
}

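/*
 * The MULTI_* helpers below do not issue hypercalls themselves.  Each one
 * fills in a struct multicall_entry (op plus args) so that several
 * operations can later be submitted in one batch via HYPERVISOR_multicall(),
 * and records the entry for tracing via trace_xen_mc_entry().
 */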
static inline void
MULTI_fpu_taskswitch(struct multicall_entry *mcl, int set)
{
	mcl->op = __HYPERVISOR_fpu_taskswitch;
	mcl->args[0] = set;

	trace_xen_mc_entry(mcl, 1);
}

static inline void
MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
			pte_t new_val, unsigned long flags)
{
	mcl->op = __HYPERVISOR_update_va_mapping;
	mcl->args[0] = va;
	if (sizeof(new_val) == sizeof(long)) {
		mcl->args[1] = new_val.pte;
		mcl->args[2] = flags;
	} else {
		mcl->args[1] = new_val.pte;
		mcl->args[2] = new_val.pte >> 32;
		mcl->args[3] = flags;
	}

	trace_xen_mc_entry(mcl, sizeof(new_val) == sizeof(long) ? 3 : 4);
}

static inline void
MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr,
			struct desc_struct desc)
{
	mcl->op = __HYPERVISOR_update_descriptor;
	if (sizeof(maddr) == sizeof(long)) {
		mcl->args[0] = maddr;
		mcl->args[1] = *(unsigned long *)&desc;
	} else {
		u32 *p = (u32 *)&desc;

		mcl->args[0] = maddr;
		mcl->args[1] = maddr >> 32;
		mcl->args[2] = *p++;
		mcl->args[3] = *p;
	}

	trace_xen_mc_entry(mcl, sizeof(maddr) == sizeof(long) ? 2 : 4);
}

static inline void
MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
		 int count, int *success_count, domid_t domid)
{
	mcl->op = __HYPERVISOR_mmu_update;
	mcl->args[0] = (unsigned long)req;
	mcl->args[1] = count;
	mcl->args[2] = (unsigned long)success_count;
	mcl->args[3] = domid;

	trace_xen_mc_entry(mcl, 4);
}

static inline void
MULTI_mmuext_op(struct multicall_entry *mcl, struct mmuext_op *op, int count,
		int *success_count, domid_t domid)
{
	mcl->op = __HYPERVISOR_mmuext_op;
	mcl->args[0] = (unsigned long)op;
	mcl->args[1] = count;
	mcl->args[2] = (unsigned long)success_count;
	mcl->args[3] = domid;

	trace_xen_mc_entry(mcl, 4);
}

static inline void
MULTI_stack_switch(struct multicall_entry *mcl,
		   unsigned long ss, unsigned long esp)
{
	mcl->op = __HYPERVISOR_stack_switch;
	mcl->args[0] = ss;
	mcl->args[1] = esp;

	trace_xen_mc_entry(mcl, 2);
}

#endif /* _ASM_X86_XEN_HYPERCALL_H */