/******************************************************************************
 * hypercall.h
 *
 * Linux-specific hypervisor handling.
 *
 * Copyright (c) 2002-2004, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef _ASM_X86_XEN_HYPERCALL_H
#define _ASM_X86_XEN_HYPERCALL_H

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

#include <trace/events/xen.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <xen/interface/xen.h>
#include <xen/interface/sched.h>
#include <xen/interface/physdev.h>
#include <xen/interface/platform.h>
#include <xen/interface/xen-mca.h>

/*
 * The hypercall asms have to meet several constraints:
 * - Work on 32- and 64-bit.
 *    The two architectures put their arguments in different sets of
 *    registers.
 *
 * - Work around asm syntax quirks
 *    It isn't possible to specify one of the rNN registers in a
 *    constraint, so we use explicit register variables to get the
 *    args into the right place.
 *
 * - Mark all registers as potentially clobbered
 *    Even unused parameters can be clobbered by the hypervisor, so we
 *    need to make sure gcc knows it.
 *
 * - Avoid compiler bugs.
 *    This is the tricky part.  Because x86_32 has such a constrained
 *    register set, gcc versions below 4.3 have trouble generating
 *    code when all the arg registers and memory are trashed by the
 *    asm.  There are syntactically simpler ways of achieving the
 *    semantics below, but they cause the compiler to crash.
 *
 *    The only combination I found which works is:
 *     - assign the __argX variables first
 *     - list all actually used parameters as "+r" (__argX)
 *     - clobber the rest
 *
 * The result certainly isn't pretty, and it really shows up cpp's
 * weakness as a macro language.  Sorry.  (But let's just give thanks
 * there aren't more than 5 arguments...)
 */

extern struct { char _entry[32]; } hypercall_page[];

#define __HYPERCALL "call hypercall_page+%c[offset]"
#define __HYPERCALL_ENTRY(x) \
        [offset] "i" (__HYPERVISOR_##x * sizeof(hypercall_page[0]))
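
/*
 * Each hypercall number owns a 32-byte stub in hypercall_page (filled in
 * by the hypervisor), so __HYPERCALL_ENTRY(name) supplies the immediate
 * byte offset __HYPERVISOR_name * sizeof(hypercall_page[0]) and
 * __HYPERCALL becomes a direct call into that stub.
 */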

#ifdef CONFIG_X86_32
#define __HYPERCALL_RETREG "eax"
#define __HYPERCALL_ARG1REG "ebx"
#define __HYPERCALL_ARG2REG "ecx"
#define __HYPERCALL_ARG3REG "edx"
#define __HYPERCALL_ARG4REG "esi"
#define __HYPERCALL_ARG5REG "edi"
#else
#define __HYPERCALL_RETREG "rax"
#define __HYPERCALL_ARG1REG "rdi"
#define __HYPERCALL_ARG2REG "rsi"
#define __HYPERCALL_ARG3REG "rdx"
#define __HYPERCALL_ARG4REG "r10"
#define __HYPERCALL_ARG5REG "r8"
#endif

#define __HYPERCALL_DECLS \
        register unsigned long __res asm(__HYPERCALL_RETREG); \
        register unsigned long __arg1 asm(__HYPERCALL_ARG1REG) = __arg1; \
        register unsigned long __arg2 asm(__HYPERCALL_ARG2REG) = __arg2; \
        register unsigned long __arg3 asm(__HYPERCALL_ARG3REG) = __arg3; \
        register unsigned long __arg4 asm(__HYPERCALL_ARG4REG) = __arg4; \
        register unsigned long __arg5 asm(__HYPERCALL_ARG5REG) = __arg5; \
        register void *__sp asm(_ASM_SP);
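
/*
 * Note: the "__argN = __argN" self-initialisation above only serves to
 * keep the compiler from warning about uninitialised use while pinning
 * each variable to its hypercall register; the real argument values are
 * assigned by the __HYPERCALL_nARG() macros before the asm runs.
 */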

#define __HYPERCALL_0PARAM "=r" (__res), "+r" (__sp)
#define __HYPERCALL_1PARAM __HYPERCALL_0PARAM, "+r" (__arg1)
#define __HYPERCALL_2PARAM __HYPERCALL_1PARAM, "+r" (__arg2)
#define __HYPERCALL_3PARAM __HYPERCALL_2PARAM, "+r" (__arg3)
#define __HYPERCALL_4PARAM __HYPERCALL_3PARAM, "+r" (__arg4)
#define __HYPERCALL_5PARAM __HYPERCALL_4PARAM, "+r" (__arg5)

#define __HYPERCALL_0ARG()
#define __HYPERCALL_1ARG(a1) \
        __HYPERCALL_0ARG() __arg1 = (unsigned long)(a1);
#define __HYPERCALL_2ARG(a1,a2) \
        __HYPERCALL_1ARG(a1) __arg2 = (unsigned long)(a2);
#define __HYPERCALL_3ARG(a1,a2,a3) \
        __HYPERCALL_2ARG(a1,a2) __arg3 = (unsigned long)(a3);
#define __HYPERCALL_4ARG(a1,a2,a3,a4) \
        __HYPERCALL_3ARG(a1,a2,a3) __arg4 = (unsigned long)(a4);
#define __HYPERCALL_5ARG(a1,a2,a3,a4,a5) \
        __HYPERCALL_4ARG(a1,a2,a3,a4) __arg5 = (unsigned long)(a5);

#define __HYPERCALL_CLOBBER5 "memory"
#define __HYPERCALL_CLOBBER4 __HYPERCALL_CLOBBER5, __HYPERCALL_ARG5REG
#define __HYPERCALL_CLOBBER3 __HYPERCALL_CLOBBER4, __HYPERCALL_ARG4REG
#define __HYPERCALL_CLOBBER2 __HYPERCALL_CLOBBER3, __HYPERCALL_ARG3REG
#define __HYPERCALL_CLOBBER1 __HYPERCALL_CLOBBER2, __HYPERCALL_ARG2REG
#define __HYPERCALL_CLOBBER0 __HYPERCALL_CLOBBER1, __HYPERCALL_ARG1REG
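
/*
 * __HYPERCALL_CLOBBERn names "memory" plus every argument register beyond
 * the n actually used ones.  As noted above, the hypervisor may clobber
 * even unused argument registers, so gcc must not keep live values in
 * them across the hypercall.
 */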

#define _hypercall0(type, name) \
({ \
        __HYPERCALL_DECLS; \
        __HYPERCALL_0ARG(); \
        asm volatile (__HYPERCALL \
                      : __HYPERCALL_0PARAM \
                      : __HYPERCALL_ENTRY(name) \
                      : __HYPERCALL_CLOBBER0); \
        (type)__res; \
})

#define _hypercall1(type, name, a1) \
({ \
        __HYPERCALL_DECLS; \
        __HYPERCALL_1ARG(a1); \
        asm volatile (__HYPERCALL \
                      : __HYPERCALL_1PARAM \
                      : __HYPERCALL_ENTRY(name) \
                      : __HYPERCALL_CLOBBER1); \
        (type)__res; \
})

#define _hypercall2(type, name, a1, a2) \
({ \
        __HYPERCALL_DECLS; \
        __HYPERCALL_2ARG(a1, a2); \
        asm volatile (__HYPERCALL \
                      : __HYPERCALL_2PARAM \
                      : __HYPERCALL_ENTRY(name) \
                      : __HYPERCALL_CLOBBER2); \
        (type)__res; \
})
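
/*
 * Roughly speaking (64-bit register names, per the table above), something
 * like _hypercall2(int, xen_version, cmd, arg) boils down to:
 *
 *      __arg1 = (unsigned long)(cmd);          // rdi
 *      __arg2 = (unsigned long)(arg);          // rsi
 *      asm volatile ("call hypercall_page+%c[offset]"
 *                    : "=r" (__res), "+r" (__sp), "+r" (__arg1), "+r" (__arg2)
 *                    : [offset] "i" (__HYPERVISOR_xen_version * 32)
 *                    : "memory", "r8", "r10", "rdx");
 *      (int)__res;
 */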

#define _hypercall3(type, name, a1, a2, a3) \
({ \
        __HYPERCALL_DECLS; \
        __HYPERCALL_3ARG(a1, a2, a3); \
        asm volatile (__HYPERCALL \
                      : __HYPERCALL_3PARAM \
                      : __HYPERCALL_ENTRY(name) \
                      : __HYPERCALL_CLOBBER3); \
        (type)__res; \
})

#define _hypercall4(type, name, a1, a2, a3, a4) \
({ \
        __HYPERCALL_DECLS; \
        __HYPERCALL_4ARG(a1, a2, a3, a4); \
        asm volatile (__HYPERCALL \
                      : __HYPERCALL_4PARAM \
                      : __HYPERCALL_ENTRY(name) \
                      : __HYPERCALL_CLOBBER4); \
        (type)__res; \
})

#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
({ \
        __HYPERCALL_DECLS; \
        __HYPERCALL_5ARG(a1, a2, a3, a4, a5); \
        asm volatile (__HYPERCALL \
                      : __HYPERCALL_5PARAM \
                      : __HYPERCALL_ENTRY(name) \
                      : __HYPERCALL_CLOBBER5); \
        (type)__res; \
})

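/*
 * privcmd_call() is the run-time-indexed variant used by the privcmd
 * driver on behalf of the toolstack: the hypercall number is only known
 * at run time, so it has to make an indirect call into hypercall_page
 * instead of using __HYPERCALL_ENTRY's compile-time offset.
 */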
static inline long
privcmd_call(unsigned call,
             unsigned long a1, unsigned long a2,
             unsigned long a3, unsigned long a4,
             unsigned long a5)
{
        __HYPERCALL_DECLS;
        __HYPERCALL_5ARG(a1, a2, a3, a4, a5);

        asm volatile("call *%[call]"
                     : __HYPERCALL_5PARAM
                     : [call] "a" (&hypercall_page[call])
                     : __HYPERCALL_CLOBBER5);

        return (long)__res;
}

static inline int
HYPERVISOR_set_trap_table(struct trap_info *table)
{
        return _hypercall1(int, set_trap_table, table);
}

static inline int
HYPERVISOR_mmu_update(struct mmu_update *req, int count,
                      int *success_count, domid_t domid)
{
        return _hypercall4(int, mmu_update, req, count, success_count, domid);
}

static inline int
HYPERVISOR_mmuext_op(struct mmuext_op *op, int count,
                     int *success_count, domid_t domid)
{
        return _hypercall4(int, mmuext_op, op, count, success_count, domid);
}

static inline int
HYPERVISOR_set_gdt(unsigned long *frame_list, int entries)
{
        return _hypercall2(int, set_gdt, frame_list, entries);
}

static inline int
HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp)
{
        return _hypercall2(int, stack_switch, ss, esp);
}

#ifdef CONFIG_X86_32
static inline int
HYPERVISOR_set_callbacks(unsigned long event_selector,
                         unsigned long event_address,
                         unsigned long failsafe_selector,
                         unsigned long failsafe_address)
{
        return _hypercall4(int, set_callbacks,
                           event_selector, event_address,
                           failsafe_selector, failsafe_address);
}
#else /* CONFIG_X86_64 */
static inline int
HYPERVISOR_set_callbacks(unsigned long event_address,
                         unsigned long failsafe_address,
                         unsigned long syscall_address)
{
        return _hypercall3(int, set_callbacks,
                           event_address, failsafe_address,
                           syscall_address);
}
#endif /* CONFIG_X86_{32,64} */

static inline int
HYPERVISOR_callback_op(int cmd, void *arg)
{
        return _hypercall2(int, callback_op, cmd, arg);
}

static inline int
HYPERVISOR_fpu_taskswitch(int set)
{
        return _hypercall1(int, fpu_taskswitch, set);
}

static inline int
HYPERVISOR_sched_op(int cmd, void *arg)
{
        return _hypercall2(int, sched_op, cmd, arg);
}

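/*
 * The timeout is a 64-bit quantity, so it is passed as explicit low/high
 * halves; that lets the same wrapper work on 32-bit, where a u64 does not
 * fit in a single argument register (on 64-bit the low half already
 * carries the full value).
 */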
static inline long
HYPERVISOR_set_timer_op(u64 timeout)
{
        unsigned long timeout_hi = (unsigned long)(timeout>>32);
        unsigned long timeout_lo = (unsigned long)timeout;
        return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
}

static inline int
HYPERVISOR_mca(struct xen_mc *mc_op)
{
        mc_op->interface_version = XEN_MCA_INTERFACE_VERSION;
        return _hypercall1(int, mca, mc_op);
}

static inline int
HYPERVISOR_platform_op(struct xen_platform_op *op)
{
        op->interface_version = XENPF_INTERFACE_VERSION;
        return _hypercall1(int, platform_op, op);
}

static inline int
HYPERVISOR_set_debugreg(int reg, unsigned long value)
{
        return _hypercall2(int, set_debugreg, reg, value);
}

static inline unsigned long
HYPERVISOR_get_debugreg(int reg)
{
        return _hypercall1(unsigned long, get_debugreg, reg);
}

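/*
 * The sizeof(u64) == sizeof(long) checks below are compile-time constants:
 * on 64-bit a 64-bit value fits in one argument register, while on 32-bit
 * it has to be passed as explicit low/high halves, so only the matching
 * branch survives in the generated code.
 */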
static inline int
HYPERVISOR_update_descriptor(u64 ma, u64 desc)
{
        if (sizeof(u64) == sizeof(long))
                return _hypercall2(int, update_descriptor, ma, desc);
        return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
}

static inline long
HYPERVISOR_memory_op(unsigned int cmd, void *arg)
{
        return _hypercall2(long, memory_op, cmd, arg);
}

static inline int
HYPERVISOR_multicall(void *call_list, uint32_t nr_calls)
{
        return _hypercall2(int, multicall, call_list, nr_calls);
}

static inline int
HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val,
                             unsigned long flags)
{
        if (sizeof(new_val) == sizeof(long))
                return _hypercall3(int, update_va_mapping, va,
                                   new_val.pte, flags);
        else
                return _hypercall4(int, update_va_mapping, va,
                                   new_val.pte, new_val.pte >> 32, flags);
}
extern int __must_check xen_event_channel_op_compat(int, void *);

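/*
 * event_channel_op and physdev_op have not always had this two-argument
 * form; if the hypervisor returns -ENOSYS, the wrappers fall back to the
 * legacy single-argument *_op_compat paths.
 */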
static inline int
HYPERVISOR_event_channel_op(int cmd, void *arg)
{
        int rc = _hypercall2(int, event_channel_op, cmd, arg);
        if (unlikely(rc == -ENOSYS))
                rc = xen_event_channel_op_compat(cmd, arg);
        return rc;
}

static inline int
HYPERVISOR_xen_version(int cmd, void *arg)
{
        return _hypercall2(int, xen_version, cmd, arg);
}

static inline int
HYPERVISOR_console_io(int cmd, int count, char *str)
{
        return _hypercall3(int, console_io, cmd, count, str);
}

extern int __must_check xen_physdev_op_compat(int, void *);

static inline int
HYPERVISOR_physdev_op(int cmd, void *arg)
{
        int rc = _hypercall2(int, physdev_op, cmd, arg);
        if (unlikely(rc == -ENOSYS))
                rc = xen_physdev_op_compat(cmd, arg);
        return rc;
}

static inline int
HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
{
        return _hypercall3(int, grant_table_op, cmd, uop, count);
}

static inline int
HYPERVISOR_update_va_mapping_otherdomain(unsigned long va, pte_t new_val,
                                          unsigned long flags, domid_t domid)
{
        if (sizeof(new_val) == sizeof(long))
                return _hypercall4(int, update_va_mapping_otherdomain, va,
                                   new_val.pte, flags, domid);
        else
                return _hypercall5(int, update_va_mapping_otherdomain, va,
                                   new_val.pte, new_val.pte >> 32,
                                   flags, domid);
}

static inline int
HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type)
{
        return _hypercall2(int, vm_assist, cmd, type);
}

static inline int
HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args)
{
        return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
}

#ifdef CONFIG_X86_64
static inline int
HYPERVISOR_set_segment_base(int reg, unsigned long value)
{
        return _hypercall2(int, set_segment_base, reg, value);
}
#endif

static inline int
HYPERVISOR_suspend(unsigned long start_info_mfn)
{
        struct sched_shutdown r = { .reason = SHUTDOWN_suspend };

        /*
         * For a PV guest the tools require that the start_info mfn be
         * present in rdx/edx when the hypercall is made. Per the
         * hypercall calling convention this is the third hypercall
         * argument, which is start_info_mfn here.
         */
        return _hypercall3(int, sched_op, SCHEDOP_shutdown, &r, start_info_mfn);
}

static inline int
HYPERVISOR_nmi_op(unsigned long op, unsigned long arg)
{
        return _hypercall2(int, nmi_op, op, arg);
}

static inline unsigned long __must_check
HYPERVISOR_hvm_op(int op, void *arg)
{
        return _hypercall2(unsigned long, hvm_op, op, arg);
}

static inline int
HYPERVISOR_tmem_op(
        struct tmem_op *op)
{
        return _hypercall1(int, tmem_op, op);
}

static inline int
HYPERVISOR_xenpmu_op(unsigned int op, void *arg)
{
        return _hypercall2(int, xenpmu_op, op, arg);
}

static inline int
HYPERVISOR_dm_op(
        domid_t dom, unsigned int nr_bufs, void *bufs)
{
        return _hypercall3(int, dm_op, dom, nr_bufs, bufs);
}

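/*
 * The MULTI_* helpers below do not issue hypercalls themselves; each one
 * fills in a struct multicall_entry (op plus args[]) for later batched
 * submission via HYPERVISOR_multicall(), and records the entry with the
 * xen_mc tracepoint.
 */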
static inline void
MULTI_fpu_taskswitch(struct multicall_entry *mcl, int set)
{
        mcl->op = __HYPERVISOR_fpu_taskswitch;
        mcl->args[0] = set;

        trace_xen_mc_entry(mcl, 1);
}

static inline void
MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
                        pte_t new_val, unsigned long flags)
{
        mcl->op = __HYPERVISOR_update_va_mapping;
        mcl->args[0] = va;
        if (sizeof(new_val) == sizeof(long)) {
                mcl->args[1] = new_val.pte;
                mcl->args[2] = flags;
        } else {
                mcl->args[1] = new_val.pte;
                mcl->args[2] = new_val.pte >> 32;
                mcl->args[3] = flags;
        }

        trace_xen_mc_entry(mcl, sizeof(new_val) == sizeof(long) ? 3 : 4);
}

static inline void
MULTI_grant_table_op(struct multicall_entry *mcl, unsigned int cmd,
                     void *uop, unsigned int count)
{
        mcl->op = __HYPERVISOR_grant_table_op;
        mcl->args[0] = cmd;
        mcl->args[1] = (unsigned long)uop;
        mcl->args[2] = count;

        trace_xen_mc_entry(mcl, 3);
}

static inline void
MULTI_update_va_mapping_otherdomain(struct multicall_entry *mcl, unsigned long va,
                                    pte_t new_val, unsigned long flags,
                                    domid_t domid)
{
        mcl->op = __HYPERVISOR_update_va_mapping_otherdomain;
        mcl->args[0] = va;
        if (sizeof(new_val) == sizeof(long)) {
                mcl->args[1] = new_val.pte;
                mcl->args[2] = flags;
                mcl->args[3] = domid;
        } else {
                mcl->args[1] = new_val.pte;
                mcl->args[2] = new_val.pte >> 32;
                mcl->args[3] = flags;
                mcl->args[4] = domid;
        }

        trace_xen_mc_entry(mcl, sizeof(new_val) == sizeof(long) ? 4 : 5);
}

static inline void
MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr,
                        struct desc_struct desc)
{
        mcl->op = __HYPERVISOR_update_descriptor;
        if (sizeof(maddr) == sizeof(long)) {
                mcl->args[0] = maddr;
                mcl->args[1] = *(unsigned long *)&desc;
        } else {
                mcl->args[0] = maddr;
                mcl->args[1] = maddr >> 32;
                mcl->args[2] = desc.a;
                mcl->args[3] = desc.b;
        }

        trace_xen_mc_entry(mcl, sizeof(maddr) == sizeof(long) ? 2 : 4);
}

static inline void
MULTI_memory_op(struct multicall_entry *mcl, unsigned int cmd, void *arg)
{
        mcl->op = __HYPERVISOR_memory_op;
        mcl->args[0] = cmd;
        mcl->args[1] = (unsigned long)arg;

        trace_xen_mc_entry(mcl, 2);
}

static inline void
MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
                 int count, int *success_count, domid_t domid)
{
        mcl->op = __HYPERVISOR_mmu_update;
        mcl->args[0] = (unsigned long)req;
        mcl->args[1] = count;
        mcl->args[2] = (unsigned long)success_count;
        mcl->args[3] = domid;

        trace_xen_mc_entry(mcl, 4);
}

static inline void
MULTI_mmuext_op(struct multicall_entry *mcl, struct mmuext_op *op, int count,
                int *success_count, domid_t domid)
{
        mcl->op = __HYPERVISOR_mmuext_op;
        mcl->args[0] = (unsigned long)op;
        mcl->args[1] = count;
        mcl->args[2] = (unsigned long)success_count;
        mcl->args[3] = domid;

        trace_xen_mc_entry(mcl, 4);
}

static inline void
MULTI_set_gdt(struct multicall_entry *mcl, unsigned long *frames, int entries)
{
        mcl->op = __HYPERVISOR_set_gdt;
        mcl->args[0] = (unsigned long)frames;
        mcl->args[1] = entries;

        trace_xen_mc_entry(mcl, 2);
}

static inline void
MULTI_stack_switch(struct multicall_entry *mcl,
                   unsigned long ss, unsigned long esp)
{
        mcl->op = __HYPERVISOR_stack_switch;
        mcl->args[0] = ss;
        mcl->args[1] = esp;

        trace_xen_mc_entry(mcl, 2);
}

#endif /* _ASM_X86_XEN_HYPERCALL_H */