// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright © 2019 Oracle and/or its affiliates. All rights reserved.
 * Copyright © 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * KVM Xen emulation
 */

#include "x86.h"
#include "xen.h"
#include "hyperv.h"

#include <linux/kvm_host.h>
#include <linux/sched/stat.h>

#include <trace/events/kvm.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include "trace.h"

DEFINE_STATIC_KEY_DEFERRED_FALSE(kvm_xen_enabled, HZ);

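/*
 * Note: this is a *deferred* static key. It is raised in
 * kvm_xen_hvm_config() when the first VM sets a hypercall MSR, and when
 * the last such VM goes away the key is only lowered after a grace
 * period of HZ jiffies (roughly one second), so that VMs torn down and
 * recreated in quick succession don't repeatedly patch the kernel text.
 */
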
static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
{
	gpa_t gpa = gfn_to_gpa(gfn);
	int wc_ofs, sec_hi_ofs;
	int ret = 0;
	int idx = srcu_read_lock(&kvm->srcu);

	if (kvm_is_error_hva(gfn_to_hva(kvm, gfn))) {
		ret = -EFAULT;
		goto out;
	}

	kvm->arch.xen.shinfo_gfn = gfn;

	/* Paranoia checks on the 32-bit struct layout */
	BUILD_BUG_ON(offsetof(struct compat_shared_info, wc) != 0x900);
	BUILD_BUG_ON(offsetof(struct compat_shared_info, arch.wc_sec_hi) != 0x924);
	BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);

	/* 32-bit location by default */
	wc_ofs = offsetof(struct compat_shared_info, wc);
	sec_hi_ofs = offsetof(struct compat_shared_info, arch.wc_sec_hi);

#ifdef CONFIG_X86_64
	/* Paranoia checks on the 64-bit struct layout */
	BUILD_BUG_ON(offsetof(struct shared_info, wc) != 0xc00);
	BUILD_BUG_ON(offsetof(struct shared_info, wc_sec_hi) != 0xc0c);

	if (kvm->arch.xen.long_mode) {
		wc_ofs = offsetof(struct shared_info, wc);
		sec_hi_ofs = offsetof(struct shared_info, wc_sec_hi);
	}
#endif

	kvm_write_wall_clock(kvm, gpa + wc_ofs, sec_hi_ofs - wc_ofs);
	kvm_make_all_cpus_request(kvm, KVM_REQ_MASTERCLOCK_UPDATE);

out:
	srcu_read_unlock(&kvm->srcu, idx);
	return ret;
}

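/*
 * The second argument passed to kvm_write_wall_clock() above is the
 * distance from the wall clock structure to the field holding the upper
 * 32 bits of the seconds count: arch.wc_sec_hi for 32-bit guests
 * (0x924 - 0x900 = 0x24), and the adjacent wc_sec_hi field for 64-bit
 * guests (0xc0c - 0xc00 = 0xc).
 */
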
static void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
{
	struct kvm_vcpu_xen *vx = &v->arch.xen;
	u64 now = get_kvmclock_ns(v->kvm);
	u64 delta_ns = now - vx->runstate_entry_time;
	u64 run_delay = current->sched_info.run_delay;

	if (unlikely(!vx->runstate_entry_time))
		vx->current_runstate = RUNSTATE_offline;

	/*
	 * Time waiting for the scheduler isn't "stolen" if the
	 * vCPU wasn't running anyway.
	 */
	if (vx->current_runstate == RUNSTATE_running) {
		u64 steal_ns = run_delay - vx->last_steal;

		delta_ns -= steal_ns;

		vx->runstate_times[RUNSTATE_runnable] += steal_ns;
	}
	vx->last_steal = run_delay;

	vx->runstate_times[vx->current_runstate] += delta_ns;
	vx->current_runstate = state;
	vx->runstate_entry_time = now;
}

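/*
 * Worked example (illustrative numbers only): if 10ms of kvmclock time
 * elapsed since the last transition (delta_ns = 10000000) and the host
 * scheduler kept this task off the CPU for 2ms of that
 * (steal_ns = 2000000), then 2ms is credited to RUNSTATE_runnable and
 * the remaining 8ms to the state the vCPU was actually in.
 */
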
void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
{
	struct kvm_vcpu_xen *vx = &v->arch.xen;
	uint64_t state_entry_time;
	unsigned int offset;

	kvm_xen_update_runstate(v, state);

	if (!vx->runstate_set)
		return;

	BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c);

	offset = offsetof(struct compat_vcpu_runstate_info, state_entry_time);
#ifdef CONFIG_X86_64
	/*
	 * The only difference is alignment of uint64_t in 32-bit.
	 * So the first field 'state' is accessed directly using
	 * offsetof() (where its offset happens to be zero), while the
	 * remaining fields which are all uint64_t, start at 'offset'
	 * which we tweak here by adding 4.
	 */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
		     offsetof(struct compat_vcpu_runstate_info, state_entry_time) + 4);
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, time) !=
		     offsetof(struct compat_vcpu_runstate_info, time) + 4);

	if (v->kvm->arch.xen.long_mode)
		offset = offsetof(struct vcpu_runstate_info, state_entry_time);
#endif
	/*
	 * First write the updated state_entry_time at the appropriate
	 * location determined by 'offset'.
	 */
	state_entry_time = vx->runstate_entry_time;
	state_entry_time |= XEN_RUNSTATE_UPDATE;

	BUILD_BUG_ON(sizeof(((struct vcpu_runstate_info *)0)->state_entry_time) !=
		     sizeof(state_entry_time));
	BUILD_BUG_ON(sizeof(((struct compat_vcpu_runstate_info *)0)->state_entry_time) !=
		     sizeof(state_entry_time));

	if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
					  &state_entry_time, offset,
					  sizeof(state_entry_time)))
		return;
	smp_wmb();

	/*
	 * Next, write the new runstate. This is in the *same* place
	 * for 32-bit and 64-bit guests, asserted here for paranoia.
	 */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) !=
		     offsetof(struct compat_vcpu_runstate_info, state));
	BUILD_BUG_ON(sizeof(((struct vcpu_runstate_info *)0)->state) !=
		     sizeof(vx->current_runstate));
	BUILD_BUG_ON(sizeof(((struct compat_vcpu_runstate_info *)0)->state) !=
		     sizeof(vx->current_runstate));

	if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
					  &vx->current_runstate,
					  offsetof(struct vcpu_runstate_info, state),
					  sizeof(vx->current_runstate)))
		return;

	/*
	 * Write the actual runstate times immediately after the
	 * runstate_entry_time.
	 */
	BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
		     offsetof(struct vcpu_runstate_info, time) - sizeof(u64));
	BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state_entry_time) !=
		     offsetof(struct compat_vcpu_runstate_info, time) - sizeof(u64));
	BUILD_BUG_ON(sizeof(((struct vcpu_runstate_info *)0)->time) !=
		     sizeof(((struct compat_vcpu_runstate_info *)0)->time));
	BUILD_BUG_ON(sizeof(((struct vcpu_runstate_info *)0)->time) !=
		     sizeof(vx->runstate_times));

	if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
					  &vx->runstate_times[0],
					  offset + sizeof(u64),
					  sizeof(vx->runstate_times)))
		return;

	smp_wmb();

	/*
	 * Finally, clear the XEN_RUNSTATE_UPDATE bit in the guest's
	 * runstate_entry_time field.
	 */
	state_entry_time &= ~XEN_RUNSTATE_UPDATE;
	if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
					  &state_entry_time, offset,
					  sizeof(state_entry_time)))
		return;
}

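/*
 * The XEN_RUNSTATE_UPDATE bit gives the guest a seqlock-like protocol
 * for consuming this data. A sketch of the reader side (the guest's
 * job, not KVM's):
 *
 *	do {
 *		entry = READ_ONCE(rs->state_entry_time);
 *		rmb();
 *		... copy rs->state and rs->time[] ...
 *		rmb();
 *	} while ((entry & XEN_RUNSTATE_UPDATE) ||
 *		 entry != READ_ONCE(rs->state_entry_time));
 */
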
int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
{
	u8 rc = 0;

	/*
	 * If the global upcall vector (HVMIRQ_callback_vector) is set and
	 * the vCPU's evtchn_upcall_pending flag is set, the IRQ is pending.
	 */
	struct gfn_to_hva_cache *ghc = &v->arch.xen.vcpu_info_cache;
	struct kvm_memslots *slots = kvm_memslots(v->kvm);
	unsigned int offset = offsetof(struct vcpu_info, evtchn_upcall_pending);

	/* No need for compat handling here */
	BUILD_BUG_ON(offsetof(struct vcpu_info, evtchn_upcall_pending) !=
		     offsetof(struct compat_vcpu_info, evtchn_upcall_pending));
	BUILD_BUG_ON(sizeof(rc) !=
		     sizeof(((struct vcpu_info *)0)->evtchn_upcall_pending));
	BUILD_BUG_ON(sizeof(rc) !=
		     sizeof(((struct compat_vcpu_info *)0)->evtchn_upcall_pending));

	/*
	 * For efficiency, this mirrors the checks for using the valid
	 * cache in kvm_read_guest_offset_cached(), but just uses
	 * __get_user() instead. And falls back to the slow path.
	 */
	if (likely(slots->generation == ghc->generation &&
		   !kvm_is_error_hva(ghc->hva) && ghc->memslot)) {
		/* Fast path */
		__get_user(rc, (u8 __user *)ghc->hva + offset);
	} else {
		/* Slow path */
		kvm_read_guest_offset_cached(v->kvm, ghc, &rc, offset,
					     sizeof(rc));
	}

	return rc;
}

int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
	int r = -ENOENT;

	mutex_lock(&kvm->lock);

	switch (data->type) {
	case KVM_XEN_ATTR_TYPE_LONG_MODE:
		if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode) {
			r = -EINVAL;
		} else {
			kvm->arch.xen.long_mode = !!data->u.long_mode;
			r = 0;
		}
		break;

	case KVM_XEN_ATTR_TYPE_SHARED_INFO:
		if (data->u.shared_info.gfn == GPA_INVALID) {
			kvm->arch.xen.shinfo_gfn = GPA_INVALID;
			r = 0;
			break;
		}
		r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn);
		break;

	case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
		if (data->u.vector && data->u.vector < 0x10)
			r = -EINVAL;
		else {
			kvm->arch.xen.upcall_vector = data->u.vector;
			r = 0;
		}
		break;

	default:
		break;
	}

	mutex_unlock(&kvm->lock);
	return r;
}

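/*
 * Illustrative VMM usage (a sketch, with a hypothetical shinfo_gfn
 * value chosen by the VMM):
 *
 *	struct kvm_xen_hvm_attr ha = {
 *		.type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
 *		.u.shared_info.gfn = shinfo_gfn,
 *	};
 *	ioctl(vm_fd, KVM_XEN_HVM_SET_ATTR, &ha);
 *
 * Passing GPA_INVALID as the gfn tears the shared_info mapping down.
 */
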
int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
{
	int r = -ENOENT;

	mutex_lock(&kvm->lock);

	switch (data->type) {
	case KVM_XEN_ATTR_TYPE_LONG_MODE:
		data->u.long_mode = kvm->arch.xen.long_mode;
		r = 0;
		break;

	case KVM_XEN_ATTR_TYPE_SHARED_INFO:
		data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_gfn);
		r = 0;
		break;

	case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
		data->u.vector = kvm->arch.xen.upcall_vector;
		r = 0;
		break;

	default:
		break;
	}

	mutex_unlock(&kvm->lock);
	return r;
}

int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
{
	int idx, r = -ENOENT;

	mutex_lock(&vcpu->kvm->lock);
	idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (data->type) {
	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
		/* No compat necessary here. */
		BUILD_BUG_ON(sizeof(struct vcpu_info) !=
			     sizeof(struct compat_vcpu_info));
		BUILD_BUG_ON(offsetof(struct vcpu_info, time) !=
			     offsetof(struct compat_vcpu_info, time));

		if (data->u.gpa == GPA_INVALID) {
			vcpu->arch.xen.vcpu_info_set = false;
			r = 0;
			break;
		}

		r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
					      &vcpu->arch.xen.vcpu_info_cache,
					      data->u.gpa,
					      sizeof(struct vcpu_info));
		if (!r) {
			vcpu->arch.xen.vcpu_info_set = true;
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
		}
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
		if (data->u.gpa == GPA_INVALID) {
			vcpu->arch.xen.vcpu_time_info_set = false;
			r = 0;
			break;
		}

		r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
					      &vcpu->arch.xen.vcpu_time_info_cache,
					      data->u.gpa,
					      sizeof(struct pvclock_vcpu_time_info));
		if (!r) {
			vcpu->arch.xen.vcpu_time_info_set = true;
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
		}
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.gpa == GPA_INVALID) {
			vcpu->arch.xen.runstate_set = false;
			r = 0;
			break;
		}

		r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
					      &vcpu->arch.xen.runstate_cache,
					      data->u.gpa,
					      sizeof(struct vcpu_runstate_info));
		if (!r)
			vcpu->arch.xen.runstate_set = true;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.runstate.state > RUNSTATE_offline) {
			r = -EINVAL;
			break;
		}

		kvm_xen_update_runstate(vcpu, data->u.runstate.state);
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.runstate.state > RUNSTATE_offline) {
			r = -EINVAL;
			break;
		}
		if (data->u.runstate.state_entry_time !=
		    (data->u.runstate.time_running +
		     data->u.runstate.time_runnable +
		     data->u.runstate.time_blocked +
		     data->u.runstate.time_offline)) {
			r = -EINVAL;
			break;
		}
		if (get_kvmclock_ns(vcpu->kvm) <
		    data->u.runstate.state_entry_time) {
			r = -EINVAL;
			break;
		}

		vcpu->arch.xen.current_runstate = data->u.runstate.state;
		vcpu->arch.xen.runstate_entry_time =
			data->u.runstate.state_entry_time;
		vcpu->arch.xen.runstate_times[RUNSTATE_running] =
			data->u.runstate.time_running;
		vcpu->arch.xen.runstate_times[RUNSTATE_runnable] =
			data->u.runstate.time_runnable;
		vcpu->arch.xen.runstate_times[RUNSTATE_blocked] =
			data->u.runstate.time_blocked;
		vcpu->arch.xen.runstate_times[RUNSTATE_offline] =
			data->u.runstate.time_offline;
		vcpu->arch.xen.last_steal = current->sched_info.run_delay;

		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (data->u.runstate.state > RUNSTATE_offline &&
		    data->u.runstate.state != (u64)-1) {
			r = -EINVAL;
			break;
		}
		/* The adjustment must add up */
		if (data->u.runstate.state_entry_time !=
		    (data->u.runstate.time_running +
		     data->u.runstate.time_runnable +
		     data->u.runstate.time_blocked +
		     data->u.runstate.time_offline)) {
			r = -EINVAL;
			break;
		}

		if (get_kvmclock_ns(vcpu->kvm) <
		    (vcpu->arch.xen.runstate_entry_time +
		     data->u.runstate.state_entry_time)) {
			r = -EINVAL;
			break;
		}

		vcpu->arch.xen.runstate_entry_time +=
			data->u.runstate.state_entry_time;
		vcpu->arch.xen.runstate_times[RUNSTATE_running] +=
			data->u.runstate.time_running;
		vcpu->arch.xen.runstate_times[RUNSTATE_runnable] +=
			data->u.runstate.time_runnable;
		vcpu->arch.xen.runstate_times[RUNSTATE_blocked] +=
			data->u.runstate.time_blocked;
		vcpu->arch.xen.runstate_times[RUNSTATE_offline] +=
			data->u.runstate.time_offline;

		if (data->u.runstate.state <= RUNSTATE_offline)
			kvm_xen_update_runstate(vcpu, data->u.runstate.state);
		r = 0;
		break;

	default:
		break;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	mutex_unlock(&vcpu->kvm->lock);
	return r;
}

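/*
 * A sketch of the intended save/restore flow: the VMM reads the
 * accumulated times on the source with KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA
 * (see kvm_xen_vcpu_get_attr() below) and writes them back on the
 * destination with the same attribute, or applies deltas with
 * ..._RUNSTATE_ADJUST. In both cases the four time buckets must sum to
 * state_entry_time, as checked above.
 */
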
int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
{
	int r = -ENOENT;

	mutex_lock(&vcpu->kvm->lock);

	switch (data->type) {
	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
		if (vcpu->arch.xen.vcpu_info_set)
			data->u.gpa = vcpu->arch.xen.vcpu_info_cache.gpa;
		else
			data->u.gpa = GPA_INVALID;
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
		if (vcpu->arch.xen.vcpu_time_info_set)
			data->u.gpa = vcpu->arch.xen.vcpu_time_info_cache.gpa;
		else
			data->u.gpa = GPA_INVALID;
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		if (vcpu->arch.xen.runstate_set) {
			data->u.gpa = vcpu->arch.xen.runstate_cache.gpa;
			r = 0;
		}
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		data->u.runstate.state = vcpu->arch.xen.current_runstate;
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA:
		if (!sched_info_on()) {
			r = -EOPNOTSUPP;
			break;
		}
		data->u.runstate.state = vcpu->arch.xen.current_runstate;
		data->u.runstate.state_entry_time =
			vcpu->arch.xen.runstate_entry_time;
		data->u.runstate.time_running =
			vcpu->arch.xen.runstate_times[RUNSTATE_running];
		data->u.runstate.time_runnable =
			vcpu->arch.xen.runstate_times[RUNSTATE_runnable];
		data->u.runstate.time_blocked =
			vcpu->arch.xen.runstate_times[RUNSTATE_blocked];
		data->u.runstate.time_offline =
			vcpu->arch.xen.runstate_times[RUNSTATE_offline];
		r = 0;
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST:
		r = -EINVAL;
		break;

	default:
		break;
	}

	mutex_unlock(&vcpu->kvm->lock);
	return r;
}

int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
{
	struct kvm *kvm = vcpu->kvm;
	u32 page_num = data & ~PAGE_MASK;
	u64 page_addr = data & PAGE_MASK;
	bool lm = is_long_mode(vcpu);

	/* Latch long_mode for shared_info pages etc. */
	vcpu->kvm->arch.xen.long_mode = lm;

	/*
	 * If Xen hypercall intercept is enabled, fill the hypercall
	 * page with VMCALL/VMMCALL instructions since that's what
	 * we catch. Else the VMM has provided the hypercall pages
	 * with instructions of its own choosing, so use those.
	 */
	if (kvm_xen_hypercall_enabled(kvm)) {
		u8 instructions[32];
		int i;

		if (page_num)
			return 1;

		/* mov imm32, %eax */
		instructions[0] = 0xb8;

		/* vmcall / vmmcall */
		kvm_x86_ops.patch_hypercall(vcpu, instructions + 5);

		/* ret */
		instructions[8] = 0xc3;

		/* int3 to pad */
		memset(instructions + 9, 0xcc, sizeof(instructions) - 9);

		for (i = 0; i < PAGE_SIZE / sizeof(instructions); i++) {
			*(u32 *)&instructions[1] = i;
			if (kvm_vcpu_write_guest(vcpu,
						 page_addr + (i * sizeof(instructions)),
						 instructions, sizeof(instructions)))
				return 1;
		}
	} else {
		/*
		 * Note, truncation is a non-issue as 'lm' is guaranteed to be
		 * false for a 32-bit kernel, i.e. when hva_t is only 4 bytes.
		 */
		hva_t blob_addr = lm ? kvm->arch.xen_hvm_config.blob_addr_64
				     : kvm->arch.xen_hvm_config.blob_addr_32;
		u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
				  : kvm->arch.xen_hvm_config.blob_size_32;
		u8 *page;

		if (page_num >= blob_size)
			return 1;

		blob_addr += page_num * PAGE_SIZE;

		page = memdup_user((u8 __user *)blob_addr, PAGE_SIZE);
		if (IS_ERR(page))
			return PTR_ERR(page);

		if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE)) {
			kfree(page);
			return 1;
		}

		kfree(page);
	}
	return 0;
}

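/*
 * The generated page is thus an array of 32-byte slots, one per
 * hypercall number, each looking roughly like:
 *
 *	mov	$<hypercall_nr>, %eax
 *	vmcall			; or vmmcall on AMD
 *	ret
 *	int3 padding up to the next slot
 *
 * so a guest invoking hypercall N does 'call hypercall_page + N * 32'.
 */
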
int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc)
{
	if (xhc->flags & ~KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL)
		return -EINVAL;

	/*
	 * With hypercall interception the kernel generates its own
	 * hypercall page so it must not be provided.
	 */
	if ((xhc->flags & KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) &&
	    (xhc->blob_addr_32 || xhc->blob_addr_64 ||
	     xhc->blob_size_32 || xhc->blob_size_64))
		return -EINVAL;

	mutex_lock(&kvm->lock);

	if (xhc->msr && !kvm->arch.xen_hvm_config.msr)
		static_branch_inc(&kvm_xen_enabled.key);
	else if (!xhc->msr && kvm->arch.xen_hvm_config.msr)
		static_branch_slow_dec_deferred(&kvm_xen_enabled);

	memcpy(&kvm->arch.xen_hvm_config, xhc, sizeof(*xhc));

	mutex_unlock(&kvm->lock);
	return 0;
}

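/*
 * Illustrative VMM usage (a sketch; the MSR index is an example value
 * chosen by the VMM, not mandated by KVM):
 *
 *	struct kvm_xen_hvm_config xhc = {
 *		.flags = KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL,
 *		.msr = 0x40000000,
 *	};
 *	ioctl(vm_fd, KVM_XEN_HVM_CONFIG, &xhc);
 */
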
void kvm_xen_init_vm(struct kvm *kvm)
{
	kvm->arch.xen.shinfo_gfn = GPA_INVALID;
}

void kvm_xen_destroy_vm(struct kvm *kvm)
{
	if (kvm->arch.xen_hvm_config.msr)
		static_branch_slow_dec_deferred(&kvm_xen_enabled);
}

static int kvm_xen_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{
	kvm_rax_write(vcpu, result);
	return kvm_skip_emulated_instruction(vcpu);
}

static int kvm_xen_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.xen.hypercall_rip)))
		return 1;

	return kvm_xen_hypercall_set_result(vcpu, run->xen.u.hcall.result);
}

int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
{
	bool longmode;
	u64 input, params[6];

	input = (u64)kvm_register_read(vcpu, VCPU_REGS_RAX);

	/* Hyper-V hypercalls get bit 31 set in EAX */
	if ((input & 0x80000000) &&
	    kvm_hv_hypercall_enabled(vcpu))
		return kvm_hv_hypercall(vcpu);

	longmode = is_64_bit_mode(vcpu);
	if (!longmode) {
		params[0] = (u32)kvm_rbx_read(vcpu);
		params[1] = (u32)kvm_rcx_read(vcpu);
		params[2] = (u32)kvm_rdx_read(vcpu);
		params[3] = (u32)kvm_rsi_read(vcpu);
		params[4] = (u32)kvm_rdi_read(vcpu);
		params[5] = (u32)kvm_rbp_read(vcpu);
	}
#ifdef CONFIG_X86_64
	else {
		params[0] = (u64)kvm_rdi_read(vcpu);
		params[1] = (u64)kvm_rsi_read(vcpu);
		params[2] = (u64)kvm_rdx_read(vcpu);
		params[3] = (u64)kvm_r10_read(vcpu);
		params[4] = (u64)kvm_r8_read(vcpu);
		params[5] = (u64)kvm_r9_read(vcpu);
	}
#endif
	trace_kvm_xen_hypercall(input, params[0], params[1], params[2],
				params[3], params[4], params[5]);

	vcpu->run->exit_reason = KVM_EXIT_XEN;
	vcpu->run->xen.type = KVM_EXIT_XEN_HCALL;
	vcpu->run->xen.u.hcall.longmode = longmode;
	vcpu->run->xen.u.hcall.cpl = kvm_x86_ops.get_cpl(vcpu);
	vcpu->run->xen.u.hcall.input = input;
	vcpu->run->xen.u.hcall.params[0] = params[0];
	vcpu->run->xen.u.hcall.params[1] = params[1];
	vcpu->run->xen.u.hcall.params[2] = params[2];
	vcpu->run->xen.u.hcall.params[3] = params[3];
	vcpu->run->xen.u.hcall.params[4] = params[4];
	vcpu->run->xen.u.hcall.params[5] = params[5];
	vcpu->arch.xen.hypercall_rip = kvm_get_linear_rip(vcpu);
	vcpu->arch.complete_userspace_io =
		kvm_xen_hypercall_complete_userspace;

	return 0;
}
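
/*
 * Illustrative VMM side (a sketch): on a KVM_EXIT_XEN exit the VMM
 * services run->xen.u.hcall, stores the return value in
 * run->xen.u.hcall.result and re-enters the vCPU with KVM_RUN;
 * kvm_xen_hypercall_complete_userspace() then copies that result into
 * RAX and skips the intercepted vmcall/vmmcall instruction.
 */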