/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/jump_label.h>

#include <asm/paravirt.h>

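/*
 * native_queued_spin_unlock() is an inline; wrap it in a real function
 * so that PV_CALLEE_SAVE_REGS_THUNK() can generate a callee-save thunk
 * (__raw_callee_save___native_queued_spin_unlock) for paravirt call sites.
 */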
__visible void __native_queued_spin_unlock(struct qspinlock *lock)
{
	native_queued_spin_unlock(lock);
}
PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);

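/* True if the unlock op still points at the native callee-save thunk. */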
bool pv_is_native_spin_unlock(void)
{
	return pv_lock_ops.queued_spin_unlock.func ==
		__raw_callee_save___native_queued_spin_unlock;
}

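/* On bare metal there is no hypervisor, so a CPU is never preempted. */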
__visible bool __native_vcpu_is_preempted(long cpu)
{
	return false;
}
PV_CALLEE_SAVE_REGS_THUNK(__native_vcpu_is_preempted);

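/* True if the vcpu_is_preempted op still points at the native thunk. */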
bool pv_is_native_vcpu_is_preempted(void)
{
	return pv_lock_ops.vcpu_is_preempted.func ==
		__raw_callee_save___native_vcpu_is_preempted;
}

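/*
 * Default to the native qspinlock implementation with no-op wait/kick
 * hooks; hypervisor guest setup (e.g. Xen or KVM) replaces these ops
 * at boot when paravirtualized spinlocks are in use.
 */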
struct pv_lock_ops pv_lock_ops = {
#ifdef CONFIG_SMP
	.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
	.queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
	.wait = paravirt_nop,
	.kick = paravirt_nop,
	.vcpu_is_preempted = PV_CALLEE_SAVE(__native_vcpu_is_preempted),
#endif /* SMP */
};
EXPORT_SYMBOL(pv_lock_ops);