/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#include <linux/bug.h>
#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/sched/stat.h>
#include <trace/events/kvm.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_psci.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include <linux/uaccess.h>
#include <asm/ptrace.h>
#include <asm/mman.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/virt.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/sections.h>
#ifdef REQUIRES_VIRT
__asm__(".arch_extension	virt");
#endif
DEFINE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);

/* Per-CPU variable containing the currently running vcpu. */
static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);

/* The VMID used in the VTTBR */
static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
static u32 kvm_next_vmid;
static DEFINE_SPINLOCK(kvm_vmid_lock);

static bool vgic_present;

static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
{
        __this_cpu_write(kvm_arm_running_vcpu, vcpu);
}

DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
/**
 * kvm_arm_get_running_vcpu - get the vcpu running on the current CPU.
 * Must be called from non-preemptible context
 */
struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
{
        return __this_cpu_read(kvm_arm_running_vcpu);
}
/**
 * kvm_arm_get_running_vcpus - get the per-CPU array of currently running vcpus.
 */
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
{
        return &kvm_arm_running_vcpu;
}
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}
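
/*
 * Added note (not part of the original source): a kick (IPI) is only
 * useful while the target vcpu is IN_GUEST_MODE; if it has already
 * exited, the pending request will be noticed on the next guest entry
 * anyway, so no IPI is sent.
 */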
int kvm_arch_hardware_setup(void)
{
        return 0;
}
void kvm_arch_check_processor_compat(void *rtn)
{
        *(int *)rtn = 0;
}
/**
 * kvm_arch_init_vm - initializes a VM data structure
 * @kvm:	pointer to the KVM struct
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        int ret, cpu;

        ret = kvm_arm_setup_stage2(kvm, type);
        if (ret)
                return ret;

        kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
        if (!kvm->arch.last_vcpu_ran)
                return -ENOMEM;

        for_each_possible_cpu(cpu)
                *per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;

        ret = kvm_alloc_stage2_pgd(kvm);
        if (ret)
                goto out_fail_alloc;

        ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
        if (ret)
                goto out_free_stage2_pgd;

        kvm_vgic_early_init(kvm);

        /* Mark the initial VMID generation invalid */
        kvm->arch.vmid.vmid_gen = 0;

        /* The maximum number of VCPUs is limited by the host's GIC model */
        kvm->arch.max_vcpus = vgic_present ?
                                kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;

        return ret;
out_free_stage2_pgd:
        kvm_free_stage2_pgd(kvm);
out_fail_alloc:
        free_percpu(kvm->arch.last_vcpu_ran);
        kvm->arch.last_vcpu_ran = NULL;
        return ret;
}
bool kvm_arch_has_vcpu_debugfs(void)
{
        return false;
}
int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
        return 0;
}
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}
/**
 * kvm_arch_destroy_vm - destroy the VM data structure
 * @kvm:	pointer to the KVM struct
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
        int i;

        kvm_vgic_destroy(kvm);

        free_percpu(kvm->arch.last_vcpu_ran);
        kvm->arch.last_vcpu_ran = NULL;

        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                if (kvm->vcpus[i]) {
                        kvm_arch_vcpu_free(kvm->vcpus[i]);
                        kvm->vcpus[i] = NULL;
                }
        }
        atomic_set(&kvm->online_vcpus, 0);
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_IRQCHIP:
                r = vgic_present;
                break;
        case KVM_CAP_IOEVENTFD:
        case KVM_CAP_DEVICE_CTRL:
        case KVM_CAP_USER_MEMORY:
        case KVM_CAP_SYNC_MMU:
        case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
        case KVM_CAP_ONE_REG:
        case KVM_CAP_ARM_PSCI:
        case KVM_CAP_ARM_PSCI_0_2:
        case KVM_CAP_READONLY_MEM:
        case KVM_CAP_MP_STATE:
        case KVM_CAP_IMMEDIATE_EXIT:
        case KVM_CAP_VCPU_EVENTS:
                r = 1;
                break;
        case KVM_CAP_ARM_SET_DEVICE_ADDR:
                r = 1;
                break;
        case KVM_CAP_NR_VCPUS:
                r = num_online_cpus();
                break;
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
        case KVM_CAP_NR_MEMSLOTS:
                r = KVM_USER_MEM_SLOTS;
                break;
        case KVM_CAP_MSI_DEVID:
                if (!kvm)
                        r = -EINVAL;
                else
                        r = kvm->arch.vgic.msis_require_devid;
                break;
        case KVM_CAP_ARM_USER_IRQ:
                /*
                 * 1: EL1_VTIMER, EL1_PTIMER, and PMU.
                 * (bump this number if adding more devices)
                 */
                r = 1;
                break;
        default:
                r = kvm_arch_vm_ioctl_check_extension(kvm, ext);
                break;
        }
        return r;
}
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        return -EINVAL;
}
struct kvm *kvm_arch_alloc_vm(void)
{
        if (!has_vhe())
                return kzalloc(sizeof(struct kvm), GFP_KERNEL);

        return vzalloc(sizeof(struct kvm));
}
void kvm_arch_free_vm(struct kvm *kvm)
{
        if (!has_vhe())
                kfree(kvm);
        else
                vfree(kvm);
}
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
        int err;
        struct kvm_vcpu *vcpu;

        if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) {
                err = -EBUSY;
                goto out;
        }

        if (id >= kvm->arch.max_vcpus) {
                err = -EINVAL;
                goto out;
        }

        vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!vcpu) {
                err = -ENOMEM;
                goto out;
        }

        err = kvm_vcpu_init(vcpu, kvm, id);
        if (err)
                goto free_vcpu;

        err = create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP);
        if (err)
                goto vcpu_uninit;

        return vcpu;
vcpu_uninit:
        kvm_vcpu_uninit(vcpu);
free_vcpu:
        kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
        return ERR_PTR(err);
}
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.has_run_once && unlikely(!irqchip_in_kernel(vcpu->kvm)))
                static_branch_dec(&userspace_irqchip_in_use);

        kvm_mmu_free_memory_caches(vcpu);
        kvm_timer_vcpu_terminate(vcpu);
        kvm_pmu_vcpu_destroy(vcpu);
        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_free(vcpu);
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return kvm_timer_is_pending(vcpu);
}
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
        kvm_vgic_v4_enable_doorbell(vcpu);
}

void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
        kvm_vgic_v4_disable_doorbell(vcpu);
}
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        /* Force users to call KVM_ARM_VCPU_INIT */
        vcpu->arch.target = -1;
        bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);

        /* Set up the timer */
        kvm_timer_vcpu_init(vcpu);

        kvm_arm_reset_debug_ptr(vcpu);

        return kvm_vgic_vcpu_init(vcpu);
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        int *last_ran;

        last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);

        /*
         * We might get preempted before the vCPU actually runs, but
         * over-invalidation doesn't affect correctness.
         */
        if (*last_ran != vcpu->vcpu_id) {
                kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
                *last_ran = vcpu->vcpu_id;
        }

        vcpu->cpu = cpu;
        vcpu->arch.host_cpu_context = this_cpu_ptr(&kvm_host_cpu_state);

        kvm_arm_set_running_vcpu(vcpu);
        kvm_vgic_load(vcpu);
        kvm_timer_vcpu_load(vcpu);
        kvm_vcpu_load_sysregs(vcpu);
        kvm_arch_vcpu_load_fp(vcpu);

        if (single_task_running())
                vcpu_clear_wfe_traps(vcpu);
        else
                vcpu_set_wfe_traps(vcpu);
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_put_fp(vcpu);
        kvm_vcpu_put_sysregs(vcpu);
        kvm_timer_vcpu_put(vcpu);
        kvm_vgic_put(vcpu);

        vcpu->cpu = -1;

        kvm_arm_set_running_vcpu(NULL);
}
static void vcpu_power_off(struct kvm_vcpu *vcpu)
{
        vcpu->arch.power_off = true;
        kvm_make_request(KVM_REQ_SLEEP, vcpu);
        kvm_vcpu_kick(vcpu);
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        if (vcpu->arch.power_off)
                mp_state->mp_state = KVM_MP_STATE_STOPPED;
        else
                mp_state->mp_state = KVM_MP_STATE_RUNNABLE;

        return 0;
}
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        int ret = 0;

        switch (mp_state->mp_state) {
        case KVM_MP_STATE_RUNNABLE:
                vcpu->arch.power_off = false;
                break;
        case KVM_MP_STATE_STOPPED:
                vcpu_power_off(vcpu);
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}
/**
 * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
 * @v:		The VCPU pointer
 *
 * If the guest CPU is not waiting for interrupts, or an interrupt line is
 * asserted, the CPU is by definition runnable.
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
        bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF);
        return ((irq_lines || kvm_vgic_vcpu_pending_irq(v))
                && !v->arch.power_off && !v->arch.pause);
}
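
/*
 * Added note (not part of the original source): HCR_VI/HCR_VF are the
 * virtual IRQ/FIQ pending bits that vcpu_interrupt_line() sets for a
 * userspace irqchip, so a vcpu blocked in WFI still counts as runnable
 * once one of its interrupt lines is asserted.
 */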
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
        return vcpu_mode_priv(vcpu);
}
/* Just ensure a guest exit from a particular CPU */
static void exit_vm_noop(void *info)
{
}

void force_vm_exit(const cpumask_t *mask)
{
        preempt_disable();
        smp_call_function_many(mask, exit_vm_noop, NULL, true);
        preempt_enable();
}
/**
 * need_new_vmid_gen - check that the VMID is still valid
 * @vmid: The VMID to check
 *
 * return true if there is a new generation of VMIDs being used
 *
 * The hardware supports a limited set of values with the value zero reserved
 * for the host, so we check if an assigned value belongs to a previous
 * generation, which requires us to assign a new value. If we're the
 * first to use a VMID for the new generation, we must flush necessary caches
 * and TLBs on all CPUs.
 */
static bool need_new_vmid_gen(struct kvm_vmid *vmid)
{
        u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen);
        smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */
        return unlikely(READ_ONCE(vmid->vmid_gen) != current_vmid_gen);
}
/**
 * update_vmid - Update the vmid with a valid VMID for the current generation
 * @kvm: The guest that struct vmid belongs to
 * @vmid: The stage-2 VMID information struct
 */
static void update_vmid(struct kvm_vmid *vmid)
{
        if (!need_new_vmid_gen(vmid))
                return;

        spin_lock(&kvm_vmid_lock);

        /*
         * We need to re-check the vmid_gen here to ensure that if another vcpu
         * already allocated a valid vmid for this vm, then this vcpu should
         * use the same vmid.
         */
        if (!need_new_vmid_gen(vmid)) {
                spin_unlock(&kvm_vmid_lock);
                return;
        }

        /* First user of a new VMID generation? */
        if (unlikely(kvm_next_vmid == 0)) {
                atomic64_inc(&kvm_vmid_gen);
                kvm_next_vmid = 1;

                /*
                 * On SMP we know no other CPUs can use this CPU's or each
                 * other's VMID after force_vm_exit returns since the
                 * kvm_vmid_lock blocks them from reentry to the guest.
                 */
                force_vm_exit(cpu_all_mask);
                /*
                 * Now broadcast TLB + ICACHE invalidation over the inner
                 * shareable domain to make sure all data structures are
                 * clean.
                 */
                kvm_call_hyp(__kvm_flush_vm_context);
        }

        vmid->vmid = kvm_next_vmid;
        kvm_next_vmid++;
        kvm_next_vmid &= (1 << kvm_get_vmid_bits()) - 1;

        smp_wmb();
        WRITE_ONCE(vmid->vmid_gen, atomic64_read(&kvm_vmid_gen));

        spin_unlock(&kvm_vmid_lock);
}
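
/*
 * Worked example (added commentary, not part of the original source):
 * with 8-bit VMIDs, kvm_get_vmid_bits() is 8, so the mask above is 0xff
 * and each generation hands out VMIDs 1..255 (0 stays reserved for the
 * host). Once kvm_next_vmid wraps to 0, the generation counter is
 * bumped, every guest is forced out, and all TLBs are flushed before
 * any VMID is reused.
 */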
static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;
        int ret = 0;

        if (likely(vcpu->arch.has_run_once))
                return 0;

        vcpu->arch.has_run_once = true;

        if (likely(irqchip_in_kernel(kvm))) {
                /*
                 * Map the VGIC hardware resources before running a vcpu the
                 * first time on this VM.
                 */
                if (unlikely(!vgic_ready(kvm))) {
                        ret = kvm_vgic_map_resources(kvm);
                        if (ret)
                                return ret;
                }
        } else {
                /*
                 * Tell the rest of the code that there are userspace irqchip
                 * VMs in the wild.
                 */
                static_branch_inc(&userspace_irqchip_in_use);
        }

        ret = kvm_timer_enable(vcpu);
        if (ret)
                return ret;

        ret = kvm_arm_pmu_v3_enable(vcpu);

        return ret;
}
bool kvm_arch_intc_initialized(struct kvm *kvm)
{
        return vgic_initialized(kvm);
}
void kvm_arm_halt_guest(struct kvm *kvm)
{
        int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                vcpu->arch.pause = true;
        kvm_make_all_cpus_request(kvm, KVM_REQ_SLEEP);
}
void kvm_arm_resume_guest(struct kvm *kvm)
{
        int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                vcpu->arch.pause = false;
                swake_up_one(kvm_arch_vcpu_wq(vcpu));
        }
}
static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
{
        struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);

        swait_event_interruptible_exclusive(*wq, ((!vcpu->arch.power_off) &&
                                       (!vcpu->arch.pause)));

        if (vcpu->arch.power_off || vcpu->arch.pause) {
                /* Awaken to handle a signal, request we sleep again later. */
                kvm_make_request(KVM_REQ_SLEEP, vcpu);
        }

        /*
         * Make sure we will observe a potential reset request if we've
         * observed a change to the power state. Pairs with the smp_wmb() in
         * kvm_psci_vcpu_on().
         */
        smp_rmb();
}
static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.target >= 0;
}
static void check_vcpu_requests(struct kvm_vcpu *vcpu)
{
        if (kvm_request_pending(vcpu)) {
                if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
                        vcpu_req_sleep(vcpu);

                if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
                        kvm_reset_vcpu(vcpu);

                /*
                 * Clear IRQ_PENDING requests that were made to guarantee
                 * that a VCPU sees new virtual interrupts.
                 */
                kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);
        }
}
/**
 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
 * @vcpu:	The VCPU pointer
 * @run:	The kvm_run structure pointer used for userspace state exchange
 *
 * This function is called through the VCPU_RUN ioctl called from user space. It
 * will execute VM code in a loop until the time slice for the process is used
 * or some emulation is needed from user space in which case the function will
 * return with return value 0 and with the kvm_run structure filled in with the
 * required data for the requested emulation.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int ret;

        if (unlikely(!kvm_vcpu_initialized(vcpu)))
                return -ENOEXEC;

        ret = kvm_vcpu_first_run_init(vcpu);
        if (ret)
                return ret;

        if (run->exit_reason == KVM_EXIT_MMIO) {
                ret = kvm_handle_mmio_return(vcpu, vcpu->run);
                if (ret)
                        return ret;
        }

        if (run->immediate_exit)
                return -EINTR;

        vcpu_load(vcpu);

        kvm_sigset_activate(vcpu);

        ret = 1;
        run->exit_reason = KVM_EXIT_UNKNOWN;
        while (ret > 0) {
                /*
                 * Check conditions before entering the guest
                 */
                cond_resched();

                update_vmid(&vcpu->kvm->arch.vmid);

                check_vcpu_requests(vcpu);

                /*
                 * Preparing the interrupts to be injected also
                 * involves poking the GIC, which must be done in a
                 * non-preemptible context.
                 */
                preempt_disable();

                kvm_pmu_flush_hwstate(vcpu);

                local_irq_disable();

                kvm_vgic_flush_hwstate(vcpu);

                /*
                 * Exit if we have a signal pending so that we can deliver the
                 * signal to user space.
                 */
                if (signal_pending(current)) {
                        ret = -EINTR;
                        run->exit_reason = KVM_EXIT_INTR;
                }

                /*
                 * If we're using a userspace irqchip, then check if we need
                 * to tell a userspace irqchip about timer or PMU level
                 * changes and if so, exit to userspace (the actual level
                 * state gets updated in kvm_timer_update_run and
                 * kvm_pmu_update_run below).
                 */
                if (static_branch_unlikely(&userspace_irqchip_in_use)) {
                        if (kvm_timer_should_notify_user(vcpu) ||
                            kvm_pmu_should_notify_user(vcpu)) {
                                ret = -EINTR;
                                run->exit_reason = KVM_EXIT_INTR;
                        }
                }

                /*
                 * Ensure we set mode to IN_GUEST_MODE after we disable
                 * interrupts and before the final VCPU requests check.
                 * See the comment in kvm_vcpu_exiting_guest_mode() and
                 * Documentation/virtual/kvm/vcpu-requests.rst
                 */
                smp_store_mb(vcpu->mode, IN_GUEST_MODE);

                if (ret <= 0 || need_new_vmid_gen(&vcpu->kvm->arch.vmid) ||
                    kvm_request_pending(vcpu)) {
                        vcpu->mode = OUTSIDE_GUEST_MODE;
                        isb(); /* Ensure work in x_flush_hwstate is committed */
                        kvm_pmu_sync_hwstate(vcpu);
                        if (static_branch_unlikely(&userspace_irqchip_in_use))
                                kvm_timer_sync_hwstate(vcpu);
                        kvm_vgic_sync_hwstate(vcpu);
                        local_irq_enable();
                        preempt_enable();
                        continue;
                }

                kvm_arm_setup_debug(vcpu);

                /**************************************************************
                 * Enter the guest
                 */
                trace_kvm_entry(*vcpu_pc(vcpu));
                guest_enter_irqoff();

                if (has_vhe()) {
                        kvm_arm_vhe_guest_enter();
                        ret = kvm_vcpu_run_vhe(vcpu);
                        kvm_arm_vhe_guest_exit();
                } else {
                        ret = kvm_call_hyp_ret(__kvm_vcpu_run_nvhe, vcpu);
                }

                vcpu->mode = OUTSIDE_GUEST_MODE;
                vcpu->stat.exits++;
                /*
                 * Back from guest
                 *************************************************************/

                kvm_arm_clear_debug(vcpu);

                /*
                 * We must sync the PMU state before the vgic state so
                 * that the vgic can properly sample the updated state of the
                 * interrupt line.
                 */
                kvm_pmu_sync_hwstate(vcpu);

                /*
                 * Sync the vgic state before syncing the timer state because
                 * the timer code needs to know if the virtual timer
                 * interrupts are active.
                 */
                kvm_vgic_sync_hwstate(vcpu);

                /*
                 * Sync the timer hardware state before enabling interrupts as
                 * we don't want vtimer interrupts to race with syncing the
                 * timer virtual interrupt state.
                 */
                if (static_branch_unlikely(&userspace_irqchip_in_use))
                        kvm_timer_sync_hwstate(vcpu);

                kvm_arch_vcpu_ctxsync_fp(vcpu);

                /*
                 * We may have taken a host interrupt in HYP mode (ie
                 * while executing the guest). This interrupt is still
                 * pending, as we haven't serviced it yet!
                 *
                 * We're now back in SVC mode, with interrupts
                 * disabled.  Enabling the interrupts now will have
                 * the effect of taking the interrupt again, in SVC
                 * mode this time.
                 */
                local_irq_enable();

                /*
                 * We do local_irq_enable() before calling guest_exit() so
                 * that if a timer interrupt hits while running the guest we
                 * account that tick as being spent in the guest.  We enable
                 * preemption after calling guest_exit() so that if we get
                 * preempted we make sure ticks after that is not counted as
                 * guest time.
                 */
                guest_exit();
                trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));

                /* Exit types that need handling before we can be preempted */
                handle_exit_early(vcpu, run, ret);

                preempt_enable();

                ret = handle_exit(vcpu, run, ret);
        }

        /* Tell userspace about in-kernel device output levels */
        if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
                kvm_timer_update_run(vcpu);
                kvm_pmu_update_run(vcpu);
        }

        kvm_sigset_deactivate(vcpu);

        vcpu_put(vcpu);
        return ret;
}
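
/*
 * Illustrative userspace sketch (added commentary, not part of the
 * original source). A minimal caller of the ioctl above, assuming
 * vcpu_fd came from KVM_CREATE_VCPU and mmap_size from
 * KVM_GET_VCPU_MMAP_SIZE, might look like:
 *
 *      struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *                                 MAP_SHARED, vcpu_fd, 0);
 *      for (;;) {
 *              if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
 *                      break;
 *              if (run->exit_reason == KVM_EXIT_MMIO)
 *                      emulate_mmio(run);      (hypothetical helper)
 *      }
 *
 * A zero return with run->exit_reason filled in is the "emulation
 * needed from user space" path described in the comment above.
 */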
static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
{
        int bit_index;
        bool set;
        unsigned long *hcr;

        if (number == KVM_ARM_IRQ_CPU_IRQ)
                bit_index = __ffs(HCR_VI);
        else /* KVM_ARM_IRQ_CPU_FIQ */
                bit_index = __ffs(HCR_VF);

        hcr = vcpu_hcr(vcpu);
        if (level)
                set = test_and_set_bit(bit_index, hcr);
        else
                set = test_and_clear_bit(bit_index, hcr);

        /*
         * If we didn't change anything, no need to wake up or kick other CPUs
         */
        if (set == level)
                return 0;

        /*
         * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
         * trigger a world-switch round on the running physical CPU to set the
         * virtual IRQ/FIQ fields in the HCR appropriately.
         */
        kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
        kvm_vcpu_kick(vcpu);

        return 0;
}
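
/*
 * Added note (not part of the original source): __ffs() turns the mask
 * into a bit position (on arm64, HCR_VF is bit 6 and HCR_VI is bit 7),
 * so the test_and_{set,clear}_bit() calls above flip exactly one
 * virtual interrupt pending bit in the shadow HCR word.
 */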
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
                          bool line_status)
{
        u32 irq = irq_level->irq;
        unsigned int irq_type, vcpu_idx, irq_num;
        int nrcpus = atomic_read(&kvm->online_vcpus);
        struct kvm_vcpu *vcpu = NULL;
        bool level = irq_level->level;

        irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
        vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
        irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;

        trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);

        switch (irq_type) {
        case KVM_ARM_IRQ_TYPE_CPU:
                if (irqchip_in_kernel(kvm))
                        return -ENXIO;

                if (vcpu_idx >= nrcpus)
                        return -EINVAL;

                vcpu = kvm_get_vcpu(kvm, vcpu_idx);
                if (!vcpu)
                        return -EINVAL;

                if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
                        return -EINVAL;

                return vcpu_interrupt_line(vcpu, irq_num, level);
        case KVM_ARM_IRQ_TYPE_PPI:
                if (!irqchip_in_kernel(kvm))
                        return -ENXIO;

                if (vcpu_idx >= nrcpus)
                        return -EINVAL;

                vcpu = kvm_get_vcpu(kvm, vcpu_idx);
                if (!vcpu)
                        return -EINVAL;

                if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
                        return -EINVAL;

                return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level, NULL);
        case KVM_ARM_IRQ_TYPE_SPI:
                if (!irqchip_in_kernel(kvm))
                        return -ENXIO;

                if (irq_num < VGIC_NR_PRIVATE_IRQS)
                        return -EINVAL;

                return kvm_vgic_inject_irq(kvm, 0, irq_num, level, NULL);
        }

        return -EINVAL;
}
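
/*
 * Worked example (added commentary, not part of the original source):
 * the irq field packs type, vcpu index and interrupt number as
 * (type << 24) | (vcpu_idx << 16) | irq_num on arm/arm64. Injecting
 * PPI 27 (the virtual timer) on vcpu 2 would therefore use
 * irq = (KVM_ARM_IRQ_TYPE_PPI << KVM_ARM_IRQ_TYPE_SHIFT) |
 *       (2 << KVM_ARM_IRQ_VCPU_SHIFT) | 27,
 * which the shift/mask triplet above decodes back into the three
 * fields.
 */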
static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
                               const struct kvm_vcpu_init *init)
{
        unsigned int i;
        int phys_target = kvm_target_cpu();

        if (init->target != phys_target)
                return -EINVAL;

        /*
         * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
         * use the same target.
         */
        if (vcpu->arch.target != -1 && vcpu->arch.target != init->target)
                return -EINVAL;

        /* -ENOENT for unknown features, -EINVAL for invalid combinations. */
        for (i = 0; i < sizeof(init->features) * 8; i++) {
                bool set = (init->features[i / 32] & (1 << (i % 32)));

                if (set && i >= KVM_VCPU_MAX_FEATURES)
                        return -ENOENT;

                /*
                 * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
                 * use the same feature set.
                 */
                if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES &&
                    test_bit(i, vcpu->arch.features) != set)
                        return -EINVAL;

                if (set)
                        set_bit(i, vcpu->arch.features);
        }

        vcpu->arch.target = phys_target;

        /* Now we know what it is, we can reset it. */
        return kvm_reset_vcpu(vcpu);
}
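
/*
 * Added note (not part of the original source): init->features[] is an
 * array of 32-bit words, so feature bit i lives at word i / 32, bit
 * i % 32; e.g. KVM_ARM_VCPU_POWER_OFF (bit 0) is features[0] & 1. The
 * loop walks every bit so unknown features are rejected with -ENOENT
 * rather than silently ignored.
 */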
static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
                                         struct kvm_vcpu_init *init)
{
        int ret;

        ret = kvm_vcpu_set_target(vcpu, init);
        if (ret)
                return ret;

        /*
         * Ensure a rebooted VM will fault in RAM pages and detect if the
         * guest MMU is turned off and flush the caches as needed.
         */
        if (vcpu->arch.has_run_once)
                stage2_unmap_vm(vcpu->kvm);

        vcpu_reset_hcr(vcpu);

        /*
         * Handle the "start in power-off" case.
         */
        if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
                vcpu_power_off(vcpu);
        else
                vcpu->arch.power_off = false;

        return 0;
}
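
/*
 * Illustrative userspace sketch (added commentary, not part of the
 * original source). The usual pairing is:
 *
 *      struct kvm_vcpu_init init;
 *      ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init);
 *      ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
 *
 * Repeating KVM_ARM_VCPU_INIT later resets the vcpu for a reboot,
 * which is why stage2_unmap_vm() runs above when the vcpu has already
 * run once.
 */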
static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu,
                                 struct kvm_device_attr *attr)
{
        int ret = -ENXIO;

        switch (attr->group) {
        default:
                ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr);
                break;
        }

        return ret;
}

static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu,
                                 struct kvm_device_attr *attr)
{
        int ret = -ENXIO;

        switch (attr->group) {
        default:
                ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr);
                break;
        }

        return ret;
}

static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu,
                                 struct kvm_device_attr *attr)
{
        int ret = -ENXIO;

        switch (attr->group) {
        default:
                ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr);
                break;
        }

        return ret;
}
static int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
                                   struct kvm_vcpu_events *events)
{
        memset(events, 0, sizeof(*events));

        return __kvm_arm_vcpu_get_events(vcpu, events);
}

static int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
                                   struct kvm_vcpu_events *events)
{
        int i;

        /* check whether the reserved field is zero */
        for (i = 0; i < ARRAY_SIZE(events->reserved); i++)
                if (events->reserved[i])
                        return -EINVAL;

        /* check whether the pad field is zero */
        for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++)
                if (events->exception.pad[i])
                        return -EINVAL;

        return __kvm_arm_vcpu_set_events(vcpu, events);
}
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        struct kvm_device_attr attr;
        long r;

        switch (ioctl) {
        case KVM_ARM_VCPU_INIT: {
                struct kvm_vcpu_init init;

                r = -EFAULT;
                if (copy_from_user(&init, argp, sizeof(init)))
                        break;

                r = kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
                break;
        }
        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG: {
                struct kvm_one_reg reg;

                r = -ENOEXEC;
                if (unlikely(!kvm_vcpu_initialized(vcpu)))
                        break;

                r = -EFAULT;
                if (copy_from_user(&reg, argp, sizeof(reg)))
                        break;

                if (ioctl == KVM_SET_ONE_REG)
                        r = kvm_arm_set_reg(vcpu, &reg);
                else
                        r = kvm_arm_get_reg(vcpu, &reg);
                break;
        }
        case KVM_GET_REG_LIST: {
                struct kvm_reg_list __user *user_list = argp;
                struct kvm_reg_list reg_list;
                unsigned n;

                r = -ENOEXEC;
                if (unlikely(!kvm_vcpu_initialized(vcpu)))
                        break;

                r = -EFAULT;
                if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
                        break;
                n = reg_list.n;
                reg_list.n = kvm_arm_num_regs(vcpu);
                if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
                        break;
                r = -E2BIG;
                if (n < reg_list.n)
                        break;
                r = kvm_arm_copy_reg_indices(vcpu, user_list->reg);
                break;
        }
        case KVM_SET_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, argp, sizeof(attr)))
                        break;
                r = kvm_arm_vcpu_set_attr(vcpu, &attr);
                break;
        }
        case KVM_GET_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, argp, sizeof(attr)))
                        break;
                r = kvm_arm_vcpu_get_attr(vcpu, &attr);
                break;
        }
        case KVM_HAS_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, argp, sizeof(attr)))
                        break;
                r = kvm_arm_vcpu_has_attr(vcpu, &attr);
                break;
        }
        case KVM_GET_VCPU_EVENTS: {
                struct kvm_vcpu_events events;

                if (kvm_arm_vcpu_get_events(vcpu, &events))
                        return -EINVAL;

                if (copy_to_user(argp, &events, sizeof(events)))
                        return -EFAULT;

                return 0;
        }
        case KVM_SET_VCPU_EVENTS: {
                struct kvm_vcpu_events events;

                if (copy_from_user(&events, argp, sizeof(events)))
                        return -EFAULT;

                return kvm_arm_vcpu_set_events(vcpu, &events);
        }
        default:
                r = -EINVAL;
        }

        return r;
}
/**
 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
 * @kvm: kvm instance
 * @log: slot id and address to which we copy the log
 *
 * Steps 1-4 below provide general overview of dirty page logging. See
 * kvm_get_dirty_log_protect() function description for additional details.
 *
 * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
 * always flush the TLB (step 4) even if previous step failed and the dirty
 * bitmap may be corrupt. Regardless of previous outcome the KVM logging API
 * does not preclude user space subsequent dirty log read. Flushing TLB ensures
 * writes will be marked dirty for next log read.
 *
 *   1. Take a snapshot of the bit and clear it if needed.
 *   2. Write protect the corresponding page.
 *   3. Copy the snapshot to the userspace.
 *   4. Flush TLB's if needed.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
        bool flush = false;
        int r;

        mutex_lock(&kvm->slots_lock);

        r = kvm_get_dirty_log_protect(kvm, log, &flush);

        if (flush)
                kvm_flush_remote_tlbs(kvm);

        mutex_unlock(&kvm->slots_lock);
        return r;
}
int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, struct kvm_clear_dirty_log *log)
{
        bool flush = false;
        int r;

        mutex_lock(&kvm->slots_lock);

        r = kvm_clear_dirty_log_protect(kvm, log, &flush);

        if (flush)
                kvm_flush_remote_tlbs(kvm);

        mutex_unlock(&kvm->slots_lock);
        return r;
}
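
/*
 * Added commentary (not part of the original source): with manual
 * dirty-log protection enabled, a VMM would typically snapshot the
 * bitmap with KVM_GET_DIRTY_LOG and then issue KVM_CLEAR_DIRTY_LOG on
 * sub-ranges as it retransmits pages, deferring write protection (and
 * the remote TLB flush above) until each page is actually resent.
 */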
static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
                                        struct kvm_arm_device_addr *dev_addr)
{
        unsigned long dev_id, type;

        dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >>
                KVM_ARM_DEVICE_ID_SHIFT;
        type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >>
                KVM_ARM_DEVICE_TYPE_SHIFT;

        switch (dev_id) {
        case KVM_ARM_DEVICE_VGIC_V2:
                if (!vgic_present)
                        return -ENXIO;
                return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
        default:
                return -ENODEV;
        }
}
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;

        switch (ioctl) {
        case KVM_CREATE_IRQCHIP: {
                int ret;
                if (!vgic_present)
                        return -ENXIO;
                mutex_lock(&kvm->lock);
                ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
                mutex_unlock(&kvm->lock);
                return ret;
        }
        case KVM_ARM_SET_DEVICE_ADDR: {
                struct kvm_arm_device_addr dev_addr;

                if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
                        return -EFAULT;
                return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
        }
        case KVM_ARM_PREFERRED_TARGET: {
                int err;
                struct kvm_vcpu_init init;

                err = kvm_vcpu_preferred_target(&init);
                if (err)
                        return err;

                if (copy_to_user(argp, &init, sizeof(init)))
                        return -EFAULT;

                return 0;
        }
        default:
                return -EINVAL;
        }
}
static void cpu_init_hyp_mode(void *dummy)
{
        phys_addr_t pgd_ptr;
        unsigned long hyp_stack_ptr;
        unsigned long stack_page;
        unsigned long vector_ptr;

        /* Switch from the HYP stub to our own HYP init vector */
        __hyp_set_vectors(kvm_get_idmap_vector());

        pgd_ptr = kvm_mmu_get_httbr();
        stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
        hyp_stack_ptr = stack_page + PAGE_SIZE;
        vector_ptr = (unsigned long)kvm_get_hyp_vector();

        __cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
        __cpu_init_stage2();
}
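
/*
 * Added note (not part of the original source): the HYP stack pointer
 * is stack_page + PAGE_SIZE because the stack is full-descending; SP
 * starts just past the top of the allocated page and grows down into
 * it.
 */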
static void cpu_hyp_reset(void)
{
        if (!is_kernel_in_hyp_mode())
                __hyp_reset_vectors();
}
static void cpu_hyp_reinit(void)
{
        cpu_hyp_reset();

        if (is_kernel_in_hyp_mode())
                kvm_timer_init_vhe();
        else
                cpu_init_hyp_mode(NULL);

        kvm_arm_init_debug();

        if (vgic_present)
                kvm_vgic_init_cpu_hardware();
}
static void _kvm_arch_hardware_enable(void *discard)
{
        if (!__this_cpu_read(kvm_arm_hardware_enabled)) {
                cpu_hyp_reinit();
                __this_cpu_write(kvm_arm_hardware_enabled, 1);
        }
}

int kvm_arch_hardware_enable(void)
{
        _kvm_arch_hardware_enable(NULL);
        return 0;
}

static void _kvm_arch_hardware_disable(void *discard)
{
        if (__this_cpu_read(kvm_arm_hardware_enabled)) {
                cpu_hyp_reset();
                __this_cpu_write(kvm_arm_hardware_enabled, 0);
        }
}

void kvm_arch_hardware_disable(void)
{
        _kvm_arch_hardware_disable(NULL);
}
#ifdef CONFIG_CPU_PM
static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
                                    unsigned long cmd,
                                    void *v)
{
        /*
         * kvm_arm_hardware_enabled is left with its old value over
         * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should
         * re-enable hyp.
         */
        switch (cmd) {
        case CPU_PM_ENTER:
                if (__this_cpu_read(kvm_arm_hardware_enabled))
                        /*
                         * don't update kvm_arm_hardware_enabled here
                         * so that the hardware will be re-enabled
                         * when we resume. See below.
                         */
                        cpu_hyp_reset();

                return NOTIFY_OK;
        case CPU_PM_ENTER_FAILED:
        case CPU_PM_EXIT:
                if (__this_cpu_read(kvm_arm_hardware_enabled))
                        /* The hardware was enabled before suspend. */
                        cpu_hyp_reinit();

                return NOTIFY_OK;

        default:
                return NOTIFY_DONE;
        }
}

static struct notifier_block hyp_init_cpu_pm_nb = {
        .notifier_call = hyp_init_cpu_pm_notifier,
};

static void __init hyp_cpu_pm_init(void)
{
        cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
}
static void __init hyp_cpu_pm_exit(void)
{
        cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
}
#else
static inline void hyp_cpu_pm_init(void)
{
}
static inline void hyp_cpu_pm_exit(void)
{
}
#endif
static int init_common_resources(void)
{
        kvm_set_ipa_limit();

        return 0;
}
static int init_subsystems(void)
{
        int err = 0;

        /*
         * Enable hardware so that subsystem initialisation can access EL2.
         */
        on_each_cpu(_kvm_arch_hardware_enable, NULL, 1);

        /*
         * Register CPU lower-power notifier
         */
        hyp_cpu_pm_init();

        /*
         * Init HYP view of VGIC
         */
        err = kvm_vgic_hyp_init();
        switch (err) {
        case 0:
                vgic_present = true;
                break;
        case -ENODEV:
        case -ENXIO:
                vgic_present = false;
                err = 0;
                break;
        default:
                goto out;
        }

        /*
         * Init HYP architected timer support
         */
        err = kvm_timer_hyp_init(vgic_present);
        if (err)
                goto out;

        kvm_perf_init();
        kvm_coproc_table_init();

out:
        on_each_cpu(_kvm_arch_hardware_disable, NULL, 1);

        return err;
}
static void teardown_hyp_mode(void)
{
        int cpu;

        free_hyp_pgds();
        for_each_possible_cpu(cpu)
                free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
        hyp_cpu_pm_exit();
}
/**
 * Inits Hyp-mode on all online CPUs
 */
static int init_hyp_mode(void)
{
        int cpu;
        int err = 0;

        /*
         * Allocate Hyp PGD and setup Hyp identity mapping
         */
        err = kvm_mmu_init();
        if (err)
                goto out_err;

        /*
         * Allocate stack pages for Hypervisor-mode
         */
        for_each_possible_cpu(cpu) {
                unsigned long stack_page;

                stack_page = __get_free_page(GFP_KERNEL);
                if (!stack_page) {
                        err = -ENOMEM;
                        goto out_err;
                }

                per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
        }

        /*
         * Map the Hyp-code called directly from the host
         */
        err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start),
                                  kvm_ksym_ref(__hyp_text_end), PAGE_HYP_EXEC);
        if (err) {
                kvm_err("Cannot map world-switch code\n");
                goto out_err;
        }

        err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
                                  kvm_ksym_ref(__end_rodata), PAGE_HYP_RO);
        if (err) {
                kvm_err("Cannot map rodata section\n");
                goto out_err;
        }

        err = create_hyp_mappings(kvm_ksym_ref(__bss_start),
                                  kvm_ksym_ref(__bss_stop), PAGE_HYP_RO);
        if (err) {
                kvm_err("Cannot map bss section\n");
                goto out_err;
        }

        err = kvm_map_vectors();
        if (err) {
                kvm_err("Cannot map vectors\n");
                goto out_err;
        }

        /*
         * Map the Hyp stack pages
         */
        for_each_possible_cpu(cpu) {
                char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
                err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE,
                                          PAGE_HYP);

                if (err) {
                        kvm_err("Cannot map hyp stack\n");
                        goto out_err;
                }
        }

        for_each_possible_cpu(cpu) {
                kvm_cpu_context_t *cpu_ctxt;

                cpu_ctxt = per_cpu_ptr(&kvm_host_cpu_state, cpu);
                kvm_init_host_cpu_context(cpu_ctxt, cpu);
                err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1, PAGE_HYP);

                if (err) {
                        kvm_err("Cannot map host CPU state: %d\n", err);
                        goto out_err;
                }
        }

        err = hyp_map_aux_data();
        if (err)
                kvm_err("Cannot map host auxiliary data: %d\n", err);

        return 0;

out_err:
        teardown_hyp_mode();
        kvm_err("error initializing Hyp mode: %d\n", err);
        return err;
}
static void check_kvm_target_cpu(void *ret)
{
        *(int *)ret = kvm_target_cpu();
}
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
{
        struct kvm_vcpu *vcpu;
        int i;

        mpidr &= MPIDR_HWID_BITMASK;
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
                        return vcpu;
        }
        return NULL;
}
bool kvm_arch_has_irq_bypass(void)
{
        return true;
}
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
                                     struct irq_bypass_producer *prod)
{
        struct kvm_kernel_irqfd *irqfd =
                container_of(cons, struct kvm_kernel_irqfd, consumer);

        return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
                                          &irqfd->irq_entry);
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
                                      struct irq_bypass_producer *prod)
{
        struct kvm_kernel_irqfd *irqfd =
                container_of(cons, struct kvm_kernel_irqfd, consumer);

        kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq,
                                     &irqfd->irq_entry);
}

void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *cons)
{
        struct kvm_kernel_irqfd *irqfd =
                container_of(cons, struct kvm_kernel_irqfd, consumer);

        kvm_arm_halt_guest(irqfd->kvm);
}

void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *cons)
{
        struct kvm_kernel_irqfd *irqfd =
                container_of(cons, struct kvm_kernel_irqfd, consumer);

        kvm_arm_resume_guest(irqfd->kvm);
}
/**
 * Initialize Hyp-mode and memory mappings on all CPUs.
 */
int kvm_arch_init(void *opaque)
{
        int err;
        int ret, cpu;
        bool in_hyp_mode;

        if (!is_hyp_mode_available()) {
                kvm_info("HYP mode not available\n");
                return -ENODEV;
        }

        in_hyp_mode = is_kernel_in_hyp_mode();

        if (!in_hyp_mode && kvm_arch_requires_vhe()) {
                kvm_pr_unimpl("CPU unsupported in non-VHE mode, not initializing\n");
                return -ENODEV;
        }

        for_each_online_cpu(cpu) {
                smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1);
                if (ret < 0) {
                        kvm_err("Error, CPU %d not supported!\n", cpu);
                        return -ENODEV;
                }
        }

        err = init_common_resources();
        if (err)
                return err;

        if (!in_hyp_mode) {
                err = init_hyp_mode();
                if (err)
                        goto out_err;
        }

        err = init_subsystems();
        if (err)
                goto out_hyp;

        if (in_hyp_mode)
                kvm_info("VHE mode initialized successfully\n");
        else
                kvm_info("Hyp mode initialized successfully\n");

        return 0;

out_hyp:
        teardown_hyp_mode();
out_err:
        return err;
}
/* NOP: Compiling as a module not supported */
void kvm_arch_exit(void)
{
        kvm_perf_teardown();
}

static int arm_init(void)
{
        int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        return rc;
}

module_init(arm_init);