/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/kvm.h>
#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include <asm/unified.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/mman.h>
#include <asm/cputype.h>
#include <asm/tlbflush.h>
#include <asm/virt.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_emulate.h>

#ifdef REQUIRES_VIRT
__asm__(".arch_extension virt");
#endif

static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
static struct vfp_hard_struct __percpu *kvm_host_vfp_state;
static unsigned long hyp_default_vectors;

/* The VMID used in the VTTBR */
static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
static u8 kvm_next_vmid;
static DEFINE_SPINLOCK(kvm_vmid_lock);

int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

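/*
 * An IPI kick is only useful while the target vcpu is running guest
 * code; kvm_vcpu_exiting_guest_mode() atomically flags the vcpu as
 * exiting and returns the mode it was in, so only a vcpu observed in
 * IN_GUEST_MODE gets kicked.
 */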
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

/**
 * kvm_arch_init_vm - initializes a VM data structure
 * @kvm: pointer to the KVM struct
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int ret = 0;

	if (type)
		return -EINVAL;

	ret = kvm_alloc_stage2_pgd(kvm);
	if (ret)
		goto out_fail_alloc;

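	/*
	 * Map the whole struct kvm (the range from kvm to kvm + 1) into
	 * Hyp mode so the world-switch code can access it.
	 */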
	ret = create_hyp_mappings(kvm, kvm + 1);
	if (ret)
		goto out_free_stage2_pgd;

	/* Mark the initial VMID generation invalid */
	kvm->arch.vmid_gen = 0;

	return ret;
out_free_stage2_pgd:
	kvm_free_stage2_pgd(kvm);
out_fail_alloc:
	return ret;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	return 0;
}

/**
 * kvm_arch_destroy_vm - destroy the VM data structure
 * @kvm: pointer to the KVM struct
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	int i;

	kvm_free_stage2_pgd(kvm);

	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_arch_vcpu_free(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;
	switch (ext) {
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_ONE_REG:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	case KVM_CAP_NR_VCPUS:
		r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	default:
		r = 0;
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

int kvm_arch_set_memory_region(struct kvm *kvm,
			       struct kvm_userspace_memory_region *mem,
			       struct kvm_memory_slot old,
			       int user_alloc)
{
	return 0;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc)
{
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	int err;
	struct kvm_vcpu *vcpu;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	err = create_hyp_mappings(vcpu, vcpu + 1);
	if (err)
		goto vcpu_uninit;

	return vcpu;
vcpu_uninit:
	kvm_vcpu_uninit(vcpu);
free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	kvm_mmu_free_memory_caches(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return 0;
}

int __attribute_const__ kvm_target_cpu(void)
{
	unsigned long implementor = read_cpuid_implementor();
	unsigned long part_number = read_cpuid_part_number();

	if (implementor != ARM_CPU_IMP_ARM)
		return -EINVAL;

	switch (part_number) {
	case ARM_CPU_PART_CORTEX_A15:
		return KVM_ARM_TARGET_CORTEX_A15;
	default:
		return -EINVAL;
	}
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	/* Force users to call KVM_ARM_VCPU_INIT */
	vcpu->arch.target = -1;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->cpu = cpu;
	vcpu->arch.vfp_host = this_cpu_ptr(kvm_host_vfp_state);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return 0;
}

/* Just ensure a guest exit from a particular CPU */
static void exit_vm_noop(void *info)
{
}

void force_vm_exit(const cpumask_t *mask)
{
	smp_call_function_many(mask, exit_vm_noop, NULL, true);
}

/**
 * need_new_vmid_gen - check that the VMID is still valid
 * @kvm: The VM whose VMID to check
 *
 * Returns true if a new generation of VMIDs is in use.
 *
 * The hardware supports only 256 values, with the value zero reserved for
 * the host, so we check whether an assigned value belongs to a previous
 * generation, which requires us to assign a new value. If we're the first
 * to use a VMID for the new generation, we must flush the necessary caches
 * and TLBs on all CPUs.
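 *
 * For example, if kvm_vmid_gen has advanced to 3 while this VM's vmid_gen
 * is still 2, the VM's 8-bit VMID may already have been recycled for
 * another VM, so a fresh one must be assigned before the next guest entry.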
 */
static bool need_new_vmid_gen(struct kvm *kvm)
{
	return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
}

/**
 * update_vttbr - Update the VTTBR with a valid VMID before the guest runs
 * @kvm: The guest that we are about to run
 *
 * Called from kvm_arch_vcpu_ioctl_run before entering the guest to ensure the
 * VM has a valid VMID, otherwise assigns a new one and flushes corresponding
 * caches and TLBs.
 */
static void update_vttbr(struct kvm *kvm)
{
	phys_addr_t pgd_phys;
	u64 vmid;

	if (!need_new_vmid_gen(kvm))
		return;

	spin_lock(&kvm_vmid_lock);

	/*
	 * We need to re-check the vmid_gen here to ensure that if another vcpu
	 * already allocated a valid vmid for this vm, then this vcpu should
	 * use the same vmid.
	 */
	if (!need_new_vmid_gen(kvm)) {
		spin_unlock(&kvm_vmid_lock);
		return;
	}

	/* First user of a new VMID generation? */
	if (unlikely(kvm_next_vmid == 0)) {
		atomic64_inc(&kvm_vmid_gen);
		kvm_next_vmid = 1;

		/*
		 * On SMP we know no other CPUs can use this CPU's or each
		 * other's VMID after force_vm_exit returns since the
		 * kvm_vmid_lock blocks them from reentry to the guest.
		 */
		force_vm_exit(cpu_all_mask);
		/*
		 * Now broadcast TLB + ICACHE invalidation over the inner
		 * shareable domain to make sure all data structures are
		 * clean.
		 */
		kvm_call_hyp(__kvm_flush_vm_context);
	}

	kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
	kvm->arch.vmid = kvm_next_vmid;
	kvm_next_vmid++;

	/* update vttbr to be used with the new vmid */
	pgd_phys = virt_to_phys(kvm->arch.pgd);
	vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK;
	kvm->arch.vttbr = pgd_phys & VTTBR_BADDR_MASK;
	kvm->arch.vttbr |= vmid;
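	/*
	 * kvm->arch.vttbr now holds the stage-2 pgd base address in its
	 * low bits and the 8-bit VMID in the field selected by
	 * VTTBR_VMID_SHIFT/VTTBR_VMID_MASK (bits [55:48] on LPAE), which
	 * the hardware uses to tag TLB entries belonging to this VM.
	 */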

	spin_unlock(&kvm_vmid_lock);
}

/*
 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
 * proper exit to QEMU.
 */
static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
		       int exception_index)
{
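	/*
	 * No exit handling is implemented at this stage; report every
	 * exit to userspace as an internal error.
	 */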
	run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	return 0;
}

static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->arch.has_run_once))
		return 0;

	vcpu->arch.has_run_once = true;
	return 0;
}

/**
 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
 * @vcpu: The VCPU pointer
 * @run: The kvm_run structure pointer used for userspace state exchange
 *
 * This function is called through the VCPU_RUN ioctl called from user space.
 * It will execute VM code in a loop until the time slice for the process is
 * used up or some emulation is needed from user space, in which case the
 * function will return with return value 0 and with the kvm_run structure
 * filled in with the required data for the requested emulation.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int ret;
	sigset_t sigsaved;

	/* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */
	if (unlikely(vcpu->arch.target < 0))
		return -ENOEXEC;

	ret = kvm_vcpu_first_run_init(vcpu);
	if (ret)
		return ret;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	ret = 1;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	while (ret > 0) {
		/*
		 * Check conditions before entering the guest
		 */
		cond_resched();

		update_vttbr(vcpu->kvm);

		local_irq_disable();

		/*
		 * Re-check atomic conditions with interrupts disabled, so
		 * that nothing can change between these checks and the
		 * actual guest entry.
		 */
		if (signal_pending(current)) {
			ret = -EINTR;
			run->exit_reason = KVM_EXIT_INTR;
		}

		if (ret <= 0 || need_new_vmid_gen(vcpu->kvm)) {
			local_irq_enable();
			continue;
		}

		/**************************************************************
		 * Enter the guest
		 */
		trace_kvm_entry(*vcpu_pc(vcpu));
		kvm_guest_enter();
		vcpu->mode = IN_GUEST_MODE;

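		/*
		 * __kvm_vcpu_run performs the actual world switch; its
		 * return value is the exception index that caused the
		 * exit, which is handed to handle_exit() below.
		 */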
		ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);

		vcpu->mode = OUTSIDE_GUEST_MODE;
		kvm_guest_exit();
		trace_kvm_exit(*vcpu_pc(vcpu));
		/*
		 * We may have taken a host interrupt in HYP mode (ie
		 * while executing the guest). This interrupt is still
		 * pending, as we haven't serviced it yet!
		 *
		 * We're now back in SVC mode, with interrupts
		 * disabled. Enabling the interrupts now will have
		 * the effect of taking the interrupt again, in SVC
		 * mode this time.
		 */
		local_irq_enable();

		/*
		 * Back from guest
		 *************************************************************/

		ret = handle_exit(vcpu, run, ret);
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
	return ret;
}

static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
{
	int bit_index;
	bool set;
	unsigned long *ptr;

	if (number == KVM_ARM_IRQ_CPU_IRQ)
		bit_index = __ffs(HCR_VI);
	else /* KVM_ARM_IRQ_CPU_FIQ */
		bit_index = __ffs(HCR_VF);
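	/*
	 * irq_lines mirrors the layout of the virtual interrupt bits in
	 * the HCR, so __ffs() turns HCR_VI/HCR_VF into the matching bit
	 * index and the world-switch code can merge irq_lines directly
	 * into the HCR on guest entry.
	 */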

	ptr = (unsigned long *)&vcpu->arch.irq_lines;
	if (level)
		set = test_and_set_bit(bit_index, ptr);
	else
		set = test_and_clear_bit(bit_index, ptr);

	/*
	 * If we didn't change anything, no need to wake up or kick other CPUs
	 */
	if (set == level)
		return 0;

	/*
	 * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
	 * trigger a world-switch round on the running physical CPU to set the
	 * virtual IRQ/FIQ fields in the HCR appropriately.
	 */
	kvm_vcpu_kick(vcpu);

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level)
{
	u32 irq = irq_level->irq;
	unsigned int irq_type, vcpu_idx, irq_num;
	int nrcpus = atomic_read(&kvm->online_vcpus);
	struct kvm_vcpu *vcpu = NULL;
	bool level = irq_level->level;

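	/*
	 * The 32-bit irq value packs three fields: the irq type in the
	 * top byte, the target vcpu index in the byte below it and the
	 * irq number in the low 16 bits (see the KVM_ARM_IRQ_* shifts
	 * and masks).
	 */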
	irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
	vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
	irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;

	trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);

	if (irq_type != KVM_ARM_IRQ_TYPE_CPU)
		return -EINVAL;

	if (vcpu_idx >= nrcpus)
		return -EINVAL;

	vcpu = kvm_get_vcpu(kvm, vcpu_idx);
	if (!vcpu)
		return -EINVAL;

	if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
		return -EINVAL;

	return vcpu_interrupt_line(vcpu, irq_num, level);
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_ARM_VCPU_INIT: {
		struct kvm_vcpu_init init;

		if (copy_from_user(&init, argp, sizeof(init)))
			return -EFAULT;

		return kvm_vcpu_set_target(vcpu, &init);
	}
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			return -EFAULT;
		if (ioctl == KVM_SET_ONE_REG)
			return kvm_arm_set_reg(vcpu, &reg);
		else
			return kvm_arm_get_reg(vcpu, &reg);
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;
		unsigned n;

		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			return -EFAULT;
		n = reg_list.n;
		reg_list.n = kvm_arm_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			return -EFAULT;
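		/*
		 * Userspace told us how many register entries its buffer
		 * can hold; we reply with the real count, and -E2BIG tells
		 * it to retry with a larger buffer.
		 */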
		if (n < reg_list.n)
			return -E2BIG;
		return kvm_arm_copy_reg_indices(vcpu, user_list->reg);
	}
	default:
		return -EINVAL;
	}
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -EINVAL;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

static void cpu_init_hyp_mode(void *vector)
{
	unsigned long long pgd_ptr;
	unsigned long pgd_low, pgd_high;
	unsigned long hyp_stack_ptr;
	unsigned long stack_page;
	unsigned long vector_ptr;

	/* Switch from the HYP stub to our own HYP init vector */
	__hyp_set_vectors((unsigned long)vector);

	pgd_ptr = (unsigned long long)kvm_mmu_get_httbr();
	pgd_low = (pgd_ptr & ((1ULL << 32) - 1));
	pgd_high = (pgd_ptr >> 32ULL);
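	/*
	 * The HTTBR is a 64-bit (LPAE) physical address, but the hyp
	 * init code takes plain 32-bit register arguments, so it is
	 * passed as two 32-bit halves.
	 */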
	stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);
	hyp_stack_ptr = stack_page + PAGE_SIZE;
	vector_ptr = (unsigned long)__kvm_hyp_vector;

	/*
	 * Call initialization code, and switch to the full blown
	 * HYP code. The init code doesn't need to preserve these registers
	 * as r0-r3 and r12 are caller-save according to the AAPCS.
	 * Note that we slightly misuse the prototype by casting pgd_low to
	 * a void *.
	 */
	kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr);
}

/**
 * Inits Hyp-mode on all online CPUs
 */
static int init_hyp_mode(void)
{
	phys_addr_t init_phys_addr;
	int cpu;
	int err = 0;

	/*
	 * Allocate Hyp PGD and setup Hyp identity mapping
	 */
	err = kvm_mmu_init();
	if (err)
		goto out_err;

	/*
	 * It is probably enough to obtain the default on one
	 * CPU. It's unlikely to be different on the others.
	 */
	hyp_default_vectors = __hyp_get_vectors();

	/*
	 * Allocate stack pages for Hypervisor-mode
	 */
	for_each_possible_cpu(cpu) {
		unsigned long stack_page;

		stack_page = __get_free_page(GFP_KERNEL);
		if (!stack_page) {
			err = -ENOMEM;
			goto out_free_stack_pages;
		}

		per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
	}

	/*
	 * Execute the init code on each CPU.
	 *
	 * Note: The stack is not mapped yet, so don't do anything else than
	 * initializing the hypervisor mode on each CPU using a local stack
	 * space for temporary storage.
	 */
	init_phys_addr = virt_to_phys(__kvm_hyp_init);
	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu, cpu_init_hyp_mode,
					 (void *)(long)init_phys_addr, 1);
	}

	/*
	 * Unmap the identity mapping
	 */
	kvm_clear_hyp_idmap();

	/*
	 * Map the Hyp-code called directly from the host
	 */
	err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
	if (err) {
		kvm_err("Cannot map world-switch code\n");
		goto out_free_mappings;
	}

	/*
	 * Map the Hyp stack pages
	 */
	for_each_possible_cpu(cpu) {
		char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
		err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE);

		if (err) {
			kvm_err("Cannot map hyp stack\n");
			goto out_free_mappings;
		}
	}

	/*
	 * Map the host VFP structures
	 */
	kvm_host_vfp_state = alloc_percpu(struct vfp_hard_struct);
	if (!kvm_host_vfp_state) {
		err = -ENOMEM;
		kvm_err("Cannot allocate host VFP state\n");
		goto out_free_mappings;
	}

	for_each_possible_cpu(cpu) {
		struct vfp_hard_struct *vfp;

		vfp = per_cpu_ptr(kvm_host_vfp_state, cpu);
		err = create_hyp_mappings(vfp, vfp + 1);

		if (err) {
			kvm_err("Cannot map host VFP state: %d\n", err);
			goto out_free_vfp;
		}
	}

	kvm_info("Hyp mode initialized successfully\n");
	return 0;
out_free_vfp:
	free_percpu(kvm_host_vfp_state);
out_free_mappings:
	free_hyp_pmds();
out_free_stack_pages:
	for_each_possible_cpu(cpu)
		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
out_err:
	kvm_err("error initializing Hyp mode: %d\n", err);
	return err;
}

/**
 * Initialize Hyp-mode and memory mappings on all CPUs.
 */
int kvm_arch_init(void *opaque)
{
	int err;

	if (!is_hyp_mode_available()) {
		kvm_err("HYP mode not available\n");
		return -ENODEV;
	}

	if (kvm_target_cpu() < 0) {
		kvm_err("Target CPU not supported!\n");
		return -ENODEV;
	}

	err = init_hyp_mode();
	if (err)
		goto out_err;

	return 0;
out_err:
	return err;
}

/* NOP: Compiling as a module not supported */
void kvm_arch_exit(void)
{
}

static int arm_init(void)
{
	int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	return rc;
}

module_init(arm_init);