arch/x86/hyperv/mmu.c

#define pr_fmt(fmt) "Hyper-V: " fmt

#include <linux/hyperv.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <asm/fpu/api.h>
#include <asm/mshyperv.h>
#include <asm/msr.h>
#include <asm/tlbflush.h>

/* HvFlushVirtualAddressSpace, HvFlushVirtualAddressList hypercalls */
struct hv_flush_pcpu {
        u64 address_space;
        u64 flags;
        u64 processor_mask;
        u64 gva_list[];
};

/* Each gva in gva_list encodes up to 4096 pages to flush */
#define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE)

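/*
 * Encoding sketch, assuming 4K pages: an entry covering 16 pages starting
 * at GVA 0x7f0000000000 is 0x7f0000000000 | (16 - 1), i.e. the page-aligned
 * base in the upper bits and "15 additional pages" in the low 12 bits. A
 * span of HV_TLB_FLUSH_UNIT bytes or more saturates the low bits to 0xfff.
 */
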
static struct hv_flush_pcpu __percpu *pcpu_flush;

/*
 * Fills in gva_list starting from offset. Returns the number of items added.
 */
static inline int fill_gva_list(u64 gva_list[], int offset,
                                unsigned long start, unsigned long end)
{
        int gva_n = offset;
        unsigned long cur = start, diff;

        do {
                diff = end > cur ? end - cur : 0;

                gva_list[gva_n] = cur & PAGE_MASK;
                /*
                 * Lower 12 bits encode the number of additional
                 * pages to flush (in addition to the 'cur' page).
                 */
                if (diff >= HV_TLB_FLUSH_UNIT)
                        gva_list[gva_n] |= ~PAGE_MASK;
                else if (diff)
                        gva_list[gva_n] |= (diff - 1) >> PAGE_SHIFT;

                cur += HV_TLB_FLUSH_UNIT;
                gva_n++;

        } while (cur < end);

        return gva_n - offset;
}
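
/*
 * Worked example for fill_gva_list, assuming 4K pages:
 * fill_gva_list(list, 0, 0x10000000, 0x10003000) emits the single entry
 * 0x10000000 | 2 (the page at 0x10000000 plus two additional pages) and
 * returns 1; a range wider than HV_TLB_FLUSH_UNIT yields one saturated
 * entry per unit.
 */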

static void hyperv_flush_tlb_others(const struct cpumask *cpus,
                                    const struct flush_tlb_info *info)
{
        int cpu, vcpu, gva_n, max_gvas;
        struct hv_flush_pcpu *flush;
        u64 status = U64_MAX;
        unsigned long flags;

        if (!pcpu_flush || !hv_hypercall_pg)
                goto do_native;

        if (cpumask_empty(cpus))
                return;

        local_irq_save(flags);

        flush = this_cpu_ptr(pcpu_flush);

        if (info->mm) {
                flush->address_space = virt_to_phys(info->mm->pgd);
                flush->flags = 0;
        } else {
                flush->address_space = 0;
                flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
        }

        flush->processor_mask = 0;
        if (cpumask_equal(cpus, cpu_present_mask)) {
                flush->flags |= HV_FLUSH_ALL_PROCESSORS;
        } else {
                for_each_cpu(cpu, cpus) {
                        vcpu = hv_cpu_number_to_vp_number(cpu);
                        /*
                         * The single u64 processor_mask only covers VP
                         * numbers 0-63; fall back to the native path for
                         * anything wider. Restore interrupts first, as
                         * they were disabled above.
                         */
                        if (vcpu >= 64) {
                                local_irq_restore(flags);
                                goto do_native;
                        }

                        __set_bit(vcpu, (unsigned long *)
                                  &flush->processor_mask);
                }
        }

        /*
         * We can flush no more than max_gvas with one hypercall. Flush the
         * whole address space if we were asked to do more.
         */
        max_gvas = (PAGE_SIZE - sizeof(*flush)) / sizeof(flush->gva_list[0]);
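        /*
         * Arithmetic sketch, assuming 4K pages: sizeof(*flush) is 24 bytes
         * (three u64s; the flexible gva_list adds nothing), so max_gvas =
         * (4096 - 24) / 8 = 509 list entries per hypercall.
         */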

        if (info->end == TLB_FLUSH_ALL) {
                flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
                status = hv_do_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE,
                                         flush, NULL);
        } else if (info->end &&
                   ((info->end - info->start) / HV_TLB_FLUSH_UNIT) > max_gvas) {
                status = hv_do_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE,
                                         flush, NULL);
        } else {
                gva_n = fill_gva_list(flush->gva_list, 0,
                                      info->start, info->end);
                status = hv_do_rep_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST,
                                             gva_n, 0, flush, NULL);
        }

        local_irq_restore(flags);

        /* A zero result code (HV_STATUS_SUCCESS) means the flush was done. */
        if (!(status & HV_HYPERCALL_RESULT_MASK))
                return;
do_native:
        native_flush_tlb_others(cpus, info);
}

void hyperv_setup_mmu_ops(void)
{
        if (ms_hyperv.hints & HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED) {
                pr_info("Using hypercall for remote TLB flush\n");
                pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others;
                setup_clear_cpu_cap(X86_FEATURE_PCID);
        }
}

/*
 * One page-aligned page per CPU: hypercall input must not cross a page
 * boundary, and a single page also bounds the gva_list (see max_gvas).
 */
void hyper_alloc_mmu(void)
{
        if (ms_hyperv.hints & HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED)
                pcpu_flush = __alloc_percpu(PAGE_SIZE, PAGE_SIZE);
}
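
/*
 * Call-site sketch, assuming the standard wiring in this kernel tree:
 * hyperv_init() in arch/x86/hyperv/hv_init.c calls hyper_alloc_mmu() once
 * the hypercall page is set up, and ms_hyperv_init_platform() in
 * arch/x86/kernel/cpu/mshyperv.c calls hyperv_setup_mmu_ops() during early
 * platform setup, before the pv flush path can be exercised.
 */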