// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/cpufeature.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>

static u32 asid_bits;
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);

static atomic64_t asid_generation;
static unsigned long *asid_map;

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

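/*
 * Layout of an mm's context.id: the allocator packs a rollover
 * generation count above the hardware ASID bits. For example, with
 * asid_bits == 16:
 *
 *   ASID_MASK          == ~0xffff  (generation lives in bits 63:16)
 *   ASID_FIRST_VERSION == 0x10000  (1UL << 16)
 *   NUM_USER_ASIDS     == 65536    (one asid_map bit per ASID)
 *
 * asid2idx()/idx2asid() strip/apply the generation, so the same low
 * bits index asid_map and are programmed into the TTBR as the ASID.
 */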
#define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION	(1UL << asid_bits)

#define NUM_USER_ASIDS		ASID_FIRST_VERSION
#define asid2idx(asid)		((asid) & ~ASID_MASK)
#define idx2asid(idx)		asid2idx(idx)

/* Get the ASIDBits supported by the current CPU */
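/*
 * ID_AA64MMFR0_EL1.ASIDBits encodes the supported ASID size: 0b0000
 * means 8-bit ASIDs and 0b0010 means 16-bit ASIDs (hence the 0 and 2
 * cases below); any other value is unallocated and treated as 8-bit.
 */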
static u32 get_cpu_asid_bits(void)
{
	u32 asid;
	int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1),
						ID_AA64MMFR0_ASID_SHIFT);

	switch (fld) {
	default:
		pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
					smp_processor_id(), fld);
		fallthrough;
	case 0:
		asid = 8;
		break;
	case 2:
		asid = 16;
	}

	return asid;
}

/* Check if the current CPU's ASIDBits is compatible with asid_bits */
void verify_cpu_asid_bits(void)
{
	u32 asid = get_cpu_asid_bits();

	if (asid < asid_bits) {
		/*
		 * We cannot decrease the ASID size at runtime, so panic if we support
		 * fewer ASID bits than the boot CPU.
		 */
		pr_crit("CPU%d: smaller ASID size (%u) than boot CPU (%u)\n",
				smp_processor_id(), asid, asid_bits);
		cpu_panic_kernel();
	}
}

static void set_kpti_asid_bits(void)
{
	unsigned int len = BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(unsigned long);
	/*
	 * In case of KPTI, kernel/user ASIDs are allocated in pairs and
	 * the bottom bit distinguishes the two: if it is set, the ASID
	 * maps only userspace. Mark every odd bit in the map as in use,
	 * so the allocator only ever hands out the even (kernel) half
	 * of each pair.
	 */
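	/*
	 * Worked example of the pattern below: 0xaa is 0b10101010, so
	 * bitmap indices 1, 3, 5, ... start out set. find_next_zero_bit()
	 * can then only return even indices, and each allocated ASID
	 * implicitly owns its odd userspace partner, asid | 1.
	 */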
	memset(asid_map, 0xaa, len);
}

static void set_reserved_asid_bits(void)
{
	if (arm64_kernel_unmapped_at_el0())
		set_kpti_asid_bits();
	else
		bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
}

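/*
 * The generation check works because everything at and above asid_bits
 * is exactly the generation: the XOR cancels those bits iff the two
 * values carry the same generation, so the shifted result is zero
 * precisely when @asid belongs to the current generation. E.g. with
 * asid_bits == 16, asid 0x2002a matches generation 0x20000
 * ((0x2002a ^ 0x20000) >> 16 == 0) but not generation 0x30000.
 */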
#define asid_gen_match(asid) \
	(!(((asid) ^ atomic64_read(&asid_generation)) >> asid_bits))

static void flush_context(void)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	set_reserved_asid_bits();

	for_each_possible_cpu(i) {
		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = per_cpu(reserved_asids, i);
		__set_bit(asid2idx(asid), asid_map);
		per_cpu(reserved_asids, i) = asid;
	}

	/*
	 * Queue a TLB invalidation for each CPU to perform on next
	 * context-switch.
	 */
	cpumask_setall(&tlb_flush_pending);
}

static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_asids, cpu) == asid) {
			hit = true;
			per_cpu(reserved_asids, cpu) = newasid;
		}
	}

	return hit;
}

static u64 new_context(struct mm_struct *mm)
{
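	/*
	 * cur_idx remembers where the previous search left off, so
	 * successive allocations walk forwards through the bitmap
	 * instead of rescanning the low indices on every call.
	 */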
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK);

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(asid, newasid))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		if (!__test_and_set_bit(asid2idx(asid), asid_map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes. We
	 * always count from ASID #2 (index 1), as we use ASID #0 when setting
	 * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
	 * pairs.
	 */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid != NUM_USER_ASIDS)
		goto set_asid;

	/* We're out of ASIDs, so increment the global generation count */
	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION,
						 &asid_generation);
	flush_context();

	/* We have more ASIDs than CPUs, so this will always succeed */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);

set_asid:
	__set_bit(asid, asid_map);
	cur_idx = asid;
	return idx2asid(asid) | generation;
}

void check_and_switch_context(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int cpu;
	u64 asid, old_active_asid;

	if (system_supports_cnp())
		cpu_set_reserved_ttbr0();

	asid = atomic64_read(&mm->context.id);

	/*
	 * The memory ordering here is subtle.
	 * If our active_asids is non-zero and the ASID matches the current
	 * generation, then we update the active_asids entry with a relaxed
	 * cmpxchg. Racing with a concurrent rollover means that either:
	 *
	 * - We get a zero back from the cmpxchg and end up waiting on the
	 *   lock. Taking the lock synchronises with the rollover and so
	 *   we are forced to see the updated generation.
	 *
	 * - We get a valid ASID back from the cmpxchg, which means the
	 *   relaxed xchg in flush_context will treat us as reserved
	 *   because atomic RmWs are totally ordered for a given location.
	 */
	old_active_asid = atomic64_read(this_cpu_ptr(&active_asids));
	if (old_active_asid && asid_gen_match(asid) &&
	    atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_asids),
				     old_active_asid, asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if (!asid_gen_match(asid)) {
		asid = new_context(mm);
		atomic64_set(&mm->context.id, asid);
	}

	cpu = smp_processor_id();
	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
		local_flush_tlb_all();

	atomic64_set(this_cpu_ptr(&active_asids), asid);
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:

	arm64_apply_bp_hardening();

	/*
	 * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
	 * emulating PAN.
	 */
	if (!system_uses_ttbr0_pan())
		cpu_switch_mm(mm->pgd, mm);
}

/* Errata workaround post TTBRx_EL1 update. */
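/*
 * On parts affected by Cavium erratum 27456 (ThunderX T88), the icache
 * can hold corrupt lines tagged with a non-current ASID, so it is
 * invalidated (ic iallu) after every TTBR/ASID change; ALTERNATIVE()
 * patches the sequence to NOPs on unaffected CPUs.
 */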
asmlinkage void post_ttbr_update_workaround(void)
{
	if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456))
		return;

	asm(ALTERNATIVE("nop; nop; nop",
			"ic iallu; dsb nsh; isb",
			ARM64_WORKAROUND_CAVIUM_27456));
}

void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm)
{
	unsigned long ttbr1 = read_sysreg(ttbr1_el1);
	unsigned long asid = ASID(mm);
	unsigned long ttbr0 = phys_to_ttbr(pgd_phys);

	/* Skip CNP for the reserved ASID */
	if (system_supports_cnp() && asid)
		ttbr0 |= TTBR_CNP_BIT;

	/* SW PAN needs a copy of the ASID in TTBR0 for entry */
	if (IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN))
		ttbr0 |= FIELD_PREP(TTBR_ASID_MASK, asid);

	/* Set ASID in TTBR1 since TCR.A1 is set */
	ttbr1 &= ~TTBR_ASID_MASK;
	ttbr1 |= FIELD_PREP(TTBR_ASID_MASK, asid);

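	/*
	 * The caller (cpu_switch_mm()) is expected to have parked TTBR0
	 * on the reserved tables already, so it is safe to switch the
	 * ASID in TTBR1 first and only then point TTBR0 at the new pgd;
	 * each write is synchronised by an ISB before the next step.
	 */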
	write_sysreg(ttbr1, ttbr1_el1);
	isb();
	write_sysreg(ttbr0, ttbr0_el1);
	isb();
	post_ttbr_update_workaround();
}

static int asids_update_limit(void)
{
	unsigned long num_available_asids = NUM_USER_ASIDS;

	if (arm64_kernel_unmapped_at_el0())
		num_available_asids /= 2;
	/*
	 * Expect allocation after rollover to fail if we don't have at least
	 * one more ASID than CPUs. ASID #0 is reserved for init_mm.
	 */
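	/* E.g. 16-bit ASIDs: 65536 entries, or 32768 usable pairs with KPTI. */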
	WARN_ON(num_available_asids - 1 <= num_possible_cpus());
	pr_info("ASID allocator initialised with %lu entries\n",
		num_available_asids);
	return 0;
}
arch_initcall(asids_update_limit);

static int asids_init(void)
{
	asid_bits = get_cpu_asid_bits();
	atomic64_set(&asid_generation, ASID_FIRST_VERSION);
	asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map),
			   GFP_KERNEL);
	if (!asid_map)
		panic("Failed to allocate bitmap for %lu ASIDs\n",
		      NUM_USER_ASIDS);

	/*
	 * We cannot call set_reserved_asid_bits() here because CPU
	 * caps are not finalized yet, so it is safer to assume KPTI
	 * and reserve kernel ASIDs from the beginning.
	 */
	if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
		set_kpti_asid_bits();
	return 0;
}
early_initcall(asids_init);