/*
 * Based on arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/cpufeature.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>

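/*
 * Each mm_struct carries a 64-bit context.id: the low asid_bits hold the
 * hardware ASID, while the upper bits hold an allocation "generation" that
 * is bumped on every rollover of the ASID space. The per-CPU counters below
 * remember which ASID was live on each CPU across a rollover so that a
 * still-running task keeps a valid translation.
 */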
static u32 asid_bits;
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);

static atomic64_t asid_generation;
static unsigned long *asid_map;

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

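/*
 * ASID_MASK covers the generation bits above the low asid_bits hardware
 * ASID, so "asid & ~ASID_MASK" extracts the hardware ASID. ASID_FIRST_VERSION
 * (also used as NUM_USER_ASIDS) is both the generation increment and the
 * size of the ASID bitmap.
 */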
#define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION	(1UL << asid_bits)
#define NUM_USER_ASIDS		ASID_FIRST_VERSION

/* Get the ASIDBits supported by the current CPU */
static u32 get_cpu_asid_bits(void)
{
	u32 asid;
	int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1),
						ID_AA64MMFR0_ASID_SHIFT);

	switch (fld) {
	default:
		pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
			smp_processor_id(), fld);
		/* Fallthrough */
	case 0:
		asid = 8;
		break;
	case 2:
		asid = 16;
	}

	return asid;
}

/* Check if the current cpu's ASIDBits is compatible with asid_bits */
void verify_cpu_asid_bits(void)
{
	u32 asid = get_cpu_asid_bits();

	if (asid < asid_bits) {
		/*
		 * We cannot decrease the ASID size at runtime, so panic if we support
		 * fewer ASID bits than the boot CPU.
		 */
		pr_crit("CPU%d: smaller ASID size(%u) than boot CPU (%u)\n",
				smp_processor_id(), asid, asid_bits);
		cpu_panic_kernel();
	}
}

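/*
 * Called on rollover with cpu_asid_lock held: reset the ASID bitmap,
 * preserve the ASID that was live on each CPU (in reserved_asids) and mark
 * every CPU as needing a local TLB flush.
 */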
static void flush_context(unsigned int cpu)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);

	/*
	 * Ensure the generation bump is observed before we xchg the
	 * active_asids.
	 */
	smp_wmb();

	for_each_possible_cpu(i) {
		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = per_cpu(reserved_asids, i);
		__set_bit(asid & ~ASID_MASK, asid_map);
		per_cpu(reserved_asids, i) = asid;
	}

	/* Queue a TLB invalidate and flush the I-cache if necessary. */
	cpumask_setall(&tlb_flush_pending);

	if (icache_is_aivivt())
		__flush_icache_all();
}

static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_asids, cpu) == asid) {
			hit = true;
			per_cpu(reserved_asids, cpu) = newasid;
		}
	}

	return hit;
}

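/*
 * Slow path, called with cpu_asid_lock held: reuse the mm's old ASID under
 * the current generation if we can, otherwise allocate a fresh one from the
 * bitmap, bumping the generation and triggering a rollover if the bitmap is
 * full.
 */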
static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK);

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(asid, newasid))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		asid &= ~ASID_MASK;
		if (!__test_and_set_bit(asid, asid_map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes.
	 * We always count from ASID #1, as we use ASID #0 when setting a
	 * reserved TTBR0 for the init_mm.
	 */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid != NUM_USER_ASIDS)
		goto set_asid;

	/* We're out of ASIDs, so increment the global generation count */
	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION,
						 &asid_generation);
	flush_context(cpu);

	/* We have more ASIDs than CPUs, so this will always succeed */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);

set_asid:
	__set_bit(asid, asid_map);
	cur_idx = asid;
	return asid | generation;
}

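/*
 * Fast path: if the mm's ASID is from the current generation and this CPU
 * has not been through a rollover since it last updated active_asids, switch
 * to it directly. Otherwise fall back to the locked slow path, which may
 * allocate a new ASID and performs any deferred local TLB flush before
 * switching.
 */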
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long flags;
	u64 asid;

	asid = atomic64_read(&mm->context.id);

	/*
	 * The memory ordering here is subtle. We rely on the control
	 * dependency between the generation read and the update of
	 * active_asids to ensure that we are synchronised with a
	 * parallel rollover (i.e. this pairs with the smp_wmb() in
	 * flush_context).
	 */
	if (!((asid ^ atomic64_read(&asid_generation)) >> asid_bits)
	    && atomic64_xchg_relaxed(&per_cpu(active_asids, cpu), asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> asid_bits) {
		asid = new_context(mm, cpu);
		atomic64_set(&mm->context.id, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
		local_flush_tlb_all();

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
	/*
	 * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
	 * emulating PAN.
	 */
	if (!system_uses_ttbr0_pan())
		cpu_switch_mm(mm->pgd, mm);
}

static int asids_init(void)
{
	asid_bits = get_cpu_asid_bits();
	/*
	 * Expect allocation after rollover to fail if we don't have at least
	 * one more ASID than CPUs. ASID #0 is reserved for init_mm.
	 */
	WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus());
	atomic64_set(&asid_generation, ASID_FIRST_VERSION);
	asid_map = kzalloc(BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(*asid_map),
			   GFP_KERNEL);
	if (!asid_map)
		panic("Failed to allocate bitmap for %lu ASIDs\n",
		      NUM_USER_ASIDS);

	pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
	return 0;
}
early_initcall(asids_init);