/*
 * Based on arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/cpufeature.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>

static u32 asid_bits;
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);

static atomic64_t asid_generation;
static unsigned long *asid_map;

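/*
 * active_asids tracks the ASID currently installed on each CPU (cleared to 0
 * while a rollover is in progress), reserved_asids preserves each CPU's ASID
 * across a rollover, and tlb_flush_pending marks CPUs that must flush their
 * TLB before running with an ASID from the new generation.
 */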
static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

#define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION	(1UL << asid_bits)
#define NUM_USER_ASIDS		ASID_FIRST_VERSION

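/*
 * mm->context.id packs a generation count in the bits above asid_bits and the
 * hardware ASID in the low asid_bits bits; the generation advances by
 * ASID_FIRST_VERSION on each rollover, and NUM_USER_ASIDS is the number of
 * ASIDs available within a single generation.
 */
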
/* Get the ASIDBits supported by the current CPU */
static u32 get_cpu_asid_bits(void)
{
        u32 asid;
        int fld = cpuid_feature_extract_unsigned_field(read_cpuid(SYS_ID_AA64MMFR0_EL1),
                                                       ID_AA64MMFR0_ASID_SHIFT);

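        /* ID_AA64MMFR0_EL1.ASIDBits: 0 means 8-bit ASIDs, 2 means 16-bit ASIDs. */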
        switch (fld) {
        default:
                pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
                        smp_processor_id(), fld);
                /* Fallthrough */
        case 0:
                asid = 8;
                break;
        case 2:
                asid = 16;
        }

        return asid;
}

/* Check if the current cpu's ASIDBits is compatible with asid_bits */
void verify_cpu_asid_bits(void)
{
        u32 asid = get_cpu_asid_bits();

        if (asid < asid_bits) {
                /*
                 * We cannot decrease the ASID size at runtime, so panic if we
                 * support fewer ASID bits than the boot CPU.
                 */
                pr_crit("CPU%d: smaller ASID size(%u) than boot CPU (%u)\n",
                        smp_processor_id(), asid, asid_bits);
                update_cpu_boot_status(CPU_PANIC_KERNEL);
                cpu_park_loop();
        }
}

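/*
 * Called on ASID-space exhaustion (with cpu_asid_lock held) after the
 * generation has been bumped: preserve the ASIDs that CPUs are currently
 * running with and mark every CPU as needing a TLB flush before it runs
 * with an ASID from the new generation.
 */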
static void flush_context(unsigned int cpu)
{
        int i;
        u64 asid;

        /* Update the list of reserved ASIDs and the ASID bitmap. */
        bitmap_clear(asid_map, 0, NUM_USER_ASIDS);

        /*
         * Ensure the generation bump is observed before we xchg the
         * active_asids.
         */
        smp_wmb();

        for_each_possible_cpu(i) {
                asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
                /*
                 * If this CPU has already been through a
                 * rollover, but hasn't run another task in
                 * the meantime, we must preserve its reserved
                 * ASID, as this is the only trace we have of
                 * the process it is still running.
                 */
                if (asid == 0)
                        asid = per_cpu(reserved_asids, i);
                __set_bit(asid & ~ASID_MASK, asid_map);
                per_cpu(reserved_asids, i) = asid;
        }

        /* Queue a TLB invalidate and flush the I-cache if necessary. */
        cpumask_setall(&tlb_flush_pending);

        if (icache_is_aivivt())
                __flush_icache_all();
}

static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
        int cpu;
        bool hit = false;

        /*
         * Iterate over the set of reserved ASIDs looking for a match.
         * If we find one, then we can update our mm to use newasid
         * (i.e. the same ASID in the current generation) but we can't
         * exit the loop early, since we need to ensure that all copies
         * of the old ASID are updated to reflect the mm. Failure to do
         * so could result in us missing the reserved ASID in a future
         * generation.
         */
        for_each_possible_cpu(cpu) {
                if (per_cpu(reserved_asids, cpu) == asid) {
                        hit = true;
                        per_cpu(reserved_asids, cpu) = newasid;
                }
        }

        return hit;
}

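/*
 * Allocate an ASID for @mm in the current generation, re-using the mm's
 * previous ASID number when it is still free. Called with cpu_asid_lock held;
 * may trigger a rollover via flush_context() when the ASID space is exhausted.
 */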
static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
        static u32 cur_idx = 1;
        u64 asid = atomic64_read(&mm->context.id);
        u64 generation = atomic64_read(&asid_generation);

        if (asid != 0) {
                u64 newasid = generation | (asid & ~ASID_MASK);

                /*
                 * If our current ASID was active during a rollover, we
                 * can continue to use it and this was just a false alarm.
                 */
                if (check_update_reserved_asid(asid, newasid))
                        return newasid;

                /*
                 * We had a valid ASID in a previous life, so try to re-use
                 * it if possible.
                 */
                asid &= ~ASID_MASK;
                if (!__test_and_set_bit(asid, asid_map))
                        return newasid;
        }

        /*
         * Allocate a free ASID. If we can't find one, take a note of the
         * currently active ASIDs and mark the TLBs as requiring flushes.
         * We always count from ASID #1, as we use ASID #0 when setting a
         * reserved TTBR0 for the init_mm.
         */
        asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
        if (asid != NUM_USER_ASIDS)
                goto set_asid;

        /* We're out of ASIDs, so increment the global generation count */
        generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION,
                                                 &asid_generation);
        flush_context(cpu);

        /* We have at least 1 ASID per CPU, so this will always succeed */
        asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);

set_asid:
        __set_bit(asid, asid_map);
        cur_idx = asid;
        return asid | generation;
}

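/*
 * Called on context switch: if @mm's ASID is still from the current
 * generation, install it on the fast path without taking the lock; otherwise
 * take cpu_asid_lock, allocate a new ASID if needed, and flush the local TLB
 * if a rollover has left this CPU with a pending flush.
 */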
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
{
        unsigned long flags;
        u64 asid;

        asid = atomic64_read(&mm->context.id);

        /*
         * The memory ordering here is subtle. We rely on the control
         * dependency between the generation read and the update of
         * active_asids to ensure that we are synchronised with a
         * parallel rollover (i.e. this pairs with the smp_wmb() in
         * flush_context).
         */
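        /*
         * Fast path: the XOR/shift below checks that the ASID is from the
         * current generation, and a non-zero xchg result shows that no
         * rollover has cleared this CPU's active_asids entry in the meantime.
         */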
        if (!((asid ^ atomic64_read(&asid_generation)) >> asid_bits)
            && atomic64_xchg_relaxed(&per_cpu(active_asids, cpu), asid))
                goto switch_mm_fastpath;

        raw_spin_lock_irqsave(&cpu_asid_lock, flags);
        /* Check that our ASID belongs to the current generation. */
        asid = atomic64_read(&mm->context.id);
        if ((asid ^ atomic64_read(&asid_generation)) >> asid_bits) {
                asid = new_context(mm, cpu);
                atomic64_set(&mm->context.id, asid);
        }

        if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
                local_flush_tlb_all();

        atomic64_set(&per_cpu(active_asids, cpu), asid);
        raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
        cpu_switch_mm(mm->pgd, mm);
}

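/*
 * Runs as an early initcall, i.e. on the boot CPU before the secondary CPUs
 * are brought up, so asid_bits reflects the boot CPU and is later checked
 * against each secondary CPU in verify_cpu_asid_bits().
 */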
static int asids_init(void)
{
        asid_bits = get_cpu_asid_bits();
        /* If we end up with more CPUs than ASIDs, expect things to crash */
        WARN_ON(NUM_USER_ASIDS < num_possible_cpus());
        atomic64_set(&asid_generation, ASID_FIRST_VERSION);
        asid_map = kzalloc(BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(*asid_map),
                           GFP_KERNEL);
        if (!asid_map)
                panic("Failed to allocate bitmap for %lu ASIDs\n",
                      NUM_USER_ASIDS);

        pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
        return 0;
}
early_initcall(asids_init);