/*
 * This file contains the routines for handling the MMU on those
 * PowerPC implementations where the MMU is not using the hash
 * table, such as 8xx, 4xx, BookE, etc...
 *
 * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
 *                IBM Corp.
 *
 * Derived from previous arch/powerpc/mm/mmu_context.c
 * and arch/powerpc/include/asm/mmu_context.h
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * TODO:
 *
 *   - The global context lock will not scale very well
 *   - The maps should be dynamically allocated to allow for processors
 *     that support more PID bits at runtime
 *   - Implement flush_tlb_mm() by making the context stale and picking
 *     a new one
 *   - More aggressively clear stale map bits and maybe find some way to
 *     also clear mm->cpu_vm_mask bits when processes are migrated
 */

//#define DEBUG_MAP_CONSISTENCY
//#define DEBUG_CLAMP_LAST_CONTEXT	31
//#define DEBUG_HARDER

/* We don't use DEBUG because it tends to be always compiled in
 * nowadays and this would generate way too much output
 */
#ifdef DEBUG_HARDER
#define pr_hard(args...)	printk(KERN_DEBUG args)
#define pr_hardcont(args...)	printk(KERN_CONT args)
#else
#define pr_hard(args...)	do { } while(0)
#define pr_hardcont(args...)	do { } while(0)
#endif

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/cpu.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

static unsigned int first_context, last_context;
static unsigned int next_context, nr_free_contexts;
static unsigned long *context_map;
static unsigned long *stale_map[NR_CPUS];
static struct mm_struct **context_mm;
static DEFINE_RAW_SPINLOCK(context_lock);

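/* Size in bytes of a bitmap covering context IDs 0..last_context,
 * rounded up to a whole number of longs. For example, last_context = 255
 * with 64-bit longs gives 8 * (255 / 64 + 1) = 32 bytes, i.e. 256 bits.
 */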
#define CTX_MAP_SIZE	\
	(sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1))


/* Steal a context from a task that has one at the moment.
 *
 * This is used when we are running out of available PID numbers
 * on the processors.
 *
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :). This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 *  -- paulus
 *
 * For context stealing, we use a slightly different approach for
 * SMP and UP. Basically, the UP one is simpler and doesn't use
 * the stale map as we can just flush the local CPU
 *  -- benh
 */
#ifdef CONFIG_SMP
static unsigned int steal_context_smp(unsigned int id)
{
	struct mm_struct *mm;
	unsigned int cpu, max, i;

	max = last_context - first_context;

	/* Attempt to free next_context first and then loop until we manage */
	while (max--) {
		/* Pick up the victim mm */
		mm = context_mm[id];

		/* We have a candidate victim, check if it's active, on SMP
		 * we cannot steal active contexts
		 */
		if (mm->context.active) {
			id++;
			if (id > last_context)
				id = first_context;
			continue;
		}
		pr_hardcont(" | steal %d from 0x%p", id, mm);

		/* Mark this mm as having no context anymore */
		mm->context.id = MMU_NO_CONTEXT;

		/* Mark it stale on all CPUs that used this mm. For threaded
		 * implementations, we set it on all threads on each core
		 * represented in the mask. A future implementation will use
		 * a core map instead but this will do for now.
		 */
		for_each_cpu(cpu, mm_cpumask(mm)) {
			for (i = cpu_first_thread_in_core(cpu);
			     i <= cpu_last_thread_in_core(cpu); i++)
				__set_bit(id, stale_map[i]);
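			/* i is now one past this core's last thread, so
			 * setting cpu to i - 1 makes for_each_cpu() resume
			 * its scan after the whole core rather than
			 * revisiting the threads we just marked.
			 */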
			cpu = i - 1;
		}
		return id;
	}

	/* This will happen if you have more CPUs than available contexts,
	 * all we can do here is wait a bit and try again
	 */
	raw_spin_unlock(&context_lock);
	cpu_relax();
	raw_spin_lock(&context_lock);

	/* This will cause the caller to try again */
	return MMU_NO_CONTEXT;
}
#endif  /* CONFIG_SMP */

/* Note that this will also be called on SMP if all other CPUs are
 * offlined, which means that it may be called for cpu != 0. For
 * this to work, we somewhat assume that CPUs that are onlined
 * come up with a fully clean TLB (or are cleaned when offlined)
 */
static unsigned int steal_context_up(unsigned int id)
{
	struct mm_struct *mm;
	int cpu = smp_processor_id();

	/* Pick up the victim mm */
	mm = context_mm[id];

	pr_hardcont(" | steal %d from 0x%p", id, mm);

	/* Flush the TLB for that context */
	local_flush_tlb_mm(mm);

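	/* Note: the flush above must run while mm->context.id is still
	 * valid, since local_flush_tlb_mm() keys off it (editor's note,
	 * based on the nohash TLB flush implementation).
	 */
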
	/* Mark this mm as having no context anymore */
	mm->context.id = MMU_NO_CONTEXT;

	/* XXX This clear should ultimately be part of local_flush_tlb_mm */
	__clear_bit(id, stale_map[cpu]);

	return id;
}

#ifdef DEBUG_MAP_CONSISTENCY
static void context_check_map(void)
{
	unsigned int id, nrf, nact;

	nrf = nact = 0;
	for (id = first_context; id <= last_context; id++) {
		int used = test_bit(id, context_map);
		if (!used)
			nrf++;
		if (used != (context_mm[id] != NULL))
			pr_err("MMU: Context %d is %s and MM is %p !\n",
			       id, used ? "used" : "free", context_mm[id]);
		if (context_mm[id] != NULL)
			nact += context_mm[id]->context.active;
	}
	if (nrf != nr_free_contexts) {
		pr_err("MMU: Free context count out of sync ! (%d vs %d)\n",
		       nr_free_contexts, nrf);
		nr_free_contexts = nrf;
	}
	if (nact > num_online_cpus())
		pr_err("MMU: More active contexts than CPUs ! (%d vs %d)\n",
		       nact, num_online_cpus());
	if (first_context > 0 && !test_bit(0, context_map))
		pr_err("MMU: Context 0 has been freed !!!\n");
}
#else
static void context_check_map(void) { }
#endif

void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned int i, id, cpu = smp_processor_id();
	unsigned long *map;

	/* No lockless fast path .. yet */
	raw_spin_lock(&context_lock);

	pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
		cpu, next, next->context.active, next->context.id);

#ifdef CONFIG_SMP
	/* Mark us active and the previous one no longer active */
	next->context.active++;
	if (prev) {
		pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active);
		WARN_ON(prev->context.active < 1);
		prev->context.active--;
	}

 again:
#endif /* CONFIG_SMP */

	/* If we already have a valid assigned context, skip all that */
	id = next->context.id;
	if (likely(id != MMU_NO_CONTEXT)) {
#ifdef DEBUG_MAP_CONSISTENCY
		if (context_mm[id] != next)
			pr_err("MMU: mm 0x%p has id %d but context_mm[%d] says 0x%p\n",
			       next, id, id, context_mm[id]);
#endif
		goto ctxt_ok;
	}

	/* We really don't have a context, let's try to acquire one */
	id = next_context;
	if (id > last_context)
		id = first_context;
	map = context_map;

	/* No more free contexts, let's try to steal one */
	if (nr_free_contexts == 0) {
#ifdef CONFIG_SMP
		if (num_online_cpus() > 1) {
			id = steal_context_smp(id);
			if (id == MMU_NO_CONTEXT)
				goto again;
			goto stolen;
		}
#endif /* CONFIG_SMP */
		id = steal_context_up(id);
		goto stolen;
	}
	nr_free_contexts--;

	/* We know there's at least one free context, try to find it */
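	/* The non-atomic __test_and_set_bit() is safe here: every update
	 * of context_map is done under context_lock, which we hold.
	 */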
	while (__test_and_set_bit(id, map)) {
		id = find_next_zero_bit(map, last_context+1, id);
		if (id > last_context)
			id = first_context;
	}
 stolen:
	next_context = id + 1;
	context_mm[id] = next;
	next->context.id = id;
	pr_hardcont(" | new id=%d,nrf=%d", id, nr_free_contexts);

	context_check_map();
 ctxt_ok:

	/* If that context got marked stale on this CPU, then flush the
	 * local TLB for it and unmark it before we use it
	 */
	if (test_bit(id, stale_map[cpu])) {
		pr_hardcont(" | stale flush %d [%d..%d]",
			    id, cpu_first_thread_in_core(cpu),
			    cpu_last_thread_in_core(cpu));

		local_flush_tlb_mm(next);

		/* XXX This clear should ultimately be part of local_flush_tlb_mm */
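		/* On threaded implementations the stale bit was set for
		 * every thread of the core (see steal_context_smp()), so
		 * clear it on all of them now that we have flushed.
		 */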
		for (i = cpu_first_thread_in_core(cpu);
		     i <= cpu_last_thread_in_core(cpu); i++) {
			__clear_bit(id, stale_map[i]);
		}
	}

	/* Flick the MMU and release lock */
	pr_hardcont(" -> %d\n", id);
	set_context(id, next->pgd);
	raw_spin_unlock(&context_lock);
}

/*
 * Set up the context for a new address space.
 */
int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
	pr_hard("initing context for mm @%p\n", mm);

	mm->context.id = MMU_NO_CONTEXT;
	mm->context.active = 0;

	return 0;
}

/*
 * We're finished using the context for an address space.
 */
void destroy_context(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int id;

	if (mm->context.id == MMU_NO_CONTEXT)
		return;

	WARN_ON(mm->context.active != 0);

	raw_spin_lock_irqsave(&context_lock, flags);
	id = mm->context.id;
	if (id != MMU_NO_CONTEXT) {
		__clear_bit(id, context_map);
		mm->context.id = MMU_NO_CONTEXT;
#ifdef DEBUG_MAP_CONSISTENCY
		mm->context.active = 0;
#endif
		context_mm[id] = NULL;
		nr_free_contexts++;
	}
	raw_spin_unlock_irqrestore(&context_lock, flags);
}

#ifdef CONFIG_SMP

static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
					    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
#ifdef CONFIG_HOTPLUG_CPU
	struct task_struct *p;
#endif
	/* We don't touch CPU 0 map, it's allocated at boot and kept
	 * around forever
	 */
	if (cpu == 0)
		return NOTIFY_OK;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
		stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
		kfree(stale_map[cpu]);
		stale_map[cpu] = NULL;

		/* We also clear the cpu_vm_mask bits of CPUs going away */
		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (p->mm)
				cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
		}
		read_unlock(&tasklist_lock);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
	.notifier_call	= mmu_context_cpu_notify,
};

#endif /* CONFIG_SMP */

/*
 * Initialize the context management stuff.
 */
void __init mmu_context_init(void)
{
	/* Mark init_mm as being active on all possible CPUs since
	 * we'll get called with prev == init_mm the first time
	 * we schedule on a given CPU
	 */
	init_mm.context.active = NR_CPUS;

	/*
	 * The MPC8xx has only 16 contexts. We rotate through them on each
	 * task switch. A better way would be to keep track of tasks that
	 * own contexts, and implement an LRU usage. That way very active
	 * tasks don't always have to pay the TLB reload overhead. The
	 * kernel pages are mapped shared, so the kernel can run on behalf
	 * of any task that makes a kernel entry. Shared does not mean they
	 * are not protected, just that the ASID comparison is not performed.
	 *  -- Dan
	 *
	 * The IBM4xx has 256 contexts, so we can just rotate through these
	 * as a way of "switching" contexts. If the TID of the TLB is zero,
	 * the PID/TID comparison is disabled, so we can use a TID of zero
	 * to represent all kernel pages as shared among all contexts.
	 *  -- Dan
	 */
	if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
		first_context = 0;
		last_context = 15;
	} else {
		first_context = 1;
		last_context = 255;
	}

#ifdef DEBUG_CLAMP_LAST_CONTEXT
	last_context = DEBUG_CLAMP_LAST_CONTEXT;
#endif
	/*
	 * Allocate the maps used by context management
	 */
	context_map = alloc_bootmem(CTX_MAP_SIZE);
	context_mm = alloc_bootmem(sizeof(void *) * (last_context + 1));
	stale_map[0] = alloc_bootmem(CTX_MAP_SIZE);

#ifdef CONFIG_SMP
	register_cpu_notifier(&mmu_context_cpu_nb);
#endif

	printk(KERN_INFO
	       "MMU: Allocated %zu bytes of context maps for %d contexts\n",
	       2 * CTX_MAP_SIZE + (sizeof(void *) * (last_context + 1)),
	       last_context - first_context + 1);

	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes first_context < 32.
	 */
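	/* For example, first_context == 1 makes this set only bit 0,
	 * keeping context 0 reserved for the kernel, while
	 * first_context == 0 reserves nothing.
	 */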
	context_map[0] = (1 << first_context) - 1;
	next_context = first_context;
	nr_free_contexts = last_context - first_context + 1;
}