/*
 * This file contains the routines for handling the MMU on those
 * PowerPC implementations where the MMU is not using the hash
 * table, such as the 8xx, 4xx, Book-E, etc.
 *
 * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
 *                IBM Corp.
 *
 * Derived from previous arch/powerpc/mm/mmu_context.c
 * and arch/powerpc/include/asm/mmu_context.h
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * TODO:
 *
 *   - The global context lock will not scale very well
 *   - The maps should be dynamically allocated to allow for processors
 *     that support more PID bits at runtime
 *   - Implement flush_tlb_mm() by making the context stale and picking
 *     a new one
 *   - More aggressively clear stale map bits and maybe find some way to
 *     also clear mm->cpu_vm_mask bits when processes are migrated
 */

//#define DEBUG_MAP_CONSISTENCY
//#define DEBUG_CLAMP_LAST_CONTEXT	31
//#define DEBUG_HARDER

/* We don't use DEBUG because it tends to be compiled in always nowadays
 * and this would generate way too much output
 */
#ifdef DEBUG_HARDER
#define pr_hard(args...)	printk(KERN_DEBUG args)
#define pr_hardcont(args...)	printk(KERN_CONT args)
#else
#define pr_hard(args...)	do { } while (0)
#define pr_hardcont(args...)	do { } while (0)
#endif

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

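/* Bookkeeping overview:
 *
 *  - context_map has one bit per context ID; a set bit means the ID
 *    is currently assigned to some mm
 *  - context_mm[] maps a context ID back to the mm that owns it, so
 *    stealing can find a victim
 *  - stale_map[cpu] marks contexts whose TLB entries on that CPU may
 *    be stale and must be flushed before the context is used there
 *  - next_context is a hint for where to start searching for a free ID
 *  - context_lock protects all of the above
 */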
static unsigned int first_context, last_context;
static unsigned int next_context, nr_free_contexts;
static unsigned long *context_map;
static unsigned long *stale_map[NR_CPUS];
static struct mm_struct **context_mm;
static DEFINE_RAW_SPINLOCK(context_lock);

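/* One bit per context; e.g. with last_context == 255 and 32-bit longs
 * this works out to (255 / 32 + 1) * 4 = 32 bytes.
 */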
#define CTX_MAP_SIZE	\
	(sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1))


/* Steal a context from a task that has one at the moment.
 *
 * This is used when we are running out of available PID numbers
 * on the processors.
 *
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :). This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 *  -- paulus
 *
 * For context stealing, we use a slightly different approach for
 * SMP and UP. Basically, the UP one is simpler and doesn't use
 * the stale map as we can just flush the local CPU
 *  -- benh
 */
#ifdef CONFIG_SMP
static unsigned int steal_context_smp(unsigned int id)
{
	struct mm_struct *mm;
	unsigned int cpu, max, i;

	max = last_context - first_context;
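	/* (max bounds the scan below to at most one pass over the
	 *  whole context space)
	 */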

	/* Attempt to free next_context first and then loop until we manage */
	while (max--) {
		/* Pick up the victim mm */
		mm = context_mm[id];

		/* We have a candidate victim, check if it's active, on SMP
		 * we cannot steal active contexts
		 */
		if (mm->context.active) {
			id++;
			if (id > last_context)
				id = first_context;
			continue;
		}
		pr_hardcont(" | steal %d from 0x%p", id, mm);

		/* Mark this mm as having no context anymore */
		mm->context.id = MMU_NO_CONTEXT;

		/* Mark it stale on all CPUs that used this mm. For threaded
		 * implementations, we set it on all threads on each core
		 * represented in the mask. A future implementation will use
		 * a core map instead but this will do for now.
		 */
		for_each_cpu(cpu, mm_cpumask(mm)) {
			for (i = cpu_first_thread_sibling(cpu);
			     i <= cpu_last_thread_sibling(cpu); i++) {
				if (stale_map[i])
					__set_bit(id, stale_map[i]);
			}
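			/* Step cpu back by one so that for_each_cpu()
			 * resumes at the first CPU after this core's
			 * siblings, which were all handled above.
			 */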
			cpu = i - 1;
		}
		return id;
	}

	/* This will happen if you have more CPUs than available contexts,
	 * all we can do here is wait a bit and try again
	 */
	raw_spin_unlock(&context_lock);
	cpu_relax();
	raw_spin_lock(&context_lock);

	/* This will cause the caller to try again */
	return MMU_NO_CONTEXT;
}
#endif /* CONFIG_SMP */

/* Note that this will also be called on SMP if all other CPUs are
 * offlined, which means that it may be called for cpu != 0. For
 * this to work, we somewhat assume that CPUs that are onlined
 * come up with a fully clean TLB (or are cleaned when offlined)
 */
static unsigned int steal_context_up(unsigned int id)
{
	struct mm_struct *mm;
	int cpu = smp_processor_id();

	/* Pick up the victim mm */
	mm = context_mm[id];

	pr_hardcont(" | steal %d from 0x%p", id, mm);

	/* Flush the TLB for that context */
	local_flush_tlb_mm(mm);

	/* Mark this mm as having no context anymore */
	mm->context.id = MMU_NO_CONTEXT;

	/* XXX This clear should ultimately be part of local_flush_tlb_mm */
	__clear_bit(id, stale_map[cpu]);

	return id;
}

#ifdef DEBUG_MAP_CONSISTENCY
static void context_check_map(void)
{
	unsigned int id, nrf, nact;

	nrf = nact = 0;
	for (id = first_context; id <= last_context; id++) {
		int used = test_bit(id, context_map);
		if (!used)
			nrf++;
		if (used != (context_mm[id] != NULL))
			pr_err("MMU: Context %d is %s and MM is %p !\n",
			       id, used ? "used" : "free", context_mm[id]);
		if (context_mm[id] != NULL)
			nact += context_mm[id]->context.active;
	}
	if (nrf != nr_free_contexts) {
		pr_err("MMU: Free context count out of sync ! (%d vs %d)\n",
		       nr_free_contexts, nrf);
		nr_free_contexts = nrf;
	}
	if (nact > num_online_cpus())
		pr_err("MMU: More active contexts than CPUs ! (%d vs %d)\n",
		       nact, num_online_cpus());
	if (first_context > 0 && !test_bit(0, context_map))
		pr_err("MMU: Context 0 has been freed !!!\n");
}
#else
static void context_check_map(void) { }
#endif

void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned int i, id, cpu = smp_processor_id();
	unsigned long *map;

	/* No lockless fast path ... yet */
	raw_spin_lock(&context_lock);

	pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
		cpu, next, next->context.active, next->context.id);

#ifdef CONFIG_SMP
	/* Mark us active and the previous one not anymore */
	next->context.active++;
	if (prev) {
		pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active);
		WARN_ON(prev->context.active < 1);
		prev->context.active--;
	}

 again:
#endif /* CONFIG_SMP */

	/* If we already have a valid assigned context, skip all that */
	id = next->context.id;
	if (likely(id != MMU_NO_CONTEXT)) {
#ifdef DEBUG_MAP_CONSISTENCY
		if (context_mm[id] != next)
			pr_err("MMU: mm 0x%p has id %d but context_mm[%d] says 0x%p\n",
			       next, id, id, context_mm[id]);
#endif
		goto ctxt_ok;
	}

	/* We really don't have a context, let's try to acquire one */
	id = next_context;
	if (id > last_context)
		id = first_context;
	map = context_map;

	/* No more free contexts, let's try to steal one */
	if (nr_free_contexts == 0) {
#ifdef CONFIG_SMP
		if (num_online_cpus() > 1) {
			id = steal_context_smp(id);
			if (id == MMU_NO_CONTEXT)
				goto again;
			goto stolen;
		}
#endif /* CONFIG_SMP */
		id = steal_context_up(id);
		goto stolen;
	}
	nr_free_contexts--;

	/* We know there's at least one free context, try to find it */
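	/* (__test_and_set_bit() claims the ID as it tests it, so the bit
	 *  for "id" is already set in the map once the loop exits)
	 */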
	while (__test_and_set_bit(id, map)) {
		id = find_next_zero_bit(map, last_context+1, id);
		if (id > last_context)
			id = first_context;
	}
 stolen:
	next_context = id + 1;
	context_mm[id] = next;
	next->context.id = id;
	pr_hardcont(" | new id=%d,nrf=%d", id, nr_free_contexts);
2ca8cf73 BH |
260 | |
261 | context_check_map(); | |
262 | ctxt_ok: | |
263 | ||
264 | /* If that context got marked stale on this CPU, then flush the | |
265 | * local TLB for it and unmark it before we use it | |
266 | */ | |
267 | if (test_bit(id, stale_map[cpu])) { | |
fcce8109 | 268 | pr_hardcont(" | stale flush %d [%d..%d]", |
99d86705 VS |
269 | id, cpu_first_thread_sibling(cpu), |
270 | cpu_last_thread_sibling(cpu)); | |
fcce8109 | 271 | |
2ca8cf73 BH |
272 | local_flush_tlb_mm(next); |
273 | ||
274 | /* XXX This clear should ultimately be part of local_flush_tlb_mm */ | |
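		/* Clear the bit on all sibling threads of this core,
		 * mirroring how steal_context_smp() sets it per-core.
		 */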
		for (i = cpu_first_thread_sibling(cpu);
		     i <= cpu_last_thread_sibling(cpu); i++) {
			if (stale_map[i])
				__clear_bit(id, stale_map[i]);
		}
	}

	/* Flick the MMU and release lock */
	pr_hardcont(" -> %d\n", id);
	set_context(id, next->pgd);
	raw_spin_unlock(&context_lock);
}

/*
 * Set up the context for a new address space.
 */
int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
	pr_hard("initing context for mm @%p\n", mm);

	mm->context.id = MMU_NO_CONTEXT;
	mm->context.active = 0;

#ifdef CONFIG_PPC_MM_SLICES
	if (slice_mm_new_context(mm))
		slice_set_user_psize(mm, mmu_virtual_psize);
#endif

	return 0;
}

/*
 * We're finished using the context for an address space.
 */
void destroy_context(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int id;

	if (mm->context.id == MMU_NO_CONTEXT)
		return;

	WARN_ON(mm->context.active != 0);

	raw_spin_lock_irqsave(&context_lock, flags);
	id = mm->context.id;
	if (id != MMU_NO_CONTEXT) {
		__clear_bit(id, context_map);
		mm->context.id = MMU_NO_CONTEXT;
#ifdef DEBUG_MAP_CONSISTENCY
		mm->context.active = 0;
#endif
		context_mm[id] = NULL;
		nr_free_contexts++;
	}
	raw_spin_unlock_irqrestore(&context_lock, flags);
}

#ifdef CONFIG_SMP

static int mmu_context_cpu_notify(struct notifier_block *self,
				  unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;

	/* We don't touch the boot CPU's map, it's allocated at boot and
	 * kept around forever
	 */
	if (cpu == boot_cpuid)
		return NOTIFY_OK;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu);
		stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
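		/* (If the allocation fails, stale_map[cpu] stays NULL; the
		 *  NULL checks around __set_bit()/__clear_bit() elsewhere
		 *  keep that from being fatal)
		 */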
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu);
		kfree(stale_map[cpu]);
		stale_map[cpu] = NULL;

		/* We also clear the cpu_vm_mask bits of CPUs going away */
		clear_tasks_mm_cpumask(cpu);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block mmu_context_cpu_nb = {
	.notifier_call = mmu_context_cpu_notify,
};

#endif /* CONFIG_SMP */

/*
 * Initialize the context management stuff.
 */
void __init mmu_context_init(void)
{
	/* Mark init_mm as being active on all possible CPUs since
	 * we'll get called with prev == init_mm the first time
	 * we schedule on a given CPU
	 */
	init_mm.context.active = NR_CPUS;

	/*
	 * The MPC8xx has only 16 contexts. We rotate through them on each
	 * task switch. A better way would be to keep track of tasks that
	 * own contexts, and implement an LRU usage. That way very active
	 * tasks don't always have to pay the TLB reload overhead. The
	 * kernel pages are mapped shared, so the kernel can run on behalf
	 * of any task that makes a kernel entry. Shared does not mean they
	 * are not protected, just that the ASID comparison is not performed.
	 *	-- Dan
	 *
	 * The IBM4xx has 256 contexts, so we can just rotate through these
	 * as a way of "switching" contexts. If the TID of the TLB is zero,
	 * the PID/TID comparison is disabled, so we can use a TID of zero
	 * to represent all kernel pages as shared among all contexts.
	 *	-- Dan
	 *
	 * The IBM 47x core supports 16-bit PIDs, thus 65535 contexts. We
	 * should normally never have to steal though the facility is
	 * present if needed.
	 *	-- BenH
	 */
	if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
		first_context = 0;
		last_context = 15;
	} else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
		first_context = 1;
		last_context = 65535;
	} else {
		first_context = 1;
		last_context = 255;
	}

#ifdef DEBUG_CLAMP_LAST_CONTEXT
	last_context = DEBUG_CLAMP_LAST_CONTEXT;
#endif
	/*
	 * Allocate the maps used by context management
	 */
	context_map = alloc_bootmem(CTX_MAP_SIZE);
	context_mm = alloc_bootmem(sizeof(void *) * (last_context + 1));
#ifndef CONFIG_SMP
	stale_map[0] = alloc_bootmem(CTX_MAP_SIZE);
#else
	stale_map[boot_cpuid] = alloc_bootmem(CTX_MAP_SIZE);

	register_cpu_notifier(&mmu_context_cpu_nb);
#endif

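	/* (The total below is context_map plus the boot CPU's stale map,
	 *  i.e. 2 * CTX_MAP_SIZE, plus the context_mm pointer array)
	 */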
	printk(KERN_INFO
	       "MMU: Allocated %zu bytes of context maps for %d contexts\n",
	       2 * CTX_MAP_SIZE + (sizeof(void *) * (last_context + 1)),
	       last_context - first_context + 1);

	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes first_context < 32.
	 */
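	/* (e.g. first_context == 1 yields 0x1, reserving context 0 for
	 *  the kernel; first_context == 0 yields 0, reserving nothing)
	 */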
	context_map[0] = (1 << first_context) - 1;
	next_context = first_context;
	nr_free_contexts = last_context - first_context + 1;
}