/*
 * arch/arm/include/asm/mmu_context.h
 *
 * Copyright (C) 1996 Russell King.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Changelog:
 *  27-06-1996	RMK	Created
 */
#ifndef __ASM_ARM_MMU_CONTEXT_H
#define __ASM_ARM_MMU_CONTEXT_H

#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/preempt.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/proc-fns.h>
#include <asm/smp_plat.h>
#include <asm-generic/mm_hooks.h>

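/*
 * __check_vmalloc_seq() brings an mm's kernel page tables up to date when
 * init_mm's vmalloc mappings have changed since the mm last ran, as
 * detected via the vmalloc_seq counter.
 */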
void __check_vmalloc_seq(struct mm_struct *mm);

#ifdef CONFIG_CPU_HAS_ASID

void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
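
/*
 * A new mm starts with context.id == 0, which never matches a live ASID
 * generation, so check_and_switch_context() allocates a real ASID the
 * first time the mm is switched in.
 */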
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	atomic64_set(&mm->context.id, 0);
	return 0;
}

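/*
 * ARM erratum 798181: on affected Cortex-A15 parts, TLB maintenance
 * operations are not reliably broadcast to other cores, so the workaround
 * needs to know which CPUs this mm may be live on in order to IPI them;
 * a15_erratum_get_cpumask() computes that mask.
 */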
#ifdef CONFIG_ARM_ERRATA_798181
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
			     cpumask_t *mask);
#else /* !CONFIG_ARM_ERRATA_798181 */
static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
					   cpumask_t *mask)
{
}
#endif /* CONFIG_ARM_ERRATA_798181 */

#else	/* !CONFIG_CPU_HAS_ASID */

#ifdef CONFIG_MMU

static inline void check_and_switch_context(struct mm_struct *mm,
					    struct task_struct *tsk)
{
	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
		__check_vmalloc_seq(mm);

	if (irqs_disabled())
		/*
		 * cpu_switch_mm() needs to flush the VIVT caches. To avoid
		 * high interrupt latencies, defer the call and continue
		 * running with the old mm. Since we only support UP systems
		 * on non-ASID CPUs, the old mm will remain valid until the
		 * finish_arch_post_lock_switch() call.
		 */
		mm->context.switch_pending = 1;
	else
		cpu_switch_mm(mm->pgd, mm);
}

#ifndef MODULE
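/*
 * Defining finish_arch_post_lock_switch as itself tells the generic
 * scheduler code that this architecture implements the hook; when the
 * macro is left undefined, kernel/sched falls back to an empty stub.
 */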
#define finish_arch_post_lock_switch \
	finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
	struct mm_struct *mm = current->mm;

	if (mm && mm->context.switch_pending) {
		/*
		 * Preemption must be disabled during cpu_switch_mm() as we
		 * have some stateful cache flush implementations. Check
		 * switch_pending again in case we were preempted and the
		 * switch to this mm was already done.
		 */
		preempt_disable();
		if (mm->context.switch_pending) {
			mm->context.switch_pending = 0;
			cpu_switch_mm(mm->pgd, mm);
		}
		preempt_enable_no_resched();
	}
}
#endif /* !MODULE */

#endif	/* CONFIG_MMU */

static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	return 0;
}

#endif	/* CONFIG_CPU_HAS_ASID */

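/*
 * No per-mm teardown is needed, so destroy_context() is a no-op;
 * activate_mm() is a full switch_mm() with no owning task (tsk == NULL).
 */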
#define destroy_context(mm)		do { } while(0)
#define activate_mm(prev,next)	switch_mm(prev, next, NULL)

/*
 * This is called when "tsk" is about to enter lazy TLB mode.
 *
 * mm:  describes the currently active mm context
 * tsk: task which is entering lazy tlb
 *
 * tsk->mm will be NULL
 */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * This is the actual mm switch as far as the scheduler
 * is concerned.  No registers are touched.  We avoid
 * calling the CPU specific function when the mm hasn't
 * actually changed.
 */
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
#ifdef CONFIG_MMU
	unsigned int cpu = smp_processor_id();

	/*
	 * __sync_icache_dcache doesn't broadcast the I-cache invalidation,
	 * so check for possible thread migration and invalidate the I-cache
	 * if we're new to this CPU.
	 */
	if (cache_ops_need_broadcast() &&
	    !cpumask_empty(mm_cpumask(next)) &&
	    !cpumask_test_cpu(cpu, mm_cpumask(next)))
		__flush_icache_all();

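	/*
	 * Switch when the mm actually changes, and also when this CPU was
	 * not yet in next's cpumask (cpumask_test_and_set_cpu() returns the
	 * old bit value), e.g. after the thread migrated here.
	 */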
	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
		check_and_switch_context(next, tsk);
		if (cache_is_vivt())
			cpumask_clear_cpu(cpu, mm_cpumask(prev));
	}
#endif
}

#define deactivate_mm(tsk,mm)	do { } while (0)

#endif