/*
 * arch/arm/include/asm/mmu_context.h
 *
 * Copyright (C) 1996 Russell King.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Changelog:
 *  27-06-1996	RMK	Created
 */
#ifndef __ASM_ARM_MMU_CONTEXT_H
#define __ASM_ARM_MMU_CONTEXT_H

#include <linux/compiler.h>
#include <linux/sched.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/proc-fns.h>
#include <asm/smp_plat.h>
#include <asm-generic/mm_hooks.h>

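/*
 * init_mm.context.vmalloc_seq is bumped whenever the kernel's vmalloc
 * area mappings change; __check_vmalloc_seq() copies the updated kernel
 * page table entries into this mm and brings its sequence number back
 * in sync.
 */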
void __check_vmalloc_seq(struct mm_struct *mm);

#ifdef CONFIG_CPU_HAS_ASID

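/*
 * With hardware ASIDs the TLB tags entries per address space, so a
 * context switch does not require a full TLB flush. context.id == 0
 * means "no ASID allocated yet"; check_and_switch_context() then
 * assigns one, rolling the global ASID generation over when the ASID
 * space is exhausted.
 */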
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
#define init_new_context(tsk,mm)	({ atomic64_set(&(mm)->context.id, 0); 0; })

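/*
 * Cortex-A15 erratum 798181: TLB maintenance broadcast by TLBI/DSB may
 * not reach all other cores, so the workaround forwards the maintenance
 * via IPI; this helper computes the set of CPUs that need it.
 */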
#ifdef CONFIG_ARM_ERRATA_798181
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
			     cpumask_t *mask);
#else  /* !CONFIG_ARM_ERRATA_798181 */
static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
					   cpumask_t *mask)
{
}
#endif /* CONFIG_ARM_ERRATA_798181 */

#else	/* !CONFIG_CPU_HAS_ASID */

#ifdef CONFIG_MMU

static inline void check_and_switch_context(struct mm_struct *mm,
					    struct task_struct *tsk)
{
	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
		__check_vmalloc_seq(mm);

	if (irqs_disabled())
		/*
		 * cpu_switch_mm() needs to flush the VIVT caches. To avoid
		 * high interrupt latencies, defer the call and continue
		 * running with the old mm. Since we only support UP systems
		 * on non-ASID CPUs, the old mm will remain valid until the
		 * finish_arch_post_lock_switch() call.
		 */
		mm->context.switch_pending = 1;
	else
		cpu_switch_mm(mm->pgd, mm);
}

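/*
 * Defining finish_arch_post_lock_switch to its own name tells the
 * generic scheduler code that this architecture implements the hook;
 * the core only calls it when the macro is defined.
 */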
#define finish_arch_post_lock_switch \
	finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
	struct mm_struct *mm = current->mm;

	if (mm && mm->context.switch_pending) {
		/*
		 * Preemption must be disabled during cpu_switch_mm() as we
		 * have some stateful cache flush implementations. Check
		 * switch_pending again in case we were preempted and the
		 * switch to this mm was already done.
		 */
		preempt_disable();
		if (mm->context.switch_pending) {
			mm->context.switch_pending = 0;
			cpu_switch_mm(mm->pgd, mm);
		}
		preempt_enable_no_resched();
	}
}

#endif	/* CONFIG_MMU */

#define init_new_context(tsk,mm)	0

#endif	/* CONFIG_CPU_HAS_ASID */

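/*
 * There is no per-mm state to tear down here: ASIDs (where used) are
 * reclaimed by generation rollover rather than freed per mm, so
 * destroy_context() can be a no-op.
 */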
#define destroy_context(mm)		do { } while(0)
#define activate_mm(prev,next)		switch_mm(prev, next, NULL)

/*
 * This is called when "tsk" is about to enter lazy TLB mode.
 *
 * mm:  describes the currently active mm context
 * tsk: task which is entering lazy tlb
 *
 * tsk->mm will be NULL
 */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
 * This is the actual mm switch as far as the scheduler
 * is concerned. No registers are touched. We avoid
 * calling the CPU specific function when the mm hasn't
 * actually changed.
 */
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
#ifdef CONFIG_MMU
	unsigned int cpu = smp_processor_id();

	/*
	 * __sync_icache_dcache doesn't broadcast the I-cache invalidation,
	 * so check for possible thread migration and invalidate the I-cache
	 * if we're new to this CPU.
	 */
	if (cache_ops_need_broadcast() &&
	    !cpumask_empty(mm_cpumask(next)) &&
	    !cpumask_test_cpu(cpu, mm_cpumask(next)))
		__flush_icache_all();

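	/*
	 * Switch the page tables when the mm actually changes, or when
	 * this CPU runs the mm for the first time; the test_and_set also
	 * marks the mm as live on this CPU so that TLB and cache
	 * maintenance broadcasts reach it.
	 */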
	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
		check_and_switch_context(next, tsk);
		if (cache_is_vivt())
			cpumask_clear_cpu(cpu, mm_cpumask(prev));
	}
#endif
}

#define deactivate_mm(tsk,mm)	do { } while (0)

#endif	/* __ASM_ARM_MMU_CONTEXT_H */