#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Flush all TLB entries on the local CPU.
 */
static inline void __tlb_flush_local(void)
{
	/* PTLB purges the TLB of the executing CPU only */
	asm volatile("ptlb" : : : "memory");
}

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_idte(unsigned long asce)
{
	/*
	 * Global TLB flush for the mm: opcode 0xb98e is IDTE
	 * (INVALIDATE DAT TABLE ENTRY); option 2048 (0x800, bit 52)
	 * selects clearing-by-ASCE, which discards all TLB entries
	 * formed through the ASCE in %1 on every CPU.
	 */
	asm volatile(
		"	.insn	rrf,0xb98e0000,0,%0,%1,0"
		: : "a" (2048), "a" (asce) : "cc");
}

#ifdef CONFIG_SMP
void smp_ptlb_all(void);
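
/*
 * smp_ptlb_all() is implemented in arch/s390/kernel/smp.c. As a
 * rough sketch (illustration only, not copied from smp.c), it can be
 * thought of as broadcasting the local PTLB flush to all CPUs:
 *
 *	static void smp_ptlb_callback(void *info)
 *	{
 *		__tlb_flush_local();
 *	}
 *
 *	void smp_ptlb_all(void)
 *	{
 *		on_each_cpu(smp_ptlb_callback, NULL, 1);
 *	}
 */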

/*
 * Flush all TLB entries on all CPUs.
 */
static inline void __tlb_flush_global(void)
{
	unsigned int dummy = 0;

	/*
	 * COMPARE AND SWAP AND PURGE on a dummy word: the swap itself
	 * has no visible effect, but the purge part of the instruction
	 * clears the TLBs of all CPUs in the configuration.
	 */
	csp(&dummy, 0, 0);
}

/*
 * Flush TLB entries for a specific mm on all CPUs (if gmap is in use,
 * this may involve multiple ASCEs!).
 */
static inline void __tlb_flush_mm(struct mm_struct *mm)
{
	unsigned long gmap_asce;

	/*
	 * If the machine has IDTE we prefer a per-mm flush on all
	 * CPUs over a local flush, even if the mm only ever ran on
	 * the local CPU.
	 */
	preempt_disable();
	atomic_inc(&mm->context.flush_count);
	/* Reset TLB flush mask */
	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
	barrier();
	gmap_asce = READ_ONCE(mm->context.gmap_asce);
	/*
	 * gmap_asce is -1UL when the attached guest ASCEs cannot be
	 * covered by per-ASCE flushes; fall back to a full flush then.
	 */
	if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
		if (gmap_asce)
			__tlb_flush_idte(gmap_asce);
		__tlb_flush_idte(mm->context.asce);
	} else {
		/* Global TLB flush */
		__tlb_flush_global();
	}
	atomic_dec(&mm->context.flush_count);
	preempt_enable();
}
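
/*
 * Illustration only (hypothetical caller): after a bulk page table
 * update, all TLB entries of a task's address space would be dropped
 * with:
 *
 *	__tlb_flush_mm(current->mm);
 *
 * With IDTE and a known gmap ASCE this costs at most two IDTE flushes;
 * otherwise it degrades to one global CSP flush.
 */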

static inline void __tlb_flush_kernel(void)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(init_mm.context.asce);
	else
		__tlb_flush_global();
}
#else
#define __tlb_flush_global()	__tlb_flush_local()

/*
 * Without CONFIG_SMP there is only the local CPU, so flushing TLB
 * entries for a specific mm reduces to a local flush.
 */
static inline void __tlb_flush_mm(struct mm_struct *mm)
{
	__tlb_flush_local();
}

static inline void __tlb_flush_kernel(void)
{
	__tlb_flush_local();
}
#endif

static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
{
	spin_lock(&mm->context.lock);
	if (mm->context.flush_mm) {
		mm->context.flush_mm = 0;
		__tlb_flush_mm(mm);
	}
	spin_unlock(&mm->context.lock);
}
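
/*
 * Sketch of the other half of the lazy protocol (an assumption about
 * the callers, not spelled out in this header): pte/pmd helpers that
 * skip an immediate flush are expected to set
 *
 *	mm->context.flush_mm = 1;
 *
 * so that the next flush_tlb_mm()/flush_tlb_range() performs the
 * deferred flush exactly once via __tlb_flush_mm_lazy().
 */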

/*
 * TLB flushing:
 *  flush_tlb() - flushes the current mm struct TLBs
 *  flush_tlb_all() - flushes the TLBs of all processes
 *  flush_tlb_mm(mm) - flushes the TLBs of the specified mm context
 *  flush_tlb_page(vma, vmaddr) - flushes one page
 *  flush_tlb_range(vma, start, end) - flushes a range of pages
 *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
 */

/*
 * flush_tlb_mm goes together with ptep_set_wrprotect for the
 * copy_page_range operation, and flush_tlb_range is related to
 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
 * ptep_get_and_clear do not flush the TLBs directly if the mm has
 * only one user. At the end of the update the flush_tlb_mm and
 * flush_tlb_range functions need to do the flush (see the sketch
 * after the macros below).
 */
#define flush_tlb()				do { } while (0)
#define flush_tlb_all()				do { } while (0)
#define flush_tlb_page(vma, addr)		do { } while (0)
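
/*
 * Sketch of the batching described above (illustration only, pseudo
 * code rather than anything taken from mm/):
 *
 *	for (addr = start; addr < end; addr += PAGE_SIZE)
 *		ptep_set_wrprotect(mm, addr, ptep);	// no flush here
 *	flush_tlb_mm(mm);				// one deferred flush
 */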

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	__tlb_flush_mm_lazy(mm);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__tlb_flush_mm_lazy(vma->vm_mm);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	__tlb_flush_kernel();
}
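
/*
 * Illustration only (hypothetical caller): after changing kernel
 * mappings for a range of vmalloc-style addresses, the stale
 * translations would be dropped with:
 *
 *	flush_tlb_kernel_range(start, start + size);
 */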

#endif /* _S390_TLBFLUSH_H */