#ifndef _S390_TLB_H
#define _S390_TLB_H

/*
 * TLB flushing on s390 is complicated. The following requirement
 * from the principles of operation is the most arduous:
 *
 * "A valid table entry must not be changed while it is attached
 * to any CPU and may be used for translation by that CPU except to
 * (1) invalidate the entry by using INVALIDATE PAGE TABLE ENTRY,
 * or INVALIDATE DAT TABLE ENTRY, (2) alter bits 56-63 of a page
 * table entry, or (3) make a change by means of a COMPARE AND SWAP
 * AND PURGE instruction that purges the TLB."
 *
 * The modification of a pte of an active mm struct therefore is
 * a two step process: i) invalidate the pte, ii) store the new pte.
 * This is true for the page protection bit as well.
 * The only possible optimization is to flush at the beginning of
 * a tlb_gather_mmu cycle if the mm_struct is currently not in use.
 *
 * Pages used for the page tables are a different story. FIXME: more
 */
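
/*
 * A minimal sketch of the two step update described above. The helper
 * name set_pte_active() and the invalidation primitive ptep_ipte()
 * are assumptions for illustration, not part of this header:
 *
 *	static inline void set_pte_active(struct mm_struct *mm,
 *					  unsigned long addr,
 *					  pte_t *ptep, pte_t new)
 *	{
 *		ptep_ipte(mm, addr, ptep);	(i) invalidate the pte
 *		*ptep = new;			(ii) store the new pte
 *	}
 */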

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

struct mmu_gather {
	struct mm_struct *mm;
	struct mmu_table_batch *batch;
	unsigned int fullmm;
	unsigned long start, end;
};

struct mmu_table_batch {
	struct rcu_head rcu;
	unsigned int nr;
	void *tables[0];
};

#define MAX_TABLE_BATCH \
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
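
/*
 * Rough sizing, assuming 4 KB pages and a 16-byte struct rcu_head on
 * 64-bit (an illustration, not guaranteed by this header): the batch
 * header occupies 24 bytes, so MAX_TABLE_BATCH works out to
 * (4096 - 24) / 8 = 509 table pointers per batch page.
 */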

extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

static inline void
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
		    unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->start = start;
	tlb->end = end;
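	/* start == 0 && end == -1UL denotes a full address space teardown */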
	tlb->fullmm = !(start | (end+1));
	tlb->batch = NULL;
}

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	__tlb_flush_mm_lazy(tlb->mm);
}

static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	tlb_table_flush(tlb);
}

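/*
 * Flush the TLB before releasing the gathered page tables, so that no
 * CPU can still form a translation through a table that is about to be
 * freed.
 */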
static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}

static inline void
arch_tlb_finish_mmu(struct mmu_gather *tlb,
		    unsigned long start, unsigned long end, bool force)
{
	if (force) {
		tlb->start = start;
		tlb->end = end;
	}

	tlb_flush_mmu(tlb);
}

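/*
 * A typical gather cycle as driven by the generic mm code (sketch
 * only, with placeholder unmap work in the middle):
 *
 *	struct mmu_gather tlb;
 *
 *	arch_tlb_gather_mmu(&tlb, mm, start, end);
 *	... unmap ptes, batch freed tables via tlb_remove_table() ...
 *	arch_tlb_finish_mmu(&tlb, start, end, false);
 */
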
/*
 * Release the page cache reference for a pte removed by
 * tlb_ptep_clear_flush. In both flush modes the TLB entry for a page
 * cache page has already been flushed, so just do
 * free_page_and_swap_cache.
 */
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	free_page_and_swap_cache(page);
	return false; /* avoid calling tlb_flush_mmu */
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	free_page_and_swap_cache(page);
}

static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	return __tlb_remove_page(tlb, page);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	return tlb_remove_page(tlb, page);
}

/*
 * pte_free_tlb frees a pte table and clears the CRSTE for the
 * page table from the tlb.
 */
static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				unsigned long address)
{
	page_table_free_rcu(tlb, (unsigned long *) pte, address);
}

/*
 * pmd_free_tlb frees a pmd table and clears the CRSTE for the
 * segment table entry from the tlb.
 * If the mm uses a two level page table the single pmd is freed
 * as the pgd. pmd_free_tlb checks the asce_limit against 2GB
 * to avoid the double free of the pmd in this case.
 */
static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				unsigned long address)
{
	if (tlb->mm->context.asce_limit <= (1UL << 31))
		return;
	pgtable_pmd_page_dtor(virt_to_page(pmd));
	tlb_remove_table(tlb, pmd);
}

/*
 * p4d_free_tlb frees a p4d table and clears the CRSTE for the
 * region second table entry from the tlb.
 * If the mm uses a four level page table the single p4d is freed
 * as the pgd. p4d_free_tlb checks the asce_limit against 8PB
 * to avoid the double free of the p4d in this case.
 */
static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
				unsigned long address)
{
	if (tlb->mm->context.asce_limit <= (1UL << 53))
		return;
	tlb_remove_table(tlb, p4d);
}

/*
 * pud_free_tlb frees a pud table and clears the CRSTE for the
 * region third table entry from the tlb.
 * If the mm uses a three level page table the single pud is freed
 * as the pgd. pud_free_tlb checks the asce_limit against 4TB
 * to avoid the double free of the pud in this case.
 */
static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				unsigned long address)
{
	if (tlb->mm->context.asce_limit <= (1UL << 42))
		return;
	tlb_remove_table(tlb, pud);
}
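
/*
 * Summary of the asce_limit guards above, derived from the comments in
 * this file: 2GB (1UL << 31) means a two level table where the pmd
 * doubles as the pgd, 4TB (1UL << 42) a three level table where the
 * pud doubles as the pgd, and 8PB (1UL << 53) a four level table where
 * the p4d doubles as the pgd. In each case the shared table is freed
 * only once, as the pgd.
 */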
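/*
 * The hooks below are no-ops: the flush above goes through
 * __tlb_flush_mm_lazy() and ignores the gathered range, so no per-vma
 * or per-entry range tracking is needed.
 */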
#define tlb_start_vma(tlb, vma)			do { } while (0)
#define tlb_end_vma(tlb, vma)			do { } while (0)
#define tlb_remove_tlb_entry(tlb, ptep, addr)	do { } while (0)
#define tlb_remove_pmd_tlb_entry(tlb, pmdp, addr)	do { } while (0)
#define tlb_migrate_finish(mm)			do { } while (0)
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	tlb_remove_tlb_entry(tlb, ptep, address)
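/*
 * A page size change within one gather needs no action here, since the
 * eventual flush is mm wide either way.
 */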
#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
}

#endif /* _S390_TLB_H */