arch/s390/include/asm/tlb.h
#ifndef _S390_TLB_H
#define _S390_TLB_H

/*
 * TLB flushing on s390 is complicated. The following requirement
 * from the principles of operation is the most arduous:
 *
 * "A valid table entry must not be changed while it is attached
 * to any CPU and may be used for translation by that CPU except to
 * (1) invalidate the entry by using INVALIDATE PAGE TABLE ENTRY,
 * or INVALIDATE DAT TABLE ENTRY, (2) alter bits 56-63 of a page
 * table entry, or (3) make a change by means of a COMPARE AND SWAP
 * AND PURGE instruction that purges the TLB."
 *
 * The modification of a pte of an active mm struct therefore is
 * a two step process: i) invalidate the pte, ii) store the new pte.
 * This is true for the page protection bit as well.
 * The only possible optimization is to flush at the beginning of
 * a tlb_gather_mmu cycle if the mm_struct is currently not in use.
 *
 * Pages used for the page tables are a different story. FIXME: more
 */
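
/*
 * Illustrative sketch of the two step update (the invalidate_pte()
 * helper is hypothetical, standing in for the IPTE/IDTE/CSP based
 * primitives; it is not an interface defined by this header):
 *
 *	invalidate_pte(mm, addr, ptep);	 i) invalidate the pte
 *	set_pte(ptep, new_pte);		 ii) store the new pte
 *
 * rather than a single store to *ptep.
 */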

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

struct mmu_gather {
	struct mm_struct *mm;
	struct mmu_table_batch *batch;
	unsigned int fullmm;
	unsigned long start, end;
};

struct mmu_table_batch {
	struct rcu_head rcu;
	unsigned int nr;
	void *tables[0];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
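
/*
 * Worked example (assuming 4KB pages, 64-bit pointers and a 16 byte
 * struct rcu_head): sizeof(struct mmu_table_batch) is 24 bytes after
 * padding, so MAX_TABLE_BATCH = (4096 - 24) / 8 = 509 table pointers.
 */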

extern void tlb_table_flush(struct mmu_gather *tlb);
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
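
/*
 * Note (behaviour of the s390 mm code, not defined in this header):
 * tlb_remove_table() queues a page table page in tlb->batch; a full
 * batch, or tlb_table_flush(), frees the queued pages after an RCU
 * grace period so that concurrent lockless table walkers are done
 * with them first.
 */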

static inline void
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
		    unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->start = start;
	tlb->end = end;
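	/* start == 0 && end == -1UL encodes a full address space flush */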
	tlb->fullmm = !(start | (end+1));
	tlb->batch = NULL;
}

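/*
 * tlb_flush_mmu_tlbonly() performs the lazy TLB flush for the mm,
 * tlb_flush_mmu_free() frees the page table pages batched for RCU
 * freeing, and tlb_flush_mmu() does both.
 */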
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	__tlb_flush_mm_lazy(tlb->mm);
}

static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	tlb_table_flush(tlb);
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}

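/*
 * Finish the mmu_gather cycle: @force overrides the gathered range
 * with (start, end) before the final flush.
 */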
static inline void
arch_tlb_finish_mmu(struct mmu_gather *tlb,
		unsigned long start, unsigned long end, bool force)
{
	if (force) {
		tlb->start = start;
		tlb->end = end;
	}

	tlb_flush_mmu(tlb);
}

/*
 * Release the page cache reference for a pte removed by
 * ptep_clear_flush. In both flush modes the tlb for a page cache page
 * has already been flushed, so just do free_page_and_swap_cache.
 */
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	free_page_and_swap_cache(page);
	return false; /* avoid calling tlb_flush_mmu */
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	free_page_and_swap_cache(page);
}

static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	return __tlb_remove_page(tlb, page);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	tlb_remove_page(tlb, page);
}

/*
 * pte_free_tlb frees a pte table and clears the CRSTE for the
 * page table from the tlb.
 */
static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
				unsigned long address)
{
	page_table_free_rcu(tlb, (unsigned long *) pte, address);
}

/*
 * pmd_free_tlb frees a pmd table and clears the CRSTE for the
 * segment table entry from the tlb.
 * If the mm uses a two level page table the single pmd is freed
 * as the pgd. pmd_free_tlb checks the asce_limit against 2GB
 * to avoid the double free of the pmd in this case.
 */
static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				unsigned long address)
{
	if (tlb->mm->context.asce_limit <= _REGION3_SIZE)
		return;
	pgtable_pmd_page_dtor(virt_to_page(pmd));
	tlb_remove_table(tlb, pmd);
}

/*
 * p4d_free_tlb frees a p4d table and clears the CRSTE for the
 * region second table entry from the tlb.
 * If the mm uses a four level page table the single p4d is freed
 * as the pgd. p4d_free_tlb checks the asce_limit against 8PB
 * to avoid the double free of the p4d in this case.
 */
static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
				unsigned long address)
{
	if (tlb->mm->context.asce_limit <= _REGION1_SIZE)
		return;
	tlb_remove_table(tlb, p4d);
}

/*
 * pud_free_tlb frees a pud table and clears the CRSTE for the
 * region third table entry from the tlb.
 * If the mm uses a three level page table the single pud is freed
 * as the pgd. pud_free_tlb checks the asce_limit against 4TB
 * to avoid the double free of the pud in this case.
 */
static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				unsigned long address)
{
	if (tlb->mm->context.asce_limit <= _REGION2_SIZE)
		return;
	tlb_remove_table(tlb, pud);
}
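
/*
 * Summary of the limits used above (region sizes from asm/pgtable.h):
 * _REGION3_SIZE = 2^31 (2GB), _REGION2_SIZE = 2^42 (4TB) and
 * _REGION1_SIZE = 2^53 (8PB). At or below the limit the level in
 * question is folded into the pgd and must not be freed twice.
 */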
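/*
 * The hooks below can be no-ops on s390: the TLB is purged when the
 * page table entries themselves are changed (see the two step update
 * described at the top of this file), so neither the per-vma/per-entry
 * macros nor the page size change callback have anything left to do.
 */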
#define tlb_start_vma(tlb, vma)			do { } while (0)
#define tlb_end_vma(tlb, vma)			do { } while (0)
#define tlb_remove_tlb_entry(tlb, ptep, addr)	do { } while (0)
#define tlb_remove_pmd_tlb_entry(tlb, pmdp, addr)	do { } while (0)
#define tlb_migrate_finish(mm)			do { } while (0)
#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	tlb_remove_tlb_entry(tlb, ptep, address)

#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
}

#endif /* _S390_TLB_H */