/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_TLB_H
#define _ASM_IA64_TLB_H
/*
 * Based on <asm-generic/tlb.h>.
 *
 * Copyright (C) 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
/*
 * Removing a translation from a page table (including TLB-shootdown) is a four-step
 * procedure:
 *
 *	(1) Flush (virtual) caches --- ensures virtual memory is coherent with kernel memory
 *	    (this is a no-op on ia64).
 *	(2) Clear the relevant portions of the page-table
 *	(3) Flush the TLBs --- ensures that stale content is gone from CPU TLBs
 *	(4) Release the pages that were freed up in step (2).
 *
 * Note that the ordering of these steps is crucial to avoid races on MP machines.
 *
 * The Linux kernel defines several platform-specific hooks for TLB-shootdown.  When
 * unmapping a portion of the virtual address space, these hooks are called according to
 * the following template:
 *
 *	tlb <- tlb_gather_mmu(mm, start, end);		// start unmap for address space MM
 *	{
 *	  for each vma that needs a shootdown do {
 *	    tlb_start_vma(tlb, vma);
 *	      for each page-table-entry PTE that needs to be removed do {
 *		tlb_remove_tlb_entry(tlb, pte, address);
 *		if (pte refers to a normal page) {
 *		  tlb_remove_page(tlb, page);
 *		}
 *	      }
 *	    tlb_end_vma(tlb, vma);
 *	  }
 *	}
 *	tlb_finish_mmu(tlb, start, end);	// finish unmap for address space MM
 */
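
/*
 * Orientation note (a reading of the helpers defined below, not part of the
 * original comment): in this file, step (3) is carried out by
 * ia64_tlb_flush_mmu_tlbonly() and step (4) by ia64_tlb_flush_mmu_free(),
 * with ia64_tlb_flush_mmu() driving both once something was actually unmapped.
 */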
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/machvec.h>

/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define IA64_GATHER_BUNDLE	8

struct mmu_gather {
	struct mm_struct	*mm;
	unsigned int		nr;
	unsigned int		max;
	unsigned char		fullmm;		/* non-zero means full mm flush */
	unsigned char		need_flush;	/* really unmapped some PTEs? */
	unsigned long		start, end;
	unsigned long		start_addr;
	unsigned long		end_addr;
	struct page		**pages;
	struct page		*local[IA64_GATHER_BUNDLE];
};
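
/*
 * Field note (summarizing the helpers below, not text from the original
 * header): pages[] is the batch of pages whose freeing is deferred until after
 * the TLB flush.  It starts out pointing at the small on-stack local[] array
 * and is switched to a freshly allocated page by __tlb_alloc_page() when a
 * bigger batch is needed; nr counts the pages gathered so far and max is the
 * current batch capacity.
 */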

struct ia64_tr_entry {
	u64 ifa;
	u64 itir;
	u64 pte;
	u64 rr;
}; /* record for a translation register (TR) entry */

extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
extern void ia64_ptr_entry(u64 target_mask, int slot);
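
/*
 * These two helpers are declared here but defined elsewhere in the ia64 tree;
 * the following summary is an assumption from their signatures and use, not
 * from the original header: ia64_itr_entry() appears to pin a va -> pte
 * translation of size 2^log_size into the translation registers selected by
 * target_mask and to return the slot it used, while ia64_ptr_entry() appears
 * to purge that slot again.
 */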

extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];

/*
 * Region register macros
 */
#define RR_TO_VE(val)	(((val) >> 0) & 0x0000000000000001)
#define RR_VE(val)	(((val) & 0x0000000000000001) << 0)
#define RR_VE_MASK	0x0000000000000001L
#define RR_VE_SHIFT	0
#define RR_TO_PS(val)	(((val) >> 2) & 0x000000000000003f)
#define RR_PS(val)	(((val) & 0x000000000000003f) << 2)
#define RR_PS_MASK	0x00000000000000fcL
#define RR_PS_SHIFT	2
#define RR_RID_MASK	0x00000000ffffff00L
#define RR_TO_RID(val)	((val >> 8) & 0xffffff)
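
/*
 * Usage sketch (illustrative only; the field layout is taken from the macros
 * above): given a region-register value rr,
 *
 *	u64 ve  = RR_TO_VE(rr);		// bit 0: VHPT walker enable
 *	u64 ps  = RR_TO_PS(rr);		// bits 2..7: preferred page size (log2, bytes)
 *	u64 rid = RR_TO_RID(rr);	// bits 8..31: region ID
 *
 * and a value with the same fields can be rebuilt with
 *
 *	u64 rr2 = RR_VE(ve) | RR_PS(ps) | ((rid << 8) & RR_RID_MASK);
 */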

static inline void
ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	tlb->need_flush = 0;

	if (tlb->fullmm) {
		/*
		 * Tearing down the entire address space.  This happens both as a result
		 * of exit() and execve().  The latter case necessitates the call to
		 * flush_tlb_mm() here.
		 */
		flush_tlb_mm(tlb->mm);
	} else if (unlikely (end - start >= 1024*1024*1024*1024UL
			     || REGION_NUMBER(start) != REGION_NUMBER(end - 1)))
	{
		/*
		 * If we flush more than a tera-byte or across regions, we're probably
		 * better off just flushing the entire TLB(s).  This should be very rare
		 * and is not worth optimizing for.
		 */
		flush_tlb_all();
	} else {
		/*
		 * XXX fix me: flush_tlb_range() should take an mm pointer instead of a
		 * vma pointer.
		 */
		struct vm_area_struct vma;

		vma.vm_mm = tlb->mm;
		/* flush the address range from the tlb: */
		flush_tlb_range(&vma, start, end);
		/* now flush the virt. page-table area mapping the address range: */
		flush_tlb_range(&vma, ia64_thash(start), ia64_thash(end));
	}

}
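
/*
 * Worked instance of the checks above (illustrative numbers, not from the
 * original comment): on ia64 the region number is the top three bits of a
 * virtual address, so a flush of 0x2000000000000000..0x4000000000000010
 * crosses from region 1 into region 2 and takes the flush_tlb_all() path, as
 * does any range of a terabyte (2^40 bytes) or more; smaller, region-local
 * ranges go through flush_tlb_range().
 */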

static inline void
ia64_tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	unsigned long i;
	unsigned int nr;

	/* lastly, release the freed pages */
	nr = tlb->nr;

	tlb->nr = 0;
	tlb->start_addr = ~0UL;
	for (i = 0; i < nr; ++i)
		free_page_and_swap_cache(tlb->pages[i]);
}

/*
 * Flush the TLB for address range START to END and, if not in fast mode, release the
 * freed pages that were gathered up to this point.
 */
static inline void
ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
	if (!tlb->need_flush)
		return;
	ia64_tlb_flush_mmu_tlbonly(tlb, start, end);
	ia64_tlb_flush_mmu_free(tlb);
}

static inline void __tlb_alloc_page(struct mmu_gather *tlb)
{
	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);

	if (addr) {
		tlb->pages = (void *)addr;
		tlb->max = PAGE_SIZE / sizeof(void *);
	}
}


static inline void
arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
		    unsigned long start, unsigned long end)
{
	tlb->mm = mm;
	tlb->max = ARRAY_SIZE(tlb->local);
	tlb->pages = tlb->local;
	tlb->nr = 0;
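	/*
	 * Note (not in the original header): the expression below is non-zero only
	 * when start == 0 and end == ~0UL, i.e. when the caller is tearing down the
	 * entire address space, which is exactly the fullmm case.
	 */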
	tlb->fullmm = !(start | (end+1));
	tlb->start = start;
	tlb->end = end;
	tlb->start_addr = ~0UL;
}

/*
 * Called at the end of the shootdown operation to free up any resources that were
 * collected.
 */
static inline void
arch_tlb_finish_mmu(struct mmu_gather *tlb,
		unsigned long start, unsigned long end, bool force)
{
	if (force)
		tlb->need_flush = 1;
	/*
	 * Note: tlb->nr may be 0 at this point, so we can't rely on tlb->start_addr and
	 * tlb->end_addr.
	 */
	ia64_tlb_flush_mmu(tlb, start, end);

	/* keep the page table cache within bounds */
	check_pgt_cache();

	if (tlb->pages != tlb->local)
		free_pages((unsigned long)tlb->pages, 0);
}

/*
 * Logically, this routine frees PAGE.  On MP machines, the actual freeing of the page
 * must be delayed until after the TLB has been flushed (see comments at the beginning of
 * this file).
 */
static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	tlb->need_flush = 1;

	if (!tlb->nr && tlb->pages == tlb->local)
		__tlb_alloc_page(tlb);

	tlb->pages[tlb->nr++] = page;
	VM_WARN_ON(tlb->nr > tlb->max);
	if (tlb->nr == tlb->max)
		return true;
	return false;
}

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	ia64_tlb_flush_mmu_tlbonly(tlb, tlb->start_addr, tlb->end_addr);
}

static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	ia64_tlb_flush_mmu_free(tlb);
}

static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
	ia64_tlb_flush_mmu(tlb, tlb->start_addr, tlb->end_addr);
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	if (__tlb_remove_page(tlb, page))
		tlb_flush_mmu(tlb);
}

static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
					  struct page *page, int page_size)
{
	return __tlb_remove_page(tlb, page);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	return tlb_remove_page(tlb, page);
}

/*
 * Remove TLB entry for PTE mapped at virtual address ADDRESS.  This is called for any
 * PTE, not just those pointing to (normal) physical memory.
 */
static inline void
__tlb_remove_tlb_entry (struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
	if (tlb->start_addr == ~0UL)
		tlb->start_addr = address;
	tlb->end_addr = address + PAGE_SIZE;
}
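
/*
 * Illustrative note (derived from the code above, not from the original
 * comment): successive calls accumulate the virtual range being unmapped, so
 * after removing the PTEs for addresses A, A + PAGE_SIZE, ..., B, the gather
 * ends up with start_addr == A and end_addr == B + PAGE_SIZE, which is the
 * range that tlb_flush_mmu() later hands to ia64_tlb_flush_mmu().
 */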

#define tlb_migrate_finish(mm)	platform_tlb_migrate_finish(mm)

#define tlb_start_vma(tlb, vma)			do { } while (0)
#define tlb_end_vma(tlb, vma)			do { } while (0)

#define tlb_remove_tlb_entry(tlb, ptep, addr)		\
do {							\
	tlb->need_flush = 1;				\
	__tlb_remove_tlb_entry(tlb, ptep, addr);	\
} while (0)

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	tlb_remove_tlb_entry(tlb, ptep, address)

#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
						     unsigned int page_size)
{
}

#define pte_free_tlb(tlb, ptep, address)	\
do {						\
	tlb->need_flush = 1;			\
	__pte_free_tlb(tlb, ptep, address);	\
} while (0)

#define pmd_free_tlb(tlb, ptep, address)	\
do {						\
	tlb->need_flush = 1;			\
	__pmd_free_tlb(tlb, ptep, address);	\
} while (0)

#define pud_free_tlb(tlb, pudp, address)	\
do {						\
	tlb->need_flush = 1;			\
	__pud_free_tlb(tlb, pudp, address);	\
} while (0)

#endif /* _ASM_IA64_TLB_H */