/*
 * TLB flush routines for radix kernels.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
#include <asm/ppc-opcode.h>

#include <asm/tlb.h>
#include <asm/tlbflush.h>

static DEFINE_RAW_SPINLOCK(native_tlbie_lock);

#define RIC_FLUSH_TLB 0
#define RIC_FLUSH_PWC 1
#define RIC_FLUSH_ALL 2
static inline void __tlbiel_pid(unsigned long pid, int set,
                                unsigned long ric)
{
        unsigned long rb, rs, prs, r;

        rb = PPC_BIT(53); /* IS = 1 */
        rb |= set << PPC_BITLSHIFT(51);
        rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
        prs = 1; /* process scoped */
        r = 1;   /* radix format */

        asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
}

/*
 * We use 128 sets in radix mode and 256 sets in hpt mode.
 */
static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{
        int set;

        asm volatile("ptesync": : :"memory");

        /*
         * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL,
         * also flush the entire Page Walk Cache.
         */
        __tlbiel_pid(pid, 0, ric);

        /* For PWC, only one flush is needed */
        if (ric == RIC_FLUSH_PWC) {
                asm volatile("ptesync": : :"memory");
                return;
        }

        /* For the remaining sets, just flush the TLB */
        for (set = 1; set < POWER9_TLB_SETS_RADIX; set++)
                __tlbiel_pid(pid, set, RIC_FLUSH_TLB);

        asm volatile("ptesync": : :"memory");
        asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}
static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
{
        unsigned long rb, rs, prs, r;

        rb = PPC_BIT(53); /* IS = 1 */
        rs = pid << PPC_BITLSHIFT(31);
        prs = 1; /* process scoped */
        r = 1;   /* radix format */

        asm volatile("ptesync": : :"memory");
        asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
        asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
static inline void _tlbiel_va(unsigned long va, unsigned long pid,
                              unsigned long ap, unsigned long ric)
{
        unsigned long rb, rs, prs, r;

        rb = va & ~(PPC_BITMASK(52, 63));
        rb |= ap << PPC_BITLSHIFT(58);
        rs = pid << PPC_BITLSHIFT(31);
        prs = 1; /* process scoped */
        r = 1;   /* radix format */

        asm volatile("ptesync": : :"memory");
        asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
        asm volatile("ptesync": : :"memory");
}
static inline void _tlbie_va(unsigned long va, unsigned long pid,
                             unsigned long ap, unsigned long ric)
{
        unsigned long rb, rs, prs, r;

        rb = va & ~(PPC_BITMASK(52, 63));
        rb |= ap << PPC_BITLSHIFT(58);
        rs = pid << PPC_BITLSHIFT(31);
        prs = 1; /* process scoped */
        r = 1;   /* radix format */

        asm volatile("ptesync": : :"memory");
        asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
        asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */
void radix__local_flush_tlb_mm(struct mm_struct *mm)
{
        unsigned long pid;

        preempt_disable();
        pid = mm->context.id;
        if (pid != MMU_NO_CONTEXT)
                _tlbiel_pid(pid, RIC_FLUSH_ALL);
        preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_mm);

void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
        unsigned long pid;
        struct mm_struct *mm = tlb->mm;

        /*
         * If we are doing a full mm flush, we will do a tlb flush
         * with RIC_FLUSH_ALL later.
         */
        if (tlb->fullmm)
                return;

        preempt_disable();

        pid = mm->context.id;
        if (pid != MMU_NO_CONTEXT)
                _tlbiel_pid(pid, RIC_FLUSH_PWC);

        preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_pwc);

void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
                                       int psize)
{
        unsigned long pid;
        unsigned long ap = mmu_get_ap(psize);

        preempt_disable();
        pid = mm ? mm->context.id : 0;
        if (pid != MMU_NO_CONTEXT)
                _tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
        preempt_enable();
}

void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
        /* need the return fix for nohash.c */
        if (vma && is_vm_hugetlb_page(vma))
                return __local_flush_hugetlb_page(vma, vmaddr);
#endif
        radix__local_flush_tlb_page_psize(vma ? vma->vm_mm : NULL, vmaddr,
                                          mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__local_flush_tlb_page);

#ifdef CONFIG_SMP
void radix__flush_tlb_mm(struct mm_struct *mm)
{
        unsigned long pid;

        preempt_disable();
        pid = mm->context.id;
        if (unlikely(pid == MMU_NO_CONTEXT))
                goto no_context;

        if (!mm_is_thread_local(mm)) {
                int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

                if (lock_tlbie)
                        raw_spin_lock(&native_tlbie_lock);
                _tlbie_pid(pid, RIC_FLUSH_ALL);
                if (lock_tlbie)
                        raw_spin_unlock(&native_tlbie_lock);
        } else
                _tlbiel_pid(pid, RIC_FLUSH_ALL);
no_context:
        preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_mm);

void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
        unsigned long pid;
        struct mm_struct *mm = tlb->mm;

        /*
         * If we are doing a full mm flush, we will do a tlb flush
         * with RIC_FLUSH_ALL later.
         */
        if (tlb->fullmm)
                return;
        preempt_disable();

        pid = mm->context.id;
        if (unlikely(pid == MMU_NO_CONTEXT))
                goto no_context;

        if (!mm_is_thread_local(mm)) {
                int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

                if (lock_tlbie)
                        raw_spin_lock(&native_tlbie_lock);
                _tlbie_pid(pid, RIC_FLUSH_PWC);
                if (lock_tlbie)
                        raw_spin_unlock(&native_tlbie_lock);
        } else
                _tlbiel_pid(pid, RIC_FLUSH_PWC);
no_context:
        preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_pwc);

void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
                                 int psize)
{
        unsigned long pid;
        unsigned long ap = mmu_get_ap(psize);

        preempt_disable();
        pid = mm ? mm->context.id : 0;
        if (unlikely(pid == MMU_NO_CONTEXT))
                goto bail;
        if (!mm_is_thread_local(mm)) {
                int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

                if (lock_tlbie)
                        raw_spin_lock(&native_tlbie_lock);
                _tlbie_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
                if (lock_tlbie)
                        raw_spin_unlock(&native_tlbie_lock);
        } else
                _tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
bail:
        preempt_enable();
}

void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
        if (vma && is_vm_hugetlb_page(vma))
                return flush_hugetlb_page(vma, vmaddr);
#endif
        radix__flush_tlb_page_psize(vma ? vma->vm_mm : NULL, vmaddr,
                                    mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__flush_tlb_page);

#endif /* CONFIG_SMP */
void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

        if (lock_tlbie)
                raw_spin_lock(&native_tlbie_lock);
        _tlbie_pid(0, RIC_FLUSH_ALL);
        if (lock_tlbie)
                raw_spin_unlock(&native_tlbie_lock);
}
EXPORT_SYMBOL(radix__flush_tlb_kernel_range);

/*
 * Currently, for range flushing, we just do a full mm flush, because
 * we use this in code paths where we don't track the page size.
 */
void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                            unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        radix__flush_tlb_mm(mm);
}
EXPORT_SYMBOL(radix__flush_tlb_range);

static int radix_get_mmu_psize(int page_size)
{
        int psize;

        if (page_size == (1UL << mmu_psize_defs[mmu_virtual_psize].shift))
                psize = mmu_virtual_psize;
        else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_2M].shift))
                psize = MMU_PAGE_2M;
        else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_1G].shift))
                psize = MMU_PAGE_1G;
        else
                return -1;
        return psize;
}
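
/*
 * radix__tlb_flush() is the mmu_gather completion hook: it runs once a
 * batched unmap has finished and decides between a page-size aware range
 * flush and a full mm flush.
 */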
void radix__tlb_flush(struct mmu_gather *tlb)
{
        int psize = 0;
        struct mm_struct *mm = tlb->mm;
        int page_size = tlb->page_size;

        psize = radix_get_mmu_psize(page_size);
        /*
         * If the page size is not something we understand, do a full mm flush.
         */
        if (psize != -1 && !tlb->fullmm && !tlb->need_flush_all)
                radix__flush_tlb_range_psize(mm, tlb->start, tlb->end, psize);
        else
                radix__flush_tlb_mm(mm);
}
#define TLB_FLUSH_ALL -1UL
/*
 * Number of pages above which we do a full-PID flush instead of flushing
 * page by page. Just a number at this point, copied from x86.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
                                  unsigned long end, int psize)
{
        unsigned long pid;
        unsigned long addr;
        int local = mm_is_thread_local(mm);
        unsigned long ap = mmu_get_ap(psize);
        int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
        unsigned long page_size = 1UL << mmu_psize_defs[psize].shift;

        preempt_disable();
        pid = mm ? mm->context.id : 0;
        if (unlikely(pid == MMU_NO_CONTEXT))
                goto err_out;

        if (end == TLB_FLUSH_ALL ||
            (end - start) > tlb_single_page_flush_ceiling * page_size) {
                if (local)
                        _tlbiel_pid(pid, RIC_FLUSH_TLB);
                else
                        _tlbie_pid(pid, RIC_FLUSH_TLB);
                goto err_out;
        }
        for (addr = start; addr < end; addr += page_size) {
                if (local)
                        _tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
                else {
                        if (lock_tlbie)
                                raw_spin_lock(&native_tlbie_lock);
                        _tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
                        if (lock_tlbie)
                                raw_spin_unlock(&native_tlbie_lock);
                }
        }
err_out:
        preempt_enable();
}
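
/*
 * The two lpid flushes below are partition scoped (prs = 0): they invalidate
 * translations of guest real addresses cached under a given LPID, and are
 * exported for hypervisor use (e.g. KVM) rather than for process flushes.
 */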
void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
                              unsigned long page_size)
{
        unsigned long rb, rs, prs, r;
        unsigned long ap;
        unsigned long ric = RIC_FLUSH_TLB;

        ap = mmu_get_ap(radix_get_mmu_psize(page_size));
        rb = gpa & ~(PPC_BITMASK(52, 63));
        rb |= ap << PPC_BITLSHIFT(58);
        rs = lpid & ((1UL << 32) - 1);
        prs = 0; /* partition scoped */
        r = 1;   /* radix format */

        asm volatile("ptesync": : :"memory");
        asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
        asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
EXPORT_SYMBOL(radix__flush_tlb_lpid_va);

void radix__flush_tlb_lpid(unsigned long lpid)
{
        unsigned long rb, rs, prs, r;
        unsigned long ric = RIC_FLUSH_ALL;

        rb = 0x2 << PPC_BITLSHIFT(53); /* IS = 2 */
        rs = lpid & ((1UL << 32) - 1);
        prs = 0; /* partition scoped */
        r = 1;   /* radix format */

        asm volatile("ptesync": : :"memory");
        asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
        asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
EXPORT_SYMBOL(radix__flush_tlb_lpid);
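
/*
 * Flush a range at the 2M (PMD) page size; this backs flush_pmd_tlb_range(),
 * e.g. for transparent hugepage operations.
 */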
void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
                                unsigned long start, unsigned long end)
{
        radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
}
EXPORT_SYMBOL(radix__flush_pmd_tlb_range);

void radix__flush_tlb_all(void)
{
        unsigned long rb, prs, r, rs;
        unsigned long ric = RIC_FLUSH_ALL;

        rb = 0x3 << PPC_BITLSHIFT(53); /* IS = 3 */
        prs = 0; /* partition scoped */
        r = 1;   /* radix format */
        rs = 1 & ((1UL << 32) - 1); /* any LPID value to flush guest mappings */

        asm volatile("ptesync": : :"memory");
        /*
         * Now flush guest entries by passing PRS = 1 and LPID != 0.
         */
        asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory");
        /*
         * Now flush host entries by passing PRS = 0 and LPID == 0.
         */
        asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
        asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
                                 unsigned long address)
{
        /*
         * We track the page size in the pte only for POWER9 DD1,
         * so this should only be called on DD1.
         */
        if (!cpu_has_feature(CPU_FTR_POWER9_DD1)) {
                VM_WARN_ON(1);
                return;
        }

        if (old_pte & _PAGE_LARGE)
                radix__flush_tlb_page_psize(mm, address, MMU_PAGE_2M);
        else
                radix__flush_tlb_page_psize(mm, address, mmu_virtual_psize);
}