/*
 * TLB flush routines for radix kernels.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
13 #include <linux/hugetlb.h>
14 #include <linux/memblock.h>
15 #include <asm/ppc-opcode.h>
18 #include <asm/tlbflush.h>
/* Serialises broadcast tlbie when the MMU cannot handle concurrent tlbies. */
static DEFINE_RAW_SPINLOCK(native_tlbie_lock);

/* Values for the RIC (Radix Invalidation Control) field of tlbie[l]. */
#define RIC_FLUSH_TLB 0		/* invalidate TLB entries only */
#define RIC_FLUSH_PWC 1		/* invalidate page walk cache only */
#define RIC_FLUSH_ALL 2		/* invalidate both TLB and page walk cache */
/*
 * Invalidate one TLB congruence class (@set) for @pid on the local CPU
 * using tlbiel.  @ric (RIC_FLUSH_*) selects TLB / PWC / both.
 * The surrounding ptesyncs order prior PTE updates before the invalidate
 * and complete the invalidate before subsequent accesses.
 */
static inline void __tlbiel_pid(unsigned long pid, int set,
				unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = PPC_BIT(53); /* IS = 1 */
	rb |= set << PPC_BITLSHIFT(51);
	rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("ptesync": : :"memory");
}
/*
 * We use 128 set in radix mode and 256 set in hpt mode.
 */
static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{
	int set;

	/* tlbiel only touches one set at a time, so walk every set. */
	for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
		__tlbiel_pid(pid, set, ric);
	}
	/* POWER9 DD1 additionally needs the ERAT invalidated explicitly. */
	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
		asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
}
/*
 * Broadcast-invalidate all TLB entries for @pid on every CPU using tlbie.
 * @ric (RIC_FLUSH_*) selects TLB / PWC / both.
 */
static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = PPC_BIT(53); /* IS = 1 */
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	/* Wait for the broadcast invalidation to complete on all CPUs. */
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
/*
 * Invalidate the TLB entry for one effective address @va of @pid on the
 * local CPU.  @ap is the actual-page-size encoding for the rb field.
 */
static inline void _tlbiel_va(unsigned long va, unsigned long pid,
			      unsigned long ap, unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("ptesync": : :"memory");
	/* POWER9 DD1 additionally needs the ERAT invalidated explicitly. */
	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
		asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
}
/*
 * Broadcast-invalidate the TLB entry for one effective address @va of
 * @pid on every CPU.  @ap is the actual-page-size encoding for rb.
 */
static inline void _tlbie_va(unsigned long va, unsigned long pid,
			     unsigned long ap, unsigned long ric)
{
	unsigned long rb,rs,prs,r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	/* Wait for the broadcast invalidation to complete on all CPUs. */
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */
/* Flush all TLB entries and the page walk cache for @mm on this CPU only. */
void radix__local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	/* Stay on this CPU so the local tlbiel hits the right TLB. */
	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_mm);
/*
 * Flush only the page walk cache for the mm behind @tlb on this CPU.
 * @addr is unused here; kept for interface compatibility with callers.
 */
void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
	unsigned long pid;
	struct mm_struct *mm = tlb->mm;

	/* Stay on this CPU so the local tlbiel hits the right TLB. */
	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_PWC);
	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_pwc);
/*
 * Flush the TLB entry for one page @vmaddr of size @psize in @mm on this
 * CPU only.  @mm may be NULL (treated as pid 0).
 */
void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				       int psize)
{
	unsigned long pid;
	unsigned long ap = mmu_get_ap(psize);

	/* Stay on this CPU so the local tlbiel hits the right TLB. */
	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
	preempt_enable();
}
/* Flush one base-page-size page of @vma on this CPU; hugetlb VMAs take the
 * hugetlb path instead. */
void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	/* need the return fix for nohash.c */
	if (vma && is_vm_hugetlb_page(vma))
		return __local_flush_hugetlb_page(vma, vmaddr);
#endif
	radix__local_flush_tlb_page_psize(vma ? vma->vm_mm : NULL, vmaddr,
					  mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__local_flush_tlb_page);
#ifdef CONFIG_SMP
/*
 * Flush all TLB entries and the page walk cache for @mm.  Uses a local
 * tlbiel when the mm has only run on this CPU, otherwise a broadcast
 * tlbie (serialised by native_tlbie_lock when the MMU requires it).
 */
void radix__flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	/* Keep the local/remote decision and the flush on one CPU. */
	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;

	if (!mm_is_thread_local(mm)) {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);
		_tlbie_pid(pid, RIC_FLUSH_ALL);
		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	} else
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
no_context:
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_mm);
/*
 * Flush only the page walk cache for the mm behind @tlb, locally or by
 * broadcast depending on where the mm has run.  @addr is unused.
 */
void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
	unsigned long pid;
	struct mm_struct *mm = tlb->mm;

	/* Keep the local/remote decision and the flush on one CPU. */
	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;

	if (!mm_is_thread_local(mm)) {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);
		_tlbie_pid(pid, RIC_FLUSH_PWC);
		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	} else
		_tlbiel_pid(pid, RIC_FLUSH_PWC);
no_context:
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_pwc);
/*
 * Flush the TLB entry for one page @vmaddr of size @psize in @mm, locally
 * or by broadcast depending on where the mm has run.  @mm may be NULL
 * (treated as pid 0).
 */
void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				 int psize)
{
	unsigned long pid;
	unsigned long ap = mmu_get_ap(psize);

	/* Keep the local/remote decision and the flush on one CPU. */
	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;

	if (!mm_is_thread_local(mm)) {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);
		_tlbie_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	} else
		_tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
bail:
	preempt_enable();
}
/* Flush one base-page-size page of @vma on all CPUs; hugetlb VMAs take the
 * hugetlb path instead. */
void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (vma && is_vm_hugetlb_page(vma))
		return flush_hugetlb_page(vma, vmaddr);
#endif
	radix__flush_tlb_page_psize(vma ? vma->vm_mm : NULL, vmaddr,
				    mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__flush_tlb_page);

#endif /* CONFIG_SMP */
/*
 * Flush kernel mappings: a broadcast full flush of pid 0.  The @start/@end
 * range is not used — the whole kernel context is invalidated.
 */
void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (lock_tlbie)
		raw_spin_lock(&native_tlbie_lock);
	_tlbie_pid(0, RIC_FLUSH_ALL);
	if (lock_tlbie)
		raw_spin_unlock(&native_tlbie_lock);
}
EXPORT_SYMBOL(radix__flush_tlb_kernel_range);
273 * Currently, for range flushing, we just do a full mm flush. Because
274 * we use this in code path where we don' track the page size.
276 void radix__flush_tlb_range(struct vm_area_struct
*vma
, unsigned long start
,
280 struct mm_struct
*mm
= vma
->vm_mm
;
281 radix__flush_tlb_mm(mm
);
283 EXPORT_SYMBOL(radix__flush_tlb_range
);
285 static int radix_get_mmu_psize(int page_size
)
289 if (page_size
== (1UL << mmu_psize_defs
[mmu_virtual_psize
].shift
))
290 psize
= mmu_virtual_psize
;
291 else if (page_size
== (1UL << mmu_psize_defs
[MMU_PAGE_2M
].shift
))
293 else if (page_size
== (1UL << mmu_psize_defs
[MMU_PAGE_1G
].shift
))
/*
 * mmu_gather flush hook: use a ranged per-page-size flush when the gather
 * tracked a single page size we understand, otherwise flush the whole mm.
 */
void radix__tlb_flush(struct mmu_gather *tlb)
{
	int psize = 0;
	struct mm_struct *mm = tlb->mm;
	int page_size = tlb->page_size;

	psize = radix_get_mmu_psize(page_size);
	/*
	 * if page size is not something we understand, do a full mm flush
	 */
	if (psize != -1 && !tlb->fullmm && !tlb->need_flush_all)
		radix__flush_tlb_range_psize(mm, tlb->start, tlb->end, psize);
	else
		radix__flush_tlb_mm(mm);
}
/* Sentinel "end" value meaning flush the entire address space. */
#define TLB_FLUSH_ALL -1UL

/*
 * Number of pages above which we will do a bcast tlbie. Just a
 * number at this point copied from x86
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
/*
 * Flush the range [@start, @end) of @psize pages in @mm.  Small ranges are
 * flushed page by page; ranges above tlb_single_page_flush_ceiling pages
 * (or TLB_FLUSH_ALL) fall back to a full per-pid TLB flush.  Local tlbiel
 * is used when the mm has only run on this CPU, otherwise broadcast tlbie.
 */
void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
				  unsigned long end, int psize)
{
	unsigned long pid;
	unsigned long addr;
	int local = mm_is_thread_local(mm);
	unsigned long ap = mmu_get_ap(psize);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
	unsigned long page_size = 1UL << mmu_psize_defs[psize].shift;

	/* Keep the local/remote decision and the flushes on one CPU. */
	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto err_out;

	/* Large (or unbounded) ranges: cheaper to flush the whole pid. */
	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling * page_size) {
		if (local)
			_tlbiel_pid(pid, RIC_FLUSH_TLB);
		else
			_tlbie_pid(pid, RIC_FLUSH_TLB);
		goto err_out;
	}
	for (addr = start; addr < end; addr += page_size) {
		if (local)
			_tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
		else {
			if (lock_tlbie)
				raw_spin_lock(&native_tlbie_lock);
			_tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
			if (lock_tlbie)
				raw_spin_unlock(&native_tlbie_lock);
		}
	}
err_out:
	preempt_enable();
}
/*
 * Broadcast-invalidate the partition-scoped TLB entry for guest physical
 * address @gpa of partition @lpid, for a page of @page_size bytes.
 */
void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
			      unsigned long page_size)
{
	unsigned long rb,rs,prs,r;
	unsigned long ap;
	unsigned long ric = RIC_FLUSH_TLB;

	ap = mmu_get_ap(radix_get_mmu_psize(page_size));
	rb = gpa & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = lpid & ((1UL << 32) - 1);
	prs = 0; /* partition scoped */
	r = 1;   /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	/* Wait for the broadcast invalidation to complete on all CPUs. */
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
EXPORT_SYMBOL(radix__flush_tlb_lpid_va);
/*
 * Broadcast-invalidate all partition-scoped TLB entries and the page walk
 * cache for partition @lpid (IS = 2: all entries matching LPID).
 */
void radix__flush_tlb_lpid(unsigned long lpid)
{
	unsigned long rb,rs,prs,r;
	unsigned long ric = RIC_FLUSH_ALL;

	rb = 0x2 << PPC_BITLSHIFT(53); /* IS = 2 */
	rs = lpid & ((1UL << 32) - 1);
	prs = 0; /* partition scoped */
	r = 1;   /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	/* Wait for the broadcast invalidation to complete on all CPUs. */
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
EXPORT_SYMBOL(radix__flush_tlb_lpid);
/* Flush a range of 2M (PMD-level) mappings in @vma's mm. */
void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
}
EXPORT_SYMBOL(radix__flush_pmd_tlb_range);
/*
 * Broadcast-invalidate everything: both guest (process-scoped, LPID != 0)
 * and host (partition-scoped, LPID == 0) TLB entries and page walk caches
 * on all CPUs (IS = 3).
 */
void radix__flush_tlb_all(void)
{
	unsigned long rb,prs,r,rs;
	unsigned long ric = RIC_FLUSH_ALL;

	rb = 0x3 << PPC_BITLSHIFT(53); /* IS = 3 */
	prs = 0; /* partition scoped */
	r = 1;   /* radix format */
	rs = 1 & ((1UL << 32) - 1); /* any LPID value to flush guest mappings */

	asm volatile("ptesync": : :"memory");
	/*
	 * now flush guest entries by passing PRS = 1 and LPID != 0
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory");
	/*
	 * now flush host entries by passing PRS = 0 and LPID == 0
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
432 void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte
, struct mm_struct
*mm
,
433 unsigned long address
)
436 * We track page size in pte only for DD1, So we can
437 * call this only on DD1.
439 if (!cpu_has_feature(CPU_FTR_POWER9_DD1
)) {
444 if (old_pte
& _PAGE_LARGE
)
445 radix__flush_tlb_page_psize(mm
, address
, MMU_PAGE_2M
);
447 radix__flush_tlb_page_psize(mm
, address
, mmu_virtual_psize
);