/*
 * TLB flush routines for radix kernels.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>

#include <asm/ppc-opcode.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/trace.h>

#define RIC_FLUSH_TLB 0
#define RIC_FLUSH_PWC 1
#define RIC_FLUSH_ALL 2
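
/*
 * The RIC (Radix Invalidation Control) field of tlbie[l] selects what is
 * invalidated: 0 = TLB entries only, 1 = page-walk cache only, 2 = both
 * the TLB and the page-walk cache.
 */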
static inline void __tlbiel_pid(unsigned long pid, int set,
				unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = PPC_BIT(53); /* IS = 1 */
	rb |= set << PPC_BITLSHIFT(51);
	rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 1, rb, rs, ric, prs, r);
}
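
/*
 * Note: tlbiel only invalidates translations cached by the issuing
 * thread's core; the tlbie used by the _tlbie_* helpers below is
 * broadcast to all processors in the system.
 */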
/*
 * We use 128 sets in radix mode and 256 sets in hpt mode.
 */
static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{
	int set;

	asm volatile("ptesync": : :"memory");

	/*
	 * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL,
	 * also flush the entire Page Walk Cache.
	 */
	__tlbiel_pid(pid, 0, ric);

	if (ric == RIC_FLUSH_ALL)
		/* For the remaining sets, just flush the TLB */
		ric = RIC_FLUSH_TLB;

	for (set = 1; set < POWER9_TLB_SETS_RADIX; set++)
		__tlbiel_pid(pid, set, ric);

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}
static inline void tlbiel_pwc(unsigned long pid)
{
	asm volatile("ptesync": : :"memory");

	/* For PWC flush, we don't look at set number */
	__tlbiel_pid(pid, 0, RIC_FLUSH_PWC);

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}
static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = PPC_BIT(53); /* IS = 1 */
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
}
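
/*
 * For the VA-based flushes below, RB carries the effective address and
 * the AP (actual page size) encoding in addition to the IS field, while
 * RS still carries the PID; PRS = 1 selects process-scoped entries and
 * R = 1 selects radix-format entries.
 */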
static inline void _tlbiel_va(unsigned long va, unsigned long pid,
			      unsigned long ap, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("ptesync": : :"memory");
	trace_tlbie(0, 1, rb, rs, ric, prs, r);
}
static inline void _tlbie_va(unsigned long va, unsigned long pid,
			     unsigned long ap, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1;   /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
}
/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */
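
/*
 * For example, a hypothetical caller that has just cleared a PTE would
 * pick the variant matching the required scope:
 *
 *	radix__flush_tlb_page(vma, addr);	// all CPUs (on SMP)
 *	radix__local_flush_tlb_page(vma, addr);	// this CPU only
 */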
void radix__local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_mm);
void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
	unsigned long pid;
	struct mm_struct *mm = tlb->mm;

	/*
	 * If we are doing a full mm flush, we will do a tlb flush
	 * with RIC_FLUSH_ALL later.
	 */
	if (tlb->fullmm)
		return;

	preempt_disable();

	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		tlbiel_pwc(pid);

	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_pwc);
void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				       int psize)
{
	unsigned long pid;
	unsigned long ap = mmu_get_ap(psize);

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
	preempt_enable();
}
void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	/* need the return fix for nohash.c */
	if (vma && is_vm_hugetlb_page(vma))
		return __local_flush_hugetlb_page(vma, vmaddr);
#endif
	radix__local_flush_tlb_page_psize(vma ? vma->vm_mm : NULL, vmaddr,
					  mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__local_flush_tlb_page);
#ifdef CONFIG_SMP
void radix__flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;

	if (!mm_is_thread_local(mm))
		_tlbie_pid(pid, RIC_FLUSH_ALL);
	else
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
no_context:
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_mm);
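
/*
 * mm_is_thread_local() tells us the mm has only been used on the
 * current CPU, in which case the cheaper core-local tlbiel sequence is
 * enough; otherwise the flush must be broadcast with tlbie.
 */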
void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
	unsigned long pid;
	struct mm_struct *mm = tlb->mm;

	/*
	 * If we are doing a full mm flush, we will do a tlb flush
	 * with RIC_FLUSH_ALL later.
	 */
	if (tlb->fullmm)
		return;

	preempt_disable();

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;

	if (!mm_is_thread_local(mm))
		_tlbie_pid(pid, RIC_FLUSH_PWC);
	else
		tlbiel_pwc(pid);
no_context:
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_pwc);
void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				 int psize)
{
	unsigned long pid;
	unsigned long ap = mmu_get_ap(psize);

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	if (!mm_is_thread_local(mm))
		_tlbie_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
	else
		_tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
bail:
	preempt_enable();
}
void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (vma && is_vm_hugetlb_page(vma))
		return flush_hugetlb_page(vma, vmaddr);
#endif
	radix__flush_tlb_page_psize(vma ? vma->vm_mm : NULL, vmaddr,
				    mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__flush_tlb_page);
#endif /* CONFIG_SMP */
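
/*
 * Kernel mappings are translated under PID 0 on radix, so the
 * RIC_FLUSH_ALL broadcast for PID 0 below drops all cached kernel
 * translations, regardless of the range asked for.
 */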
void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	_tlbie_pid(0, RIC_FLUSH_ALL);
}
EXPORT_SYMBOL(radix__flush_tlb_kernel_range);
/*
 * Currently, for range flushing, we just do a full mm flush, because
 * this is used in code paths where we don't track the page size.
 */
void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	radix__flush_tlb_mm(mm);
}
EXPORT_SYMBOL(radix__flush_tlb_range);
static int radix_get_mmu_psize(int page_size)
{
	int psize;

	if (page_size == (1UL << mmu_psize_defs[mmu_virtual_psize].shift))
		psize = mmu_virtual_psize;
	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_2M].shift))
		psize = MMU_PAGE_2M;
	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_1G].shift))
		psize = MMU_PAGE_1G;
	else
		return -1;
	return psize;
}
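
/*
 * radix__tlb_flush() is the mmu_gather flush hook: when the gather
 * tracked a single page size we understand and isn't a full-mm
 * teardown, it does a range flush; otherwise it falls back to a full
 * mm flush.
 */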
void radix__tlb_flush(struct mmu_gather *tlb)
{
	int psize = 0;
	struct mm_struct *mm = tlb->mm;
	int page_size = tlb->page_size;

	psize = radix_get_mmu_psize(page_size);
	/*
	 * If the page size is not something we understand, do a full mm flush.
	 */
	if (psize != -1 && !tlb->fullmm && !tlb->need_flush_all)
		radix__flush_tlb_range_psize(mm, tlb->start, tlb->end, psize);
	else
		radix__flush_tlb_mm(mm);
}
#define TLB_FLUSH_ALL -1UL

/*
 * Number of pages above which we punt to a full-PID flush rather than
 * flushing page by page. Just a number at this point, copied from x86.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
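
/*
 * Example: with a 64K base page size, a range larger than
 * 33 * 64K = 2112K (just over 2M) is flushed with a single full-PID
 * tlbie/tlbiel rather than page by page.
 */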
void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
				  unsigned long end, int psize)
{
	unsigned long pid;
	unsigned long addr;
	int local = mm_is_thread_local(mm);
	unsigned long ap = mmu_get_ap(psize);
	unsigned long page_size = 1UL << mmu_psize_defs[psize].shift;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto err_out;

	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling * page_size) {
		if (local)
			_tlbiel_pid(pid, RIC_FLUSH_TLB);
		else
			_tlbie_pid(pid, RIC_FLUSH_TLB);
		goto err_out;
	}
	for (addr = start; addr < end; addr += page_size) {
		if (local)
			_tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
		else
			_tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
	}
err_out:
	preempt_enable();
}
void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
			      unsigned long page_size)
{
	unsigned long rb, rs, prs, r;
	unsigned long ap;
	unsigned long ric = RIC_FLUSH_TLB;

	ap = mmu_get_ap(radix_get_mmu_psize(page_size));
	rb = gpa & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = lpid & ((1UL << 32) - 1);
	prs = 0; /* partition scoped */
	r = 1;   /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}
EXPORT_SYMBOL(radix__flush_tlb_lpid_va);
void radix__flush_tlb_lpid(unsigned long lpid)
{
	unsigned long rb, rs, prs, r;
	unsigned long ric = RIC_FLUSH_ALL;

	rb = 0x2 << PPC_BITLSHIFT(53); /* IS = 2 */
	rs = lpid & ((1UL << 32) - 1);
	prs = 0; /* partition scoped */
	r = 1;   /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}
EXPORT_SYMBOL(radix__flush_tlb_lpid);
void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
}
EXPORT_SYMBOL(radix__flush_pmd_tlb_range);
void radix__flush_tlb_all(void)
{
	unsigned long rb, prs, r, rs;
	unsigned long ric = RIC_FLUSH_ALL;

	rb = 0x3 << PPC_BITLSHIFT(53); /* IS = 3 */
	prs = 0; /* partition scoped */
	r = 1;   /* radix format */
	rs = 1 & ((1UL << 32) - 1); /* any LPID value to flush guest mappings */

	asm volatile("ptesync": : :"memory");
	/*
	 * First flush guest entries by passing PRS = 1 and LPID != 0.
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
	/*
	 * Then flush host entries by passing PRS = 0 and LPID == 0.
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
	trace_tlbie(0, 0, rb, 0, ric, prs, r);
}
void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
				 unsigned long address)
{
	/*
	 * We track the page size in the pte only for POWER9 DD1, so we can
	 * call this only on DD1.
	 */
	if (!cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		VM_WARN_ON(1);
		return;
	}

	if (old_pte & R_PAGE_LARGE)
		radix__flush_tlb_page_psize(mm, address, MMU_PAGE_2M);
	else
		radix__flush_tlb_page_psize(mm, address, mmu_virtual_psize);
}