arch/powerpc/mm/tlb-radix.c
/*
 * TLB flush routines for radix kernels.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
#include <asm/ppc-opcode.h>

#include <asm/tlb.h>
#include <asm/tlbflush.h>

static DEFINE_RAW_SPINLOCK(native_tlbie_lock);

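/*
 * RIC (Radix Invalidation Control) values for tlbie/tlbiel: flush
 * TLB entries only, the Page Walk Cache only, or both.
 */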
#define RIC_FLUSH_TLB 0
#define RIC_FLUSH_PWC 1
#define RIC_FLUSH_ALL 2

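/*
 * Issue one local (current-CPU-only) tlbiel for a single TLB
 * congruence class ("set") of the given PID, in radix, process-scoped
 * form.
 */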
static inline void __tlbiel_pid(unsigned long pid, int set,
				unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = PPC_BIT(53); /* IS = 1 */
	rb |= set << PPC_BITLSHIFT(51);
	rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */

	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
}

/*
 * We use 128 sets in radix mode and 256 sets in hash (HPT) mode.
 */
static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{
	int set;

	asm volatile("ptesync": : :"memory");

	/*
	 * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL,
	 * also flush the entire Page Walk Cache.
	 */
	__tlbiel_pid(pid, 0, ric);

	/* For PWC, only one flush is needed */
	if (ric == RIC_FLUSH_PWC) {
		asm volatile("ptesync": : :"memory");
		return;
	}

	/* For the remaining sets, just flush the TLB */
	for (set = 1; set < POWER9_TLB_SETS_RADIX; set++)
		__tlbiel_pid(pid, set, RIC_FLUSH_TLB);

	asm volatile("ptesync": : :"memory");
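	/*
	 * The ERAT caches translations independently of the TLB proper,
	 * so invalidate it as well now that the TLB sets are clean.
	 */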
	asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}

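/*
 * Broadcast (global) flush of all TLB entries for a PID; tlbie is
 * propagated to every CPU, so no per-set loop is needed here.
 */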
static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = PPC_BIT(53); /* IS = 1 */
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

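/*
 * Local flush of a single virtual address; "ap" encodes the actual
 * page size so that the matching TLB entry is selected.
 */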
static inline void _tlbiel_va(unsigned long va, unsigned long pid,
			      unsigned long ap, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("ptesync": : :"memory");
}

static inline void _tlbie_va(unsigned long va, unsigned long pid,
			     unsigned long ap, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */
void radix__local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_TLB);
	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_mm);

#ifndef CONFIG_SMP
static void radix__local_flush_all_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	preempt_enable();
}
#endif /* !CONFIG_SMP */

void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				       int psize)
{
	unsigned long pid;
	unsigned long ap = mmu_get_ap(psize);

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
	preempt_enable();
}

void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	/* need the return fix for nohash.c */
	if (vma && is_vm_hugetlb_page(vma))
		return __local_flush_hugetlb_page(vma, vmaddr);
#endif
	radix__local_flush_tlb_page_psize(vma ? vma->vm_mm : NULL, vmaddr,
					  mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__local_flush_tlb_page);

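/*
 * SMP variants: use the local tlbiel when the mm is only in use on the
 * current CPU (mm_is_thread_local()), else broadcast with tlbie.
 */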
#ifdef CONFIG_SMP
void radix__flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;

	if (!mm_is_thread_local(mm))
		_tlbie_pid(pid, RIC_FLUSH_TLB);
	else
		_tlbiel_pid(pid, RIC_FLUSH_TLB);
no_context:
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_mm);

static void radix__flush_all_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;

	if (!mm_is_thread_local(mm))
		_tlbie_pid(pid, RIC_FLUSH_ALL);
	else
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
no_context:
	preempt_enable();
}

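/*
 * The PWC flush is deferred: mark the gather so that radix__tlb_flush()
 * does a full (TLB + PWC) flush at the end of the unmap operation.
 */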
void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
	tlb->need_flush_all = 1;
}
EXPORT_SYMBOL(radix__flush_tlb_pwc);

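/*
 * On MMUs without MMU_FTR_LOCKLESS_TLBIE, concurrent tlbie from
 * multiple CPUs must be serialized with native_tlbie_lock.
 */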
void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				 int psize)
{
	unsigned long pid;
	unsigned long ap = mmu_get_ap(psize);

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	if (!mm_is_thread_local(mm)) {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);
		_tlbie_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	} else
		_tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
bail:
	preempt_enable();
}

void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (vma && is_vm_hugetlb_page(vma))
		return flush_hugetlb_page(vma, vmaddr);
#endif
	radix__flush_tlb_page_psize(vma ? vma->vm_mm : NULL, vmaddr,
				    mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__flush_tlb_page);

#else /* CONFIG_SMP */
#define radix__flush_all_mm radix__local_flush_all_mm
#endif /* CONFIG_SMP */

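/*
 * Kernel mappings live under PID 0, so just flush everything for
 * PID 0 regardless of the requested range.
 */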
void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (lock_tlbie)
		raw_spin_lock(&native_tlbie_lock);
	_tlbie_pid(0, RIC_FLUSH_ALL);
	if (lock_tlbie)
		raw_spin_unlock(&native_tlbie_lock);
}
EXPORT_SYMBOL(radix__flush_tlb_kernel_range);

/*
 * Currently, for range flushing, we just do a full mm flush, because
 * we use this in code paths where we don't track the page size.
 */
void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	/*
	 * This is currently used when collapsing THPs so we need to
	 * flush the PWC. We should fix this.
	 */
	radix__flush_all_mm(mm);
}
EXPORT_SYMBOL(radix__flush_tlb_range);

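/* Map a raw page size in bytes to an MMU_PAGE_* index, or -1 if unsupported. */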
static int radix_get_mmu_psize(int page_size)
{
	int psize;

	if (page_size == (1UL << mmu_psize_defs[mmu_virtual_psize].shift))
		psize = mmu_virtual_psize;
	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_2M].shift))
		psize = MMU_PAGE_2M;
	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_1G].shift))
		psize = MMU_PAGE_1G;
	else
		return -1;
	return psize;
}

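/*
 * Final flush for an mmu_gather: choose the cheapest flush that covers
 * what the gather tracked.
 */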
void radix__tlb_flush(struct mmu_gather *tlb)
{
	int psize = 0;
	struct mm_struct *mm = tlb->mm;
	int page_size = tlb->page_size;

	psize = radix_get_mmu_psize(page_size);
	/*
	 * If the page size is not something we understand, do a full mm flush.
	 */
	if (psize != -1 && !tlb->fullmm && !tlb->need_flush_all)
		radix__flush_tlb_range_psize(mm, tlb->start, tlb->end, psize);
	else if (tlb->need_flush_all) {
		tlb->need_flush_all = 0;
		radix__flush_all_mm(mm);
	} else
		radix__flush_tlb_mm(mm);
}

#define TLB_FLUSH_ALL -1UL
/*
 * Number of pages above which we will do a broadcast tlbie. Just a
 * number at this point, copied from x86.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;

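/*
 * Flush a range one page at a time, unless it exceeds the ceiling
 * above, in which case flushing the whole PID is cheaper.
 */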
void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
				  unsigned long end, int psize)
{
	unsigned long pid;
	unsigned long addr;
	int local = mm_is_thread_local(mm);
	unsigned long ap = mmu_get_ap(psize);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
	unsigned long page_size = 1UL << mmu_psize_defs[psize].shift;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto out;

	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling * page_size) {
		if (local)
			_tlbiel_pid(pid, RIC_FLUSH_TLB);
		else
			_tlbie_pid(pid, RIC_FLUSH_TLB);
		goto out;
	}
	for (addr = start; addr < end; addr += page_size) {
		if (local)
			_tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
		else {
			if (lock_tlbie)
				raw_spin_lock(&native_tlbie_lock);
			_tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
			if (lock_tlbie)
				raw_spin_unlock(&native_tlbie_lock);
		}
	}
out:
	preempt_enable();
}

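/*
 * Flush a single guest physical address, partition scoped (PRS = 0),
 * for the given LPID.
 */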
void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
			      unsigned long page_size)
{
	unsigned long rb, rs, prs, r;
	unsigned long ap;
	unsigned long ric = RIC_FLUSH_TLB;

	ap = mmu_get_ap(radix_get_mmu_psize(page_size));
	rb = gpa & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = lpid & ((1UL << 32) - 1);
	prs = 0; /* partition scoped */
	r = 1; /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
EXPORT_SYMBOL(radix__flush_tlb_lpid_va);

void radix__flush_tlb_lpid(unsigned long lpid)
{
	unsigned long rb, rs, prs, r;
	unsigned long ric = RIC_FLUSH_ALL;

	rb = 0x2 << PPC_BITLSHIFT(53); /* IS = 2 */
	rs = lpid & ((1UL << 32) - 1);
	prs = 0; /* partition scoped */
	r = 1; /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
EXPORT_SYMBOL(radix__flush_tlb_lpid);

void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
}
EXPORT_SYMBOL(radix__flush_pmd_tlb_range);

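/*
 * Flush everything (IS = 3): all TLB entries for all LPIDs and PIDs,
 * both process and partition scoped.
 */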
void radix__flush_tlb_all(void)
{
	unsigned long rb, prs, r, rs;
	unsigned long ric = RIC_FLUSH_ALL;

	rb = 0x3 << PPC_BITLSHIFT(53); /* IS = 3 */
	prs = 0; /* partition scoped */
	r = 1; /* radix format */
	rs = 1 & ((1UL << 32) - 1); /* any LPID value to flush guest mappings */

	asm volatile("ptesync": : :"memory");
	/*
	 * First flush guest entries by passing PRS = 1 and LPID != 0.
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory");
	/*
	 * Then flush host entries by passing PRS = 0 and LPID == 0.
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
				 unsigned long address)
{
	/*
	 * We track the page size in the PTE only for POWER9 DD1, so this
	 * must only be called on DD1.
	 */
	if (!cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		VM_WARN_ON(1);
		return;
	}

	if (old_pte & _PAGE_LARGE)
		radix__flush_tlb_page_psize(mm, address, MMU_PAGE_2M);
	else
		radix__flush_tlb_page_psize(mm, address, mmu_virtual_psize);
}