/*
 * TLB flush routines for radix kernels.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>
#include <asm/ppc-opcode.h>

#include <asm/tlb.h>
#include <asm/tlbflush.h>

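/*
 * Serializes broadcast tlbie when the CPU cannot safely issue
 * concurrent tlbie instructions (MMU_FTR_LOCKLESS_TLBIE unset).
 */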
static DEFINE_RAW_SPINLOCK(native_tlbie_lock);

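/*
 * RIC (Radix Invalidation Control) field of tlbie/tlbiel: invalidate
 * TLB entries only, the page walk cache (PWC) only, or both.
 */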
#define RIC_FLUSH_TLB 0
#define RIC_FLUSH_PWC 1
#define RIC_FLUSH_ALL 2

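/* Flush one TLB set (congruence class) for @pid on the local CPU. */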
static inline void __tlbiel_pid(unsigned long pid, int set,
				unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = PPC_BIT(53); /* IS = 1 */
	rb |= set << PPC_BITLSHIFT(51);
	rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("ptesync": : :"memory");
}

/*
 * We use 128 sets in radix mode and 256 sets in hpt mode.
 */
static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{
	int set;

	for (set = 0; set < POWER9_TLB_SETS_RADIX; set++)
		__tlbiel_pid(pid, set, ric);

	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
		asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
}

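/*
 * Global variant: tlbie broadcasts the invalidation to all CPUs;
 * eieio; tlbsync; ptesync waits for it to complete everywhere.
 */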
static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = PPC_BIT(53); /* IS = 1 */
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

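/*
 * Flush a single effective address @va for @pid on the local CPU.
 * @ap is the encoded actual page size (see mmu_get_ap()).
 */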
static inline void _tlbiel_va(unsigned long va, unsigned long pid,
			      unsigned long ap, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("ptesync": : :"memory");
	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
		asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
}

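/* As above, but broadcast to all CPUs with tlbie. */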
static inline void _tlbie_va(unsigned long va, unsigned long pid,
			     unsigned long ap, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLB entries
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */
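
/*
 * Generic code reaches these through the flush_tlb_* hooks in
 * asm/book3s/64/tlbflush.h, which dispatch to the radix__* variants
 * when the kernel is running in radix mode.
 */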
void radix__local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_mm);

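/* Flush only the page walk cache for @mm on the local CPU. */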
void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
	unsigned long pid;
	struct mm_struct *mm = tlb->mm;

	preempt_disable();

	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_PWC);

	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_pwc);

void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				       int psize)
{
	unsigned long pid;
	unsigned long ap = mmu_get_ap(psize);

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
	preempt_enable();
}

void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	/* need the return fix for nohash.c */
	if (vma && is_vm_hugetlb_page(vma))
		return __local_flush_hugetlb_page(vma, vmaddr);
#endif
	radix__local_flush_tlb_page_psize(vma ? vma->vm_mm : NULL, vmaddr,
					  mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__local_flush_tlb_page);

#ifdef CONFIG_SMP
void radix__flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;

	if (!mm_is_thread_local(mm)) {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);
		_tlbie_pid(pid, RIC_FLUSH_ALL);
		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	} else {
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	}
no_context:
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_mm);

void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
	unsigned long pid;
	struct mm_struct *mm = tlb->mm;

	preempt_disable();

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;

	if (!mm_is_thread_local(mm)) {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);
		_tlbie_pid(pid, RIC_FLUSH_PWC);
		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	} else {
		_tlbiel_pid(pid, RIC_FLUSH_PWC);
	}
no_context:
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_pwc);

void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				 int psize)
{
	unsigned long pid;
	unsigned long ap = mmu_get_ap(psize);

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	if (!mm_is_thread_local(mm)) {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);
		_tlbie_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	} else {
		_tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
	}
bail:
	preempt_enable();
}

void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (vma && is_vm_hugetlb_page(vma))
		return flush_hugetlb_page(vma, vmaddr);
#endif
	radix__flush_tlb_page_psize(vma ? vma->vm_mm : NULL, vmaddr,
				    mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__flush_tlb_page);

#endif /* CONFIG_SMP */

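/* Kernel translations are tagged with PID 0, so flush everything for it. */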
void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (lock_tlbie)
		raw_spin_lock(&native_tlbie_lock);
	_tlbie_pid(0, RIC_FLUSH_ALL);
	if (lock_tlbie)
		raw_spin_unlock(&native_tlbie_lock);
}
EXPORT_SYMBOL(radix__flush_tlb_kernel_range);

/*
 * Currently, for range flushing, we just do a full mm flush, because
 * we use this in code paths where we don't track the page size.
 */
void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	radix__flush_tlb_mm(mm);
}
EXPORT_SYMBOL(radix__flush_tlb_range);

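/*
 * Map a byte page size to its MMU_PAGE_* index (e.g. 1UL << 21 maps
 * to MMU_PAGE_2M); returns -1 for sizes we cannot flush by address.
 */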
static int radix_get_mmu_psize(int page_size)
{
	int psize;

	if (page_size == (1UL << mmu_psize_defs[mmu_virtual_psize].shift))
		psize = mmu_virtual_psize;
	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_2M].shift))
		psize = MMU_PAGE_2M;
	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_1G].shift))
		psize = MMU_PAGE_1G;
	else
		return -1;
	return psize;
}

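/*
 * mmu_gather completion hook, called when an unmap batch is finished
 * (tlb_finish_mmu()); picks a range or full-mm flush as appropriate.
 */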
void radix__tlb_flush(struct mmu_gather *tlb)
{
	int psize = 0;
	struct mm_struct *mm = tlb->mm;
	int page_size = tlb->page_size;

	psize = radix_get_mmu_psize(page_size);
	/*
	 * If the page size is not something we understand, do a full mm flush.
	 */
	if (psize != -1 && !tlb->fullmm && !tlb->need_flush_all)
		radix__flush_tlb_range_psize(mm, tlb->start, tlb->end, psize);
	else
		radix__flush_tlb_mm(mm);
}

#define TLB_FLUSH_ALL -1UL
/*
 * Number of pages above which we flush the whole PID rather than
 * flushing individual pages. Just a number copied from x86 at this
 * point.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;

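/*
 * Flush a range for a given page size: above the ceiling flush the
 * whole PID, otherwise flush page by page.
 */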
void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
				  unsigned long end, int psize)
{
	unsigned long pid;
	unsigned long addr;
	int local = mm_is_thread_local(mm);
	unsigned long ap = mmu_get_ap(psize);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
	unsigned long page_size = 1UL << mmu_psize_defs[psize].shift;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto err_out;

	if (end == TLB_FLUSH_ALL ||
	    (end - start) > tlb_single_page_flush_ceiling * page_size) {
		if (local)
			_tlbiel_pid(pid, RIC_FLUSH_TLB);
		else
			_tlbie_pid(pid, RIC_FLUSH_TLB);
		goto err_out;
	}
	for (addr = start; addr < end; addr += page_size) {
		if (local) {
			_tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
		} else {
			if (lock_tlbie)
				raw_spin_lock(&native_tlbie_lock);
			_tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
			if (lock_tlbie)
				raw_spin_unlock(&native_tlbie_lock);
		}
	}
err_out:
	preempt_enable();
}

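/*
 * Partition-scoped flush of a single guest physical address for the
 * given @lpid (e.g. by KVM for radix guests).
 */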
void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
			      unsigned long page_size)
{
	unsigned long rb, rs, prs, r;
	unsigned long ap;
	unsigned long ric = RIC_FLUSH_TLB;

	ap = mmu_get_ap(radix_get_mmu_psize(page_size));
	rb = gpa & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = lpid & ((1UL << 32) - 1);
	prs = 0; /* partition scoped */
	r = 1; /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
EXPORT_SYMBOL(radix__flush_tlb_lpid_va);

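/* Flush all partition-scoped translations for @lpid (IS = 2). */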
void radix__flush_tlb_lpid(unsigned long lpid)
{
	unsigned long rb, rs, prs, r;
	unsigned long ric = RIC_FLUSH_ALL;

	rb = 0x2 << PPC_BITLSHIFT(53); /* IS = 2 */
	rs = lpid & ((1UL << 32) - 1);
	prs = 0; /* partition scoped */
	r = 1; /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
EXPORT_SYMBOL(radix__flush_tlb_lpid);

void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
}
EXPORT_SYMBOL(radix__flush_pmd_tlb_range);

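/*
 * Flush the entire TLB on all CPUs (IS = 3): first guest entries
 * (PRS = 1, LPID != 0), then host entries (PRS = 0, LPID = 0).
 */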
void radix__flush_tlb_all(void)
{
	unsigned long rb, prs, r, rs;
	unsigned long ric = RIC_FLUSH_ALL;

	rb = 0x3 << PPC_BITLSHIFT(53); /* IS = 3 */
	prs = 0; /* partition scoped */
	r = 1; /* radix format */
	rs = 1 & ((1UL << 32) - 1); /* any LPID value to flush guest mappings */

	asm volatile("ptesync": : :"memory");
	/*
	 * Flush guest entries by passing PRS = 1 and LPID != 0.
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory");
	/*
	 * Flush host entries by passing PRS = 0 and LPID == 0.
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
				 unsigned long address)
{
	/*
	 * We track the page size in the pte only for DD1, so we can
	 * call this only on DD1.
	 */
	if (!cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		VM_WARN_ON(1);
		return;
	}

	if (old_pte & _PAGE_LARGE)
		radix__flush_tlb_page_psize(mm, address, MMU_PAGE_2M);
	else
		radix__flush_tlb_page_psize(mm, address, mmu_virtual_psize);
}