arch/powerpc/mm/tlb-radix.c
/*
 * TLB flush routines for radix kernels.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>

#include <asm/ppc-opcode.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/trace.h>
#include <asm/cputhreads.h>

#define RIC_FLUSH_TLB 0
#define RIC_FLUSH_PWC 1
#define RIC_FLUSH_ALL 2

/*
 * tlbiel instruction for radix, set invalidation
 * i.e., r=1 and is=01 or is=10 or is=11
 */
static inline void tlbiel_radix_set_isa300(unsigned int set, unsigned int is,
                                           unsigned int pid,
                                           unsigned int ric, unsigned int prs)
{
        unsigned long rb;
        unsigned long rs;
        unsigned int r = 1; /* radix format */

        rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
        rs = ((unsigned long)pid << PPC_BITLSHIFT(31));

        asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4)
                     : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "r"(r)
                     : "memory");
}

static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
{
        unsigned int set;

        asm volatile("ptesync": : :"memory");

        /*
         * Flush the first set of the TLB, and the entire Page Walk Cache
         * and partition table entries. Then flush the remaining sets of the
         * TLB.
         */
        tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 0);
        for (set = 1; set < num_sets; set++)
                tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 0);

        /* Do the same for process scoped entries. */
        tlbiel_radix_set_isa300(0, is, 0, RIC_FLUSH_ALL, 1);
        for (set = 1; set < num_sets; set++)
                tlbiel_radix_set_isa300(set, is, 0, RIC_FLUSH_TLB, 1);

        asm volatile("ptesync": : :"memory");
}

void radix__tlbiel_all(unsigned int action)
{
        unsigned int is;

        switch (action) {
        case TLB_INVAL_SCOPE_GLOBAL:
                is = 3;
                break;
        case TLB_INVAL_SCOPE_LPID:
                is = 2;
                break;
        default:
                BUG();
        }

        if (early_cpu_has_feature(CPU_FTR_ARCH_300))
                tlbiel_all_isa300(POWER9_TLB_SETS_RADIX, is);
        else
                WARN(1, "%s called on pre-POWER9 CPU\n", __func__);

        asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}

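/*
 * Editorial note on naming, inferred from the helpers below: the
 * double-underscore functions (__tlbiel_*, __tlbie_*) emit a bare
 * invalidation instruction; the single-underscore wrappers add the
 * required ptesync/tlbsync fencing around them. "tlbiel" invalidates
 * only the local core's TLB, while "tlbie" is broadcast to all CPUs.
 *
 * __tlbiel_pid() invalidates one set (congruence class) of the local
 * TLB for the given PID, process scoped, in radix format.
 */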
static inline void __tlbiel_pid(unsigned long pid, int set,
                                unsigned long ric)
{
        unsigned long rb, rs, prs, r;

        rb = PPC_BIT(53); /* IS = 1 */
        rb |= set << PPC_BITLSHIFT(51);
        rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
        prs = 1; /* process scoped */
        r = 1;   /* radix format */

        asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
        trace_tlbie(0, 1, rb, rs, ric, prs, r);
}

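/*
 * Broadcast counterpart of __tlbiel_pid(). Unlike tlbiel, tlbie does
 * not iterate over TLB sets, so a single instruction covers the PID.
 */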
static inline void __tlbie_pid(unsigned long pid, unsigned long ric)
{
        unsigned long rb, rs, prs, r;

        rb = PPC_BIT(53); /* IS = 1 */
        rs = pid << PPC_BITLSHIFT(31);
        prs = 1; /* process scoped */
        r = 1;   /* radix format */

        asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
        trace_tlbie(0, 0, rb, rs, ric, prs, r);
}

/*
 * We use 128 sets in radix mode and 256 sets in HPT mode.
 */
static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{
        int set;

        asm volatile("ptesync": : :"memory");

        /*
         * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL,
         * also flush the entire Page Walk Cache.
         */
        __tlbiel_pid(pid, 0, ric);

        /* For PWC, only one flush is needed */
        if (ric == RIC_FLUSH_PWC) {
                asm volatile("ptesync": : :"memory");
                return;
        }

        /* For the remaining sets, just flush the TLB */
        for (set = 1; set < POWER9_TLB_SETS_RADIX; set++)
                __tlbiel_pid(pid, set, RIC_FLUSH_TLB);

        asm volatile("ptesync": : :"memory");
        asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}

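/*
 * Global flush of a PID: ptesync first makes prior PTE updates visible,
 * a single broadcast tlbie does the invalidation, and eieio; tlbsync;
 * ptesync waits for it to complete on all CPUs.
 */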
static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
{
        asm volatile("ptesync": : :"memory");
        __tlbie_pid(pid, ric);
        asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

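/*
 * Single-address invalidation helpers. RB carries the effective page
 * number and the actual-page-size (AP) encoding; RS carries the PID,
 * as in the PID-wide helpers above.
 */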
static inline void __tlbiel_va(unsigned long va, unsigned long pid,
                               unsigned long ap, unsigned long ric)
{
        unsigned long rb, rs, prs, r;

        rb = va & ~(PPC_BITMASK(52, 63));
        rb |= ap << PPC_BITLSHIFT(58);
        rs = pid << PPC_BITLSHIFT(31);
        prs = 1; /* process scoped */
        r = 1;   /* radix format */

        asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
        trace_tlbie(0, 1, rb, rs, ric, prs, r);
}

static inline void __tlbiel_va_range(unsigned long start, unsigned long end,
                                     unsigned long pid, unsigned long page_size,
                                     unsigned long psize)
{
        unsigned long addr;
        unsigned long ap = mmu_get_ap(psize);

        for (addr = start; addr < end; addr += page_size)
                __tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
}

static inline void _tlbiel_va(unsigned long va, unsigned long pid,
                              unsigned long psize, unsigned long ric)
{
        unsigned long ap = mmu_get_ap(psize);

        asm volatile("ptesync": : :"memory");
        __tlbiel_va(va, pid, ap, ric);
        asm volatile("ptesync": : :"memory");
}

static inline void _tlbiel_va_range(unsigned long start, unsigned long end,
                                    unsigned long pid, unsigned long page_size,
                                    unsigned long psize, bool also_pwc)
{
        asm volatile("ptesync": : :"memory");
        if (also_pwc)
                __tlbiel_pid(pid, 0, RIC_FLUSH_PWC);
        __tlbiel_va_range(start, end, pid, page_size, psize);
        asm volatile("ptesync": : :"memory");
}

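/*
 * Broadcast (tlbie) counterparts of the per-VA helpers above, with the
 * same register encoding.
 */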
static inline void __tlbie_va(unsigned long va, unsigned long pid,
                              unsigned long ap, unsigned long ric)
{
        unsigned long rb, rs, prs, r;

        rb = va & ~(PPC_BITMASK(52, 63));
        rb |= ap << PPC_BITLSHIFT(58);
        rs = pid << PPC_BITLSHIFT(31);
        prs = 1; /* process scoped */
        r = 1;   /* radix format */

        asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
        trace_tlbie(0, 0, rb, rs, ric, prs, r);
}

static inline void __tlbie_va_range(unsigned long start, unsigned long end,
                                    unsigned long pid, unsigned long page_size,
                                    unsigned long psize)
{
        unsigned long addr;
        unsigned long ap = mmu_get_ap(psize);

        for (addr = start; addr < end; addr += page_size)
                __tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
}

static inline void _tlbie_va(unsigned long va, unsigned long pid,
                             unsigned long psize, unsigned long ric)
{
        unsigned long ap = mmu_get_ap(psize);

        asm volatile("ptesync": : :"memory");
        __tlbie_va(va, pid, ap, ric);
        asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

static inline void _tlbie_va_range(unsigned long start, unsigned long end,
                                   unsigned long pid, unsigned long page_size,
                                   unsigned long psize, bool also_pwc)
{
        asm volatile("ptesync": : :"memory");
        if (also_pwc)
                __tlbie_pid(pid, RIC_FLUSH_PWC);
        __tlbie_va_range(start, end, pid, page_size, psize);
        asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */
void radix__local_flush_tlb_mm(struct mm_struct *mm)
{
        unsigned long pid;

        preempt_disable();
        pid = mm->context.id;
        if (pid != MMU_NO_CONTEXT)
                _tlbiel_pid(pid, RIC_FLUSH_TLB);
        preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_mm);

#ifndef CONFIG_SMP
void radix__local_flush_all_mm(struct mm_struct *mm)
{
        unsigned long pid;

        preempt_disable();
        pid = mm->context.id;
        if (pid != MMU_NO_CONTEXT)
                _tlbiel_pid(pid, RIC_FLUSH_ALL);
        preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_all_mm);
#endif /* CONFIG_SMP */

void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
                                       int psize)
{
        unsigned long pid;

        preempt_disable();
        pid = mm->context.id;
        if (pid != MMU_NO_CONTEXT)
                _tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
        preempt_enable();
}

void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
        /* need the return fix for nohash.c */
        if (is_vm_hugetlb_page(vma))
                return radix__local_flush_hugetlb_page(vma, vmaddr);
#endif
        radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__local_flush_tlb_page);

#ifdef CONFIG_SMP
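/*
 * The SMP variants below use mm_is_thread_local() to choose between the
 * cheap core-local tlbiel and a broadcast tlbie; preemption is disabled
 * so the locality decision stays valid while the flush runs.
 */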
void radix__flush_tlb_mm(struct mm_struct *mm)
{
        unsigned long pid;

        pid = mm->context.id;
        if (unlikely(pid == MMU_NO_CONTEXT))
                return;

        preempt_disable();
        if (!mm_is_thread_local(mm))
                _tlbie_pid(pid, RIC_FLUSH_TLB);
        else
                _tlbiel_pid(pid, RIC_FLUSH_TLB);
        preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_mm);

void radix__flush_all_mm(struct mm_struct *mm)
{
        unsigned long pid;

        pid = mm->context.id;
        if (unlikely(pid == MMU_NO_CONTEXT))
                return;

        preempt_disable();
        if (!mm_is_thread_local(mm))
                _tlbie_pid(pid, RIC_FLUSH_ALL);
        else
                _tlbiel_pid(pid, RIC_FLUSH_ALL);
        preempt_enable();
}
EXPORT_SYMBOL(radix__flush_all_mm);

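/*
 * Rather than flushing the Page Walk Cache here, just record that a
 * full flush is needed; radix__tlb_flush() acts on need_flush_all at
 * the end of the mmu_gather operation.
 */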
void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
        tlb->need_flush_all = 1;
}
EXPORT_SYMBOL(radix__flush_tlb_pwc);

void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
                                 int psize)
{
        unsigned long pid;

        pid = mm->context.id;
        if (unlikely(pid == MMU_NO_CONTEXT))
                return;

        preempt_disable();
        if (!mm_is_thread_local(mm))
                _tlbie_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
        else
                _tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
        preempt_enable();
}

void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
        if (is_vm_hugetlb_page(vma))
                return radix__flush_hugetlb_page(vma, vmaddr);
#endif
        radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__flush_tlb_page);

#else /* CONFIG_SMP */
#define radix__flush_all_mm radix__local_flush_all_mm
#endif /* CONFIG_SMP */

void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        _tlbie_pid(0, RIC_FLUSH_ALL);
}
EXPORT_SYMBOL(radix__flush_tlb_kernel_range);

#define TLB_FLUSH_ALL -1UL

/*
 * Number of pages above which we invalidate the entire PID rather than
 * flush individual pages, for local and global flushes respectively.
 *
 * tlbie goes out to the interconnect and individual ops are more costly.
 * It also does not iterate over sets like the local tlbiel variant when
 * invalidating a full PID, so it has a far lower threshold to change from
 * individual page flushes to full-pid flushes.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
static unsigned long tlb_local_single_page_flush_ceiling __read_mostly = POWER9_TLB_SETS_RADIX * 2;

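/*
 * Range flush for user mappings. Above the ceilings defined above this
 * degrades to a full-PID flush; below them it walks the range page by
 * page. Under THP it additionally walks any PMD-aligned sub-range with
 * the 2M page size, presumably because that sub-range may be mapped by
 * huge pages cached under that size (editorial inference; the code
 * simply issues both walks).
 */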
void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                            unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long pid;
        unsigned int page_shift = mmu_psize_defs[mmu_virtual_psize].shift;
        unsigned long page_size = 1UL << page_shift;
        unsigned long nr_pages = (end - start) >> page_shift;
        bool local, full;

#ifdef CONFIG_HUGETLB_PAGE
        if (is_vm_hugetlb_page(vma))
                return radix__flush_hugetlb_tlb_range(vma, start, end);
#endif

        pid = mm->context.id;
        if (unlikely(pid == MMU_NO_CONTEXT))
                return;

        preempt_disable();
        if (mm_is_thread_local(mm)) {
                local = true;
                full = (end == TLB_FLUSH_ALL ||
                        nr_pages > tlb_local_single_page_flush_ceiling);
        } else {
                local = false;
                full = (end == TLB_FLUSH_ALL ||
                        nr_pages > tlb_single_page_flush_ceiling);
        }

        if (full) {
                if (local)
                        _tlbiel_pid(pid, RIC_FLUSH_TLB);
                else
                        _tlbie_pid(pid, RIC_FLUSH_TLB);
        } else {
                bool hflush = false;
                unsigned long hstart, hend;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
                hstart = (start + HPAGE_PMD_SIZE - 1) >> HPAGE_PMD_SHIFT;
                hend = end >> HPAGE_PMD_SHIFT;
                if (hstart < hend) {
                        hstart <<= HPAGE_PMD_SHIFT;
                        hend <<= HPAGE_PMD_SHIFT;
                        hflush = true;
                }
#endif

                asm volatile("ptesync": : :"memory");
                if (local) {
                        __tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize);
                        if (hflush)
                                __tlbiel_va_range(hstart, hend, pid,
                                                  HPAGE_PMD_SIZE, MMU_PAGE_2M);
                        asm volatile("ptesync": : :"memory");
                } else {
                        __tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize);
                        if (hflush)
                                __tlbie_va_range(hstart, hend, pid,
                                                 HPAGE_PMD_SIZE, MMU_PAGE_2M);
                        asm volatile("eieio; tlbsync; ptesync": : :"memory");
                }
        }
        preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_range);

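/*
 * Map a page size in bytes onto the kernel's MMU page-size index;
 * returns -1 for sizes radix cannot flush directly.
 */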
static int radix_get_mmu_psize(int page_size)
{
        int psize;

        if (page_size == (1UL << mmu_psize_defs[mmu_virtual_psize].shift))
                psize = mmu_virtual_psize;
        else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_2M].shift))
                psize = MMU_PAGE_2M;
        else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_1G].shift))
                psize = MMU_PAGE_1G;
        else
                return -1;
        return psize;
}

static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
                                             unsigned long end, int psize);

void radix__tlb_flush(struct mmu_gather *tlb)
{
        int psize = 0;
        struct mm_struct *mm = tlb->mm;
        int page_size = tlb->page_size;

        /*
         * If the page size is not something we understand, do a full mm flush.
         *
         * A "fullmm" flush must always do a flush_all_mm (RIC=2) flush
         * that flushes the process table entry cache upon process teardown.
         * See the comment for radix in arch_exit_mmap().
         */
        if (tlb->fullmm) {
                radix__flush_all_mm(mm);
        } else if ((psize = radix_get_mmu_psize(page_size)) == -1) {
                if (!tlb->need_flush_all)
                        radix__flush_tlb_mm(mm);
                else
                        radix__flush_all_mm(mm);
        } else {
                unsigned long start = tlb->start;
                unsigned long end = tlb->end;

                if (!tlb->need_flush_all)
                        radix__flush_tlb_range_psize(mm, start, end, psize);
                else
                        radix__flush_tlb_pwc_range_psize(mm, start, end, psize);
        }
        tlb->need_flush_all = 0;
}

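/*
 * Common implementation behind the psize range flushes: the same
 * local/global and full-PID/per-page policy as radix__flush_tlb_range(),
 * with also_pwc selecting whether the Page Walk Cache is flushed too.
 */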
static inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
                                unsigned long start, unsigned long end,
                                int psize, bool also_pwc)
{
        unsigned long pid;
        unsigned int page_shift = mmu_psize_defs[psize].shift;
        unsigned long page_size = 1UL << page_shift;
        unsigned long nr_pages = (end - start) >> page_shift;
        bool local, full;

        pid = mm->context.id;
        if (unlikely(pid == MMU_NO_CONTEXT))
                return;

        preempt_disable();
        if (mm_is_thread_local(mm)) {
                local = true;
                full = (end == TLB_FLUSH_ALL ||
                        nr_pages > tlb_local_single_page_flush_ceiling);
        } else {
                local = false;
                full = (end == TLB_FLUSH_ALL ||
                        nr_pages > tlb_single_page_flush_ceiling);
        }

        if (full) {
                if (local)
                        _tlbiel_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
                else
                        _tlbie_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
        } else {
                if (local)
                        _tlbiel_va_range(start, end, pid, page_size, psize, also_pwc);
                else
                        _tlbie_va_range(start, end, pid, page_size, psize, also_pwc);
        }
        preempt_enable();
}

void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
                                  unsigned long end, int psize)
{
        return __radix__flush_tlb_range_psize(mm, start, end, psize, false);
}

static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
                                             unsigned long end, int psize)
{
        __radix__flush_tlb_range_psize(mm, start, end, psize, true);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
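/*
 * After a THP collapse the region may still be cached under the old
 * small-page translations and the old page-table walk, so flush the
 * PMD-sized range with also_pwc = true to take out both.
 */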
void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
{
        unsigned long pid, end;

        pid = mm->context.id;
        if (unlikely(pid == MMU_NO_CONTEXT))
                return;

        /* 4k page size, just blow the world */
        if (PAGE_SIZE == 0x1000) {
                radix__flush_all_mm(mm);
                return;
        }

        end = addr + HPAGE_PMD_SIZE;

        /* Otherwise first do the PWC, then iterate the pages. */
        preempt_disable();

        if (mm_is_thread_local(mm)) {
                _tlbiel_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
        } else {
                _tlbie_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
        }

        preempt_enable();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

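/*
 * Partition-scoped flushes for guest (LPID) translations: RS carries
 * the LPID and prs = 0, in contrast to the process-scoped helpers
 * above.
 */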
void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
                              unsigned long page_size)
{
        unsigned long rb, rs, prs, r;
        unsigned long ap;
        unsigned long ric = RIC_FLUSH_TLB;

        ap = mmu_get_ap(radix_get_mmu_psize(page_size));
        rb = gpa & ~(PPC_BITMASK(52, 63));
        rb |= ap << PPC_BITLSHIFT(58);
        rs = lpid & ((1UL << 32) - 1);
        prs = 0; /* partition scoped */
        r = 1;   /* radix format */

        asm volatile("ptesync": : :"memory");
        asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
        asm volatile("eieio; tlbsync; ptesync": : :"memory");
        trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}
EXPORT_SYMBOL(radix__flush_tlb_lpid_va);

void radix__flush_tlb_lpid(unsigned long lpid)
{
        unsigned long rb, rs, prs, r;
        unsigned long ric = RIC_FLUSH_ALL;

        rb = 0x2 << PPC_BITLSHIFT(53); /* IS = 2 */
        rs = lpid & ((1UL << 32) - 1);
        prs = 0; /* partition scoped */
        r = 1;   /* radix format */

        asm volatile("ptesync": : :"memory");
        asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
        asm volatile("eieio; tlbsync; ptesync": : :"memory");
        trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}
EXPORT_SYMBOL(radix__flush_tlb_lpid);

void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
                                unsigned long start, unsigned long end)
{
        radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
}
EXPORT_SYMBOL(radix__flush_pmd_tlb_range);

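/*
 * Flush everything in the TLB: two broadcast tlbies with IS = 3, one
 * with PRS = 1 for guest entries and one with PRS = 0 for host
 * entries, as the inline comments below describe.
 */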
void radix__flush_tlb_all(void)
{
        unsigned long rb, prs, r, rs;
        unsigned long ric = RIC_FLUSH_ALL;

        rb = 0x3 << PPC_BITLSHIFT(53); /* IS = 3 */
        prs = 0; /* partition scoped */
        r = 1;   /* radix format */
        rs = 1 & ((1UL << 32) - 1); /* any LPID value to flush guest mappings */

        asm volatile("ptesync": : :"memory");
        /*
         * Flush guest entries by passing PRS = 1 and LPID != 0.
         */
        asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory");
        /*
         * Flush host entries by passing PRS = 0 and LPID == 0.
         */
        asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
                     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
        asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
                                 unsigned long address)
{
        /*
         * We track the page size in the PTE only for POWER9 DD1, so this
         * must only be called on DD1.
         */
        if (!cpu_has_feature(CPU_FTR_POWER9_DD1)) {
                VM_WARN_ON(1);
                return;
        }

        if (old_pte & R_PAGE_LARGE)
                radix__flush_tlb_page_psize(mm, address, MMU_PAGE_2M);
        else
                radix__flush_tlb_page_psize(mm, address, mmu_virtual_psize);
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
{
        unsigned int pid = mm->context.id;

        if (unlikely(pid == MMU_NO_CONTEXT))
                return;

        /*
         * If this context hasn't run on that CPU before and KVM is
         * around, there's a slim chance that the guest on another
         * CPU just brought an obsolete translation into the TLB of
         * this CPU due to a bad prefetch using the guest PID on
         * the way into the hypervisor.
         *
         * We work around this here. If KVM is possible, we check if
         * any sibling thread is in KVM. If it is, the window may exist
         * and thus we flush that PID from the core.
         *
         * A potential future improvement would be to mark which PIDs
         * have never been used on the system and avoid it if the PID
         * is new and the process has no other cpumask bit set.
         */
        if (cpu_has_feature(CPU_FTR_HVMODE) && radix_enabled()) {
                int cpu = smp_processor_id();
                int sib = cpu_first_thread_sibling(cpu);
                bool flush = false;

                for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
                        if (sib == cpu)
                                continue;
                        if (paca[sib].kvm_hstate.kvm_vcpu)
                                flush = true;
                }
                if (flush)
                        _tlbiel_pid(pid, RIC_FLUSH_ALL);
        }
}
EXPORT_SYMBOL_GPL(radix_kvm_prefetch_workaround);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */