arch/powerpc/mm/tlb-radix.c
/*
 * TLB flush routines for radix kernels.
 *
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/memblock.h>

#include <asm/ppc-opcode.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/trace.h>
#include <asm/cputhreads.h>

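/*
 * RIC (Radix Invalidation Control) selects what tlbie/tlbiel invalidate:
 * TLB entries only, the Page Walk Cache only, or both (RIC_FLUSH_ALL,
 * which also flushes cached process/partition table entries).
 */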
#define RIC_FLUSH_TLB 0
#define RIC_FLUSH_PWC 1
#define RIC_FLUSH_ALL 2

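/*
 * The helpers below hand-build the tlbie(l) operands: RB carries the IS
 * field (0 = flush by EA, 1 = by PID, 2 = by LPID, 3 = everything) plus
 * the set number or effective address, RS carries the PID or LPID, and
 * the R/PRS/RIC immediates select radix format, process vs. partition
 * scope, and what to invalidate (see the RIC_FLUSH_* values above).
 */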
static inline void __tlbiel_pid(unsigned long pid, int set,
				unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = PPC_BIT(53); /* IS = 1 */
	rb |= set << PPC_BITLSHIFT(51);
	rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */

	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 1, rb, rs, ric, prs, r);
}

static inline void __tlbie_pid(unsigned long pid, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = PPC_BIT(53); /* IS = 1 */
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */

	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
}

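/*
 * tlbiel only affects the local thread's TLB, so bracketing it with
 * ptesync (first to order prior PTE updates, then for completion)
 * suffices. The broadcast tlbie sequences below must instead be
 * followed by eieio; tlbsync; ptesync so the invalidation is complete
 * on all processors.
 */
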
/*
 * We use 128 sets in radix mode and 256 sets in hpt mode.
 */
static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
{
	int set;

	asm volatile("ptesync": : :"memory");

	/*
	 * Flush the first set of the TLB, and if we're doing a RIC_FLUSH_ALL,
	 * also flush the entire Page Walk Cache.
	 */
	__tlbiel_pid(pid, 0, ric);

	/* For PWC, only one flush is needed */
	if (ric == RIC_FLUSH_PWC) {
		asm volatile("ptesync": : :"memory");
		return;
	}

	/* For the remaining sets, just flush the TLB */
	for (set = 1; set < POWER9_TLB_SETS_RADIX; set++)
		__tlbiel_pid(pid, set, RIC_FLUSH_TLB);

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}

static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
{
	asm volatile("ptesync": : :"memory");
	__tlbie_pid(pid, ric);
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

static inline void __tlbiel_va(unsigned long va, unsigned long pid,
			       unsigned long ap, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */

	asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 1, rb, rs, ric, prs, r);
}

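/*
 * Per-page flushes always use RIC_FLUSH_TLB; when the Page Walk Cache
 * needs invalidating too, callers issue a single RIC_FLUSH_PWC up front
 * (see the also_pwc paths below) instead of one PWC flush per page.
 */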
static inline void __tlbiel_va_range(unsigned long start, unsigned long end,
				     unsigned long pid, unsigned long page_size,
				     unsigned long psize)
{
	unsigned long addr;
	unsigned long ap = mmu_get_ap(psize);

	for (addr = start; addr < end; addr += page_size)
		__tlbiel_va(addr, pid, ap, RIC_FLUSH_TLB);
}

static inline void _tlbiel_va(unsigned long va, unsigned long pid,
			      unsigned long psize, unsigned long ric)
{
	unsigned long ap = mmu_get_ap(psize);

	asm volatile("ptesync": : :"memory");
	__tlbiel_va(va, pid, ap, ric);
	asm volatile("ptesync": : :"memory");
}

static inline void _tlbiel_va_range(unsigned long start, unsigned long end,
				    unsigned long pid, unsigned long page_size,
				    unsigned long psize, bool also_pwc)
{
	asm volatile("ptesync": : :"memory");
	if (also_pwc)
		__tlbiel_pid(pid, 0, RIC_FLUSH_PWC);
	__tlbiel_va_range(start, end, pid, page_size, psize);
	asm volatile("ptesync": : :"memory");
}

static inline void __tlbie_va(unsigned long va, unsigned long pid,
			      unsigned long ap, unsigned long ric)
{
	unsigned long rb, rs, prs, r;

	rb = va & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = pid << PPC_BITLSHIFT(31);
	prs = 1; /* process scoped */
	r = 1; /* radix format */

	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
}

static inline void __tlbie_va_range(unsigned long start, unsigned long end,
				    unsigned long pid, unsigned long page_size,
				    unsigned long psize)
{
	unsigned long addr;
	unsigned long ap = mmu_get_ap(psize);

	for (addr = start; addr < end; addr += page_size)
		__tlbie_va(addr, pid, ap, RIC_FLUSH_TLB);
}

static inline void _tlbie_va(unsigned long va, unsigned long pid,
			     unsigned long psize, unsigned long ric)
{
	unsigned long ap = mmu_get_ap(psize);

	asm volatile("ptesync": : :"memory");
	__tlbie_va(va, pid, ap, ric);
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

static inline void _tlbie_va_range(unsigned long start, unsigned long end,
				   unsigned long pid, unsigned long page_size,
				   unsigned long psize, bool also_pwc)
{
	asm volatile("ptesync": : :"memory");
	if (also_pwc)
		__tlbie_pid(pid, RIC_FLUSH_PWC);
	__tlbie_va_range(start, end, pid, page_size, psize);
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
}

/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */
void radix__local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_TLB);
	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_tlb_mm);

#ifndef CONFIG_SMP
void radix__local_flush_all_mm(struct mm_struct *mm)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	preempt_enable();
}
EXPORT_SYMBOL(radix__local_flush_all_mm);
#endif /* CONFIG_SMP */

void radix__local_flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				       int psize)
{
	unsigned long pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	preempt_enable();
}

void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	/* need the return fix for nohash.c */
	if (is_vm_hugetlb_page(vma))
		return radix__local_flush_hugetlb_page(vma, vmaddr);
#endif
	radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__local_flush_tlb_page);

#ifdef CONFIG_SMP
void radix__flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (!mm_is_thread_local(mm))
		_tlbie_pid(pid, RIC_FLUSH_TLB);
	else
		_tlbiel_pid(pid, RIC_FLUSH_TLB);
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_mm);

void radix__flush_all_mm(struct mm_struct *mm)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (!mm_is_thread_local(mm))
		_tlbie_pid(pid, RIC_FLUSH_ALL);
	else
		_tlbiel_pid(pid, RIC_FLUSH_ALL);
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_all_mm);

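/*
 * Rather than flushing the PWC immediately, record that a flush is
 * pending; radix__tlb_flush() then picks RIC_FLUSH_ALL or the pwc range
 * variant when the mmu_gather is torn down.
 */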
void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
{
	tlb->need_flush_all = 1;
}
EXPORT_SYMBOL(radix__flush_tlb_pwc);

void radix__flush_tlb_page_psize(struct mm_struct *mm, unsigned long vmaddr,
				 int psize)
{
	unsigned long pid;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (!mm_is_thread_local(mm))
		_tlbie_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	else
		_tlbiel_va(vmaddr, pid, psize, RIC_FLUSH_TLB);
	preempt_enable();
}

void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (is_vm_hugetlb_page(vma))
		return radix__flush_hugetlb_page(vma, vmaddr);
#endif
	radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, mmu_virtual_psize);
}
EXPORT_SYMBOL(radix__flush_tlb_page);

#else /* CONFIG_SMP */
#define radix__flush_all_mm radix__local_flush_all_mm
#endif /* CONFIG_SMP */

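/*
 * Kernel translations run under PID 0, so a kernel-range flush (the
 * start/end arguments are ignored) invalidates the TLB and PWC for
 * PID 0 on all CPUs.
 */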
void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	_tlbie_pid(0, RIC_FLUSH_ALL);
}
EXPORT_SYMBOL(radix__flush_tlb_kernel_range);

#define TLB_FLUSH_ALL -1UL

/*
 * Number of pages above which we invalidate the entire PID rather than
 * flush individual pages, for local and global flushes respectively.
 *
 * tlbie goes out to the interconnect and individual ops are more costly.
 * It also does not iterate over sets like the local tlbiel variant when
 * invalidating a full PID, so it has a far lower threshold to change from
 * individual page flushes to full-pid flushes.
 */
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
static unsigned long tlb_local_single_page_flush_ceiling __read_mostly = POWER9_TLB_SETS_RADIX * 2;

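/*
 * For example, invalidating 100 pages of a globally-visible mm exceeds
 * the global ceiling (33) and collapses into one full-PID tlbie, while
 * the same range on a thread-local mm stays below the local ceiling
 * (2 * 128 sets = 256) and is flushed page by page with tlbiel.
 */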
void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long pid;
	unsigned int page_shift = mmu_psize_defs[mmu_virtual_psize].shift;
	unsigned long page_size = 1UL << page_shift;
	unsigned long nr_pages = (end - start) >> page_shift;
	bool local, full;

#ifdef CONFIG_HUGETLB_PAGE
	if (is_vm_hugetlb_page(vma))
		return radix__flush_hugetlb_tlb_range(vma, start, end);
#endif

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (mm_is_thread_local(mm)) {
		local = true;
		full = (end == TLB_FLUSH_ALL ||
				nr_pages > tlb_local_single_page_flush_ceiling);
	} else {
		local = false;
		full = (end == TLB_FLUSH_ALL ||
				nr_pages > tlb_single_page_flush_ceiling);
	}

	if (full) {
		if (local)
			_tlbiel_pid(pid, RIC_FLUSH_TLB);
		else
			_tlbie_pid(pid, RIC_FLUSH_TLB);
	} else {
		bool hflush = false;
		unsigned long hstart, hend;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		hstart = (start + HPAGE_PMD_SIZE - 1) >> HPAGE_PMD_SHIFT;
		hend = end >> HPAGE_PMD_SHIFT;
		if (hstart < hend) {
			hstart <<= HPAGE_PMD_SHIFT;
			hend <<= HPAGE_PMD_SHIFT;
			hflush = true;
		}
#endif
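		/*
		 * The base-page loop below already covers [start, end), but
		 * THP translations are cached with the 2M page size, so the
		 * PMD-aligned middle must also be flushed at MMU_PAGE_2M.
		 * E.g. start = 0x1ff000, end = 0x601000 gives hstart =
		 * 0x200000 and hend = 0x600000; a range narrower than one
		 * aligned PMD leaves hstart >= hend and skips this pass.
		 */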

		asm volatile("ptesync": : :"memory");
		if (local) {
			__tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize);
			if (hflush)
				__tlbiel_va_range(hstart, hend, pid,
						HPAGE_PMD_SIZE, MMU_PAGE_2M);
			asm volatile("ptesync": : :"memory");
		} else {
			__tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize);
			if (hflush)
				__tlbie_va_range(hstart, hend, pid,
						HPAGE_PMD_SIZE, MMU_PAGE_2M);
			asm volatile("eieio; tlbsync; ptesync": : :"memory");
		}
	}
	preempt_enable();
}
EXPORT_SYMBOL(radix__flush_tlb_range);

static int radix_get_mmu_psize(int page_size)
{
	int psize;

	if (page_size == (1UL << mmu_psize_defs[mmu_virtual_psize].shift))
		psize = mmu_virtual_psize;
	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_2M].shift))
		psize = MMU_PAGE_2M;
	else if (page_size == (1UL << mmu_psize_defs[MMU_PAGE_1G].shift))
		psize = MMU_PAGE_1G;
	else
		return -1;
	return psize;
}

static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
					     unsigned long end, int psize);

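/*
 * radix__tlb_flush() completes an mmu_gather: full-mm teardown always
 * takes the RIC=2 path; a page size with no matching MMU psize falls
 * back to an mm-wide flush; otherwise the gathered range is flushed at
 * that psize, including the PWC only if need_flush_all was set (see
 * radix__flush_tlb_pwc() above).
 */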
void radix__tlb_flush(struct mmu_gather *tlb)
{
	int psize = 0;
	struct mm_struct *mm = tlb->mm;
	int page_size = tlb->page_size;

	/*
	 * if page size is not something we understand, do a full mm flush
	 *
	 * A "fullmm" flush must always do a flush_all_mm (RIC=2) flush
	 * that flushes the process table entry cache upon process teardown.
	 * See the comment for radix in arch_exit_mmap().
	 */
	if (tlb->fullmm) {
		radix__flush_all_mm(mm);
	} else if ((psize = radix_get_mmu_psize(page_size)) == -1) {
		if (!tlb->need_flush_all)
			radix__flush_tlb_mm(mm);
		else
			radix__flush_all_mm(mm);
	} else {
		unsigned long start = tlb->start;
		unsigned long end = tlb->end;

		if (!tlb->need_flush_all)
			radix__flush_tlb_range_psize(mm, start, end, psize);
		else
			radix__flush_tlb_pwc_range_psize(mm, start, end, psize);
	}
	tlb->need_flush_all = 0;
}

static inline void __radix__flush_tlb_range_psize(struct mm_struct *mm,
				unsigned long start, unsigned long end,
				int psize, bool also_pwc)
{
	unsigned long pid;
	unsigned int page_shift = mmu_psize_defs[psize].shift;
	unsigned long page_size = 1UL << page_shift;
	unsigned long nr_pages = (end - start) >> page_shift;
	bool local, full;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	preempt_disable();
	if (mm_is_thread_local(mm)) {
		local = true;
		full = (end == TLB_FLUSH_ALL ||
				nr_pages > tlb_local_single_page_flush_ceiling);
	} else {
		local = false;
		full = (end == TLB_FLUSH_ALL ||
				nr_pages > tlb_single_page_flush_ceiling);
	}

	if (full) {
		if (local)
			_tlbiel_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
		else
			_tlbie_pid(pid, also_pwc ? RIC_FLUSH_ALL : RIC_FLUSH_TLB);
	} else {
		if (local)
			_tlbiel_va_range(start, end, pid, page_size, psize, also_pwc);
		else
			_tlbie_va_range(start, end, pid, page_size, psize, also_pwc);
	}
	preempt_enable();
}

void radix__flush_tlb_range_psize(struct mm_struct *mm, unsigned long start,
				  unsigned long end, int psize)
{
	return __radix__flush_tlb_range_psize(mm, start, end, psize, false);
}

static void radix__flush_tlb_pwc_range_psize(struct mm_struct *mm, unsigned long start,
					     unsigned long end, int psize)
{
	__radix__flush_tlb_range_psize(mm, start, end, psize, true);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void radix__flush_tlb_collapsed_pmd(struct mm_struct *mm, unsigned long addr)
{
	unsigned long pid, end;

	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

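	/*
	 * A 2MB PMD collapse replaces 512 4K PTEs; flushing those one by
	 * one (plus the PWC for the freed page table) is likely not worth
	 * it, so just invalidate the whole context.
	 */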
	/* 4k page size, just blow the world */
	if (PAGE_SIZE == 0x1000) {
		radix__flush_all_mm(mm);
		return;
	}

	end = addr + HPAGE_PMD_SIZE;

	/* Otherwise first do the PWC, then iterate the pages. */
	preempt_disable();

	if (mm_is_thread_local(mm)) {
		_tlbiel_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
	} else {
		_tlbie_va_range(addr, end, pid, PAGE_SIZE, mmu_virtual_psize, true);
	}

	preempt_enable();
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

void radix__flush_tlb_lpid_va(unsigned long lpid, unsigned long gpa,
			      unsigned long page_size)
{
	unsigned long rb, rs, prs, r;
	unsigned long ap;
	unsigned long ric = RIC_FLUSH_TLB;

	ap = mmu_get_ap(radix_get_mmu_psize(page_size));
	rb = gpa & ~(PPC_BITMASK(52, 63));
	rb |= ap << PPC_BITLSHIFT(58);
	rs = lpid & ((1UL << 32) - 1);
	prs = 0; /* partition scoped */
	r = 1; /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}
EXPORT_SYMBOL(radix__flush_tlb_lpid_va);

void radix__flush_tlb_lpid(unsigned long lpid)
{
	unsigned long rb, rs, prs, r;
	unsigned long ric = RIC_FLUSH_ALL;

	rb = 0x2 << PPC_BITLSHIFT(53); /* IS = 2 */
	rs = lpid & ((1UL << 32) - 1);
	prs = 0; /* partition scoped */
	r = 1; /* radix format */

	asm volatile("ptesync": : :"memory");
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
	trace_tlbie(lpid, 0, rb, rs, ric, prs, r);
}
EXPORT_SYMBOL(radix__flush_tlb_lpid);

void radix__flush_pmd_tlb_range(struct vm_area_struct *vma,
				unsigned long start, unsigned long end)
{
	radix__flush_tlb_range_psize(vma->vm_mm, start, end, MMU_PAGE_2M);
}
EXPORT_SYMBOL(radix__flush_pmd_tlb_range);

void radix__flush_tlb_all(void)
{
	unsigned long rb, prs, r, rs;
	unsigned long ric = RIC_FLUSH_ALL;

	rb = 0x3 << PPC_BITLSHIFT(53); /* IS = 3 */
	prs = 0; /* partition scoped */
	r = 1; /* radix format */
	rs = 1 & ((1UL << 32) - 1); /* any LPID value to flush guest mappings */

	asm volatile("ptesync": : :"memory");
	/*
	 * now flush guest entries by passing PRS = 1 and LPID != 0
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(1), "i"(ric), "r"(rs) : "memory");
	trace_tlbie(0, 0, rb, rs, ric, prs, r);
	/*
	 * now flush host entries by passing PRS = 0 and LPID == 0
	 */
	asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
		     : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(0) : "memory");
	asm volatile("eieio; tlbsync; ptesync": : :"memory");
	trace_tlbie(0, 0, rb, 0, ric, prs, r);
}

void radix__flush_tlb_pte_p9_dd1(unsigned long old_pte, struct mm_struct *mm,
				 unsigned long address)
{
	/*
	 * We track page size in pte only for DD1, so we can
	 * call this only on DD1.
	 */
	if (!cpu_has_feature(CPU_FTR_POWER9_DD1)) {
		VM_WARN_ON(1);
		return;
	}

	if (old_pte & R_PAGE_LARGE)
		radix__flush_tlb_page_psize(mm, address, MMU_PAGE_2M);
	else
		radix__flush_tlb_page_psize(mm, address, mmu_virtual_psize);
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
{
	unsigned int pid = mm->context.id;

	if (unlikely(pid == MMU_NO_CONTEXT))
		return;

	/*
	 * If this context hasn't run on that CPU before and KVM is
	 * around, there's a slim chance that the guest on another
	 * CPU just brought an obsolete translation into the TLB of
	 * this CPU due to a bad prefetch using the guest PID on
	 * the way into the hypervisor.
	 *
	 * We work around this here. If KVM is possible, we check if
	 * any sibling thread is in KVM. If it is, the window may exist
	 * and thus we flush that PID from the core.
	 *
	 * A potential future improvement would be to mark which PIDs
	 * have never been used on the system and avoid it if the PID
	 * is new and the process has no other cpumask bit set.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE) && radix_enabled()) {
		int cpu = smp_processor_id();
		int sib = cpu_first_thread_sibling(cpu);
		bool flush = false;

		for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
			if (sib == cpu)
				continue;
			if (paca[sib].kvm_hstate.kvm_vcpu)
				flush = true;
		}
		if (flush)
			_tlbiel_pid(pid, RIC_FLUSH_ALL);
	}
}
EXPORT_SYMBOL_GPL(radix_kvm_prefetch_workaround);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */