/*
 * This file contains the routines for flushing entries from the
 * TLB and MMU hash table.
 *
 * Derived from arch/ppc64/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *   Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *   and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *     Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *   Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/bug.h>

#include <trace/events/thp.h>

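/*
 * Per-CPU batch of pending hash-table invalidations: filled by
 * hpte_need_flush() below and drained by __flush_tlb_pending().
 */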
DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);

/*
 * A linux PTE was changed and the corresponding hash table entry
 * needs to be flushed. This function will either perform the flush
 * immediately or will batch it up if the current CPU has an active
 * batch on it.
 */
void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, unsigned long pte, int huge)
{
	unsigned long vpn;
	struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
	unsigned long vsid;
	unsigned int psize;
	int ssize;
	real_pte_t rpte;
	int i;

	i = batch->index;

	/* Get page size (maybe move back to caller).
	 *
	 * NOTE: when using special 64K mappings in a 4K environment,
	 * like for SPEs, we obtain the page size from the slice, which
	 * thus must still exist (and thus the VMA not be reused) at
	 * the time of this call.
	 */
	if (huge) {
#ifdef CONFIG_HUGETLB_PAGE
		psize = get_slice_psize(mm, addr);
		/* Mask the address for the correct page size */
		addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
#else
		BUG();
		psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
#endif
	} else {
		psize = pte_pagesize_index(mm, addr, pte);
		/* Mask the address for the standard page size. If we
		 * have a 64k page kernel, but the hardware does not
		 * support 64k pages, this might be different from the
		 * hardware page size encoded in the slice table. */
		addr &= PAGE_MASK;
	}

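	/*
	 * Illustration (added, not from the original comments): for a
	 * 16M huge page mmu_psize_defs[psize].shift is 24, so the mask
	 * above clears the low 24 bits, e.g. 0x10abcdef -> 0x10000000.
	 */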
	/* Build full vaddr */
	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_vsid(mm->context.id, addr, ssize);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}
	WARN_ON(vsid == 0);
	vpn = hpt_vpn(addr, vsid, ssize);
	rpte = __real_pte(__pte(pte), ptep);
93 | ||
94 | /* | |
95 | * Check if we have an active batch on this CPU. If not, just | |
96 | * flush now and return. For now, we don global invalidates | |
97 | * in that case, might be worth testing the mm cpu mask though | |
98 | * and decide to use local invalidates instead... | |
99 | */ | |
100 | if (!batch->active) { | |
5524a27d | 101 | flush_hash_page(vpn, rpte, psize, ssize, 0); |
f342552b | 102 | put_cpu_var(ppc64_tlb_batch); |
a741e679 BH |
103 | return; |
104 | } | |
105 | ||
1da177e4 LT |
106 | /* |
107 | * This can happen when we are in the middle of a TLB batch and | |
108 | * we encounter memory pressure (eg copy_page_range when it tries | |
109 | * to allocate a new pte). If we have to reclaim memory and end | |
110 | * up scanning and resetting referenced bits then our batch context | |
111 | * will change mid stream. | |
3c726f8d BH |
112 | * |
113 | * We also need to ensure only one page size is present in a given | |
114 | * batch | |
1da177e4 | 115 | */ |
1189be65 PM |
116 | if (i != 0 && (mm != batch->mm || batch->psize != psize || |
117 | batch->ssize != ssize)) { | |
a741e679 | 118 | __flush_tlb_pending(batch); |
1da177e4 LT |
119 | i = 0; |
120 | } | |
1da177e4 | 121 | if (i == 0) { |
1da177e4 | 122 | batch->mm = mm; |
3c726f8d | 123 | batch->psize = psize; |
1189be65 | 124 | batch->ssize = ssize; |
1da177e4 | 125 | } |
a741e679 | 126 | batch->pte[i] = rpte; |
5524a27d | 127 | batch->vpn[i] = vpn; |
1da177e4 LT |
128 | batch->index = ++i; |
129 | if (i >= PPC64_TLB_BATCH_NR) | |
a741e679 | 130 | __flush_tlb_pending(batch); |
f342552b | 131 | put_cpu_var(ppc64_tlb_batch); |
1da177e4 LT |
132 | } |
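
/*
 * Example (an illustrative sketch, not a call site from this file):
 * callers typically clear the linux PTE first and then hand the old
 * value to hpte_need_flush(), along the lines of:
 *
 *	old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
 *	if (old & H_PAGE_HASHPTE)
 *		hpte_need_flush(mm, addr, ptep, old, 0);
 *
 * (pte_update() lives in the hash pgtable headers; it is shown here
 * only to illustrate the contract of passing the old PTE value.)
 */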
133 | ||
a741e679 BH |
134 | /* |
135 | * This function is called when terminating an mmu batch or when a batch | |
136 | * is full. It will perform the flush of all the entries currently stored | |
137 | * in a batch. | |
138 | * | |
139 | * Must be called from within some kind of spinlock/non-preempt region... | |
140 | */ | |
1da177e4 LT |
141 | void __flush_tlb_pending(struct ppc64_tlb_batch *batch) |
142 | { | |
	const struct cpumask *tmp;
	int i, local = 0;

	i = batch->index;
	tmp = cpumask_of(smp_processor_id());
	if (cpumask_equal(mm_cpumask(batch->mm), tmp))
		local = 1;
	if (i == 1)
		flush_hash_page(batch->vpn[0], batch->pte[0],
				batch->psize, batch->ssize, local);
	else
		flush_hash_range(i, local);
	batch->index = 0;
}
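
/*
 * Added note: "local" above means the mm has only ever run on the
 * current CPU, so the flush can use a CPU-local invalidate (tlbiel)
 * rather than a broadcast one (tlbie); the low-level choice is made
 * inside flush_hash_page()/flush_hash_range().
 */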
157 | ||
676012a6 | 158 | void hash__tlb_flush(struct mmu_gather *tlb) |
c7cc58a1 | 159 | { |
d6bf29b4 | 160 | struct ppc64_tlb_batch *tlbbatch = &get_cpu_var(ppc64_tlb_batch); |
c7cc58a1 BH |
161 | |
162 | /* If there's a TLB batch pending, then we must flush it because the | |
163 | * pages are going to be freed and we really don't want to have a CPU | |
164 | * access a freed page because it has a stale TLB | |
165 | */ | |
166 | if (tlbbatch->index) | |
167 | __flush_tlb_pending(tlbbatch); | |
168 | ||
d6bf29b4 | 169 | put_cpu_var(ppc64_tlb_batch); |
c7cc58a1 BH |
170 | } |
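
/*
 * Added note (an assumption about the surrounding kernel, not stated in
 * this file): hash__tlb_flush() is expected to be reached via the arch
 * tlb_flush() hook when an mmu_gather is torn down, with a separate
 * radix__tlb_flush() counterpart for the radix MMU.
 */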
171 | ||
3d5134ee BH |
172 | /** |
173 | * __flush_hash_table_range - Flush all HPTEs for a given address range | |
174 | * from the hash table (and the TLB). But keeps | |
175 | * the linux PTEs intact. | |
176 | * | |
177 | * @mm : mm_struct of the target address space (generally init_mm) | |
178 | * @start : starting address | |
179 | * @end : ending address (not included in the flush) | |
180 | * | |
181 | * This function is mostly to be used by some IO hotplug code in order | |
182 | * to remove all hash entries from a given address range used to map IO | |
183 | * space on a removed PCI-PCI bidge without tearing down the full mapping | |
184 | * since 64K pages may overlap with other bridges when using 64K pages | |
185 | * with 4K HW pages on IO space. | |
186 | * | |
40b31360 SR |
187 | * Because of that usage pattern, it is implemented for small size rather |
188 | * than speed. | |
3d5134ee | 189 | */ |
3d5134ee BH |
190 | void __flush_hash_table_range(struct mm_struct *mm, unsigned long start, |
191 | unsigned long end) | |
192 | { | |
	bool is_thp;
	int hugepage_shift;
	unsigned long flags;

	start = _ALIGN_DOWN(start, PAGE_SIZE);
	end = _ALIGN_UP(end, PAGE_SIZE);

	BUG_ON(!mm->pgd);

	/* Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	for (; start < end; start += PAGE_SIZE) {
		pte_t *ptep = find_linux_pte_or_hugepte(mm->pgd, start, &is_thp,
							&hugepage_shift);
		unsigned long pte;

		if (ptep == NULL)
			continue;
		pte = pte_val(*ptep);
		if (is_thp)
			trace_hugepage_invalidate(start, pte);
		if (!(pte & H_PAGE_HASHPTE))
			continue;
		if (unlikely(is_thp))
			hpte_do_hugepage_flush(mm, start, (pmd_t *)ptep, pte);
		else
			hpte_need_flush(mm, start, ptep, pte, hugepage_shift);
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}
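
/*
 * Illustrative usage (a sketch based on the comment above; io_start and
 * io_size are hypothetical names, not a real call site):
 *
 *	__flush_hash_table_range(&init_mm, io_start, io_start + io_size);
 */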

void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
{
	pte_t *pte;
	pte_t *start_pte;
	unsigned long flags;

	addr = _ALIGN_DOWN(addr, PMD_SIZE);
	/* Note: Normally, we should only ever use a batch within a
	 * PTE locked section. This violates the rule, but will work
	 * since we don't actually modify the PTEs, we just flush the
	 * hash while leaving the PTEs intact (including their reference
	 * to being hashed). This is not the most performance oriented
	 * way to do things but is fine for our needs here.
	 */
	local_irq_save(flags);
	arch_enter_lazy_mmu_mode();
	start_pte = pte_offset_map(pmd, addr);
	for (pte = start_pte; pte < start_pte + PTRS_PER_PTE; pte++) {
		unsigned long pteval = pte_val(*pte);
		if (pteval & H_PAGE_HASHPTE)
			hpte_need_flush(mm, addr, pte, pteval, 0);
		addr += PAGE_SIZE;
	}
	arch_leave_lazy_mmu_mode();
	local_irq_restore(flags);
}
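
/*
 * Added note: the loop above visits all PTRS_PER_PTE software PTEs under
 * a single PMD (addr is first rounded down to PMD_SIZE) and flushes the
 * hash entry for every PTE with H_PAGE_HASHPTE set, invalidating the
 * whole page-table page in one pass.
 */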