/*
 * This file contains common routines for dealing with the freeing of
 * page tables, along with common page table handling code.
 *
 * Derived from arch/powerpc/mm/tlb_64.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 *   Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

#ifdef CONFIG_SMP

/*
 * Handle batching of page table freeing on SMP. Page tables are
 * queued up and sent to be freed later by RCU in order to avoid
 * freeing a page table page that is being walked without locks.
 */

static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
static unsigned long pte_freelist_forced_free;

struct pte_freelist_batch
{
	struct rcu_head rcu;
	unsigned int index;
	pgtable_free_t tables[0];
};

#define PTE_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
	  / sizeof(pgtable_free_t))
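
/*
 * Rough sizing check (a sketch, not from the source: assumes 4K pages,
 * an 8-byte pgtable_free_t and a 24-byte batch header on 64-bit):
 * PTE_FREELIST_SIZE works out to about (4096 - 24) / 8 = 509 entries,
 * so a single GFP_ATOMIC page batches ~509 deferred page table frees.
 */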

static void pte_free_smp_sync(void *arg)
{
	/* Do nothing, just ensure we sync with all CPUs */
}

/* This is only called when we are critically out of memory
 * (and fail to get a page in pte_free_tlb).
 */
static void pgtable_free_now(pgtable_free_t pgf)
{
	pte_freelist_forced_free++;

	smp_call_function(pte_free_smp_sync, NULL, 1);

	pgtable_free(pgf);
}
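
/*
 * Note on the fallback above (my reading, not stated in the source):
 * the empty smp_call_function() works as a poor man's grace period.
 * Lockless page table walkers run with interrupts disabled, so once
 * every other CPU has taken and completed the IPI, none of them can
 * still be mid-walk, and the immediate pgtable_free() is safe.
 */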

static void pte_free_rcu_callback(struct rcu_head *head)
{
	struct pte_freelist_batch *batch =
		container_of(head, struct pte_freelist_batch, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		pgtable_free(batch->tables[i]);

	free_page((unsigned long)batch);
}

static void pte_free_submit(struct pte_freelist_batch *batch)
{
	INIT_RCU_HEAD(&batch->rcu);
	call_rcu(&batch->rcu, pte_free_rcu_callback);
}
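
/*
 * call_rcu() defers pte_free_rcu_callback() until a full RCU grace
 * period has elapsed, i.e. until every CPU has passed through a
 * quiescent state. Any lockless walker that could still reference the
 * queued tables must have finished by then, so freeing them is safe.
 */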

void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
{
	/* This is safe since tlb_gather_mmu has disabled preemption */
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

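	/*
	 * Fast path: if no one else has a reference to this mm (single
	 * user) or it has only ever run on this CPU, no other CPU can
	 * be walking these page tables, so free immediately.
	 */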
	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpumask_equal(mm_cpumask(tlb->mm), cpumask_of(smp_processor_id()))) {
		pgtable_free(pgf);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC);
		if (*batchp == NULL) {
			pgtable_free_now(pgf);
			return;
		}
		(*batchp)->index = 0;
	}
	(*batchp)->tables[(*batchp)->index++] = pgf;
	if ((*batchp)->index == PTE_FREELIST_SIZE) {
		pte_free_submit(*batchp);
		*batchp = NULL;
	}
}

void pte_free_finish(void)
{
	/* This is safe since tlb_gather_mmu has disabled preemption */
	struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);

	if (*batchp == NULL)
		return;
	pte_free_submit(*batchp);
	*batchp = NULL;
}
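
/*
 * Sketch of the overall flow (hedged: the call sites live outside this
 * file, in the generic mmu_gather code and the arch TLB flush hooks):
 *
 *	tlb = tlb_gather_mmu(mm, 0);		// disables preemption
 *	...
 *	pgtable_free_tlb(tlb, pgf);		// batch or free each table
 *	...
 *	tlb_finish_mmu(tlb, start, end);	// arch flush is expected
 *						// to call pte_free_finish()
 *
 * so no partially filled batch survives past the gather that pinned us
 * to this CPU.
 */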

#endif /* CONFIG_SMP */

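/*
 * 0x400 is the powerpc instruction storage interrupt vector, so this
 * checks whether the trap being handled was an instruction fetch fault
 * rather than a data access.
 */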
static inline int is_exec_fault(void)
{
	return current->thread.regs && TRAP(current->thread.regs) == 0x400;
}

/* We only try to do i/d cache coherency on stuff that looks like
 * reasonably "normal" PTEs. We currently require a PTE to be present,
 * and we avoid _PAGE_SPECIAL and _PAGE_NO_CACHE. We also only do that
 * on userspace PTEs.
 */
static inline int pte_looks_normal(pte_t pte)
{
	return (pte_val(pte) &
		(_PAGE_PRESENT | _PAGE_SPECIAL | _PAGE_NO_CACHE | _PAGE_USER)) ==
		(_PAGE_PRESENT | _PAGE_USER);
}
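
/*
 * Worked example of the mask above: a present, userspace, cacheable,
 * non-special PTE passes; a kernel PTE (no _PAGE_USER set) or an I/O
 * mapping (_PAGE_NO_CACHE set) does not, and is left alone by the
 * cache coherency filters below.
 */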

static struct page *maybe_pte_to_page(pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page;

	if (unlikely(!pfn_valid(pfn)))
		return NULL;
	page = pfn_to_page(pfn);
	if (PageReserved(page))
		return NULL;
	return page;
}

#if defined(CONFIG_PPC_STD_MMU) || _PAGE_EXEC == 0

/* Server-style MMU handles coherency when hashing if HW exec permission
 * is supported per page (currently 64-bit only). If not, then we always
 * flush the cache for valid PTEs in set_pte. Embedded CPUs without HW
 * exec support fall into the same category.
 */

static pte_t set_pte_filter(pte_t pte)
{
	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
	if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
				       cpu_has_feature(CPU_FTR_NOEXECUTE))) {
		struct page *pg = maybe_pte_to_page(pte);
		if (!pg)
			return pte;
		if (!test_bit(PG_arch_1, &pg->flags)) {
			flush_dcache_icache_page(pg);
			set_bit(PG_arch_1, &pg->flags);
		}
	}
	return pte;
}

static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
				     int dirty)
{
	return pte;
}

#else /* defined(CONFIG_PPC_STD_MMU) || _PAGE_EXEC == 0 */

/* Embedded type MMU with HW exec support. This is a bit more complicated
 * as we don't have two bits to spare for _PAGE_EXEC and _PAGE_HWEXEC, so
 * instead we "filter out" the exec permission for non-clean pages.
 */
static pte_t set_pte_filter(pte_t pte)
{
	struct page *pg;

	/* No exec permission in the first place, move on */
	if (!(pte_val(pte) & _PAGE_EXEC) || !pte_looks_normal(pte))
		return pte;

	/* If you set _PAGE_EXEC on weird pages you're on your own */
	pg = maybe_pte_to_page(pte);
	if (unlikely(!pg))
		return pte;

	/* If the page is clean, we move on */
	if (test_bit(PG_arch_1, &pg->flags))
		return pte;

	/* If it's an exec fault, we flush the cache and make it clean */
	if (is_exec_fault()) {
		flush_dcache_icache_page(pg);
		set_bit(PG_arch_1, &pg->flags);
		return pte;
	}

	/* Else, we filter out _PAGE_EXEC */
	return __pte(pte_val(pte) & ~_PAGE_EXEC);
}
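
/*
 * The resulting lazy-exec protocol, as I read the two filters (not
 * spelled out verbatim in the source): a freshly mapped executable
 * page first has _PAGE_EXEC filtered out here; the exec fault that
 * follows lands in set_access_flags_filter() below, which flushes the
 * icache, marks the page clean via PG_arch_1 and restores _PAGE_EXEC.
 * The cache flush is thus paid only for pages that actually execute.
 */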

static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
				     int dirty)
{
	struct page *pg;

	/* So here, we only care about exec faults, as we use them
	 * to recover lost _PAGE_EXEC and perform I$/D$ coherency
	 * if necessary. Also if _PAGE_EXEC is already set, same deal,
	 * we just bail out.
	 */
	if (dirty || (pte_val(pte) & _PAGE_EXEC) || !is_exec_fault())
		return pte;

#ifdef CONFIG_DEBUG_VM
	/* So this is an exec fault, _PAGE_EXEC is not set. If it was
	 * an error we would have bailed out earlier in do_page_fault()
	 * but let's make sure of it.
	 */
	if (WARN_ON(!(vma->vm_flags & VM_EXEC)))
		return pte;
#endif /* CONFIG_DEBUG_VM */

	/* If you set _PAGE_EXEC on weird pages you're on your own */
	pg = maybe_pte_to_page(pte);
	if (unlikely(!pg))
		goto bail;

	/* If the page is already clean, we move on */
	if (test_bit(PG_arch_1, &pg->flags))
		goto bail;

	/* Clean the page and set PG_arch_1 */
	flush_dcache_icache_page(pg);
	set_bit(PG_arch_1, &pg->flags);

 bail:
	return __pte(pte_val(pte) | _PAGE_EXEC);
}

#endif /* !(defined(CONFIG_PPC_STD_MMU) || _PAGE_EXEC == 0) */

/*
 * set_pte stores a linux PTE into the linux page table.
 */
void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
		pte_t pte)
{
#ifdef CONFIG_DEBUG_VM
	WARN_ON(pte_present(*ptep));
#endif
	/* Note: mm->context.id might not yet have been assigned as
	 * this context might not have been activated yet when this
	 * is called.
	 */
	pte = set_pte_filter(pte);

	/* Perform the setting of the PTE */
	__set_pte_at(mm, addr, ptep, pte, 0);
}

/*
 * This is called when relaxing access to a PTE. It's also called in the page
 * fault path when we don't hit any of the major fault cases, i.e., a minor
 * update of _PAGE_ACCESSED, _PAGE_DIRTY, etc... The generic code will have
 * handled those two for us, we additionally deal with missing execute
 * permission here on some processors.
 */
int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pte_t *ptep, pte_t entry, int dirty)
{
	int changed;

	entry = set_access_flags_filter(entry, vma, dirty);
	changed = !pte_same(*(ptep), entry);
	if (changed) {
		if (!(vma->vm_flags & VM_HUGETLB))
			assert_pte_locked(vma->vm_mm, address);
		__ptep_set_access_flags(ptep, entry);
		flush_tlb_page_nohash(vma, address);
	}
	return changed;
}

#ifdef CONFIG_DEBUG_VM
void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (mm == &init_mm)
		return;
	pgd = mm->pgd + pgd_index(addr);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, addr);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, addr);
	BUG_ON(!pmd_present(*pmd));
	assert_spin_locked(pte_lockptr(mm, pmd));
}
#endif /* CONFIG_DEBUG_VM */