/*
 *  linux/arch/arm/mm/fault-armv.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/gfp.h>

#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "mm.h"

static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;

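/*
 * shared_pte_mask selects the memory type applied to aliased shared
 * mappings: L_PTE_MT_BUFFERABLE (uncacheable, write buffer on) by
 * default, downgraded to L_PTE_MT_UNCACHED by check_writebuffer_bugs()
 * at boot if the write buffer is found to alias physical addresses.
 */
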
/*
 * We take the easy way out of this problem - we make the
 * PTE uncacheable.  However, we leave the write buffer on.
 *
 * Note that the pte lock held when calling update_mmu_cache must also
 * guard the pte (somewhere else in the same mm) that we modify here.
 * Therefore those configurations which might call adjust_pte (those
 * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
 */
static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
	unsigned long pfn, pte_t *ptep)
{
	pte_t entry = *ptep;
	int ret;

	/*
	 * If this page is present, it's actually being shared.
	 */
	ret = pte_present(entry);

	/*
	 * If this page isn't present, or is already setup to
	 * fault (ie, is old), we can safely ignore any issues.
	 */
	if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
		flush_cache_page(vma, address, pfn);
		outer_flush_range((pfn << PAGE_SHIFT),
				  (pfn << PAGE_SHIFT) + PAGE_SIZE);
		pte_val(entry) &= ~L_PTE_MT_MASK;
		pte_val(entry) |= shared_pte_mask;
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_page(vma, address);
	}

	return ret;
}

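/*
 * Walk the page tables for @address in @vma's mm and, if a mapping
 * exists, apply do_adjust_pte() to it while holding the pte lock.
 * Returns non-zero when a present (i.e. shared) pte was found.
 */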
static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
	unsigned long pfn)
{
	spinlock_t *ptl;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	pgd = pgd_offset(vma->vm_mm, address);
	if (pgd_none_or_clear_bad(pgd))
		return 0;

	pmd = pmd_offset(pgd, address);
	if (pmd_none_or_clear_bad(pmd))
		return 0;

	/*
	 * This is called while another page table is mapped, so we
	 * must use the nested version.  This also means we need to
	 * open-code the spin-locking.
	 */
	ptl = pte_lockptr(vma->vm_mm, pmd);
	pte = pte_offset_map_nested(pmd, address);
	spin_lock(ptl);

	ret = do_adjust_pte(vma, address, pfn, pte);

	spin_unlock(ptl);
	pte_unmap_nested(pte);

	return ret;
}

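/*
 * Resolve VIVT cache aliases for @pfn: scan every other shared mapping
 * of the same page offset in this mm and mark each alias uncacheable
 * via adjust_pte().  If any aliases were found, downgrade the faulting
 * pte as well; otherwise a cache flush of this page suffices.
 */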
static void
make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
	unsigned long addr, pte_t *ptep, unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	unsigned long offset;
	pgoff_t pgoff;
	int aliases = 0;

	pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

	/*
	 * If we have any shared mappings that are in the same mm
	 * space, then we need to handle them specially to maintain
	 * cache coherency.
	 */
	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * If this VMA is not in our MM, we can ignore it.
		 * Note that we intentionally mask out the VMA
		 * that we are fixing up.
		 */
		if (mpnt->vm_mm != mm || mpnt == vma)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn);
	}
	flush_dcache_mmap_unlock(mapping);
	if (aliases)
		do_adjust_pte(vma, addr, pfn, ptep);
	else
		flush_cache_page(vma, addr, pfn);
}

/*
 * Take care of architecture specific things when placing a new PTE into
 * a page table, or changing an existing PTE.  Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_dirty is set for the page, we need to ensure
 *     that any cache entries for the kernel's virtual memory
 *     range are written back to the page.
 *  2. If we have multiple shared mappings of the same space in
 *     an object, we need to deal with the cache aliasing issues.
 *
 * Note that the pte lock will be held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
	pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct address_space *mapping;
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	page = pfn_to_page(pfn);
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);
#ifndef CONFIG_SMP
	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
		__flush_dcache_page(mapping, page);
#endif
	if (mapping) {
		if (cache_is_vivt())
			make_coherent(mapping, vma, addr, ptep, pfn);
		else if (vma->vm_flags & VM_EXEC)
			__flush_icache_all();
	}
}

/*
 * Check whether the write buffer has physical address aliasing
 * issues.  If it has, we need to avoid them for the case where
 * we have several shared mappings of the same object in user
 * space.
 */
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
	register unsigned long zero = 0, one = 1, val;

	local_irq_disable();
	mb();
	*p1 = one;
	mb();
	*p2 = zero;
	mb();
	val = *p1;
	mb();
	local_irq_enable();
	return val != zero;
}

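/*
 * Map the same physical page at two kernel virtual addresses with the
 * bufferable memory type and run check_writebuffer() on the pair.
 * Since p1 and p2 alias the same page, the zero written through p2
 * must be visible through p1; if a stale value is read back instead,
 * the write buffer aliases physical addresses and shared mappings must
 * fall back to fully uncached PTEs.
 */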
void __init check_writebuffer_bugs(void)
{
	struct page *page;
	const char *reason;
	unsigned long v = 1;

	printk(KERN_INFO "CPU: Testing write buffer coherency: ");

	page = alloc_page(GFP_KERNEL);
	if (page) {
		unsigned long *p1, *p2;
		pgprot_t prot = __pgprot_modify(PAGE_KERNEL,
					L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE);

		p1 = vmap(&page, 1, VM_IOREMAP, prot);
		p2 = vmap(&page, 1, VM_IOREMAP, prot);

		if (p1 && p2) {
			v = check_writebuffer(p1, p2);
			reason = "enabling work-around";
		} else {
			reason = "unable to map memory";
		}

		vunmap(p1);
		vunmap(p2);
		put_page(page);
	} else {
		reason = "unable to grab page";
	}

	if (v) {
		printk("failed, %s\n", reason);
		shared_pte_mask = L_PTE_MT_UNCACHED;
	} else {
		printk("ok\n");
	}
}