/*
 *  linux/arch/arm/mm/fault-armv.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>

#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "mm.h"

static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;

/*
 * We take the easy way out of this problem - we make the
 * PTE uncacheable.  However, we leave the write buffer on.
 *
 * Note that the pte lock held when calling update_mmu_cache must also
 * guard the pte (somewhere else in the same mm) that we modify here.
 * Therefore those configurations which might call adjust_pte (those
 * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
 */
static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
	unsigned long pfn, pte_t *ptep)
{
	pte_t entry = *ptep;
	int ret;

	/*
	 * If this page is present, it's actually being shared.
	 */
	ret = pte_present(entry);

	/*
	 * If this page isn't present, or is already setup to
	 * fault (ie, is old), we can safely ignore any issues.
	 */
	if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
		flush_cache_page(vma, address, pfn);
		outer_flush_range((pfn << PAGE_SHIFT),
				  (pfn << PAGE_SHIFT) + PAGE_SIZE);
		pte_val(entry) &= ~L_PTE_MT_MASK;
		pte_val(entry) |= shared_pte_mask;
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_page(vma, address);
	}

	return ret;
}
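
/*
 * Walk the page tables for "address" in the given VMA and, if a pte is
 * mapped there, apply do_adjust_pte() to it while holding the pte lock.
 * Returns non-zero if the pte was present.
 */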
static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
	unsigned long pfn)
{
	spinlock_t *ptl;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	pgd = pgd_offset(vma->vm_mm, address);
	if (pgd_none_or_clear_bad(pgd))
		return 0;

	pmd = pmd_offset(pgd, address);
	if (pmd_none_or_clear_bad(pmd))
		return 0;

	/*
	 * This is called while another page table is mapped, so we
	 * must use the nested version.  This also means we need to
	 * open-code the spin-locking.
	 */
	ptl = pte_lockptr(vma->vm_mm, pmd);
	pte = pte_offset_map_nested(pmd, address);
	spin_lock(ptl);

	ret = do_adjust_pte(vma, address, pfn, pte);

	spin_unlock(ptl);
	pte_unmap_nested(pte);

	return ret;
}
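
/*
 * Scan all other shared (VM_MAYSHARE) mappings of the same file within
 * this mm and switch any other user-space mappings of this page to the
 * shared (uncacheable) memory type via adjust_pte().  If any aliases
 * were found, the faulting pte is adjusted the same way; otherwise the
 * cache is simply flushed for this page.
 */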
static void
make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
	unsigned long addr, pte_t *ptep, unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	unsigned long offset;
	pgoff_t pgoff;
	int aliases = 0;

	pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

	/*
	 * If we have any shared mappings that are in the same mm
	 * space, then we need to handle them specially to maintain
	 * cache coherency.
	 */
	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		/*
		 * If this VMA is not in our MM, we can ignore it.
		 * Note that we intentionally mask out the VMA
		 * that we are fixing up.
		 */
		if (mpnt->vm_mm != mm || mpnt == vma)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn);
	}
	flush_dcache_mmap_unlock(mapping);
	if (aliases)
		do_adjust_pte(vma, addr, pfn, ptep);
	else
		flush_cache_page(vma, addr, pfn);
}

/*
 * Take care of architecture specific things when placing a new PTE into
 * a page table, or changing an existing PTE.  Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_dirty is set for the page, we need to ensure
 *     that any cache entries for the kernel's virtual memory
 *     range are written back to the page.
 *  2. If we have multiple shared mappings of the same space in
 *     an object, we need to deal with the cache aliasing issues.
 *
 * Note that the pte lock will be held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
	pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct address_space *mapping;
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	page = pfn_to_page(pfn);
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);
#ifndef CONFIG_SMP
	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
		__flush_dcache_page(mapping, page);
#endif
	if (mapping) {
		if (cache_is_vivt())
			make_coherent(mapping, vma, addr, ptep, pfn);
		else if (vma->vm_flags & VM_EXEC)
			__flush_icache_all();
	}
}

/*
 * Check whether the write buffer has physical address aliasing
 * issues.  If it has, we need to avoid them for the case where
 * we have several shared mappings of the same object in user
 * space.
 */
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
	register unsigned long zero = 0, one = 1, val;

	local_irq_disable();
	mb();
	*p1 = one;
	mb();
	*p2 = zero;
	mb();
	val = *p1;
	mb();
	local_irq_enable();
	return val != zero;
}
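
/*
 * Map the same page at two different virtual addresses and use
 * check_writebuffer() to see whether a write through one mapping is
 * visible through the other.  If it is not, enable the work-around:
 * use fully uncached (L_PTE_MT_UNCACHED) ptes for shared mappings by
 * updating shared_pte_mask.
 */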
void __init check_writebuffer_bugs(void)
{
	struct page *page;
	const char *reason;
	unsigned long v = 1;

	printk(KERN_INFO "CPU: Testing write buffer coherency: ");

	page = alloc_page(GFP_KERNEL);
	if (page) {
		unsigned long *p1, *p2;
		pgprot_t prot = __pgprot_modify(PAGE_KERNEL,
					L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE);

		p1 = vmap(&page, 1, VM_IOREMAP, prot);
		p2 = vmap(&page, 1, VM_IOREMAP, prot);

		if (p1 && p2) {
			v = check_writebuffer(p1, p2);
			reason = "enabling work-around";
		} else {
			reason = "unable to map memory\n";
		}

		vunmap(p1);
		vunmap(p2);
		put_page(page);
	} else {
		reason = "unable to grab page\n";
	}

	if (v) {
		printk("failed, %s\n", reason);
		shared_pte_mask = L_PTE_MT_UNCACHED;
	} else {
		printk("ok\n");
	}
}