/*
 *  linux/arch/arm/mm/fault-armv.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/pagemap.h>

#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#include "mm.h"

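/*
 * Memory type used when a userspace mapping has to be demoted to avoid
 * cache aliases: "bufferable" (uncached, write buffer on) by default,
 * switched to fully uncached by check_writebuffer_bugs() below if the
 * write buffer itself has physical address aliasing problems.
 */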
static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;

/*
 * We take the easy way out of this problem - we make the
 * PTE uncacheable.  However, we leave the write buffer on.
 *
 * Note that the pte lock held when calling update_mmu_cache must also
 * guard the pte (somewhere else in the same mm) that we modify here.
 * Therefore those configurations which might call adjust_pte (those
 * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
 */
static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte, entry;
        int ret;

        pgd = pgd_offset(vma->vm_mm, address);
        if (pgd_none(*pgd))
                goto no_pgd;
        if (pgd_bad(*pgd))
                goto bad_pgd;

        pmd = pmd_offset(pgd, address);
        if (pmd_none(*pmd))
                goto no_pmd;
        if (pmd_bad(*pmd))
                goto bad_pmd;

        pte = pte_offset_map(pmd, address);
        entry = *pte;

        /*
         * If this page is present, it's actually being shared.
         */
        ret = pte_present(entry);

        /*
         * If this page isn't present, or is already setup to
         * fault (ie, is old), we can safely ignore any issues.
         */
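        /*
         * Demote the mapping to the shared memory type: flush any cached
         * data for the old mapping (including the outer cache), rewrite
         * the memory type bits in the PTE, then drop the stale TLB entry.
         */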
        if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
                unsigned long pfn = pte_pfn(entry);
                flush_cache_page(vma, address, pfn);
                outer_flush_range((pfn << PAGE_SHIFT),
                                  (pfn << PAGE_SHIFT) + PAGE_SIZE);
                pte_val(entry) &= ~L_PTE_MT_MASK;
                pte_val(entry) |= shared_pte_mask;
                set_pte_at(vma->vm_mm, address, pte, entry);
                flush_tlb_page(vma, address);
        }
        pte_unmap(pte);
        return ret;

bad_pgd:
        pgd_ERROR(*pgd);
        pgd_clear(pgd);
no_pgd:
        return 0;

bad_pmd:
        pmd_ERROR(*pmd);
        pmd_clear(pmd);
no_pmd:
        return 0;
}

static void
make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
{
        struct mm_struct *mm = vma->vm_mm;
        struct vm_area_struct *mpnt;
        struct prio_tree_iter iter;
        unsigned long offset;
        pgoff_t pgoff;
        int aliases = 0;

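        /*
         * Page offset of the faulting address within the mapped object;
         * used below to find the corresponding page in every other VMA
         * that maps the same object.
         */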
        pgoff = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);

        /*
         * If we have any shared mappings that are in the same mm
         * space, then we need to handle them specially to maintain
         * cache coherency.
         */
        flush_dcache_mmap_lock(mapping);
        vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
                /*
                 * If this VMA is not in our MM, we can ignore it.
                 * Note that we intentionally mask out the VMA
                 * that we are fixing up.
                 */
                if (mpnt->vm_mm != mm || mpnt == vma)
                        continue;
                if (!(mpnt->vm_flags & VM_MAYSHARE))
                        continue;
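                /*
                 * Translate the object page offset back into a user
                 * address within this other mapping and adjust its PTE.
                 */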
                offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
                aliases += adjust_pte(mpnt, mpnt->vm_start + offset);
        }
        flush_dcache_mmap_unlock(mapping);
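        /*
         * If other mappings in this mm had to be adjusted, adjust the
         * faulting mapping as well; otherwise just flush the cache for
         * this page in the faulting VMA.
         */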
        if (aliases)
                adjust_pte(vma, addr);
        else
                flush_cache_page(vma, addr, pfn);
}

/*
 * Take care of architecture specific things when placing a new PTE into
 * a page table, or changing an existing PTE.  Basically, there are two
 * things that we need to take care of:
 *
 *  1. If PG_dcache_dirty is set for the page, we need to ensure
 *     that any cache entries for the kernel's virtual memory
 *     range are written back to the page.
 *  2. If we have multiple shared mappings of the same space in
 *     an object, we need to deal with the cache aliasing issues.
 *
 * Note that the pte lock will be held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
        unsigned long pfn = pte_pfn(pte);
        struct address_space *mapping;
        struct page *page;

        if (!pfn_valid(pfn))
                return;

        /*
         * The zero page is never written to, so never has any dirty
         * cache lines, and therefore never needs to be flushed.
         */
        page = pfn_to_page(pfn);
        if (page == ZERO_PAGE(0))
                return;

        mapping = page_mapping(page);
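        /*
         * Deferred D-cache flush: PG_dcache_dirty marks pages whose kernel
         * mapping may still have dirty cache lines; write them back before
         * the page is exposed through a user mapping (done here only on
         * non-SMP builds).
         */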
#ifndef CONFIG_SMP
        if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
                __flush_dcache_page(mapping, page);
#endif
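        /*
         * A VIVT D-cache can alias between shared user mappings, so fix
         * those up; on other cache types only the I-cache needs attention,
         * and only for executable mappings.
         */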
        if (mapping) {
                if (cache_is_vivt())
                        make_coherent(mapping, vma, addr, pfn);
                else if (vma->vm_flags & VM_EXEC)
                        __flush_icache_all();
        }
}

/*
 * Check whether the write buffer has physical address aliasing
 * issues.  If it has, we need to avoid them for the case where
 * we have several shared mappings of the same object in user
 * space.
 */
static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
{
        register unsigned long zero = 0, one = 1, val;

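        /*
         * p1 and p2 map the same physical word.  Write 1 through one
         * alias, then 0 through the other; a read back through the first
         * must see 0.  If it does not, writes through the two aliases
         * are not kept coherent by the write buffer.
         */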
        local_irq_disable();
        mb();
        *p1 = one;
        mb();
        *p2 = zero;
        mb();
        val = *p1;
        mb();
        local_irq_enable();
        return val != zero;
}

void __init check_writebuffer_bugs(void)
{
        struct page *page;
        const char *reason;
        unsigned long v = 1;

        printk(KERN_INFO "CPU: Testing write buffer coherency: ");

        page = alloc_page(GFP_KERNEL);
        if (page) {
                unsigned long *p1, *p2;
                pgprot_t prot = __pgprot(L_PTE_PRESENT|L_PTE_YOUNG|
                                         L_PTE_DIRTY|L_PTE_WRITE|
                                         L_PTE_MT_BUFFERABLE);

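                /*
                 * Map the same page at two different virtual addresses,
                 * using the bufferable memory type, so the test exercises
                 * two aliases of one physical page.
                 */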
                p1 = vmap(&page, 1, VM_IOREMAP, prot);
                p2 = vmap(&page, 1, VM_IOREMAP, prot);

                if (p1 && p2) {
                        v = check_writebuffer(p1, p2);
                        reason = "enabling work-around";
                } else {
                        reason = "unable to map memory\n";
                }

                vunmap(p1);
                vunmap(p2);
                put_page(page);
        } else {
                reason = "unable to grab page\n";
        }

        if (v) {
                printk("failed, %s\n", reason);
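                /*
                 * Work around the problem by making shared user mappings
                 * fully uncached instead of merely bufferable.
                 */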
                shared_pte_mask = L_PTE_MT_UNCACHED;
        } else {
                printk("ok\n");
        }
}