/*
 * arch/xtensa/mm/cache.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001-2006 Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor
 * Marc Gauthier
 *
 */

#include <linux/init.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/bootmem.h>
#include <linux/swap.h>
#include <linux/pagemap.h>

#include <asm/bootparam.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

//#define printd(x...) printk(x)
#define printd(x...) do { } while (0)

/*
 * Note:
 * The kernel provides one architecture bit PG_arch_1 in the page flags that
 * can be used for cache coherency.
 *
 * I$-D$ coherency.
 *
 * The Xtensa architecture doesn't keep the instruction cache coherent with
 * the data cache. We use the architecture bit to indicate if the caches
 * are coherent. The kernel clears this bit whenever a page is added to the
 * page cache. At that time, the caches might not be in sync. We, therefore,
 * define this flag as 'clean' if set.
 *
 * D-cache aliasing.
 *
 * With cache aliasing, we always have to flush the cache when pages are
 * unmapped (see tlb_start_vma()). So, we use this flag to indicate a dirty
 * page.
 */

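/*
 * Illustrative sketch (not part of the original file): how the two
 * PG_arch_1 conventions described above would be queried. With an
 * aliasing D-cache a set bit means "dirty, flush on unmap"; without
 * aliasing a set bit means "I$ and D$ already agree for this page".
 */
#if 0	/* example only */
static inline bool page_dirty_alias(struct page *page)
{
	/* aliasing configuration: flag set == dirty */
	return test_bit(PG_arch_1, &page->flags);
}

static inline bool page_needs_icache_sync(struct page *page)
{
	/* non-aliasing configuration: flag clear == caches may differ */
	return !test_bit(PG_arch_1, &page->flags);
}
#endif
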
#if (DCACHE_WAY_SIZE > PAGE_SIZE)

/*
 * Invalidate the kernel-colored copy of the page when its color differs
 * from the user-space mapping, so no stale lines remain once the page
 * is written through a user-colored alias.
 */
static inline void kmap_invalidate_coherent(struct page *page,
					    unsigned long vaddr)
{
	if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
		unsigned long kvaddr;

		if (!PageHighMem(page)) {
			kvaddr = (unsigned long)page_to_virt(page);

			__invalidate_dcache_page(kvaddr);
		} else {
			kvaddr = TLBTEMP_BASE_1 +
				(page_to_phys(page) & DCACHE_ALIAS_MASK);

			__invalidate_dcache_page_alias(kvaddr,
						       page_to_phys(page));
		}
	}
}

/*
 * Return a kernel virtual address with the same cache color as the
 * user mapping: the page's regular kernel mapping when the colors
 * already agree, or an alias at 'base' otherwise, in which case
 * *paddr tells the caller which physical page to map there.
 */
static inline void *coherent_kvaddr(struct page *page, unsigned long base,
				    unsigned long vaddr, unsigned long *paddr)
{
	if (PageHighMem(page) || !DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) {
		*paddr = page_to_phys(page);
		return (void *)(base + (vaddr & DCACHE_ALIAS_MASK));
	} else {
		*paddr = 0;
		return page_to_virt(page);
	}
}

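/*
 * Illustrative sketch (not part of the original file): the "color"
 * compared by DCACHE_ALIAS_EQ() is the set of index bits above the page
 * offset. Assuming a 32 KiB way size and 4 KiB pages, the color is bits
 * [14:12]: the same physical page mapped at 0x1000 and 0x2000 would
 * occupy different cache lines (an alias), while 0x1000 and 0x9000
 * share color 1 and would not.
 */
#if 0	/* example only */
static inline bool colors_match(unsigned long a, unsigned long b)
{
	/* equivalent to DCACHE_ALIAS_EQ(a, b) */
	return ((a ^ b) & DCACHE_ALIAS_MASK) == 0;
}
#endif
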
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	unsigned long paddr;
	void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr);

	preempt_disable();
	kmap_invalidate_coherent(page, vaddr);
	set_bit(PG_arch_1, &page->flags);
	clear_page_alias(kvaddr, paddr);
	preempt_enable();
}

void copy_user_highpage(struct page *dst, struct page *src,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	unsigned long dst_paddr, src_paddr;
	void *dst_vaddr = coherent_kvaddr(dst, TLBTEMP_BASE_1, vaddr,
					  &dst_paddr);
	void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr,
					  &src_paddr);

	preempt_disable();
	kmap_invalidate_coherent(dst, vaddr);
	set_bit(PG_arch_1, &dst->flags);
	copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr);
	preempt_enable();
}

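/*
 * Illustrative usage sketch (not part of the original file): the generic
 * fault path passes the faulting user address so the page is written
 * through an alias of the user's cache color; 'vmf' stands for the
 * usual struct vm_fault argument and is hypothetical here.
 */
#if 0	/* example only */
struct page *page = alloc_page(GFP_HIGHUSER);

clear_user_highpage(page, vmf->address);	/* zero via user-colored alias */
#endif
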
/*
 * This routine is called any time the kernel writes to a user page-cache
 * page, or is about to read from one.
 */

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	/*
	 * If we have a mapping but the page is not mapped to user-space
	 * yet, we simply mark this page dirty and defer flushing the
	 * caches until update_mmu_cache().
	 */

	if (mapping && !mapping_mapped(mapping)) {
		if (!test_bit(PG_arch_1, &page->flags))
			set_bit(PG_arch_1, &page->flags);
		return;

	} else {

		unsigned long phys = page_to_phys(page);
		unsigned long temp = page->index << PAGE_SHIFT;
		unsigned long alias = !(DCACHE_ALIAS_EQ(temp, phys));
		unsigned long virt;

		/*
		 * Flush the page in kernel space and user space.
		 * Note that we can omit that step if aliasing is not
		 * an issue, but we do have to synchronize I$ and D$
		 * if we have a mapping.
		 */

		if (!alias && !mapping)
			return;

		virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(virt, phys);

		virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK);

		if (alias)
			__flush_invalidate_dcache_page_alias(virt, phys);

		if (mapping)
			__invalidate_icache_page_alias(virt, phys);
	}

	/* There shouldn't be an entry in the cache for this page anymore. */
}

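/*
 * Illustrative sketch (not part of the original file): a filesystem read
 * path fills the page through the kernel mapping and then calls
 * flush_dcache_page(); with no user mapping yet this only sets PG_arch_1,
 * and the real flush happens later in update_mmu_cache(). 'buffer' is a
 * hypothetical source.
 */
#if 0	/* example only */
void *kaddr = kmap_atomic(page);

memcpy(kaddr, buffer, PAGE_SIZE);	/* kernel writes the page-cache page */
kunmap_atomic(kaddr);
flush_dcache_page(page);		/* may just mark the page dirty */
#endif
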
/*
 * For now, flush the whole cache. FIXME??
 */

void local_flush_cache_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	__flush_invalidate_dcache_all();
	__invalidate_icache_all();
}

/*
 * Remove any entry in the cache for this page.
 *
 * Note that this function is only called for user pages, so use the
 * alias versions of the cache flush functions.
 */

void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
			    unsigned long pfn)
{
	/* Note that we have to use the 'alias' address to avoid multi-hit. */

	unsigned long phys = page_to_phys(pfn_to_page(pfn));
	unsigned long virt = TLBTEMP_BASE_1 + (address & DCACHE_ALIAS_MASK);

	__flush_invalidate_dcache_page_alias(virt, phys);
	__invalidate_icache_page_alias(virt, phys);
}

#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */

void
update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct page *page;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);

	/* Invalidate old entry in TLBs */

	flush_tlb_page(vma, addr);

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

	if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
		unsigned long phys = page_to_phys(page);
		unsigned long tmp;

		tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(tmp, phys);
		tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK);
		__flush_invalidate_dcache_page_alias(tmp, phys);
		__invalidate_icache_page_alias(tmp, phys);

		clear_bit(PG_arch_1, &page->flags);
	}
#else
	if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
	    && (vma->vm_flags & VM_EXEC) != 0) {
		unsigned long paddr = (unsigned long)kmap_atomic(page);

		__flush_dcache_page(paddr);
		__invalidate_icache_page(paddr);
		set_bit(PG_arch_1, &page->flags);
		kunmap_atomic((void *)paddr);
	}
#endif
}

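/*
 * Illustrative sketch (not part of the original file): update_mmu_cache()
 * runs after the generic mm code has installed a PTE, which is what makes
 * the deferred flush in flush_dcache_page() safe:
 */
#if 0	/* example only */
set_pte_at(vma->vm_mm, addr, ptep, pte);	/* mapping becomes visible */
update_mmu_cache(vma, addr, ptep);		/* deferred flush happens here */
#endif
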
/*
 * access_process_vm() has called get_user_pages(), which has done a
 * flush_dcache_page() on the page.
 */

#if (DCACHE_WAY_SIZE > PAGE_SIZE)

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	unsigned long phys = page_to_phys(page);
	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

	/* Flush and invalidate user page if aliased. */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

		__flush_invalidate_dcache_page_alias(t, phys);
	}

	/* Copy data */

	memcpy(dst, src, len);

	/*
	 * Flush and invalidate kernel page if aliased and synchronize
	 * data and instruction caches for executable pages.
	 */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

		__flush_invalidate_dcache_range((unsigned long) dst, len);
		if ((vma->vm_flags & VM_EXEC) != 0)
			__invalidate_icache_page_alias(t, phys);

	} else if ((vma->vm_flags & VM_EXEC) != 0) {
		__flush_dcache_range((unsigned long) dst, len);
		__invalidate_icache_range((unsigned long) dst, len);
	}
}

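/*
 * Illustrative usage sketch (not part of the original file): ptrace
 * writes reach a traced process through access_process_vm() and end up
 * here, so that e.g. a breakpoint opcode lands in both the user-colored
 * alias and the instruction cache. 'kaddr', 'offset' and 'bp_insn' are
 * hypothetical.
 */
#if 0	/* example only */
copy_to_user_page(vma, page, addr, kaddr + offset, &bp_insn, sizeof(bp_insn));
#endif
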
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	unsigned long phys = page_to_phys(page);
	unsigned long alias = !(DCACHE_ALIAS_EQ(vaddr, phys));

	/*
	 * Flush user page if aliased.
	 * (Note: a simple flush would be sufficient)
	 */

	if (alias) {
		unsigned long t = TLBTEMP_BASE_1 + (vaddr & DCACHE_ALIAS_MASK);

		__flush_invalidate_dcache_page_alias(t, phys);
	}

	memcpy(dst, src, len);
}

#endif