1da177e4 | 1 | /* |
f26b2a56 | 2 | * arch/sh/mm/cache.c |
1da177e4 LT |
3 | * |
4 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka | |
dfff0fa6 | 5 | * Copyright (C) 2002 - 2009 Paul Mundt |
1da177e4 LT |
6 | * |
7 | * Released under the terms of the GNU GPL v2.0. | |
8 | */ | |
1da177e4 | 9 | #include <linux/mm.h> |
acca4f4d | 10 | #include <linux/init.h> |
52e27782 | 11 | #include <linux/mutex.h> |
e06c4e57 | 12 | #include <linux/fs.h> |
f26b2a56 | 13 | #include <linux/smp.h> |
7747b9a4 PM |
14 | #include <linux/highmem.h> |
15 | #include <linux/module.h> | |
1da177e4 LT |
16 | #include <asm/mmu_context.h> |
17 | #include <asm/cacheflush.h> | |
18 | ||
f26b2a56 PM |
19 | void (*local_flush_cache_all)(void *args) = cache_noop; |
20 | void (*local_flush_cache_mm)(void *args) = cache_noop; | |
21 | void (*local_flush_cache_dup_mm)(void *args) = cache_noop; | |
22 | void (*local_flush_cache_page)(void *args) = cache_noop; | |
23 | void (*local_flush_cache_range)(void *args) = cache_noop; | |
24 | void (*local_flush_dcache_page)(void *args) = cache_noop; | |
25 | void (*local_flush_icache_range)(void *args) = cache_noop; | |
26 | void (*local_flush_icache_page)(void *args) = cache_noop; | |
27 | void (*local_flush_cache_sigtramp)(void *args) = cache_noop; | |
28 | ||
/*
 * Region flush entry points (write-back, purge, invalidate). These are
 * initialised to noop__flush_region and overridden by the per-family
 * cache drivers during cpu_cache_init().
 */
void (*__flush_wback_region)(void *start, int size);
void (*__flush_purge_region)(void *start, int size);
void (*__flush_invalidate_region)(void *start, int size);
/*
 * Default region flush: intentionally does nothing. Used on CPUs whose
 * cache driver never installs a real implementation.
 */
static inline void noop__flush_region(void *start, int size)
{
	/* nothing to do */
}
ba1789ef PM |
37 | void copy_to_user_page(struct vm_area_struct *vma, struct page *page, |
38 | unsigned long vaddr, void *dst, const void *src, | |
39 | unsigned long len) | |
1da177e4 | 40 | { |
0dfae7d5 PM |
41 | if (boot_cpu_data.dcache.n_aliases && page_mapped(page) && |
42 | !test_bit(PG_dcache_dirty, &page->flags)) { | |
2277ab4a PM |
43 | void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); |
44 | memcpy(vto, src, len); | |
b5eb10ae | 45 | kunmap_coherent(); |
2277ab4a PM |
46 | } else { |
47 | memcpy(dst, src, len); | |
0dfae7d5 PM |
48 | if (boot_cpu_data.dcache.n_aliases) |
49 | set_bit(PG_dcache_dirty, &page->flags); | |
2277ab4a | 50 | } |
ba1789ef PM |
51 | |
52 | if (vma->vm_flags & VM_EXEC) | |
53 | flush_cache_page(vma, vaddr, page_to_pfn(page)); | |
54 | } | |
55 | ||
56 | void copy_from_user_page(struct vm_area_struct *vma, struct page *page, | |
57 | unsigned long vaddr, void *dst, const void *src, | |
58 | unsigned long len) | |
59 | { | |
0dfae7d5 PM |
60 | if (boot_cpu_data.dcache.n_aliases && page_mapped(page) && |
61 | !test_bit(PG_dcache_dirty, &page->flags)) { | |
2277ab4a PM |
62 | void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); |
63 | memcpy(dst, vfrom, len); | |
b5eb10ae | 64 | kunmap_coherent(); |
2277ab4a PM |
65 | } else { |
66 | memcpy(dst, src, len); | |
0dfae7d5 PM |
67 | if (boot_cpu_data.dcache.n_aliases) |
68 | set_bit(PG_dcache_dirty, &page->flags); | |
2277ab4a | 69 | } |
1da177e4 | 70 | } |
39e688a9 | 71 | |
7747b9a4 PM |
72 | void copy_user_highpage(struct page *to, struct page *from, |
73 | unsigned long vaddr, struct vm_area_struct *vma) | |
74 | { | |
75 | void *vfrom, *vto; | |
76 | ||
7747b9a4 | 77 | vto = kmap_atomic(to, KM_USER1); |
7747b9a4 | 78 | |
0dfae7d5 PM |
79 | if (boot_cpu_data.dcache.n_aliases && page_mapped(from) && |
80 | !test_bit(PG_dcache_dirty, &from->flags)) { | |
2277ab4a PM |
81 | vfrom = kmap_coherent(from, vaddr); |
82 | copy_page(vto, vfrom); | |
b5eb10ae | 83 | kunmap_coherent(); |
2277ab4a PM |
84 | } else { |
85 | vfrom = kmap_atomic(from, KM_USER0); | |
86 | copy_page(vto, vfrom); | |
87 | kunmap_atomic(vfrom, KM_USER0); | |
88 | } | |
89 | ||
90 | if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK)) | |
7747b9a4 PM |
91 | __flush_wback_region(vto, PAGE_SIZE); |
92 | ||
93 | kunmap_atomic(vto, KM_USER1); | |
94 | /* Make sure this page is cleared on other CPU's too before using it */ | |
95 | smp_wmb(); | |
96 | } | |
97 | EXPORT_SYMBOL(copy_user_highpage); | |
dfff0fa6 PM |
98 | |
99 | void clear_user_highpage(struct page *page, unsigned long vaddr) | |
100 | { | |
101 | void *kaddr = kmap_atomic(page, KM_USER0); | |
102 | ||
103 | clear_page(kaddr); | |
104 | ||
105 | if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK)) | |
106 | __flush_wback_region(kaddr, PAGE_SIZE); | |
107 | ||
108 | kunmap_atomic(kaddr, KM_USER0); | |
109 | } | |
110 | EXPORT_SYMBOL(clear_user_highpage); | |
9cef7492 PM |
111 | |
112 | void __update_cache(struct vm_area_struct *vma, | |
113 | unsigned long address, pte_t pte) | |
114 | { | |
115 | struct page *page; | |
116 | unsigned long pfn = pte_pfn(pte); | |
117 | ||
118 | if (!boot_cpu_data.dcache.n_aliases) | |
119 | return; | |
120 | ||
121 | page = pfn_to_page(pfn); | |
122 | if (pfn_valid(pfn) && page_mapping(page)) { | |
123 | int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags); | |
124 | if (dirty) { | |
125 | unsigned long addr = (unsigned long)page_address(page); | |
126 | ||
127 | if (pages_do_alias(addr, address & PAGE_MASK)) | |
128 | __flush_wback_region((void *)addr, PAGE_SIZE); | |
129 | } | |
130 | } | |
131 | } | |
c0fe478d PM |
132 | |
133 | void __flush_anon_page(struct page *page, unsigned long vmaddr) | |
134 | { | |
135 | unsigned long addr = (unsigned long) page_address(page); | |
136 | ||
137 | if (pages_do_alias(addr, vmaddr)) { | |
138 | if (boot_cpu_data.dcache.n_aliases && page_mapped(page) && | |
139 | !test_bit(PG_dcache_dirty, &page->flags)) { | |
140 | void *kaddr; | |
141 | ||
142 | kaddr = kmap_coherent(page, vmaddr); | |
143 | __flush_wback_region((void *)kaddr, PAGE_SIZE); | |
144 | kunmap_coherent(); | |
145 | } else | |
146 | __flush_wback_region((void *)addr, PAGE_SIZE); | |
147 | } | |
148 | } | |
ecba1060 | 149 | |
f26b2a56 PM |
150 | void flush_cache_all(void) |
151 | { | |
152 | on_each_cpu(local_flush_cache_all, NULL, 1); | |
153 | } | |
154 | ||
155 | void flush_cache_mm(struct mm_struct *mm) | |
156 | { | |
157 | on_each_cpu(local_flush_cache_mm, mm, 1); | |
158 | } | |
159 | ||
160 | void flush_cache_dup_mm(struct mm_struct *mm) | |
161 | { | |
162 | on_each_cpu(local_flush_cache_dup_mm, mm, 1); | |
163 | } | |
164 | ||
165 | void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, | |
166 | unsigned long pfn) | |
167 | { | |
168 | struct flusher_data data; | |
169 | ||
170 | data.vma = vma; | |
171 | data.addr1 = addr; | |
172 | data.addr2 = pfn; | |
173 | ||
174 | on_each_cpu(local_flush_cache_page, (void *)&data, 1); | |
175 | } | |
176 | ||
177 | void flush_cache_range(struct vm_area_struct *vma, unsigned long start, | |
178 | unsigned long end) | |
179 | { | |
180 | struct flusher_data data; | |
181 | ||
182 | data.vma = vma; | |
183 | data.addr1 = start; | |
184 | data.addr2 = end; | |
185 | ||
186 | on_each_cpu(local_flush_cache_range, (void *)&data, 1); | |
187 | } | |
188 | ||
189 | void flush_dcache_page(struct page *page) | |
190 | { | |
191 | on_each_cpu(local_flush_dcache_page, page, 1); | |
192 | } | |
193 | ||
194 | void flush_icache_range(unsigned long start, unsigned long end) | |
195 | { | |
196 | struct flusher_data data; | |
197 | ||
198 | data.vma = NULL; | |
199 | data.addr1 = start; | |
200 | data.addr2 = end; | |
201 | ||
202 | on_each_cpu(local_flush_icache_range, (void *)&data, 1); | |
203 | } | |
204 | ||
205 | void flush_icache_page(struct vm_area_struct *vma, struct page *page) | |
206 | { | |
207 | /* Nothing uses the VMA, so just pass the struct page along */ | |
208 | on_each_cpu(local_flush_icache_page, page, 1); | |
209 | } | |
210 | ||
211 | void flush_cache_sigtramp(unsigned long address) | |
212 | { | |
213 | on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1); | |
214 | } | |
215 | ||
27d59ec1 PM |
216 | static void compute_alias(struct cache_info *c) |
217 | { | |
218 | c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1); | |
219 | c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0; | |
220 | } | |
221 | ||
222 | static void __init emit_cache_params(void) | |
223 | { | |
224 | printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n", | |
225 | boot_cpu_data.icache.ways, | |
226 | boot_cpu_data.icache.sets, | |
227 | boot_cpu_data.icache.way_incr); | |
228 | printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n", | |
229 | boot_cpu_data.icache.entry_mask, | |
230 | boot_cpu_data.icache.alias_mask, | |
231 | boot_cpu_data.icache.n_aliases); | |
232 | printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n", | |
233 | boot_cpu_data.dcache.ways, | |
234 | boot_cpu_data.dcache.sets, | |
235 | boot_cpu_data.dcache.way_incr); | |
236 | printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n", | |
237 | boot_cpu_data.dcache.entry_mask, | |
238 | boot_cpu_data.dcache.alias_mask, | |
239 | boot_cpu_data.dcache.n_aliases); | |
240 | ||
241 | /* | |
242 | * Emit Secondary Cache parameters if the CPU has a probed L2. | |
243 | */ | |
244 | if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) { | |
245 | printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n", | |
246 | boot_cpu_data.scache.ways, | |
247 | boot_cpu_data.scache.sets, | |
248 | boot_cpu_data.scache.way_incr); | |
249 | printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n", | |
250 | boot_cpu_data.scache.entry_mask, | |
251 | boot_cpu_data.scache.alias_mask, | |
252 | boot_cpu_data.scache.n_aliases); | |
253 | } | |
254 | } | |
255 | ||
ecba1060 PM |
256 | void __init cpu_cache_init(void) |
257 | { | |
27d59ec1 PM |
258 | compute_alias(&boot_cpu_data.icache); |
259 | compute_alias(&boot_cpu_data.dcache); | |
260 | compute_alias(&boot_cpu_data.scache); | |
261 | ||
37443ef3 PM |
262 | __flush_wback_region = noop__flush_region; |
263 | __flush_purge_region = noop__flush_region; | |
264 | __flush_invalidate_region = noop__flush_region; | |
265 | ||
109b44a8 PM |
266 | if (boot_cpu_data.family == CPU_FAMILY_SH2) { |
267 | extern void __weak sh2_cache_init(void); | |
268 | ||
269 | sh2_cache_init(); | |
270 | } | |
271 | ||
a58e1a2a PM |
272 | if (boot_cpu_data.family == CPU_FAMILY_SH2A) { |
273 | extern void __weak sh2a_cache_init(void); | |
274 | ||
275 | sh2a_cache_init(); | |
276 | } | |
277 | ||
79f1c9da PM |
278 | if (boot_cpu_data.family == CPU_FAMILY_SH3) { |
279 | extern void __weak sh3_cache_init(void); | |
280 | ||
281 | sh3_cache_init(); | |
0d051d90 PM |
282 | |
283 | if ((boot_cpu_data.type == CPU_SH7705) && | |
284 | (boot_cpu_data.dcache.sets == 512)) { | |
285 | extern void __weak sh7705_cache_init(void); | |
286 | ||
287 | sh7705_cache_init(); | |
288 | } | |
79f1c9da PM |
289 | } |
290 | ||
ecba1060 PM |
291 | if ((boot_cpu_data.family == CPU_FAMILY_SH4) || |
292 | (boot_cpu_data.family == CPU_FAMILY_SH4A) || | |
293 | (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) { | |
294 | extern void __weak sh4_cache_init(void); | |
295 | ||
296 | sh4_cache_init(); | |
297 | } | |
27d59ec1 | 298 | |
2b431518 PM |
299 | if (boot_cpu_data.family == CPU_FAMILY_SH5) { |
300 | extern void __weak sh5_cache_init(void); | |
301 | ||
302 | sh5_cache_init(); | |
303 | } | |
304 | ||
27d59ec1 | 305 | emit_cache_params(); |
ecba1060 | 306 | } |