/*
 * ARC700 VIPT Cache Management
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: May 2011: for Non-aliasing VIPT D-cache following can be NOPs
 *  -flush_cache_dup_mm (fork)
 *  -likewise for flush_cache_mm (exit/execve)
 *  -likewise for flush_cache_range, flush_cache_page (munmap, exit, COW-break)
 *
 * vineetg: Apr 2011
 *  -Now that MMU can support larger pg sz (16K), the determination of
 *   aliasing shd not be based on assumption of 8k pg
 *
 * vineetg: Mar 2011
 *  -optimised version of flush_icache_range( ) for making I/D coherent
 *   when vaddr is available (agnostic of num of aliases)
 *
 * vineetg: Mar 2011
 *  -Added documentation about I-cache aliasing on ARC700 and the way it
 *   was handled up until MMU V2.
 *  -Spotted a three year old bug when killing the 4 aliases, which needs
 *   bottom 2 bits, so we need to do paddr | {0x00, 0x01, 0x02, 0x03}
 *   instead of paddr | {0x00, 0x01, 0x10, 0x11}
 *   (Rajesh you owe me one now)
 *
 * vineetg: Dec 2010
 *  -Off-by-one error when computing num_of_lines to flush
 *   This broke signal handling with bionic which uses synthetic sigret stub
 *
 * vineetg: Mar 2010
 *  -GCC can't generate ZOL for core cache flush loops.
 *   Converted them into iteration-count based loops as opposed to
 *   while (start < end) types
 *
 * Vineetg: July 2009
 *  -In I-cache flush routine we used to chk for aliasing for every line INV.
 *   Instead now we set up routines per cache geometry and invoke them
 *   via function pointers.
 *
 * Vineetg: Jan 2009
 *  -Cache Line flush routines used to flush an extra line beyond end addr
 *   because check was while (end >= start) instead of (end > start)
 *   =Some call sites had to work around by doing -1, -4 etc to end param
 *   =Some callers didn't care. This was especially bad in case of INV routines
 *    which would discard valid data (cause of the horrible ext2 bug
 *    in ARC IDE driver)
 *
 * vineetg: June 11th 2008: Fixed flush_icache_range( )
 *  -Since ARC700 caches are not coherent (I$ doesn't snoop D$) both need
 *   to be flushed, which it was not doing.
 *  -load_module( ) passes vmalloc addr (Kernel Virtual Addr) to the API,
 *   however ARC cache maintenance OPs require PHY addr. Thus need to do
 *   vmalloc_to_phy.
 *  -Also added optimisation there, that for range > PAGE SIZE we flush the
 *   entire cache in one shot rather than line by line. For e.g. a module
 *   with Code sz 600k, old code flushed 600k worth of cache (line-by-line),
 *   while cache is only 16 or 32k.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>

char *arc_cache_mumbojumbo(int c, char *buf, int len)
{
        int n = 0;

#define PR_CACHE(p, enb, str)                                           \
{                                                                       \
        if (!(p)->ver)                                                  \
                n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");     \
        else                                                            \
                n += scnprintf(buf + n, len - n,                        \
                        str"\t\t: (%uK) VIPT, %dway set-asc, %ub Line %s\n", \
                        TO_KB((p)->sz), (p)->assoc, (p)->line_len,      \
                        enb ? "" : "DISABLED (kernel-build)");          \
}

        PR_CACHE(&cpuinfo_arc700[c].icache, IS_ENABLED(CONFIG_ARC_HAS_ICACHE),
                        "I-Cache");
        PR_CACHE(&cpuinfo_arc700[c].dcache, IS_ENABLED(CONFIG_ARC_HAS_DCACHE),
                        "D-Cache");

        return buf;
}
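
/*
 * Illustrative boot-print produced by the above (hypothetical geometry,
 * both caches enabled in the kernel config):
 *
 *      I-Cache         : (16K) VIPT, 2way set-asc, 64b Line
 *      D-Cache         : (32K) VIPT, 4way set-asc, 64b Line
 */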

/*
 * Read the Cache Build Configuration Registers, decode them and save into
 * the cpuinfo structure for later use.
 * No validation done here, simply read/convert the BCRs
 */
void read_decode_cache_bcr(void)
{
        struct cpuinfo_arc_cache *p_ic, *p_dc;
        unsigned int cpu = smp_processor_id();
        struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
                unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
                unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
        } ibcr, dbcr;

        p_ic = &cpuinfo_arc700[cpu].icache;
        READ_BCR(ARC_REG_IC_BCR, ibcr);

        BUG_ON(ibcr.config != 3);
        p_ic->assoc = 2;                /* Fixed to 2w set assoc */
        p_ic->line_len = 8 << ibcr.line_len;
        p_ic->sz = 0x200 << ibcr.sz;
        p_ic->ver = ibcr.ver;

        p_dc = &cpuinfo_arc700[cpu].dcache;
        READ_BCR(ARC_REG_DC_BCR, dbcr);

        BUG_ON(dbcr.config != 2);
        p_dc->assoc = 4;                /* Fixed to 4w set assoc */
        p_dc->line_len = 16 << dbcr.line_len;
        p_dc->sz = 0x200 << dbcr.sz;
        p_dc->ver = dbcr.ver;
}
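
/*
 * Decode example (hypothetical BCR field values): dbcr.sz = 6 and
 * dbcr.line_len = 1 give sz = 0x200 << 6 = 32K and line_len = 16 << 1 = 32
 * bytes; with the fixed 4-way assoc that is an 8K way size.
 */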

/*
 * 1. Validate the Cache Geometry (compile time config matches hardware)
 * 2. If I-cache suffers from aliasing, setup workarounds (different flush rtn)
 *    (aliasing D-cache configurations are not supported YET)
 * 3. Enable the Caches, setup default flush mode for D-Cache
 * 4. Calculate the SHMLBA used by user space
 */
void arc_cache_init(void)
{
        unsigned int __maybe_unused cpu = smp_processor_id();
        struct cpuinfo_arc_cache __maybe_unused *ic, __maybe_unused *dc;
        char str[256];

        printk(arc_cache_mumbojumbo(0, str, sizeof(str)));

#ifdef CONFIG_ARC_HAS_ICACHE
        ic = &cpuinfo_arc700[cpu].icache;
        if (ic->ver) {
                if (ic->line_len != L1_CACHE_BYTES)
                        panic("ICache line [%d] != kernel Config [%d]",
                              ic->line_len, L1_CACHE_BYTES);

                if (ic->ver != CONFIG_ARC_MMU_VER)
                        panic("Cache ver [%d] doesn't match MMU ver [%d]\n",
                              ic->ver, CONFIG_ARC_MMU_VER);
        }
#endif

#ifdef CONFIG_ARC_HAS_DCACHE
        dc = &cpuinfo_arc700[cpu].dcache;
        if (dc->ver) {
                unsigned int dcache_does_alias;

                if (dc->line_len != L1_CACHE_BYTES)
                        panic("DCache line [%d] != kernel Config [%d]",
                              dc->line_len, L1_CACHE_BYTES);

                /* check for D-Cache aliasing */
                dcache_does_alias = (dc->sz / dc->assoc) > PAGE_SIZE;
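
                /*
                 * Worked example (hypothetical geometry): a 64K, 4-way
                 * D-cache has a 16K way size; with 8K pages, 16K > 8K means
                 * two page "colors" exist, i.e. it aliases. A 32K, 4-way
                 * cache (8K way size) would not.
                 */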
                if (dcache_does_alias && !cache_is_vipt_aliasing())
                        panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
                else if (!dcache_does_alias && cache_is_vipt_aliasing())
                        panic("Don't need CONFIG_ARC_CACHE_VIPT_ALIASING\n");
        }
#endif
}

#define OP_INV          0x1
#define OP_FLUSH        0x2
#define OP_FLUSH_N_INV  0x3
#define OP_INV_IC       0x4

/*
 * Common Helper for Line Operations on {I,D}-Cache
 */
static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr,
                                     unsigned long sz, const int cacheop)
{
        unsigned int aux_cmd, aux_tag;
        int num_lines;
        const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;

        if (cacheop == OP_INV_IC) {
                aux_cmd = ARC_REG_IC_IVIL;
#if (CONFIG_ARC_MMU_VER > 2)
                aux_tag = ARC_REG_IC_PTAG;
#endif
        } else {
                /* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
                aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
#if (CONFIG_ARC_MMU_VER > 2)
                aux_tag = ARC_REG_DC_PTAG;
#endif
        }

        /* Ensure we properly floor/ceil the non-line aligned/sized requests
         * and have @paddr - aligned to cache line and integral @num_lines.
         * This however can be avoided for page sized since:
         *  -@paddr will be cache-line aligned already (being page aligned)
         *  -@sz will be integral multiple of line size (being page sized).
         */
        if (!full_page_op) {
                sz += paddr & ~CACHE_LINE_MASK;
                paddr &= CACHE_LINE_MASK;
                vaddr &= CACHE_LINE_MASK;
        }

        num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

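        /*
         * e.g. (hypothetical request, 64b lines): @paddr 8 bytes into a line
         * with @sz = 100 becomes sz = 108 after the floor above, and
         * DIV_ROUND_UP(108, 64) = 2 lines are operated upon.
         */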
#if (CONFIG_ARC_MMU_VER <= 2)
        /* MMUv2 and before: paddr contains stuffed vaddr bits */
        paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
#else
        /* if V-P const for loop, PTAG can be written once outside loop */
        if (full_page_op)
                write_aux_reg(aux_tag, paddr);
#endif

        while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER > 2)
                /* MMUv3, cache ops require paddr separately */
                if (!full_page_op) {
                        write_aux_reg(aux_tag, paddr);
                        paddr += L1_CACHE_BYTES;
                }

                write_aux_reg(aux_cmd, vaddr);
                vaddr += L1_CACHE_BYTES;
#else
                write_aux_reg(aux_cmd, paddr);
                paddr += L1_CACHE_BYTES;
#endif
        }
}

#ifdef CONFIG_ARC_HAS_DCACHE

/***************************************************************
 * Machine specific helpers for Entire D-Cache or Per Line ops
 */

static inline void wait_for_flush(void)
{
        while (read_aux_reg(ARC_REG_DC_CTRL) & DC_CTRL_FLUSH_STATUS)
                ;
}

/*
 * Operation on Entire D-Cache
 * @cacheop = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Note that constant propagation ensures all the checks are gone
 * in generated code
 */
static inline void __dc_entire_op(const int cacheop)
{
        unsigned int tmp = tmp; /* self-init quells maybe-uninitialized warn */
        int aux;

        if (cacheop == OP_FLUSH_N_INV) {
                /* Dcache provides 2 cmd: FLUSH or INV
                 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
                 * flush-n-inv is achieved by INV cmd but with IM=1
                 * Default INV sub-mode is DISCARD, which needs to be toggled
                 */
                tmp = read_aux_reg(ARC_REG_DC_CTRL);
                write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
        }

        if (cacheop & OP_INV)   /* Inv or flush-n-inv use same cmd reg */
                aux = ARC_REG_DC_IVDC;
        else
                aux = ARC_REG_DC_FLSH;

        write_aux_reg(aux, 0x1);

        if (cacheop & OP_FLUSH) /* flush / flush-n-inv both wait */
                wait_for_flush();

        /* Switch back the DISCARD ONLY Invalidate mode */
        if (cacheop == OP_FLUSH_N_INV)
                write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);
}

/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op)       __dc_line_op(p, p, sz, op)

/*
 * D-Cache : Per Line INV (discard or wback+discard) or FLUSH (wback)
 */
static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
                                unsigned long sz, const int cacheop)
{
        unsigned long flags, tmp = tmp; /* self-init quells bogus warning */

        local_irq_save(flags);

        if (cacheop == OP_FLUSH_N_INV) {
                /*
                 * Dcache provides 2 cmd: FLUSH or INV
                 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
                 * flush-n-inv is achieved by INV cmd but with IM=1
                 * Default INV sub-mode is DISCARD, which needs to be toggled
                 */
                tmp = read_aux_reg(ARC_REG_DC_CTRL);
                write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
        }

        __cache_line_loop(paddr, vaddr, sz, cacheop);

        if (cacheop & OP_FLUSH) /* flush / flush-n-inv both wait */
                wait_for_flush();

        /* Switch back the DISCARD ONLY Invalidate mode */
        if (cacheop == OP_FLUSH_N_INV)
                write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);

        local_irq_restore(flags);
}

#else

#define __dc_entire_op(cacheop)
#define __dc_line_op(paddr, vaddr, sz, cacheop)
#define __dc_line_op_k(paddr, sz, cacheop)

#endif /* CONFIG_ARC_HAS_DCACHE */


#ifdef CONFIG_ARC_HAS_ICACHE

/*
 * I-Cache Aliasing in ARC700 VIPT caches
 *
 * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
 * The orig Cache Management Module "CDU" only required paddr to invalidate a
 * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry.
 * In fact for distinct V1, V2, P: all of {V1-P}, {V2-P}, {P-P} would end up
 * fetching the exact same line.
 *
 * However for larger Caches (way-size > page-size) - i.e. in Aliasing config,
 * paddr alone could not be used to correctly index the cache.
 *
 * ------------------
 * MMU v1/v2 (Fixed Page Size 8k)
 * ------------------
 * The solution was to provide CDU with these additional vaddr bits. These
 * would be bits [x:13], x would depend on cache-geometry, 13 comes from
 * standard page size of 8k.
 * H/w folks chose [17:13] to be a future safe range, and more so these 5 bits
 * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
 * orig 5 bits of paddr were anyways ignored by CDU line ops, as they
 * represent the offset within cache-line. The adv of using this "clumsy"
 * interface for additional info was no new reg was needed in CDU programming
 * model.
 *
 * 17:13 represented the max num of bits passable, actual bits needed were
 * fewer, based on the num-of-aliases possible.
 *  -for 2 alias possibility, only bit 13 needed (32K cache)
 *  -for 4 alias possibility, bits 14:13 needed (64K cache)
 *
 * ------------------
 * MMU v3
 * ------------------
 * This ver of MMU supports variable page sizes (1k-16k): although Linux will
 * only support 8k (default), 16k and 4k.
 * However from hardware perspective, smaller page sizes aggravate aliasing
 * meaning more vaddr bits needed to disambiguate the cache-line-op;
 * the existing scheme of piggybacking won't work for certain configurations.
 * Two new registers IC_PTAG and DC_PTAG were introduced.
 * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
 */
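
/*
 * Concretely (illustrative example, assuming 8K pages): to invalidate the
 * line for paddr P mapped at vaddr V in an aliasing I$, MMUv1/v2 SW writes
 * P | ((V >> 13) & 0x1F) to ARC_REG_IC_IVIL - the vaddr bits are stuffed
 * into the otherwise-ignored line-offset bits of paddr. On MMUv3 it is two
 * writes instead: P to ARC_REG_IC_PTAG, then V to ARC_REG_IC_IVIL (this is
 * exactly what __cache_line_loop() above does per line).
 */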

/***********************************************************
 * Machine specific helper for per line I-Cache invalidate.
 */
static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
                                unsigned long sz)
{
        unsigned long flags;

        local_irq_save(flags);
        __cache_line_loop(paddr, vaddr, sz, OP_INV_IC);
        local_irq_restore(flags);
}

static inline void __ic_entire_inv(void)
{
        write_aux_reg(ARC_REG_IC_IVIC, 1);
        read_aux_reg(ARC_REG_IC_CTRL);  /* blocks */
}

#else

#define __ic_entire_inv()
#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif /* CONFIG_ARC_HAS_ICACHE */


/***********************************************************
 * Exported APIs
 */

/*
 * Handle cache congruency of kernel and userspace mappings of page when kernel
 * writes-to/reads-from
 *
 * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
 *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
 *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
 *  -In SMP, if hardware caches are coherent
 *
 * There's a corollary case, where kernel READs from a userspace mapped page.
 * If the U-mapping is not congruent to K-mapping, former needs flushing.
 */
void flush_dcache_page(struct page *page)
{
        struct address_space *mapping;

        if (!cache_is_vipt_aliasing()) {
                clear_bit(PG_dc_clean, &page->flags);
                return;
        }

        /* don't handle anon pages here */
        mapping = page_mapping(page);
        if (!mapping)
                return;

        /*
         * pagecache page, file not yet mapped to userspace
         * Make a note that K-mapping is dirty
         */
        if (!mapping_mapped(mapping)) {
                clear_bit(PG_dc_clean, &page->flags);
        } else if (page_mapped(page)) {

                /* kernel reading from page with U-mapping */
                void *paddr = page_address(page);
                unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;

                if (addr_not_cache_congruent(paddr, vaddr))
                        __flush_dcache_page(paddr, vaddr);
        }
}
EXPORT_SYMBOL(flush_dcache_page);
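
/*
 * Note: "congruent" here means the U-vaddr and K-vaddr select the same cache
 * color, i.e. they agree in the index bits above PAGE_SHIFT (hypothetically,
 * bit 13 for a 2-color aliasing D-cache with 8K pages);
 * addr_not_cache_congruent() encapsulates that check.
 */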

void dma_cache_wback_inv(unsigned long start, unsigned long sz)
{
        __dc_line_op_k(start, sz, OP_FLUSH_N_INV);
}
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(unsigned long start, unsigned long sz)
{
        __dc_line_op_k(start, sz, OP_INV);
}
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(unsigned long start, unsigned long sz)
{
        __dc_line_op_k(start, sz, OP_FLUSH);
}
EXPORT_SYMBOL(dma_cache_wback);
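
/*
 * Illustrative driver-side usage of the above (a sketch, not from this
 * file): wback before the device reads a CPU-written buffer, inv before
 * the CPU reads device-written data, e.g.
 *
 *      dma_cache_wback(buf_paddr, len);        // CPU -> device
 *      dma_cache_inv(buf_paddr, len);          // device -> CPU
 */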

/*
 * This is the API for making I/D Caches consistent when modifying
 * kernel code (loadable modules, kprobes, kgdb...)
 * It is called on insmod, with the kernel virtual address for CODE of
 * the module. ARC cache maintenance ops require a PHY address, thus we
 * need to convert the vmalloc addr to a PHY addr.
 */
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
        unsigned int tot_sz, off, sz;
        unsigned long phy, pfn;

        /* printk("Kernel Cache Coherency: %lx to %lx\n", kstart, kend); */

        /* This is not the right API for user virtual address */
        if (kstart < TASK_SIZE) {
                BUG_ON("Flush icache range for user virtual addr space");
                return;
        }

        /* Shortcut for bigger flush ranges.
         * Here we don't care if this was kernel virtual or phy addr
         */
        tot_sz = kend - kstart;
        if (tot_sz > PAGE_SIZE) {
                flush_cache_all();
                return;
        }

        /* Case: Kernel Phy addr (0x8000_0000 onwards) */
        if (likely(kstart > PAGE_OFFSET)) {
                /*
                 * The 2nd arg despite being paddr will be used to index icache
                 * This is OK since no alternate virtual mappings will exist
                 * given the callers for this case: kprobe/kgdb in built-in
                 * kernel code only.
                 */
                __sync_icache_dcache(kstart, kstart, kend - kstart);
                return;
        }

        /*
         * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
         * (1) ARC Cache Maintenance ops only take Phy addr, hence special
         *     handling of kernel vaddr.
         *
         * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
         *     it still needs to handle a 2 page scenario, where the range
         *     straddles across 2 virtual pages and hence need for loop
         */
        while (tot_sz > 0) {
                off = kstart % PAGE_SIZE;
                pfn = vmalloc_to_pfn((void *)kstart);
                phy = (pfn << PAGE_SHIFT) + off;
                sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
                __sync_icache_dcache(phy, kstart, sz);
                kstart += sz;
                tot_sz -= sz;
        }
}
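
/*
 * e.g. for the straddle loop above (hypothetical values, 8K pages):
 * kstart = 0x70001f80, tot_sz = 0x100. Pass 1: off = 0x1f80, so
 * sz = min(0x100, 0x2000 - 0x1f80) = 0x80 (rest of the page); pass 2:
 * off = 0, sz = 0x80 on the next virtual page, each with its own pfn.
 */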

/*
 * General purpose helper to make I and D cache lines consistent.
 * @paddr is phy addr of region
 * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
 *    However in one instance, when called by kprobe (for a breakpt in
 *    builtin kernel code) @vaddr will be paddr only, meaning CDU operation
 *    will use a paddr to index the cache (despite VIPT). This is fine since
 *    a builtin kernel page will not have any virtual mappings.
 *    kprobe on loadable module will be kernel vaddr.
 */
void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
{
        unsigned long flags;

        local_irq_save(flags);
        __ic_line_inv_vaddr(paddr, vaddr, len);
        __dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
        local_irq_restore(flags);
}

/* wrapper to compile time eliminate alignment checks in flush loop */
void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
{
        __ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}

/*
 * wrapper to clear out kernel or userspace mappings of a page
 * For kernel mappings @vaddr == @paddr
 */
void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr)
{
        __dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}
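
/*
 * Since the wrappers above pass the compile-time constant PAGE_SIZE,
 * __cache_line_loop()'s full_page_op test (__builtin_constant_p(sz) &&
 * sz == PAGE_SIZE) folds to true: the alignment fixups drop out and, on
 * MMUv3, the PTAG write is hoisted out of the per-line loop.
 */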

noinline void flush_cache_all(void)
{
        unsigned long flags;

        local_irq_save(flags);

        __ic_entire_inv();
        __dc_entire_op(OP_FLUSH_N_INV);

        local_irq_restore(flags);
}

#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING

void flush_cache_mm(struct mm_struct *mm)
{
        flush_cache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
                      unsigned long pfn)
{
        unsigned int paddr = pfn << PAGE_SHIFT;

        u_vaddr &= PAGE_MASK;

        ___flush_dcache_page(paddr, u_vaddr);

        if (vma->vm_flags & VM_EXEC)
                __inv_icache_page(paddr, u_vaddr);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
                       unsigned long end)
{
        flush_cache_all();
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page,
                     unsigned long u_vaddr)
{
        /* TBD: do we really need to clear the kernel mapping */
        __flush_dcache_page(page_address(page), u_vaddr);
        __flush_dcache_page(page_address(page), page_address(page));
}

#endif

void copy_user_highpage(struct page *to, struct page *from,
                        unsigned long u_vaddr, struct vm_area_struct *vma)
{
        void *kfrom = page_address(from);
        void *kto = page_address(to);
        int clean_src_k_mappings = 0;

        /*
         * If SRC page was already mapped in userspace AND its U-mapping is
         * not congruent with K-mapping, sync former to physical page so that
         * K-mapping in memcpy below, sees the right data
         *
         * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
         * equally valid for SRC page as well
         */
        if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
                __flush_dcache_page(kfrom, u_vaddr);
                clean_src_k_mappings = 1;
        }

        copy_page(kto, kfrom);

        /*
         * Mark DST page K-mapping as dirty for a later finalization by
         * update_mmu_cache(). Although the finalization could have been done
         * here as well (given that both vaddr/paddr are available).
         * But update_mmu_cache() already has code to do that for other
         * non copied user pages (e.g. read faults which wire in pagecache page
         * directly).
         */
        clear_bit(PG_dc_clean, &to->flags);

        /*
         * if SRC was already usermapped and non-congruent to kernel mapping
         * sync the kernel mapping back to physical page
         */
        if (clean_src_k_mappings) {
                __flush_dcache_page(kfrom, kfrom);
                set_bit(PG_dc_clean, &from->flags);
        } else {
                clear_bit(PG_dc_clean, &from->flags);
        }
}

void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
        clear_page(to);
        clear_bit(PG_dc_clean, &page->flags);
}

/**********************************************************************
 * Explicit Cache flush request from user space via syscall
 * Needed for JITs which generate code on the fly
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
        /* TBD: optimize this */
        flush_cache_all();
        return 0;
}
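
/*
 * Illustrative userspace sequence for the syscall above (a sketch, assuming
 * the usual syscall(2) wrapper and that __NR_cacheflush is visible to
 * userspace via the uapi headers):
 *
 *      emit_code(buf, len);                    // JIT writes instructions
 *      syscall(__NR_cacheflush, buf, len, 0);  // make I$ see them
 *      ((void (*)(void))buf)();                // now safe to execute
 */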