/*
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>
static int l2_line_sz;
static int ioc_exists;
int slc_enable = 1, ioc_enable = 0;
unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
unsigned long perip_end = 0xFFFFFFFF; /* legacy value */
void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
			       unsigned long sz, const int cacheop);

void (*__dma_cache_wback_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_wback)(phys_addr_t start, unsigned long sz);
char *arc_cache_mumbojumbo(int c, char *buf, int len)
	struct cpuinfo_arc_cache *p;

#define PR_CACHE(p, cfg, str) \
		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n"); \
		n += scnprintf(buf + n, len - n, \
			str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n", \
			(p)->sz_k, (p)->assoc, (p)->line_len, \
			(p)->vipt ? "VIPT" : "PIPT", \
			(p)->alias ? " aliasing" : "", \
	PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
	PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");
	p = &cpuinfo_arc700[c].slc;

	n += scnprintf(buf + n, len - n,
		       "SLC\t\t: %uK, %uB Line%s\n",
		       p->sz_k, p->line_len, IS_USED_RUN(slc_enable));

	n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
		       IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency "));
/*
 * Read the Cache Build Configuration Registers, Decode them and save into
 * the cpuinfo structure for later use.
 * No Validation done here, simply read/convert the BCRs
 */
static void read_decode_cache_bcr_arcv2(int cpu)
	struct cpuinfo_arc_cache *p_slc = &cpuinfo_arc700[cpu].slc;
	struct bcr_generic sbcr;
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:24, way:2, lsz:2, sz:4;
		unsigned int sz:4, lsz:2, way:2, pad:24;
	struct bcr_clust_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
		unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int start:4, limit:4, pad:22, order:1, disable:1;
		unsigned int disable:1, order:1, pad:22, limit:4, start:4;
	READ_BCR(ARC_REG_SLC_BCR, sbcr);
		READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
		p_slc->sz_k = 128 << slc_cfg.sz;
		l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
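		/*
		 * Worked example of the decode above (illustrative values, not
		 * a dump from real silicon): slc_cfg.sz == 1 and slc_cfg.lsz == 1
		 * give a 256 KiB SLC (128 << 1) with 64-byte lines.
		 */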
	READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
	/* HS 2.0 didn't have AUX_VOL */
	if (cpuinfo_arc700[cpu].core.family > 0x51) {
		READ_BCR(AUX_VOL, vol);
		perip_base = vol.start << 28;
		/* HS 3.0 has limit and strict-ordering fields */
		if (cpuinfo_arc700[cpu].core.family > 0x52)
			perip_end = (vol.limit << 28) - 1;
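		/*
		 * Illustrative decode (made-up field values): vol.start == 0xc
		 * and vol.limit == 0xf give an uncached peripheral window of
		 * 0xc000_0000..0xefff_ffff, i.e. start << 28 through
		 * (limit << 28) - 1.
		 */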
void read_decode_cache_bcr(void)
	struct cpuinfo_arc_cache *p_ic, *p_dc;
	unsigned int cpu = smp_processor_id();
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
		unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
	p_ic = &cpuinfo_arc700[cpu].icache;
	READ_BCR(ARC_REG_IC_BCR, ibcr);

		BUG_ON(ibcr.config != 3);
		p_ic->assoc = 2;		/* Fixed to 2w set assoc */
	} else if (ibcr.ver >= 4) {
		p_ic->assoc = 1 << ibcr.config;	/* 1,2,4,8 */

	p_ic->line_len = 8 << ibcr.line_len;
	p_ic->sz_k = 1 << (ibcr.sz - 1);

	p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;
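	/*
	 * The alias check above is simply "more than one page worth of sets":
	 * e.g. a 32K, 2-way VIPT I-cache with 8K pages gives 32/2/8 = 2
	 * colours -> aliasing; a 16K, 2-way cache gives 1 -> no aliasing.
	 * (Illustrative geometries, not tied to a particular part.)
	 */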
	p_dc = &cpuinfo_arc700[cpu].dcache;
	READ_BCR(ARC_REG_DC_BCR, dbcr);

		BUG_ON(dbcr.config != 2);
		p_dc->assoc = 4;		/* Fixed to 4w set assoc */
		p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
	} else if (dbcr.ver >= 4) {
		p_dc->assoc = 1 << dbcr.config;	/* 1,2,4,8 */
		p_dc->alias = 0;		/* PIPT so can't VIPT alias */

	p_dc->line_len = 16 << dbcr.line_len;
	p_dc->sz_k = 1 << (dbcr.sz - 1);
		read_decode_cache_bcr_arcv2(cpu);
/*
 * Line Operation on {I,D}-Cache
 */

#define OP_FLUSH_N_INV		0x3
#define OP_INV_IC		0x4
/*
 * I-Cache Aliasing in ARC700 VIPT caches (MMU v1-v3)
 *
 * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
 * The orig Cache Management Module "CDU" only required paddr to invalidate a
 * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry.
 * In fact, for distinct V1,V2,P: all of {V1-P},{V2-P},{P-P} would end up
 * fetching the exact same line.
 *
 * However for larger Caches (way-size > page-size) - i.e. in Aliasing config,
 * paddr alone could not be used to correctly index the cache.
 *
 * MMU v1/v2 (Fixed Page Size 8k)
 *
 * The solution was to provide CDU with these additional vaddr bits. These
 * would be bits [x:13], x would depend on cache-geometry, 13 comes from
 * standard page size of 8k.
 * H/w folks chose [17:13] to be a future safe range, and more so these 5 bits
 * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
 * orig 5 bits of paddr were anyways ignored by CDU line ops, as they
 * represent the offset within cache-line. The adv of using this "clumsy"
 * interface for additional info was no new reg was needed in CDU programming.
 *
 * 17:13 represented the max num of bits passable, actual bits needed were
 * fewer, based on the num-of-aliases possible.
 * -for 2 alias possibility, only bit 13 needed (32K cache)
 * -for 4 alias possibility, bits 14:13 needed (64K cache)
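 *
 * Illustrative example of the stuffing (made-up addresses): for a 64K, 2-way
 * cache (4 aliases, so bits [14:13] are needed), paddr 0x8000_1040 and
 * vaddr 0x2000_5040:
 *     (vaddr >> PAGE_SHIFT) & 0x1F  =  (0x2000_5040 >> 13) & 0x1F  =  0x02
 *     value written for the line op =  0x8000_1040 | 0x02  =  0x8000_1042
 * i.e. the extra index bits ride in paddr[4:0], which the CDU line ops would
 * otherwise ignore as the intra-line offset (see __cache_line_loop_v2()).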
 *
 * MMU v3
 *
 * This ver of MMU supports variable page sizes (1k-16k): although Linux will
 * only support 8k (default), 16k and 4k.
 * However from hardware perspective, smaller page sizes aggravate aliasing,
 * meaning more vaddr bits needed to disambiguate the cache-line-op;
 * the existing scheme of piggybacking won't work for certain configurations.
 * Two new registers IC_PTAG and DC_PTAG introduced.
 * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
	unsigned int aux_cmd;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}
	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
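	 *
	 * Worked example (illustrative numbers): paddr = 0x8000_1234,
	 * sz = 0x100, with 64-byte lines:
	 *     sz    += 0x8000_1234 & 0x3F  ->  0x134
	 *     paddr &= ~0x3F               ->  0x8000_1200
	 *     num_lines = DIV_ROUND_UP(0x134, 64) = 5
	 * covering lines 0x8000_1200..0x8000_1300, which span the whole of
	 * the original request.
	 */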
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/* MMUv2 and before: paddr contains stuffed vaddrs bits */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
/*
 * For ARC700 MMUv3 I-cache and D-cache flushes
 * Also reused for HS38 aliasing I-cache configuration
 */
void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
	unsigned int aux_cmd, aux_tag;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
		aux_tag = ARC_REG_IC_PTAG;
	} else {
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
		aux_tag = ARC_REG_DC_PTAG;
	}
	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
	/*
	 * MMUv3, cache ops require paddr in PTAG reg
	 * if V-P const for loop, PTAG can be written once outside loop
	 */
		write_aux_reg(aux_tag, paddr);
	/*
	 * This is technically for MMU v4, using the MMU v3 programming model
	 * Special work for HS38 aliasing I-cache configuration with PAE40
	 *   - upper 8 bits of paddr need to be written into PTAG_HI
	 *   - (and needs to be written before the lower 32 bits)
	 * Note that PTAG_HI is hoisted outside the line loop
	 */
	if (is_pae40_enabled() && op == OP_INV_IC)
		write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
	while (num_lines-- > 0) {
			write_aux_reg(aux_tag, paddr);
			paddr += L1_CACHE_BYTES;

		write_aux_reg(aux_cmd, vaddr);
		vaddr += L1_CACHE_BYTES;
/*
 * In HS38x (MMU v4), I-cache is VIPT (can alias), D-cache is PIPT
 * Here's how cache ops are implemented
 *
 *  - D-cache: only paddr needed (in DC_IVDL/DC_FLDL)
 *  - I-cache Non Aliasing: Despite VIPT, only paddr needed (in IC_IVIL)
 *  - I-cache Aliasing: Both vaddr and paddr needed (in IC_IVIL, IC_PTAG
 *    respectively, similar to MMU v3 programming model, hence
 *    __cache_line_loop_v3() is used)
 *
 * If PAE40 is enabled, independent of aliasing considerations, the higher
 * bits need to be written into PTAG_HI
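 *
 * e.g. (PAE40 enabled, illustrative address): for paddr 0x1_2345_6780 the
 * code below writes 0x01 ((u64)paddr >> 32) into the PTAG_HI reg once, and
 * the loop then issues the per-line commands with the low 32 bits,
 * 0x2345_6780.
 */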
void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int cacheop)
	unsigned int aux_cmd;
	const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (cacheop == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}
	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
	/*
	 * For HS38 PAE40 configuration
	 *   - upper 8 bits of paddr need to be written into PTAG_HI
	 *   - (and needs to be written before the lower 32 bits)
	 */
	if (is_pae40_enabled()) {
		if (cacheop == OP_INV_IC)
			/*
			 * Non aliasing I-cache in HS38,
			 * aliasing I-cache handled in __cache_line_loop_v3()
			 */
			write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
		else
			write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
	}
	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
#if (CONFIG_ARC_MMU_VER < 3)
#define __cache_line_loop	__cache_line_loop_v2
#elif (CONFIG_ARC_MMU_VER == 3)
#define __cache_line_loop	__cache_line_loop_v3
#elif (CONFIG_ARC_MMU_VER > 3)
#define __cache_line_loop	__cache_line_loop_v4
#ifdef CONFIG_ARC_HAS_DCACHE

/***************************************************************
 * Machine specific helpers for Entire D-Cache or Per Line ops
 */
static inline void __before_dc_op(const int op)
	if (op == OP_FLUSH_N_INV) {
		/* Dcache provides 2 cmd: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by INV cmd but with IM=1
		 * So toggle INV sub-mode depending on op request and default
		 */
		const unsigned int ctl = ARC_REG_DC_CTRL;
		write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH);
	}
static inline void __after_dc_op(const int op)
		const unsigned int ctl = ARC_REG_DC_CTRL;

		/* flush / flush-n-inv both wait */
		while ((reg = read_aux_reg(ctl)) & DC_CTRL_FLUSH_STATUS)
			;

		/* Switch back to default Invalidate mode */
		if (op == OP_FLUSH_N_INV)
			write_aux_reg(ctl, reg & ~DC_CTRL_INV_MODE_FLUSH);
/*
 * Operation on Entire D-Cache
 * @op = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Note that constant propagation ensures all the checks are gone
 */
static inline void __dc_entire_op(const int op)
	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_REG_DC_IVDC;
	else
		aux = ARC_REG_DC_FLSH;

	write_aux_reg(aux, 0x1);
/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)
/*
 * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
 */
static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
				unsigned long sz, const int op)
	local_irq_save(flags);

	__cache_line_loop(paddr, vaddr, sz, op);

	local_irq_restore(flags);
#define __dc_entire_op(op)
#define __dc_line_op(paddr, vaddr, sz, op)
#define __dc_line_op_k(paddr, sz, op)

#endif /* CONFIG_ARC_HAS_DCACHE */

#ifdef CONFIG_ARC_HAS_ICACHE
static inline void __ic_entire_inv(void)
	write_aux_reg(ARC_REG_IC_IVIC, 1);
	read_aux_reg(ARC_REG_IC_CTRL);	/* blocks */
__ic_line_inv_vaddr_local(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz)
	local_irq_save(flags);
	(*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC);
	local_irq_restore(flags);

#define __ic_line_inv_vaddr(p, v, s)	__ic_line_inv_vaddr_local(p, v, s)
	phys_addr_t paddr, vaddr;
static void __ic_line_inv_vaddr_helper(void *info)
	struct ic_inv_args *ic_inv = info;

	__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
	struct ic_inv_args ic_inv = {

	on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);

#endif	/* CONFIG_SMP */
#else	/* !CONFIG_ARC_HAS_ICACHE */

#define __ic_entire_inv()
#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif /* CONFIG_ARC_HAS_ICACHE */
noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
#ifdef CONFIG_ISA_ARCV2
	/*
	 * SLC is shared between all cores and concurrent aux operations from
	 * multiple cores need to be serialized using a spinlock
	 * A concurrent operation can be silently ignored and/or the old/new
	 * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY
	 * loop below)
	 */
	static DEFINE_SPINLOCK(lock);

	spin_lock_irqsave(&lock, flags);
	/*
	 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
	 *  - b'000 (default) is Flush,
	 *  - b'001 is Invalidate if CTRL.IM == 0
	 *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
	 */
	ctrl = read_aux_reg(ARC_REG_SLC_CTRL);
	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */

		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
		ctrl &= ~SLC_CTRL_RGN_OP_INV;

	write_aux_reg(ARC_REG_SLC_CTRL, ctrl);
	/*
	 * Lower bits are ignored, no need to clip
	 * END needs to be setup before START (latter triggers the operation)
	 * END can't be same as START, so add (l2_line_sz - 1) to sz
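	 *
	 * Worked example (illustrative numbers): paddr = 0x8000_0040,
	 * sz = 0x100, l2_line_sz = 64:
	 *     END   = 0x8000_0040 + 0x100 + 63 = 0x8000_017f
	 *     START = 0x8000_0040
	 * The SLC ignores the sub-line bits of both, so the region op is
	 * line-granular and may simply extend to one more line at the end.
	 */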
	write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1));
	write_aux_reg(ARC_REG_SLC_RGN_START, paddr);
	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);

	spin_unlock_irqrestore(&lock, flags);
/***********************************************************
 * Handle cache congruency of kernel and userspace mappings of page when kernel
 * writes-to/reads-from
 *
 * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
 *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
 *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
 *  -In SMP, if hardware caches are coherent
 *
 * There's a corollary case, where kernel READs from a userspace mapped page.
 * If the U-mapping is not congruent to the K-mapping, the former needs flushing.
 */
void flush_dcache_page(struct page *page)
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		clear_bit(PG_dc_clean, &page->flags);

	/* don't handle anon pages here */
	mapping = page_mapping(page);

	/*
	 * pagecache page, file not yet mapped to userspace
	 * Make a note that K-mapping is dirty
	 */
	if (!mapping_mapped(mapping)) {
		clear_bit(PG_dc_clean, &page->flags);
	} else if (page_mapcount(page)) {

		/* kernel reading from page with U-mapping */
		phys_addr_t paddr = (unsigned long)page_address(page);
		unsigned long vaddr = page->index << PAGE_SHIFT;

		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_page(paddr, vaddr);
	}

EXPORT_SYMBOL(flush_dcache_page);
/*
 * DMA ops for systems with L1 cache only
 * Make memory coherent with L1 cache by flushing/invalidating L1 lines
 */
static void __dma_cache_wback_inv_l1(phys_addr_t start, unsigned long sz)
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);

static void __dma_cache_inv_l1(phys_addr_t start, unsigned long sz)
	__dc_line_op_k(start, sz, OP_INV);

static void __dma_cache_wback_l1(phys_addr_t start, unsigned long sz)
	__dc_line_op_k(start, sz, OP_FLUSH);
/*
 * DMA ops for systems with both L1 and L2 caches, but without IOC
 * Both L1 and L2 lines need to be explicitly flushed/invalidated
 */
static void __dma_cache_wback_inv_slc(phys_addr_t start, unsigned long sz)
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
	slc_op(start, sz, OP_FLUSH_N_INV);

static void __dma_cache_inv_slc(phys_addr_t start, unsigned long sz)
	__dc_line_op_k(start, sz, OP_INV);
	slc_op(start, sz, OP_INV);

static void __dma_cache_wback_slc(phys_addr_t start, unsigned long sz)
	__dc_line_op_k(start, sz, OP_FLUSH);
	slc_op(start, sz, OP_FLUSH);
/*
 * DMA ops for systems with IOC
 * IOC hardware snoops all DMA traffic keeping the caches consistent with
 * memory - eliding the need for any explicit cache maintenance of DMA buffers
 */
static void __dma_cache_wback_inv_ioc(phys_addr_t start, unsigned long sz) {}
static void __dma_cache_inv_ioc(phys_addr_t start, unsigned long sz) {}
static void __dma_cache_wback_ioc(phys_addr_t start, unsigned long sz) {}
void dma_cache_wback_inv(phys_addr_t start, unsigned long sz)
	__dma_cache_wback_inv(start, sz);
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(phys_addr_t start, unsigned long sz)
	__dma_cache_inv(start, sz);
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(phys_addr_t start, unsigned long sz)
	__dma_cache_wback(start, sz);
EXPORT_SYMBOL(dma_cache_wback);
/*
 * This is the API for making I/D Caches consistent when modifying
 * kernel code (loadable modules, kprobes, kgdb...)
 * This is called on insmod, with kernel virtual address for CODE of
 * the module. ARC cache maintenance ops require PHY address thus we
 * need to convert vmalloc addr to PHY addr
 */
void flush_icache_range(unsigned long kstart, unsigned long kend)
	WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);

	/* Shortcut for bigger flush ranges.
	 * Here we don't care if this was kernel virtual or phy addr
	 */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {
	/* Case: Kernel Phy addr (0x8000_0000 onwards) */
	if (likely(kstart > PAGE_OFFSET)) {
		/*
		 * The 2nd arg despite being paddr will be used to index icache
		 * This is OK since no alternate virtual mappings will exist
		 * given the callers for this case: kprobe/kgdb in built-in
		 */
		__sync_icache_dcache(kstart, kstart, kend - kstart);
	/*
	 * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
	 * (1) ARC Cache Maintenance ops only take Phy addr, hence special
	 *     handling of kernel vaddr.
	 *
	 * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
	 *     it still needs to handle a 2 page scenario, where the range
	 *     straddles across 2 virtual pages and hence need for loop
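	 *
	 * e.g. (illustrative, assuming 8K pages): kstart = 0x7000_1f80,
	 * tot_sz = 0x100: the first pass handles off = 0x1f80, sz = 0x80 (up
	 * to the page boundary), the next pass handles the remaining 0x80
	 * bytes on the following virtual page, each with its own
	 * vmalloc_to_pfn() lookup.
	 */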
		unsigned int off, sz;
		unsigned long phy, pfn;

		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);

EXPORT_SYMBOL(flush_icache_range);
/*
 * General purpose helper to make I and D cache lines consistent.
 * @paddr is phy addr of region
 * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
 *    However in one instance, when called by kprobe (for a breakpt in
 *    builtin kernel code) @vaddr will be paddr only, meaning CDU operation will
 *    use a paddr to index the cache (despite VIPT). This is fine since a
 *    builtin kernel page will not have any virtual mappings.
 *    kprobe on loadable module will be kernel vaddr.
 */
void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len)
	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
	__ic_line_inv_vaddr(paddr, vaddr, len);
/* wrapper to compile time eliminate alignment checks in flush loop */
void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr)
	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
/*
 * wrapper to clearout kernel or userspace mappings of a page
 * For kernel mappings @vaddr == @paddr
 */
void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr)
	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
noinline void flush_cache_all(void)
	local_irq_save(flags);

	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);
#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING
void flush_cache_mm(struct mm_struct *mm)
void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
		      unsigned long pfn)
	unsigned int paddr = pfn << PAGE_SHIFT;

	u_vaddr &= PAGE_MASK;

	__flush_dcache_page(paddr, u_vaddr);

	if (vma->vm_flags & VM_EXEC)
		__inv_icache_page(paddr, u_vaddr);
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
void flush_anon_page(struct vm_area_struct *vma, struct page *page,
		     unsigned long u_vaddr)
	/* TBD: do we really need to clear the kernel mapping */
	__flush_dcache_page(page_address(page), u_vaddr);
	__flush_dcache_page(page_address(page), page_address(page));
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long u_vaddr, struct vm_area_struct *vma)
	void *kfrom = kmap_atomic(from);
	void *kto = kmap_atomic(to);
	int clean_src_k_mappings = 0;
	/*
	 * If SRC page was already mapped in userspace AND its U-mapping is
	 * not congruent with K-mapping, sync former to physical page so that
	 * K-mapping in memcpy below, sees the right data
	 *
	 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
	 * equally valid for SRC page as well
	 *
	 * For !VIPT cache, all of this gets compiled out as
	 * addr_not_cache_congruent() is 0
	 */
	if (page_mapcount(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_page((unsigned long)kfrom, u_vaddr);
		clean_src_k_mappings = 1;
	}
	copy_page(kto, kfrom);

	/*
	 * Mark DST page K-mapping as dirty for a later finalization by
	 * update_mmu_cache(). Although the finalization could have been done
	 * here as well (given that both vaddr/paddr are available).
	 * But update_mmu_cache() already has code to do that for other
	 * non copied user pages (e.g. read faults which wire in pagecache page)
	 */
	clear_bit(PG_dc_clean, &to->flags);
	/*
	 * if SRC was already usermapped and non-congruent to kernel mapping
	 * sync the kernel mapping back to physical page
	 */
	if (clean_src_k_mappings) {
		__flush_dcache_page((unsigned long)kfrom, (unsigned long)kfrom);
		set_bit(PG_dc_clean, &from->flags);
	} else {
		clear_bit(PG_dc_clean, &from->flags);
	}

	kunmap_atomic(kfrom);
void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
	clear_bit(PG_dc_clean, &page->flags);
/**********************************************************************
 * Explicit Cache flush request from user space via syscall
 * Needed for JITs which generate code on the fly
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
	/* TBD: optimize this */
void arc_cache_init(void)
	unsigned int __maybe_unused cpu = smp_processor_id();

	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
	/*
	 * Only master CPU needs to execute rest of function:
	 *  - Assume SMP so all cores will have same cache config so
	 *    any geometry checks will be same for all
	 *  - IOC setup / dma callbacks only need to be setup once
	 */
	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;

			panic("cache support enabled but non-existent cache\n");

		if (ic->line_len != L1_CACHE_BYTES)
			panic("ICache line [%d] != kernel Config [%d]",
			      ic->line_len, L1_CACHE_BYTES);

		/*
		 * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
		 * pair to provide vaddr/paddr respectively, just as in MMU v3
		 */
		if (is_isa_arcv2() && ic->alias)
			_cache_line_loop_ic_fn = __cache_line_loop_v3;
		else
			_cache_line_loop_ic_fn = __cache_line_loop;
	}
	if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
		struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;

			panic("cache support enabled but non-existent cache\n");

		if (dc->line_len != L1_CACHE_BYTES)
			panic("DCache line [%d] != kernel Config [%d]",
			      dc->line_len, L1_CACHE_BYTES);

		/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
		if (is_isa_arcompact()) {
			int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
			int num_colors = dc->sz_k/dc->assoc/TO_KB(PAGE_SIZE);

					panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
				if (CACHE_COLORS_NUM != num_colors)
					panic("CACHE_COLORS_NUM not optimized for config\n");
			} else if (!dc->alias && handled) {
				panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
			}
		}
	}
	if (is_isa_arcv2() && l2_line_sz && !slc_enable) {
		/* IM set : flush before invalidate */
		write_aux_reg(ARC_REG_SLC_CTRL,
			read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_IM);

		write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);

		/* Important to wait for flush to complete */
		while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
		write_aux_reg(ARC_REG_SLC_CTRL,
			read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_DISABLE);
	}
	if (is_isa_arcv2() && ioc_enable) {
		/* IO coherency base - 0x8z */
		write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
		/* IO coherency aperture size - 512Mb: 0x8z-0xAz */
		write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, 0x11);
		/* Enable partial writes */
		write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
		/* Enable IO coherency */
		write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);

		__dma_cache_wback_inv = __dma_cache_wback_inv_ioc;
		__dma_cache_inv = __dma_cache_inv_ioc;
		__dma_cache_wback = __dma_cache_wback_ioc;
	} else if (is_isa_arcv2() && l2_line_sz && slc_enable) {
		__dma_cache_wback_inv = __dma_cache_wback_inv_slc;
		__dma_cache_inv = __dma_cache_inv_slc;
		__dma_cache_wback = __dma_cache_wback_slc;
	} else {
		__dma_cache_wback_inv = __dma_cache_wback_inv_l1;
		__dma_cache_inv = __dma_cache_inv_l1;
		__dma_cache_wback = __dma_cache_wback_l1;
	}