1 /*
2 * ARC Cache Management
3 *
4 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
5 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12 #include <linux/module.h>
13 #include <linux/mm.h>
14 #include <linux/sched.h>
15 #include <linux/cache.h>
16 #include <linux/mmu_context.h>
17 #include <linux/syscalls.h>
18 #include <linux/uaccess.h>
19 #include <linux/pagemap.h>
20 #include <asm/cacheflush.h>
21 #include <asm/cachectl.h>
22 #include <asm/setup.h>
23
24 static int l2_line_sz;
25 static int ioc_exists;
26 int slc_enable = 1, ioc_enable = 1;
27 unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
28 unsigned long perip_end = 0xFFFFFFFF; /* legacy value */
29
30 void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
31 unsigned long sz, const int cacheop);
32
33 void (*__dma_cache_wback_inv)(phys_addr_t start, unsigned long sz);
34 void (*__dma_cache_inv)(phys_addr_t start, unsigned long sz);
35 void (*__dma_cache_wback)(phys_addr_t start, unsigned long sz);
36
37 char *arc_cache_mumbojumbo(int c, char *buf, int len)
38 {
39 int n = 0;
40 struct cpuinfo_arc_cache *p;
41
42 #define PR_CACHE(p, cfg, str) \
43 if (!(p)->line_len) \
44 n += scnprintf(buf + n, len - n, str"\t\t: N/A\n"); \
45 else \
46 n += scnprintf(buf + n, len - n, \
47 str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n", \
48 (p)->sz_k, (p)->assoc, (p)->line_len, \
49 (p)->vipt ? "VIPT" : "PIPT", \
50 (p)->alias ? " aliasing" : "", \
51 IS_USED_CFG(cfg));
52
53 PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
54 PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");
55
56 p = &cpuinfo_arc700[c].slc;
57 if (p->line_len)
58 n += scnprintf(buf + n, len - n,
59 "SLC\t\t: %uK, %uB Line%s\n",
60 p->sz_k, p->line_len, IS_USED_RUN(slc_enable));
61
62 n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
63 perip_base,
64 IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency "));
65
66 return buf;
67 }
68
69 /*
70 * Read the Cache Build Configuration Registers, decode them and save into
71 * the cpuinfo structure for later use.
72 * No validation is done here, simply read/convert the BCRs
73 */
74 static void read_decode_cache_bcr_arcv2(int cpu)
75 {
76 struct cpuinfo_arc_cache *p_slc = &cpuinfo_arc700[cpu].slc;
77 struct bcr_generic sbcr;
78
79 struct bcr_slc_cfg {
80 #ifdef CONFIG_CPU_BIG_ENDIAN
81 unsigned int pad:24, way:2, lsz:2, sz:4;
82 #else
83 unsigned int sz:4, lsz:2, way:2, pad:24;
84 #endif
85 } slc_cfg;
86
87 struct bcr_clust_cfg {
88 #ifdef CONFIG_CPU_BIG_ENDIAN
89 unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
90 #else
91 unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
92 #endif
93 } cbcr;
94
95 struct bcr_volatile {
96 #ifdef CONFIG_CPU_BIG_ENDIAN
97 unsigned int start:4, limit:4, pad:22, order:1, disable:1;
98 #else
99 unsigned int disable:1, order:1, pad:22, limit:4, start:4;
100 #endif
101 } vol;
102
103
104 READ_BCR(ARC_REG_SLC_BCR, sbcr);
105 if (sbcr.ver) {
106 READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
107 p_slc->sz_k = 128 << slc_cfg.sz;
108 l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
109 }
110
111 READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
112 if (cbcr.c)
113 ioc_exists = 1;
114 else
115 ioc_enable = 0;
116
117 /* HS 2.0 didn't have AUX_VOL */
118 if (cpuinfo_arc700[cpu].core.family > 0x51) {
119 READ_BCR(AUX_VOL, vol);
120 perip_base = vol.start << 28;
121 /* HS 3.0 has limit and strict-ordering fields */
122 if (cpuinfo_arc700[cpu].core.family > 0x52)
123 perip_end = (vol.limit << 28) - 1;
124 }
125 }
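
/*
 * Illustrative sketch, not used by the code above: worked example of the
 * SLC_CFG decode in read_decode_cache_bcr_arcv2(). With slc_cfg.sz == 2 and
 * slc_cfg.lsz == 0:
 *	sz_k     = 128 << 2 = 512 (KB)
 *	line_len = 128 (lsz == 0), otherwise 64
 */
static inline unsigned int slc_sz_k_decode_example(unsigned int sz_field)
{
	return 128 << sz_field;	/* 0 -> 128K, 1 -> 256K, 2 -> 512K ... */
}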
126
127 void read_decode_cache_bcr(void)
128 {
129 struct cpuinfo_arc_cache *p_ic, *p_dc;
130 unsigned int cpu = smp_processor_id();
131 struct bcr_cache {
132 #ifdef CONFIG_CPU_BIG_ENDIAN
133 unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
134 #else
135 unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
136 #endif
137 } ibcr, dbcr;
138
139 p_ic = &cpuinfo_arc700[cpu].icache;
140 READ_BCR(ARC_REG_IC_BCR, ibcr);
141
142 if (!ibcr.ver)
143 goto dc_chk;
144
145 if (ibcr.ver <= 3) {
146 BUG_ON(ibcr.config != 3);
147 p_ic->assoc = 2; /* Fixed to 2w set assoc */
148 } else if (ibcr.ver >= 4) {
149 p_ic->assoc = 1 << ibcr.config; /* 1,2,4,8 */
150 }
151
152 p_ic->line_len = 8 << ibcr.line_len;
153 p_ic->sz_k = 1 << (ibcr.sz - 1);
154 p_ic->vipt = 1;
155 p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;
156
157 dc_chk:
158 p_dc = &cpuinfo_arc700[cpu].dcache;
159 READ_BCR(ARC_REG_DC_BCR, dbcr);
160
161 if (!dbcr.ver)
162 goto slc_chk;
163
164 if (dbcr.ver <= 3) {
165 BUG_ON(dbcr.config != 2);
166 p_dc->assoc = 4; /* Fixed to 4w set assoc */
167 p_dc->vipt = 1;
168 p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
169 } else if (dbcr.ver >= 4) {
170 p_dc->assoc = 1 << dbcr.config; /* 1,2,4,8 */
171 p_dc->vipt = 0;
172 p_dc->alias = 0; /* PIPT so can't VIPT alias */
173 }
174
175 p_dc->line_len = 16 << dbcr.line_len;
176 p_dc->sz_k = 1 << (dbcr.sz - 1);
177
178 slc_chk:
179 if (is_isa_arcv2())
180 read_decode_cache_bcr_arcv2(cpu);
181 }
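
/*
 * Illustrative sketch, not used by the code above: the @alias test in
 * numbers. Aliasing is possible when way-size exceeds page-size; e.g. a
 * 32K, 2-way VIPT cache with 8K pages: 32 / 2 / TO_KB(8192) = 2 colors > 1.
 */
static inline int vipt_may_alias_example(unsigned int sz_k, unsigned int assoc)
{
	return (sz_k / assoc / TO_KB(PAGE_SIZE)) > 1;
}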
182
183 /*
184 * Line Operation on {I,D}-Cache
185 */
186
187 #define OP_INV 0x1
188 #define OP_FLUSH 0x2
189 #define OP_FLUSH_N_INV 0x3
190 #define OP_INV_IC 0x4
191
192 /*
193 * I-Cache Aliasing in ARC700 VIPT caches (MMU v1-v3)
194 *
195 * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
196 * The orig Cache Management Module "CDU" only required paddr to invalidate a
197 * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry.
198 * In fact for distinct V1,V2,P: all of {V1-P},{V2-P},{P-P} would end up fetching
199 * the exact same line.
200 *
201 * However for larger Caches (way-size > page-size) - i.e. in Aliasing config,
202 * paddr alone could not be used to correctly index the cache.
203 *
204 * ------------------
205 * MMU v1/v2 (Fixed Page Size 8k)
206 * ------------------
207 * The solution was to provide CDU with these additional vaddr bits. These
208 * would be bits [x:13], x would depend on cache-geometry, 13 comes from
209 * standard page size of 8k.
210 * H/w folks chose [17:13] to be a future safe range, and more so these 5 bits
211 * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
212 * orig 5 bits of paddr were anyway ignored by CDU line ops, as they
213 * represent the offset within cache-line. The advantage of using this
214 * "clumsy" interface for the additional info was that no new reg was needed
215 * in the CDU programming model.
216 *
217 * 17:13 represented the max num of bits passable, actual bits needed were
218 * fewer, based on the num-of-aliases possible.
219 * -for 2 alias possibility, only bit 13 needed (32K cache)
220 * -for 4 alias possibility, bits 14:13 needed (64K cache)
221 *
222 * ------------------
223 * MMU v3
224 * ------------------
225 * This version of the MMU supports variable page sizes (1k-16k): although
226 * Linux only supports 8k (default), 16k and 4k.
227 * However from a hardware perspective, smaller page sizes aggravate aliasing,
228 * meaning more vaddr bits are needed to disambiguate the cache-line-op;
229 * the existing scheme of piggybacking won't work for certain configurations.
230 * Two new registers, IC_PTAG and DC_PTAG, were introduced.
231 * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
232 */
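
/*
 * Illustrative sketch, not used by the loops below: how the MMU v1/v2
 * "stuffing" described above composes a single CDU operand. vaddr bits
 * [17:13] ride in paddr bits [4:0], which CDU line ops ignore as the
 * within-line offset anyway (mirrors the OR in __cache_line_loop_v2()).
 */
static inline phys_addr_t cdu_stuff_vaddr_example(phys_addr_t paddr,
						  unsigned long vaddr)
{
	return (paddr & CACHE_LINE_MASK) | ((vaddr >> PAGE_SHIFT) & 0x1F);
}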
233
234 static inline
235 void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
236 unsigned long sz, const int op)
237 {
238 unsigned int aux_cmd;
239 int num_lines;
240 const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
241
242 if (op == OP_INV_IC) {
243 aux_cmd = ARC_REG_IC_IVIL;
244 } else {
245 /* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
246 aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
247 }
248
249 /* Ensure we properly floor/ceil the non-line aligned/sized requests
250 * and have @paddr aligned to cache line and an integral @num_lines.
251 * This however can be avoided for page-sized requests since:
252 * -@paddr will be cache-line aligned already (being page aligned)
253 * -@sz will be an integral multiple of line size (being page sized).
254 */
255 if (!full_page) {
256 sz += paddr & ~CACHE_LINE_MASK;
257 paddr &= CACHE_LINE_MASK;
258 vaddr &= CACHE_LINE_MASK;
259 }
260
261 num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
262
263 /* MMUv2 and before: paddr contains stuffed vaddr bits */
264 paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
265
266 while (num_lines-- > 0) {
267 write_aux_reg(aux_cmd, paddr);
268 paddr += L1_CACHE_BYTES;
269 }
270 }
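
/*
 * Illustrative sketch, not used by the loops: the floor/ceil fixup above in
 * numbers, for a 64B line, paddr = 0x1010, sz = 0x30:
 *	sz    += 0x1010 & ~CACHE_LINE_MASK	-> 0x30 + 0x10 = 0x40
 *	paddr &= CACHE_LINE_MASK		-> 0x1000
 *	num_lines = DIV_ROUND_UP(0x40, 64)	-> 1 line covers 0x1000..0x103F
 */
static inline int num_cache_lines_example(phys_addr_t paddr, unsigned long sz)
{
	sz += paddr & ~CACHE_LINE_MASK;	/* account for leading slack */
	return DIV_ROUND_UP(sz, L1_CACHE_BYTES);
}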
271
272 /*
273 * For ARC700 MMUv3 I-cache and D-cache flushes
274 * - ARC700 programming model requires paddr and vaddr be passed in separate
275 * AUX registers (*_IV*L and *_PTAG respectively) irrespective of whether the
276 * caches actually alias or not.
277 * - For HS38, only the aliasing I-cache configuration uses the PTAG reg
278 * (non aliasing I-cache version doesn't; while D-cache can't possibly alias)
279 */
280 static inline
281 void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
282 unsigned long sz, const int op)
283 {
284 unsigned int aux_cmd, aux_tag;
285 int num_lines;
286 const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
287
288 if (op == OP_INV_IC) {
289 aux_cmd = ARC_REG_IC_IVIL;
290 aux_tag = ARC_REG_IC_PTAG;
291 } else {
292 aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
293 aux_tag = ARC_REG_DC_PTAG;
294 }
295
296 /* Ensure we properly floor/ceil the non-line aligned/sized requests
297 * and have @paddr aligned to cache line and an integral @num_lines.
298 * This however can be avoided for page-sized requests since:
299 * -@paddr will be cache-line aligned already (being page aligned)
300 * -@sz will be an integral multiple of line size (being page sized).
301 */
302 if (!full_page) {
303 sz += paddr & ~CACHE_LINE_MASK;
304 paddr &= CACHE_LINE_MASK;
305 vaddr &= CACHE_LINE_MASK;
306 }
307 num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
308
309 /*
310 * MMUv3, cache ops require paddr in PTAG reg
311 * if V-P const for loop, PTAG can be written once outside loop
312 */
313 if (full_page)
314 write_aux_reg(aux_tag, paddr);
315
316 /*
317 * This is technically for MMU v4, using the MMU v3 programming model
318 * Special work for HS38 aliasing I-cache configuration with PAE40
319 * - upper 8 bits of paddr need to be written into PTAG_HI
320 * - (and need to be written before the lower 32 bits)
321 * Note that PTAG_HI is hoisted outside the line loop
322 */
323 if (is_pae40_enabled() && op == OP_INV_IC)
324 write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
325
326 while (num_lines-- > 0) {
327 if (!full_page) {
328 write_aux_reg(aux_tag, paddr);
329 paddr += L1_CACHE_BYTES;
330 }
331
332 write_aux_reg(aux_cmd, vaddr);
333 vaddr += L1_CACHE_BYTES;
334 }
335 }
336
337 /*
338 * In HS38x (MMU v4), I-cache is VIPT (can alias), D-cache is PIPT
339 * Here's how cache ops are implemented
340 *
341 * - D-cache: only paddr needed (in DC_IVDL/DC_FLDL)
342 * - I-cache Non Aliasing: Despite VIPT, only paddr needed (in IC_IVIL)
343 * - I-cache Aliasing: Both vaddr and paddr needed (in IC_IVIL, IC_PTAG
344 * respectively, similar to MMU v3 programming model, hence
345 * __cache_line_loop_v3() is used)
346 *
347 * If PAE40 is enabled, independent of aliasing considerations, the higher bits
348 * need to be written into PTAG_HI
349 */
350 static inline
351 void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
352 unsigned long sz, const int cacheop)
353 {
354 unsigned int aux_cmd;
355 int num_lines;
356 const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;
357
358 if (cacheop == OP_INV_IC) {
359 aux_cmd = ARC_REG_IC_IVIL;
360 } else {
361 /* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
362 aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
363 }
364
365 /* Ensure we properly floor/ceil the non-line aligned/sized requests
366 * and have @paddr aligned to cache line and an integral @num_lines.
367 * This however can be avoided for page-sized requests since:
368 * -@paddr will be cache-line aligned already (being page aligned)
369 * -@sz will be an integral multiple of line size (being page sized).
370 */
371 if (!full_page_op) {
372 sz += paddr & ~CACHE_LINE_MASK;
373 paddr &= CACHE_LINE_MASK;
374 }
375
376 num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
377
378 /*
379 * For HS38 PAE40 configuration
380 * - upper 8 bits of paddr need to be written into PTAG_HI
381 * - (and need to be written before the lower 32 bits)
382 */
383 if (is_pae40_enabled()) {
384 if (cacheop == OP_INV_IC)
385 /*
386 * Non aliasing I-cache in HS38,
387 * aliasing I-cache handled in __cache_line_loop_v3()
388 */
389 write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
390 else
391 write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
392 }
393
394 while (num_lines-- > 0) {
395 write_aux_reg(aux_cmd, paddr);
396 paddr += L1_CACHE_BYTES;
397 }
398 }
399
400 #if (CONFIG_ARC_MMU_VER < 3)
401 #define __cache_line_loop __cache_line_loop_v2
402 #elif (CONFIG_ARC_MMU_VER == 3)
403 #define __cache_line_loop __cache_line_loop_v3
404 #elif (CONFIG_ARC_MMU_VER > 3)
405 #define __cache_line_loop __cache_line_loop_v4
406 #endif
407
408 #ifdef CONFIG_ARC_HAS_DCACHE
409
410 /***************************************************************
411 * Machine specific helpers for Entire D-Cache or Per Line ops
412 */
413
414 static inline void __before_dc_op(const int op)
415 {
416 if (op == OP_FLUSH_N_INV) {
417 /* Dcache provides 2 cmd: FLUSH or INV
418 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
419 * flush-n-inv is achieved by INV cmd but with IM=1
420 * So toggle INV sub-mode depending on op request and default
421 */
422 const unsigned int ctl = ARC_REG_DC_CTRL;
423 write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH);
424 }
425 }
426
427 static inline void __after_dc_op(const int op)
428 {
429 if (op & OP_FLUSH) {
430 const unsigned int ctl = ARC_REG_DC_CTRL;
431 unsigned int reg;
432
433 /* flush / flush-n-inv both wait */
434 while ((reg = read_aux_reg(ctl)) & DC_CTRL_FLUSH_STATUS)
435 ;
436
437 /* Switch back to default Invalidate mode */
438 if (op == OP_FLUSH_N_INV)
439 write_aux_reg(ctl, reg & ~DC_CTRL_INV_MODE_FLUSH);
440 }
441 }
442
443 /*
444 * Operation on Entire D-Cache
445 * @op = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
446 * Note that constant propagation ensures all the checks are gone
447 * in generated code
448 */
449 static inline void __dc_entire_op(const int op)
450 {
451 int aux;
452
453 __before_dc_op(op);
454
455 if (op & OP_INV) /* Inv or flush-n-inv use same cmd reg */
456 aux = ARC_REG_DC_IVDC;
457 else
458 aux = ARC_REG_DC_FLSH;
459
460 write_aux_reg(aux, 0x1);
461
462 __after_dc_op(op);
463 }
464
465 static inline void __dc_disable(void)
466 {
467 const int r = ARC_REG_DC_CTRL;
468
469 __dc_entire_op(OP_FLUSH_N_INV);
470 write_aux_reg(r, read_aux_reg(r) | DC_CTRL_DIS);
471 }
472
473 static void __dc_enable(void)
474 {
475 const int r = ARC_REG_DC_CTRL;
476
477 write_aux_reg(r, read_aux_reg(r) & ~DC_CTRL_DIS);
478 }
479
480 /* For kernel mappings cache operation: index is same as paddr */
481 #define __dc_line_op_k(p, sz, op) __dc_line_op(p, p, sz, op)
482
483 /*
484 * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
485 */
486 static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
487 unsigned long sz, const int op)
488 {
489 unsigned long flags;
490
491 local_irq_save(flags);
492
493 __before_dc_op(op);
494
495 __cache_line_loop(paddr, vaddr, sz, op);
496
497 __after_dc_op(op);
498
499 local_irq_restore(flags);
500 }
501
502 #else
503
504 #define __dc_entire_op(op)
505 #define __dc_disable()
506 #define __dc_enable()
507 #define __dc_line_op(paddr, vaddr, sz, op)
508 #define __dc_line_op_k(paddr, sz, op)
509
510 #endif /* CONFIG_ARC_HAS_DCACHE */
511
512 #ifdef CONFIG_ARC_HAS_ICACHE
513
514 static inline void __ic_entire_inv(void)
515 {
516 write_aux_reg(ARC_REG_IC_IVIC, 1);
517 read_aux_reg(ARC_REG_IC_CTRL); /* blocks */
518 }
519
520 static inline void
521 __ic_line_inv_vaddr_local(phys_addr_t paddr, unsigned long vaddr,
522 unsigned long sz)
523 {
524 unsigned long flags;
525
526 local_irq_save(flags);
527 (*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC);
528 local_irq_restore(flags);
529 }
530
531 #ifndef CONFIG_SMP
532
533 #define __ic_line_inv_vaddr(p, v, s) __ic_line_inv_vaddr_local(p, v, s)
534
535 #else
536
537 struct ic_inv_args {
538 phys_addr_t paddr, vaddr;
539 int sz;
540 };
541
542 static void __ic_line_inv_vaddr_helper(void *info)
543 {
544 struct ic_inv_args *ic_inv = info;
545
546 __ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
547 }
548
549 static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
550 unsigned long sz)
551 {
552 struct ic_inv_args ic_inv = {
553 .paddr = paddr,
554 .vaddr = vaddr,
555 .sz = sz
556 };
557
558 on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
559 }
560
561 #endif /* CONFIG_SMP */
562
563 #else /* !CONFIG_ARC_HAS_ICACHE */
564
565 #define __ic_entire_inv()
566 #define __ic_line_inv_vaddr(pstart, vstart, sz)
567
568 #endif /* CONFIG_ARC_HAS_ICACHE */
569
570 noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
571 {
572 #ifdef CONFIG_ISA_ARCV2
573 /*
574 * SLC is shared between all cores and concurrent aux operations from
575 * multiple cores need to be serialized using a spinlock
576 * A concurrent operation can be silently ignored and/or the old/new
577 * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY loop
578 * below)
579 */
580 static DEFINE_SPINLOCK(lock);
581 unsigned long flags;
582 unsigned int ctrl;
583
584 spin_lock_irqsave(&lock, flags);
585
586 /*
587 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
588 * - b'000 (default) is Flush,
589 * - b'001 is Invalidate if CTRL.IM == 0
590 * - b'001 is Flush-n-Invalidate if CTRL.IM == 1
591 */
592 ctrl = read_aux_reg(ARC_REG_SLC_CTRL);
593
594 /* Don't rely on default value of IM bit */
595 if (!(op & OP_FLUSH)) /* i.e. OP_INV */
596 ctrl &= ~SLC_CTRL_IM; /* clear IM: Disable flush before Inv */
597 else
598 ctrl |= SLC_CTRL_IM;
599
600 if (op & OP_INV)
601 ctrl |= SLC_CTRL_RGN_OP_INV; /* Inv or flush-n-inv */
602 else
603 ctrl &= ~SLC_CTRL_RGN_OP_INV;
604
605 write_aux_reg(ARC_REG_SLC_CTRL, ctrl);
606
607 /*
608 * Lower bits are ignored, no need to clip
609 * END needs to be setup before START (latter triggers the operation)
610 * END can't be same as START, so add (l2_line_sz - 1) to sz
611 */
612 write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1));
613 write_aux_reg(ARC_REG_SLC_RGN_START, paddr);
614
615 while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
616
617 spin_unlock_irqrestore(&lock, flags);
618 #endif
619 }
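
/*
 * Illustrative sketch of the END programming rule above, in numbers. With
 * l2_line_sz = 64, paddr = 0x8000_0040 and sz = 0x40:
 *	END   = 0x8000_0040 + 0x40 + 63 = 0x8000_00bf (lower bits ignored)
 *	START = 0x8000_0040
 * so END always lands past the last byte and can never equal START.
 */
static inline phys_addr_t slc_rgn_end_example(phys_addr_t paddr, unsigned long sz)
{
	return paddr + sz + l2_line_sz - 1;
}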
620
621 static noinline void slc_entire_op(const int op)
622 {
623 unsigned int ctrl, r = ARC_REG_SLC_CTRL;
624
625 ctrl = read_aux_reg(r);
626
627 if (!(op & OP_FLUSH)) /* i.e. OP_INV */
628 ctrl &= ~SLC_CTRL_IM; /* clear IM: Disable flush before Inv */
629 else
630 ctrl |= SLC_CTRL_IM;
631
632 write_aux_reg(r, ctrl);
633
634 write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);
635
636 /* Make sure "busy" bit reports correct status, see STAR 9001165532 */
637 read_aux_reg(r);
638
639 /* Important to wait for flush to complete */
640 while (read_aux_reg(r) & SLC_CTRL_BUSY);
641 }
642
643 static inline void arc_slc_disable(void)
644 {
645 const int r = ARC_REG_SLC_CTRL;
646
647 slc_entire_op(OP_FLUSH_N_INV);
648 write_aux_reg(r, read_aux_reg(r) | SLC_CTRL_DIS);
649 }
650
651 static inline void arc_slc_enable(void)
652 {
653 const int r = ARC_REG_SLC_CTRL;
654
655 write_aux_reg(r, read_aux_reg(r) & ~SLC_CTRL_DIS);
656 }
657
658 /***********************************************************
659 * Exported APIs
660 */
661
662 /*
663 * Handle cache congruency of kernel and userspace mappings of page when kernel
664 * writes-to/reads-from
665 *
666 * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
667 * -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
668 * -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
669 * -In SMP, if hardware caches are coherent
670 *
671 * There's a corollary case, where kernel READs from a userspace mapped page.
672 * If the U-mapping is not congruent to the K-mapping, the former needs flushing.
673 */
674 void flush_dcache_page(struct page *page)
675 {
676 struct address_space *mapping;
677
678 if (!cache_is_vipt_aliasing()) {
679 clear_bit(PG_dc_clean, &page->flags);
680 return;
681 }
682
683 /* don't handle anon pages here */
684 mapping = page_mapping(page);
685 if (!mapping)
686 return;
687
688 /*
689 * pagecache page, file not yet mapped to userspace
690 * Make a note that K-mapping is dirty
691 */
692 if (!mapping_mapped(mapping)) {
693 clear_bit(PG_dc_clean, &page->flags);
694 } else if (page_mapcount(page)) {
695
696 /* kernel reading from page with U-mapping */
697 phys_addr_t paddr = (unsigned long)page_address(page);
698 unsigned long vaddr = page->index << PAGE_SHIFT;
699
700 if (addr_not_cache_congruent(paddr, vaddr))
701 __flush_dcache_page(paddr, vaddr);
702 }
703 }
704 EXPORT_SYMBOL(flush_dcache_page);
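
/*
 * Illustrative sketch: addr_not_cache_congruent() is defined elsewhere; the
 * following is only a hypothetical stand-in for the idea, assuming
 * CACHE_COLORS_NUM colors of one page each. Two mappings are congruent iff
 * they agree in the color-selecting bits just above PAGE_SHIFT.
 */
static inline int addrs_share_color_example(unsigned long a1, unsigned long a2)
{
	unsigned long color_mask = ((unsigned long)CACHE_COLORS_NUM - 1) << PAGE_SHIFT;

	return ((a1 ^ a2) & color_mask) == 0;
}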
705
706 /*
707 * DMA ops for systems with L1 cache only
708 * Make memory coherent with L1 cache by flushing/invalidating L1 lines
709 */
710 static void __dma_cache_wback_inv_l1(phys_addr_t start, unsigned long sz)
711 {
712 __dc_line_op_k(start, sz, OP_FLUSH_N_INV);
713 }
714
715 static void __dma_cache_inv_l1(phys_addr_t start, unsigned long sz)
716 {
717 __dc_line_op_k(start, sz, OP_INV);
718 }
719
720 static void __dma_cache_wback_l1(phys_addr_t start, unsigned long sz)
721 {
722 __dc_line_op_k(start, sz, OP_FLUSH);
723 }
724
725 /*
726 * DMA ops for systems with both L1 and L2 caches, but without IOC
727 * Both L1 and L2 lines need to be explicitly flushed/invalidated
728 */
729 static void __dma_cache_wback_inv_slc(phys_addr_t start, unsigned long sz)
730 {
731 __dc_line_op_k(start, sz, OP_FLUSH_N_INV);
732 slc_op(start, sz, OP_FLUSH_N_INV);
733 }
734
735 static void __dma_cache_inv_slc(phys_addr_t start, unsigned long sz)
736 {
737 __dc_line_op_k(start, sz, OP_INV);
738 slc_op(start, sz, OP_INV);
739 }
740
741 static void __dma_cache_wback_slc(phys_addr_t start, unsigned long sz)
742 {
743 __dc_line_op_k(start, sz, OP_FLUSH);
744 slc_op(start, sz, OP_FLUSH);
745 }
746
747 /*
748 * DMA ops for systems with IOC
749 * IOC hardware snoops all DMA traffic keeping the caches consistent with
750 * memory - eliding need for any explicit cache maintenance of DMA buffers
751 */
752 static void __dma_cache_wback_inv_ioc(phys_addr_t start, unsigned long sz) {}
753 static void __dma_cache_inv_ioc(phys_addr_t start, unsigned long sz) {}
754 static void __dma_cache_wback_ioc(phys_addr_t start, unsigned long sz) {}
755
756 /*
757 * Exported DMA API
758 */
759 void dma_cache_wback_inv(phys_addr_t start, unsigned long sz)
760 {
761 __dma_cache_wback_inv(start, sz);
762 }
763 EXPORT_SYMBOL(dma_cache_wback_inv);
764
765 void dma_cache_inv(phys_addr_t start, unsigned long sz)
766 {
767 __dma_cache_inv(start, sz);
768 }
769 EXPORT_SYMBOL(dma_cache_inv);
770
771 void dma_cache_wback(phys_addr_t start, unsigned long sz)
772 {
773 __dma_cache_wback(start, sz);
774 }
775 EXPORT_SYMBOL(dma_cache_wback);
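
/*
 * Illustrative sketch of a typical non-coherent DMA pattern built on the
 * exported API above (buf/len are assumed to describe the DMA buffer; with
 * IOC present these resolve to the no-op variants at run time):
 */
static inline void dma_buf_roundtrip_example(phys_addr_t buf, unsigned long len)
{
	dma_cache_wback(buf, len);	/* CPU produced data; device will read */
	/* ... device DMA runs here ... */
	dma_cache_inv(buf, len);	/* device produced data; CPU will read */
}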
776
777 /*
778 * This is API for making I/D Caches consistent when modifying
779 * kernel code (loadable modules, kprobes, kgdb...)
780 * This is called on insmod, with kernel virtual address for CODE of
781 * the module. ARC cache maintenance ops require PHY address thus we
782 * need to convert vmalloc addr to PHY addr
783 */
784 void flush_icache_range(unsigned long kstart, unsigned long kend)
785 {
786 unsigned int tot_sz;
787
788 WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);
789
790 /* Shortcut for bigger flush ranges.
791 * Here we don't care if this was kernel virtual or phy addr
792 */
793 tot_sz = kend - kstart;
794 if (tot_sz > PAGE_SIZE) {
795 flush_cache_all();
796 return;
797 }
798
799 /* Case: Kernel Phy addr (0x8000_0000 onwards) */
800 if (likely(kstart > PAGE_OFFSET)) {
801 /*
802 * The 2nd arg despite being paddr will be used to index icache
803 * This is OK since no alternate virtual mappings will exist
804 * given the callers for this case: kprobe/kgdb in built-in
805 * kernel code only.
806 */
807 __sync_icache_dcache(kstart, kstart, kend - kstart);
808 return;
809 }
810
811 /*
812 * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
813 * (1) ARC Cache Maintenance ops only take Phy addr, hence special
814 * handling of kernel vaddr.
815 *
816 * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
817 * it still needs to handle a 2 page scenario, where the range
818 * straddles 2 virtual pages, hence the need for the loop
819 */
820 while (tot_sz > 0) {
821 unsigned int off, sz;
822 unsigned long phy, pfn;
823
824 off = kstart % PAGE_SIZE;
825 pfn = vmalloc_to_pfn((void *)kstart);
826 phy = (pfn << PAGE_SHIFT) + off;
827 sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
828 __sync_icache_dcache(phy, kstart, sz);
829 kstart += sz;
830 tot_sz -= sz;
831 }
832 }
833 EXPORT_SYMBOL(flush_icache_range);
834
835 /*
836 * General purpose helper to make I and D cache lines consistent.
837 * @paddr is phy addr of region
838 * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
839 * However in one instance, when called by kprobe (for a breakpt in
840 * builtin kernel code) @vaddr will be paddr only, meaning CDU operation will
841 * use a paddr to index the cache (despite VIPT). This is fine since a
842 * builtin kernel page will not have any virtual mappings.
843 * kprobe on loadable module will be kernel vaddr.
844 */
845 void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len)
846 {
847 __dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
848 __ic_line_inv_vaddr(paddr, vaddr, len);
849 }
850
851 /* wrapper to eliminate alignment checks in the flush loop at compile time */
852 void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr)
853 {
854 __ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
855 }
856
857 /*
858 * wrapper to clear out kernel or userspace mappings of a page
859 * For kernel mappings @vaddr == @paddr
860 */
861 void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr)
862 {
863 __dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
864 }
865
866 noinline void flush_cache_all(void)
867 {
868 unsigned long flags;
869
870 local_irq_save(flags);
871
872 __ic_entire_inv();
873 __dc_entire_op(OP_FLUSH_N_INV);
874
875 local_irq_restore(flags);
876
877 }
878
879 #ifdef CONFIG_ARC_CACHE_VIPT_ALIASING
880
881 void flush_cache_mm(struct mm_struct *mm)
882 {
883 flush_cache_all();
884 }
885
886 void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
887 unsigned long pfn)
888 {
889 unsigned int paddr = pfn << PAGE_SHIFT;
890
891 u_vaddr &= PAGE_MASK;
892
893 __flush_dcache_page(paddr, u_vaddr);
894
895 if (vma->vm_flags & VM_EXEC)
896 __inv_icache_page(paddr, u_vaddr);
897 }
898
899 void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
900 unsigned long end)
901 {
902 flush_cache_all();
903 }
904
905 void flush_anon_page(struct vm_area_struct *vma, struct page *page,
906 unsigned long u_vaddr)
907 {
908 /* TBD: do we really need to clear the kernel mapping */
909 __flush_dcache_page(page_address(page), u_vaddr);
910 __flush_dcache_page(page_address(page), page_address(page));
911
912 }
913
914 #endif
915
916 void copy_user_highpage(struct page *to, struct page *from,
917 unsigned long u_vaddr, struct vm_area_struct *vma)
918 {
919 void *kfrom = kmap_atomic(from);
920 void *kto = kmap_atomic(to);
921 int clean_src_k_mappings = 0;
922
923 /*
924 * If SRC page was already mapped in userspace AND its U-mapping is
925 * not congruent with K-mapping, sync the former to the physical page so
926 * that the K-mapping in the memcpy below sees the right data
927 *
928 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
929 * equally valid for SRC page as well
930 *
931 * For !VIPT cache, all of this gets compiled out as
932 * addr_not_cache_congruent() is 0
933 */
934 if (page_mapcount(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
935 __flush_dcache_page((unsigned long)kfrom, u_vaddr);
936 clean_src_k_mappings = 1;
937 }
938
939 copy_page(kto, kfrom);
940
941 /*
942 * Mark DST page K-mapping as dirty for a later finalization by
943 * update_mmu_cache(). Although the finalization could have been done
944 * here as well (given that both vaddr/paddr are available).
945 * But update_mmu_cache() already has code to do that for other
946 * non copied user pages (e.g. read faults which wire in pagecache page
947 * directly).
948 */
949 clear_bit(PG_dc_clean, &to->flags);
950
951 /*
952 * if SRC was already usermapped and non-congruent to kernel mapping
953 * sync the kernel mapping back to physical page
954 */
955 if (clean_src_k_mappings) {
956 __flush_dcache_page((unsigned long)kfrom, (unsigned long)kfrom);
957 set_bit(PG_dc_clean, &from->flags);
958 } else {
959 clear_bit(PG_dc_clean, &from->flags);
960 }
961
962 kunmap_atomic(kto);
963 kunmap_atomic(kfrom);
964 }
965
966 void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
967 {
968 clear_page(to);
969 clear_bit(PG_dc_clean, &page->flags);
970 }
971
972
973 /**********************************************************************
974 * Explicit Cache flush request from user space via syscall
975 * Needed for JITs which generate code on the fly
976 */
977 SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
978 {
979 /* TBD: optimize this */
980 flush_cache_all();
981 return 0;
982 }
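
/*
 * Illustrative sketch of how a userspace JIT might invoke the syscall
 * above, assuming the arch exposes it as __NR_cacheflush (check the uapi
 * headers); kept as a comment since it is not kernel code:
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	static void jit_sync_code(void *code, unsigned int len)
 *	{
 *		syscall(__NR_cacheflush, (unsigned long)code, len, 0);
 *	}
 */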
983
984 /*
985 * IO-Coherency (IOC) setup rules:
986 *
987 * 1. Needs to be at system level, so only once by Master core
988 * Non-Masters need not be accessing caches at that time
989 * - They are either HALT_ON_RESET and kick started much later or
990 * - if run on reset, need to ensure that arc_platform_smp_wait_to_boot()
991 * doesn't perturb caches or coherency unit
992 *
993 * 2. caches (L1 and SLC) need to be purged (flush+inv) before setting up IOC,
994 * otherwise any straggler data might behave strangely post IOC enabling
995 *
996 * 3. All Caches need to be disabled when setting up IOC to elide any in-flight
997 * Coherency transactions
998 */
999 noinline void __init arc_ioc_setup(void)
1000 {
1001 unsigned int ap_sz;
1002
1003 /* Flush + invalidate + disable L1 dcache */
1004 __dc_disable();
1005
1006 /* Flush + invalidate SLC */
1007 if (read_aux_reg(ARC_REG_SLC_BCR))
1008 slc_entire_op(OP_FLUSH_N_INV);
1009
1010 /* IOC Aperture start: TBD: handle non-default CONFIG_LINUX_LINK_BASE */
1011 write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
1012
1013 /*
1014 * IOC Aperture size:
1015 * decoded as 2 ^ (SIZE + 2) KB: so setting 0x11 implies 512M
1016 * TBD: fix for PGU + 1GB of low mem
1017 * TBD: fix for PAE
1018 */
1019 ap_sz = order_base_2(arc_get_mem_sz()/1024) - 2;
1020 write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, ap_sz);
1021
1022 write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
1023 write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);
1024
1025 /* Re-enable L1 dcache */
1026 __dc_enable();
1027 }
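
/*
 * Illustrative sketch of the aperture-size encoding used above, decoded by
 * hardware as 2 ^ (SIZE + 2) KB. For 512MB of memory:
 *	order_base_2(512M / 1024) - 2 = 19 - 2 = 17 = 0x11
 * and indeed 2 ^ (0x11 + 2) KB = 2^19 KB = 512M.
 */
static inline unsigned int ioc_ap_sz_example(unsigned long mem_sz_bytes)
{
	return order_base_2(mem_sz_bytes / 1024) - 2;
}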
1028
1029 void __init arc_cache_init_master(void)
1030 {
1031 unsigned int __maybe_unused cpu = smp_processor_id();
1032
1033 if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
1034 struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
1035
1036 if (!ic->line_len)
1037 panic("cache support enabled but non-existent cache\n");
1038
1039 if (ic->line_len != L1_CACHE_BYTES)
1040 panic("ICache line [%d] != kernel Config [%d]",
1041 ic->line_len, L1_CACHE_BYTES);
1042
1043 /*
1044 * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
1045 * pair to provide vaddr/paddr respectively, just as in MMU v3
1046 */
1047 if (is_isa_arcv2() && ic->alias)
1048 _cache_line_loop_ic_fn = __cache_line_loop_v3;
1049 else
1050 _cache_line_loop_ic_fn = __cache_line_loop;
1051 }
1052
1053 if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
1054 struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
1055
1056 if (!dc->line_len)
1057 panic("cache support enabled but non-existent cache\n");
1058
1059 if (dc->line_len != L1_CACHE_BYTES)
1060 panic("DCache line [%d] != kernel Config [%d]",
1061 dc->line_len, L1_CACHE_BYTES);
1062
1063 /* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
1064 if (is_isa_arcompact()) {
1065 int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
1066 int num_colors = dc->sz_k/dc->assoc/TO_KB(PAGE_SIZE);
1067
1068 if (dc->alias) {
1069 if (!handled)
1070 panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
1071 if (CACHE_COLORS_NUM != num_colors)
1072 panic("CACHE_COLORS_NUM not optimized for config\n");
1073 } else if (!dc->alias && handled) {
1074 panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
1075 }
1076 }
1077 }
1078
1079 /* Note that SLC disable is not formally supported till HS 3.0 */
1080 if (is_isa_arcv2() && l2_line_sz && !slc_enable)
1081 arc_slc_disable();
1082
1083 if (is_isa_arcv2() && ioc_enable)
1084 arc_ioc_setup();
1085
1086 if (is_isa_arcv2() && ioc_enable) {
1087 __dma_cache_wback_inv = __dma_cache_wback_inv_ioc;
1088 __dma_cache_inv = __dma_cache_inv_ioc;
1089 __dma_cache_wback = __dma_cache_wback_ioc;
1090 } else if (is_isa_arcv2() && l2_line_sz && slc_enable) {
1091 __dma_cache_wback_inv = __dma_cache_wback_inv_slc;
1092 __dma_cache_inv = __dma_cache_inv_slc;
1093 __dma_cache_wback = __dma_cache_wback_slc;
1094 } else {
1095 __dma_cache_wback_inv = __dma_cache_wback_inv_l1;
1096 __dma_cache_inv = __dma_cache_inv_l1;
1097 __dma_cache_wback = __dma_cache_wback_l1;
1098 }
1099 }
1100
1101 void __ref arc_cache_init(void)
1102 {
1103 unsigned int __maybe_unused cpu = smp_processor_id();
1104 char str[256];
1105
1106 printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
1107
1108 /*
1109 * Only master CPU needs to execute rest of function:
1110 * - Assume SMP so all cores will have the same cache config, hence
1111 * any geometry checks will be the same for all
1112 * - IOC setup / dma callbacks only need to be setup once
1113 */
1114 if (!cpu)
1115 arc_cache_init_master();
1116 }