/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/arch/arm/mm/cache-v4wt.S
 *
 * Copyright (C) 1997-2002 Russell King
 *
 * ARMv4 write through cache operations support.
 *
 * We assume that the write buffer is not enabled.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	8

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 *
 * *** This needs benchmarking
 */
#define CACHE_DLIMIT	16384

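/*
 * Note: CACHE_DSEGMENTS * CACHE_DENTRIES * CACHE_DLINESIZE
 * = 8 * 64 * 32 = 16384 bytes, so the limit above presumably
 * matches the total size of the data cache.
 */
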
/*
 * flush_icache_all()
 *
 * Unconditionally clean and invalidate the entire icache.
 */
ENTRY(v4wt_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(v4wt_flush_icache_all)

/*
 * flush_user_cache_all()
 *
 * Invalidate all cache entries in a particular address
 * space.
 */
ENTRY(v4wt_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(v4wt_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
	ret	lr

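/*
 * Note: flush_kern_cache_all loads VM_EXEC into r2 before falling
 * into __flush_whole_cache, so the "tst r2, #VM_EXEC" above always
 * succeeds for the kernel variant and the I cache is invalidated as
 * well; callers reaching __flush_whole_cache from
 * v4wt_flush_user_cache_range pass the vma flags in r2 instead.
 */
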
/*
 * flush_user_cache_range(start, end, flags)
 *
 * Clean and invalidate a range of cache entries in the specified
 * address space.
 *
 * - start - start address (inclusive, page aligned)
 * - end   - end address (exclusive, page aligned)
 * - flags - vm_area_struct flags describing address space
 */
ENTRY(v4wt_flush_user_cache_range)
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bhs	__flush_whole_cache

1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	ret	lr

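/*
 * Note: the comparison above uses bhs (unsigned >=), so a range of
 * exactly CACHE_DLIMIT bytes also takes the whole-cache path rather
 * than the per-line loop.
 */
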
/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(v4wt_coherent_kern_range)
	/* FALLTHROUGH */

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
ENTRY(v4wt_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	ret	lr

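/*
 * Note: with a write-through D cache the data is already visible in
 * memory, so only the I cache needs invalidating over the range; the
 * final "mov r0, #0" appears to report success to the caller.
 */
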
/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache
 *
 * - addr - kernel address
 * - size - region size
 */
ENTRY(v4wt_flush_kern_dcache_area)
	mov	r2, #0
	mcr	p15, 0, r2, c7, c5, 0		@ invalidate I cache
	add	r1, r0, r1
	/* fallthrough */

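/*
 * Note: "add r1, r0, r1" converts the (addr, size) arguments into the
 * (start, end) pair expected by v4wt_dma_inv_range, which the code
 * then falls through into.
 */
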
/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
v4wt_dma_inv_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	ret	lr

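/*
 * Note: "bic" rounds the start address down to a cache line boundary.
 * With a write-through cache there are never dirty lines to preserve,
 * so invalidating partially covered lines loses no data.
 */
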
/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start - virtual start address
 * - end   - virtual end address
 */
	.globl	v4wt_dma_flush_range
	.equ	v4wt_dma_flush_range, v4wt_dma_inv_range

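/*
 * Note: flush is simply aliased to invalidate here, presumably because
 * cleaning a write-through cache is a no-op.
 */
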
/*
 * dma_unmap_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
ENTRY(v4wt_dma_unmap_area)
	add	r1, r1, r0
	teq	r2, #DMA_TO_DEVICE
	bne	v4wt_dma_inv_range
	/* FALLTHROUGH */

/*
 * dma_map_area(start, size, dir)
 * - start - kernel virtual start address
 * - size  - size of region
 * - dir   - DMA direction
 */
ENTRY(v4wt_dma_map_area)
	ret	lr
ENDPROC(v4wt_dma_unmap_area)
ENDPROC(v4wt_dma_map_area)

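/*
 * Note: dma_map_area can be a no-op because the write-through cache
 * never holds data that memory lacks; dma_unmap_area invalidates the
 * range for any direction other than DMA_TO_DEVICE, so the CPU
 * re-reads data the device may have written.
 */
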
	.globl	v4wt_flush_kern_cache_louis
	.equ	v4wt_flush_kern_cache_louis, v4wt_flush_kern_cache_all

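/*
 * Note: the LoUIS (Level of Unification Inner Shareable) flush is
 * mapped onto flush_kern_cache_all, presumably because this single
 * cache level is all there is to flush.
 */
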
	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions v4wt
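
	@ Note: define_cache_functions (from proc-macros.S) presumably emits
	@ the v4wt_cache_fns function-pointer table referencing the v4wt_*
	@ routines above, placed in init data by the __INITDATA above.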