]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - arch/arm64/mm/cache.S
Replace <asm/uaccess.h> with <linux/uaccess.h> globally
[mirror_ubuntu-artful-kernel.git] / arch / arm64 / mm / cache.S
CommitLineData
f1a0c4aa
CM
1/*
2 * Cache maintenance
3 *
4 * Copyright (C) 2001 Deep Blue Solutions Ltd.
5 * Copyright (C) 2012 ARM Ltd.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
a2d25a53 20#include <linux/errno.h>
f1a0c4aa
CM
21#include <linux/linkage.h>
22#include <linux/init.h>
23#include <asm/assembler.h>
301bcfac 24#include <asm/cpufeature.h>
8d883b23 25#include <asm/alternative.h>
7c0f6ba6 26#include <linux/uaccess.h>
f1a0c4aa 27
f1a0c4aa
CM
28/*
 29 * flush_icache_range(start,end)
 30 *
 31 * Ensure that the I and D caches are coherent within specified region.
 32 * This is typically used when code has been written to a memory region,
 33 * and will be executed.
 34 *
 35 * - start - virtual start address of region
 36 * - end - virtual end address of region
 37 */
38ENTRY(flush_icache_range)
 39 /* FALLTHROUGH */
 40
41/*
 42 * __flush_cache_user_range(start,end)
 43 *
 44 * Ensure that the I and D caches are coherent within specified region.
 45 * This is typically used when code has been written to a memory region,
 46 * and will be executed.
 47 *
 48 * - start - virtual start address of region
 49 * - end - virtual end address of region
 50 *
 51 * Returns 0 in x0 on success, or -EFAULT if a cache op faults on a
 52 * user address (see the fixup at label 9 below).
 53 */
51ENTRY(__flush_cache_user_range)
// Enable user-space access through TTBR0 for the duration of the
// maintenance loops; x2/x3 are scratch for the macro.
39bc88e5 52 uaccess_ttbr0_enable x2, x3
f1a0c4aa
CM
// x2 = D-cache line size, x3 = line mask, x4 = start rounded down to a line.
53 dcache_line_size x2, x3
 54 sub x3, x2, #1
 55 bic x4, x0, x3
561:
// Clean each D-cache line to the PoU so the I-side will observe the new
// code.  Under the ARM64_WORKAROUND_CLEAN_CACHE erratum the alternative
// replaces "dc cvau" with "dc civac" (clean+invalidate).  A fault in
// either instruction is fixed up at label 9.
290622ef 57user_alt 9f, "dc cvau, x4", "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE
f1a0c4aa
CM
 58 add x4, x4, x2
 59 cmp x4, x1
 60 b.lo 1b
// Complete all D-side maintenance before starting on the I-side.
dc60b777 61 dsb ish
f1a0c4aa
CM
 62
// Invalidate the I-cache over the same [start, end) range, line by line.
 63 icache_line_size x2, x3
 64 sub x3, x2, #1
 65 bic x4, x0, x3
661:
67USER(9f, ic ivau, x4 ) // invalidate I line PoU
 68 add x4, x4, x2
 69 cmp x4, x1
 70 b.lo 1b
// Wait for the invalidations, then resynchronise the instruction stream.
dc60b777 71 dsb ish
f1a0c4aa 72 isb
// Success path: return value 0.
a2d25a53 73 mov x0, #0
39bc88e5
CM
// Common exit for both success and fault paths: drop the temporary
// user-space access again (x1 is scratch for the macro) and return with
// x0 already holding 0 or -EFAULT.
741:
 75 uaccess_ttbr0_disable x1
a2d25a53
VM
 76 ret
// Fault fixup target for the user-address cache ops above.
779:
 78 mov x0, #-EFAULT
39bc88e5 79 b 1b
f1a0c4aa
CM
80ENDPROC(flush_icache_range)
 81ENDPROC(__flush_cache_user_range)
82
83/*
03324e6e 84 * __flush_dcache_area(kaddr, size)
f1a0c4aa 85 *
0a28714c
AK
86 * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 87 * are cleaned and invalidated to the PoC.
f1a0c4aa
CM
88 *
 89 * - kaddr - kernel address
 90 * - size - size in question
 91 */
92ENTRY(__flush_dcache_area)
// "dc civac" (clean + invalidate to PoC) over every line covering
// [x0, x0+x1); "sy" is the barrier domain used by the macro, x2/x3 are
// scratch (macro from asm/assembler.h).
0a28714c 93 dcache_by_line_op civac, sy, x0, x1, x2, x3
f1a0c4aa 94 ret
20791846 95ENDPIPROC(__flush_dcache_area)
7363590d 96
0a28714c
AK
97/*
 98 * __clean_dcache_area_pou(kaddr, size)
 99 *
 100 * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 101 * are cleaned to the PoU.
 102 *
 103 * - kaddr - kernel address
 104 * - size - size in question
 105 */
106ENTRY(__clean_dcache_area_pou)
// "dc cvau" (clean to PoU, no invalidate) over every line covering
// [x0, x0+x1); "ish" is the barrier domain used by the macro, x2/x3
// are scratch.
107 dcache_by_line_op cvau, ish, x0, x1, x2, x3
 108 ret
109ENDPROC(__clean_dcache_area_pou)
110
c218bca7 111/*
d34fdb70
KL
112 * __dma_inv_area(start, size)
 113 * - start - virtual start address of region
 114 * - size - size in question
 115 *
 116 * Converts (start, size) to (start, end) in x0/x1 and falls through
 117 * into __inval_cache_range below.
c218bca7 115 */
d34fdb70
KL
116__dma_inv_area:
 117 add x1, x1, x0
c218bca7
CM
118 /* FALLTHROUGH */
 119
7363590d 120/*
d34fdb70
KL
121 * __inval_cache_range(start, end)
 122 * - start - start address of region
 123 * - end - end address of region
 124 *
 125 * Lines fully inside the range are invalidated ("dc ivac"); a line
 126 * that straddles either boundary is cleaned+invalidated ("dc civac")
 127 * instead, so that unrelated data sharing that line is not lost.
7363590d 124 */
d34fdb70 125ENTRY(__inval_cache_range)
7363590d
CM
126 dcache_line_size x2, x3
 127 sub x3, x2, #1
ebf81a93 128 tst x1, x3 // end cache line aligned?
7363590d 129 bic x1, x1, x3
ebf81a93
CM
130 b.eq 1f
 131 dc civac, x1 // clean & invalidate D / U line
1321: tst x0, x3 // start cache line aligned?
 133 bic x0, x0, x3
 134 b.eq 2f
 135 dc civac, x0 // clean & invalidate D / U line
 136 b 3f
// Loop body: interior lines are invalidate-only.
1372: dc ivac, x0 // invalidate D / U line
1383: add x0, x0, x2
7363590d 139 cmp x0, x1
ebf81a93 140 b.lo 2b
7363590d
CM
// Make the maintenance visible system-wide before returning.
141 dsb sy
 142 ret
20791846 143ENDPIPROC(__inval_cache_range)
d34fdb70
KL
144ENDPROC(__dma_inv_area)
145
146/*
 147 * __clean_dcache_area_poc(kaddr, size)
 148 *
 149 * Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 150 * are cleaned to the PoC.
 151 *
 152 * - kaddr - kernel address
 153 * - size - size in question
 154 */
155ENTRY(__clean_dcache_area_poc)
// Same implementation as __dma_clean_area; shared via fall-through.
156 /* FALLTHROUGH */
7363590d
CM
157
158/*
d34fdb70 159 * __dma_clean_area(start, size)
7363590d 160 * - start - virtual start address of region
d34fdb70 161 * - size - size in question
7363590d 162 */
d34fdb70
KL
163__dma_clean_area:
// "dc cvac" (clean to PoC, no invalidate) over every line covering
// [x0, x0+x1); "sy" is the barrier domain used by the macro, x2/x3
// are scratch.
164 dcache_by_line_op cvac, sy, x0, x1, x2, x3
7363590d 165 ret
d34fdb70
KL
166ENDPIPROC(__clean_dcache_area_poc)
 167ENDPROC(__dma_clean_area)
7363590d
CM
168
169/*
d34fdb70
KL
170 * __dma_flush_area(start, size)
 171 *
 172 * clean & invalidate D / U line
 173 *
7363590d 174 * - start - virtual start address of region
d34fdb70 175 * - size - size in question
7363590d 176 */
d34fdb70
KL
177ENTRY(__dma_flush_area)
// "dc civac" (clean + invalidate to PoC) over every line covering
// [x0, x0+x1); "sy" barrier domain, x2/x3 scratch.
178 dcache_by_line_op civac, sy, x0, x1, x2, x3
7363590d 179 ret
d34fdb70 180ENDPIPROC(__dma_flush_area)
7363590d
CM
181
182/*
 183 * __dma_map_area(start, size, dir)
 184 * - start - kernel virtual start address
 185 * - size - size of region
 186 * - dir - DMA direction
 187 */
188ENTRY(__dma_map_area)
// DMA_FROM_DEVICE: the device will write the buffer, so invalidate any
// stale CPU cache lines.  All other directions: clean dirty lines out
// to memory so the device reads current data (tail calls; x0/x1 are
// already (start, size) for both helpers).
7363590d 189 cmp w2, #DMA_FROM_DEVICE
d34fdb70
KL
190 b.eq __dma_inv_area
 191 b __dma_clean_area
20791846 192ENDPIPROC(__dma_map_area)
7363590d
CM
193
194/*
 195 * __dma_unmap_area(start, size, dir)
 196 * - start - kernel virtual start address
 197 * - size - size of region
 198 * - dir - DMA direction
 199 */
200ENTRY(__dma_unmap_area)
// Unless the buffer was DMA_TO_DEVICE only, invalidate the CPU caches
// so lines (speculatively) filled while the device owned the buffer are
// discarded.  DMA_TO_DEVICE needs no maintenance on unmap: just return.
7363590d 201 cmp w2, #DMA_TO_DEVICE
d34fdb70 202 b.ne __dma_inv_area
7363590d 203 ret
20791846 204ENDPIPROC(__dma_unmap_area)