arch/arm64/include/asm/tlbflush.h (mirror_ubuntu-hirsute-kernel.git)
arm64: Work around Falkor erratum 1009

/*
 * Based on arch/arm/include/asm/tlbflush.h
 *
 * Copyright (C) 1999-2003 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H

#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <asm/cputype.h>

/*
 * Raw TLBI operations.
 *
 * Where necessary, use the __tlbi() macro to avoid asm()
 * boilerplate. Drivers and most kernel code should use the TLB
 * management routines in preference to the macro below.
 *
 * The macro can be used as __tlbi(op) or __tlbi(op, arg), depending
 * on whether a particular TLBI operation takes an argument or
 * not. The macro handles invoking the asm with or without the
 * register argument as appropriate.
 */
#define __TLBI_0(op, arg)	asm ("tlbi " #op "\n"			\
		ALTERNATIVE("nop\n nop",				\
			    "dsb ish\n tlbi " #op,			\
			    ARM64_WORKAROUND_REPEAT_TLBI,		\
			    CONFIG_QCOM_FALKOR_ERRATUM_1009)		\
		: : )

#define __TLBI_1(op, arg)	asm ("tlbi " #op ", %0\n"		\
		ALTERNATIVE("nop\n nop",				\
			    "dsb ish\n tlbi " #op ", %0",		\
			    ARM64_WORKAROUND_REPEAT_TLBI,		\
			    CONFIG_QCOM_FALKOR_ERRATUM_1009)		\
		: : "r" (arg))

#define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg)

#define __tlbi(op, ...)		__TLBI_N(op, ##__VA_ARGS__, 1, 0)

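/*
 * Illustration (not part of this file's API): on a kernel built with
 * CONFIG_QCOM_FALKOR_ERRATUM_1009 and running on an affected CPU
 * (ARM64_WORKAROUND_REPEAT_TLBI set), __tlbi(vale1is, addr) is patched
 * at boot into roughly:
 *
 *	tlbi	vale1is, x0	// x0 = addr
 *	dsb	ish
 *	tlbi	vale1is, x0
 *
 * On unaffected systems the alternative is left as two NOPs, so the
 * same call site costs a single TLBI.
 */
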
/*
 *	TLB Management
 *	==============
 *
 *	The TLB specific code is expected to perform whatever tests it needs
 *	to determine if it should invalidate the TLB for each call. Start
 *	addresses are inclusive and end addresses are exclusive; it is safe to
 *	round these addresses down.
 *
 *	flush_tlb_all()
 *
 *		Invalidate the entire TLB.
 *
 *	flush_tlb_mm(mm)
 *
 *		Invalidate all TLB entries in a particular address space.
 *		- mm	- mm_struct describing address space
 *
 *	flush_tlb_range(vma,start,end)
 *
 *		Invalidate a range of TLB entries in the specified address
 *		space.
 *		- vma	- vm_area_struct describing address range
 *		- start	- start address (may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 *
 *	flush_tlb_page(vma,vaddr)
 *
 *		Invalidate the specified page in the specified address space.
 *		- vma	- vm_area_struct describing address range
 *		- vaddr	- virtual address (may not be aligned)
 *
 *	flush_tlb_kernel_range(start,end)
 *
 *		Invalidate the TLB entries for the specified range of kernel
 *		virtual addresses.
 *		- start	- start address (may not be aligned)
 *		- end	- end address (exclusive, may not be aligned)
 */
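
/*
 * The operand of the VA-based TLBI instructions used below packs the
 * page number (VA >> 12, i.e. VA[55:12]) into bits [43:0] and the ASID
 * into bits [63:48], which is why the helpers build their argument as
 * "uaddr >> 12 | (ASID(mm) << 48)".
 */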
static inline void local_flush_tlb_all(void)
{
	dsb(nshst);
	__tlbi(vmalle1);
	dsb(nsh);
	isb();
}

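/*
 * Note: local_flush_tlb_all() pairs a non-broadcast TLBI (vmalle1)
 * with non-shareable (nsh) barriers, so it only affects the calling
 * CPU. The "is" variants below broadcast the invalidation to all CPUs
 * in the Inner Shareable domain and use ish barriers accordingly.
 */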
static inline void flush_tlb_all(void)
{
	dsb(ishst);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long asid = ASID(mm) << 48;

	dsb(ishst);
	__tlbi(aside1is, asid);
	dsb(ish);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long uaddr)
{
	unsigned long addr = uaddr >> 12 | (ASID(vma->vm_mm) << 48);

	dsb(ishst);
	__tlbi(vale1is, addr);
	dsb(ish);
}

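/*
 * Sketch of a typical (hypothetical) caller: after the PTE for a user
 * page is cleared, the stale entry is shot down with flush_tlb_page():
 *
 *	pte_t old = ptep_get_and_clear(vma->vm_mm, uaddr, ptep);
 *	flush_tlb_page(vma, uaddr);
 *
 * Real callers live in the core mm code; this only shows how the
 * vma/address pair maps onto the ASID-tagged TLBI above.
 */
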
/*
 * Flushing a large range one page at a time risks soft lock-ups, so
 * ranges bigger than MAX_TLB_RANGE fall back to a full flush. This is
 * a safeguard rather than a performance optimisation.
 */
#define MAX_TLB_RANGE	(1024UL << PAGE_SHIFT)

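/*
 * last_level below selects the "leaf only" invalidate (vale1is), which
 * is sufficient when only last-level (PTE) entries may have changed;
 * vae1is additionally invalidates cached intermediate (walk cache)
 * entries for the address.
 */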
static inline void __flush_tlb_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end,
				     bool last_level)
{
	unsigned long asid = ASID(vma->vm_mm) << 48;
	unsigned long addr;

	if ((end - start) > MAX_TLB_RANGE) {
		flush_tlb_mm(vma->vm_mm);
		return;
	}

	start = asid | (start >> 12);
	end = asid | (end >> 12);

	dsb(ishst);
	/* the TLBI operand encodes VA >> 12, so step in page-sized units */
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
		if (last_level)
			__tlbi(vale1is, addr);
		else
			__tlbi(vae1is, addr);
	}
	dsb(ish);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__flush_tlb_range(vma, start, end, false);
}

static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	if ((end - start) > MAX_TLB_RANGE) {
		flush_tlb_all();
		return;
	}

	start >>= 12;
	end >>= 12;

	dsb(ishst);
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
		__tlbi(vaae1is, addr);
	dsb(ish);
	isb();
}

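/*
 * Sketch of a hypothetical caller: after tearing down a kernel mapping
 * (e.g. in a vunmap()-style path), the stale entries for the range are
 * invalidated on all CPUs:
 *
 *	remove_kernel_mapping(addr, size);	// hypothetical helper
 *	flush_tlb_kernel_range(addr, addr + size);
 */
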
/*
 * Used to invalidate the TLB (walk caches) corresponding to intermediate page
 * table levels (pgd/pud/pmd).
 */
static inline void __flush_tlb_pgtable(struct mm_struct *mm,
				       unsigned long uaddr)
{
	unsigned long addr = uaddr >> 12 | (ASID(mm) << 48);

	__tlbi(vae1is, addr);
	dsb(ish);
}

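/*
 * A single vae1is suffices here because a VA-based (non-leaf-only)
 * invalidate also removes any cached intermediate-level entries used
 * to translate that address, so one representative address per freed
 * table is enough to scrub the walk caches.
 */
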
#endif /* !__ASSEMBLY__ */

#endif /* __ASM_TLBFLUSH_H */