/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_KUP_H
#define _ASM_POWERPC_BOOK3S_64_KUP_H

#include <linux/const.h>
#include <asm/reg.h>

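/*
 * Each storage key owns a two-bit field in the AMR/IAMR (32 keys in a
 * 64-bit register). Per the masks below, the high bit of a pair blocks
 * writes and the low bit blocks reads; in the IAMR the low bit blocks
 * instruction fetch. Every key is blocked except key 3, whose field is
 * left clear in all three patterns (key 3 appears to be reserved for
 * kernel mappings with hash translation, so it must stay accessible).
 */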
#define AMR_KUAP_BLOCK_READ	UL(0x5455555555555555)
#define AMR_KUAP_BLOCK_WRITE	UL(0xa8aaaaaaaaaaaaaa)
#define AMR_KUEP_BLOCKED	UL(0x5455555555555555)
#define AMR_KUAP_BLOCKED	(AMR_KUAP_BLOCK_READ | AMR_KUAP_BLOCK_WRITE)

#ifdef __ASSEMBLY__

.macro kuap_user_restore gpr1, gpr2
#if defined(CONFIG_PPC_PKEY)
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	b	100f  // skip_restore_amr
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)
	/*
	 * AMR and IAMR are going to be different when
	 * returning to userspace.
	 */
	ld	\gpr1, STACK_REGS_AMR(r1)

	/*
	 * If the kuap feature is not enabled, only do the mtspr
	 * if the AMR value differs.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(68)
	mfspr	\gpr2, SPRN_AMR
	cmpd	\gpr1, \gpr2
	beq	99f
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUAP, 68)

	isync
	mtspr	SPRN_AMR, \gpr1
99:
	/*
	 * Restore IAMR only when returning to userspace.
	 */
	ld	\gpr1, STACK_REGS_IAMR(r1)

	/*
	 * If the kuep feature is not enabled, only do the mtspr
	 * if the IAMR value differs.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(69)
	mfspr	\gpr2, SPRN_IAMR
	cmpd	\gpr1, \gpr2
	beq	100f
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUEP, 69)

	isync
	mtspr	SPRN_IAMR, \gpr1

100:  // skip_restore_amr
	/* No isync required, see kuap_user_restore() */
#endif
.endm

.macro kuap_kernel_restore gpr1, gpr2
#if defined(CONFIG_PPC_PKEY)

	BEGIN_MMU_FTR_SECTION_NESTED(67)
	/*
	 * AMR is going to be mostly the same since we are
	 * returning to the kernel, so compare and only do the
	 * mtspr if the value differs.
	 */
	ld	\gpr2, STACK_REGS_AMR(r1)
	mfspr	\gpr1, SPRN_AMR
	cmpd	\gpr1, \gpr2
	beq	100f
	isync
	mtspr	SPRN_AMR, \gpr2
	/*
	 * No isync required, see kuap_kernel_restore().
	 * No need to restore IAMR when returning to kernel space.
	 */
100:
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67)
#endif
.endm

#ifdef CONFIG_PPC_KUAP
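/*
 * Debug-only sanity check: on entry the AMR is expected to already hold
 * AMR_KUAP_BLOCKED. The tdne traps if it does not, and the bug entry
 * turns the trap into a one-shot warning.
 */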
.macro kuap_check_amr gpr1, gpr2
#ifdef CONFIG_PPC_KUAP_DEBUG
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	mfspr	\gpr1, SPRN_AMR
	/* Prevent access to userspace using any key values */
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
999:	tdne	\gpr1, \gpr2
	EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67)
#endif
.endm
#endif

/*
 * if (pkey) {
 *	save AMR -> stack;
 *	if (kuap) {
 *		if (AMR != BLOCKED)
 *			KUAP_BLOCKED -> AMR;
 *	}
 *	if (from_user) {
 *		save IAMR -> stack;
 *		if (kuep) {
 *			KUEP_BLOCKED -> IAMR;
 *		}
 *	}
 *	return;
 * }
 *
 * if (kuap) {
 *	if (from_kernel) {
 *		save AMR -> stack;
 *		if (AMR != BLOCKED)
 *			KUAP_BLOCKED -> AMR;
 *	}
 * }
 */
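/*
 * Arguments: \gpr1 and \gpr2 are scratch GPRs, \use_cr is the CR field
 * used for the AMR comparison, and \msr_pr_cr, when non-blank, names a
 * CR field that already holds the MSR_PR (entered-from-userspace) test.
 * Callers that are known to enter from the kernel leave \msr_pr_cr
 * blank, which skips the user-entry checks and the IAMR save entirely.
 */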
.macro kuap_save_amr_and_lock gpr1, gpr2, use_cr, msr_pr_cr
#if defined(CONFIG_PPC_PKEY)

	/*
	 * If both pkey and kuap are disabled, there is nothing to do.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(68)
	b	100f  // skip_save_amr
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY | MMU_FTR_BOOK3S_KUAP, 68)

	/*
	 * If pkey is disabled and we are entering from userspace,
	 * don't do anything.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	.ifnb \msr_pr_cr
	/*
	 * Without pkey we are not changing the AMR outside the kernel,
	 * hence skip this completely.
	 */
	bne	\msr_pr_cr, 100f  // from userspace
	.endif
	END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY, 67)

	/*
	 * Either pkey is enabled, or pkey is disabled but we are
	 * entering from the kernel.
	 */
	mfspr	\gpr1, SPRN_AMR
	std	\gpr1, STACK_REGS_AMR(r1)

	/*
	 * Update the kernel AMR with AMR_KUAP_BLOCKED only
	 * if the KUAP feature is enabled.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(69)
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
	cmpd	\use_cr, \gpr1, \gpr2
	beq	\use_cr, 102f
	/*
	 * No isync is needed before the mtspr because we very
	 * recently entered via an interrupt.
	 */
	mtspr	SPRN_AMR, \gpr2
	isync
102:
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 69)

	/*
	 * If entering from the kernel we don't need to save the IAMR.
	 */
	.ifnb \msr_pr_cr
	beq	\msr_pr_cr, 100f  // from kernel space
	mfspr	\gpr1, SPRN_IAMR
	std	\gpr1, STACK_REGS_IAMR(r1)

	/*
	 * Update the kernel IAMR with AMR_KUEP_BLOCKED only
	 * if the KUEP feature is enabled.
	 */
	BEGIN_MMU_FTR_SECTION_NESTED(70)
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUEP_BLOCKED)
	mtspr	SPRN_IAMR, \gpr2
	isync
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUEP, 70)
	.endif

100:  // skip_save_amr
#endif
.endm
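
/*
 * The macros above are used by the 64s exception entry assembly (see
 * the kuap_save_amr_and_lock callers in exceptions-64s.S); the C
 * helpers below serve the same roles on the C interrupt entry/exit
 * paths.
 */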

#else /* !__ASSEMBLY__ */

#include <linux/jump_label.h>

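/*
 * Static key gating the L1D uaccess flush mitigation: when enabled,
 * prevent_user_access() and friends flush the L1 data cache via
 * do_uaccess_flush() whenever a user access window is closed.
 */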
DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);

#ifdef CONFIG_PPC_PKEY

extern u64 __ro_after_init default_uamor;
extern u64 __ro_after_init default_amr;
extern u64 __ro_after_init default_iamr;

#include <asm/mmu.h>
#include <asm/ptrace.h>

/*
 * Usage of kthread_use_mm() should arguably inherit the AMR value of
 * the address space being operated on. But the AMR value is
 * thread-specific, and we inherit the address space rather than the
 * thread's access restrictions. Because of this, ignore the AMR value
 * when accessing userspace via a kernel thread and fall back to the
 * default values.
 */
static inline u64 current_thread_amr(void)
{
	if (current->thread.regs)
		return current->thread.regs->amr;
	return default_amr;
}

static inline u64 current_thread_iamr(void)
{
	if (current->thread.regs)
		return current->thread.regs->iamr;
	return default_iamr;
}
#endif /* CONFIG_PPC_PKEY */

#ifdef CONFIG_PPC_KUAP

static inline void kuap_user_restore(struct pt_regs *regs)
{
	bool restore_amr = false, restore_iamr = false;
	unsigned long amr, iamr;

	if (!mmu_has_feature(MMU_FTR_PKEY))
		return;

	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
		amr = mfspr(SPRN_AMR);
		if (amr != regs->amr)
			restore_amr = true;
	} else {
		restore_amr = true;
	}

	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUEP)) {
		iamr = mfspr(SPRN_IAMR);
		if (iamr != regs->iamr)
			restore_iamr = true;
	} else {
		restore_iamr = true;
	}

	if (restore_amr || restore_iamr) {
		isync();
		if (restore_amr)
			mtspr(SPRN_AMR, regs->amr);
		if (restore_iamr)
			mtspr(SPRN_IAMR, regs->iamr);
	}
	/*
	 * No isync required here because we are about to rfi
	 * back to previous context before any user accesses
	 * would be made, which is a CSI.
	 */
}

static inline void kuap_kernel_restore(struct pt_regs *regs,
				       unsigned long amr)
{
	if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
		if (unlikely(regs->amr != amr)) {
			isync();
			mtspr(SPRN_AMR, regs->amr);
			/*
			 * No isync required here because we are about to rfi
			 * back to previous context before any user accesses
			 * would be made, which is a CSI.
			 */
		}
	}
	/*
	 * No need to restore IAMR when returning to kernel space.
	 */
}

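/*
 * Read the AMR on interrupt entry so the caller can save it and hand it
 * back to kuap_kernel_restore() on exit. With CONFIG_PPC_KUAP_DEBUG it
 * doubles as the C equivalent of the kuap_check_amr asm macro above.
 */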
static inline unsigned long kuap_get_and_check_amr(void)
{
	if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
		unsigned long amr = mfspr(SPRN_AMR);
		if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) /* kuap_check_amr() */
			WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED);
		return amr;
	}
	return 0;
}

#else /* !CONFIG_PPC_KUAP */

static inline void kuap_user_restore(struct pt_regs *regs)
{
}

static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
{
}

static inline unsigned long kuap_get_and_check_amr(void)
{
	return 0;
}

#endif /* CONFIG_PPC_KUAP */

#ifdef CONFIG_PPC_KUAP

static inline void kuap_check_amr(void)
{
	if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
		WARN_ON_ONCE(mfspr(SPRN_AMR) != AMR_KUAP_BLOCKED);
}

/*
 * We support individually allowing read or write, but we don't support
 * nesting because that would require an expensive read/modify/write of
 * the AMR.
 */

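/*
 * A sketch of the intended calling pattern, using the generic wrappers
 * from asm/kup.h (names per that header, shown for illustration only):
 *
 *	allow_read_from_user(uaddr, size);
 *	... raw loads from uaddr ...
 *	prevent_read_from_user(uaddr, size);
 *
 * Because nesting is not supported, each allow must be closed by the
 * matching prevent before another window is opened.
 */
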
static inline unsigned long get_kuap(void)
{
	/*
	 * We return AMR_KUAP_BLOCKED when we don't support KUAP because
	 * prevent_user_access_return() needs to return AMR_KUAP_BLOCKED
	 * to cause restore_user_access() to do a flush.
	 *
	 * This has no effect in terms of actually blocking things on hash,
	 * so it doesn't break anything.
	 */
	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
		return AMR_KUAP_BLOCKED;

	return mfspr(SPRN_AMR);
}

static inline void set_kuap(unsigned long value)
{
	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
		return;

	/*
	 * ISA v3.0B says we need a CSI (Context Synchronising Instruction) both
	 * before and after the move to AMR. See table 6 on page 1134.
	 */
	isync();
	mtspr(SPRN_AMR, value);
	isync();
}

static inline bool bad_kuap_fault(struct pt_regs *regs, unsigned long address,
				  bool is_write)
{
	if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
		return false;
	/*
	 * For radix this will be a storage protection fault (DSISR_PROTFAULT).
	 * For hash this will be a key fault (DSISR_KEYFAULT).
	 */
	/*
	 * We do have an exception table entry, but accessing userspace
	 * nonetheless faulted. This could be because we didn't unlock
	 * the AMR, or because access is denied by a userspace key value
	 * that blocks access. We are only interested in catching the
	 * case of accessing without unlocking the AMR, hence check for
	 * BLOCK_WRITE/READ against the AMR.
	 */
	if (is_write)
		return (regs->amr & AMR_KUAP_BLOCK_WRITE) == AMR_KUAP_BLOCK_WRITE;
	return (regs->amr & AMR_KUAP_BLOCK_READ) == AMR_KUAP_BLOCK_READ;
}

static __always_inline void allow_user_access(void __user *to, const void __user *from,
					      unsigned long size, unsigned long dir)
{
	unsigned long thread_amr = 0;

	// This is written so we can resolve to a single case at build time
	BUILD_BUG_ON(!__builtin_constant_p(dir));

	if (mmu_has_feature(MMU_FTR_PKEY))
		thread_amr = current_thread_amr();

	if (dir == KUAP_READ)
		set_kuap(thread_amr | AMR_KUAP_BLOCK_WRITE);
	else if (dir == KUAP_WRITE)
		set_kuap(thread_amr | AMR_KUAP_BLOCK_READ);
	else if (dir == KUAP_READ_WRITE)
		set_kuap(thread_amr);
	else
		BUILD_BUG();
}

#else /* !CONFIG_PPC_KUAP */

static inline unsigned long get_kuap(void)
{
	return AMR_KUAP_BLOCKED;
}

static inline void set_kuap(unsigned long value) { }

static __always_inline void allow_user_access(void __user *to, const void __user *from,
					      unsigned long size, unsigned long dir)
{ }

#endif /* CONFIG_PPC_KUAP */

static inline void prevent_user_access(void __user *to, const void __user *from,
				       unsigned long size, unsigned long dir)
{
	set_kuap(AMR_KUAP_BLOCKED);
	if (static_branch_unlikely(&uaccess_flush_key))
		do_uaccess_flush();
}

static inline unsigned long prevent_user_access_return(void)
{
	unsigned long flags = get_kuap();

	set_kuap(AMR_KUAP_BLOCKED);
	if (static_branch_unlikely(&uaccess_flush_key))
		do_uaccess_flush();

	return flags;
}

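/*
 * Restore a kuap state previously captured by prevent_user_access_return().
 * If the saved state was "blocked", restoring it closes the current user
 * access window, so the uaccess flush (when enabled) must run here as well;
 * see the comment in get_kuap() above.
 */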
static inline void restore_user_access(unsigned long flags)
{
	set_kuap(flags);
	if (static_branch_unlikely(&uaccess_flush_key) && flags == AMR_KUAP_BLOCKED)
		do_uaccess_flush();
}
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_KUP_H */