1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_POWERPC_BOOK3S_64_KUP_H
3 #define _ASM_POWERPC_BOOK3S_64_KUP_H
5 #include <linux/const.h>
8 #define AMR_KUAP_BLOCK_READ UL(0x5455555555555555)
9 #define AMR_KUAP_BLOCK_WRITE UL(0xa8aaaaaaaaaaaaaa)
10 #define AMR_KUEP_BLOCKED UL(0x5455555555555555)
11 #define AMR_KUAP_BLOCKED (AMR_KUAP_BLOCK_READ | AMR_KUAP_BLOCK_WRITE)
15 .macro kuap_user_restore gpr1
, gpr2
16 #if defined(CONFIG_PPC_PKEY)
17 BEGIN_MMU_FTR_SECTION_NESTED(67)
18 b
100f
// skip_restore_amr
19 END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY
, 67)
21 * AMR and IAMR are going to be different when
22 * returning to userspace.
24 ld \gpr1
, STACK_REGS_AMR(r1
)
27 * If kuap feature is not enabled, do the mtspr
28 * only if AMR value is different.
30 BEGIN_MMU_FTR_SECTION_NESTED(68)
34 END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUAP
, 68)
40 * Restore IAMR only when returning to userspace
42 ld \gpr1
, STACK_REGS_IAMR(r1
)
45 * If kuep feature is not enabled, do the mtspr
46 * only if IAMR value is different.
48 BEGIN_MMU_FTR_SECTION_NESTED(69)
49 mfspr \gpr2
, SPRN_IAMR
52 END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_BOOK3S_KUEP
, 69)
55 mtspr SPRN_IAMR
, \gpr1
57 100: //skip_restore_amr
58 /* No isync required, see kuap_user_restore() */
62 .macro kuap_kernel_restore gpr1
, gpr2
63 #if defined(CONFIG_PPC_PKEY)
65 BEGIN_MMU_FTR_SECTION_NESTED(67)
67 * AMR is going to be mostly the same since we are
68 * returning to the kernel. Compare and do a mtspr.
70 ld \gpr2
, STACK_REGS_AMR(r1
)
77 * No isync required, see kuap_restore_amr()
78 * No need to restore IAMR when returning to kernel space.
81 END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP
, 67)
#ifdef CONFIG_PPC_KUAP
/*
 * Debug check: warn (once) if the live AMR does not hold the fully
 * blocked value while running in the kernel.
 */
.macro kuap_check_amr gpr1, gpr2
#ifdef CONFIG_PPC_KUAP_DEBUG
	BEGIN_MMU_FTR_SECTION_NESTED(67)
	mfspr	\gpr1, SPRN_AMR
	/* Prevent access to userspace using any key values */
	LOAD_REG_IMMEDIATE(\gpr2, AMR_KUAP_BLOCKED)
999:	tdne	\gpr1, \gpr2
	EMIT_BUG_ENTRY 999b, __FILE__, __LINE__, (BUGFLAG_WARNING | BUGFLAG_ONCE)
	END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP, 67)
#endif
.endm
#endif
/*
 * if (pkey) {
 *
 *	save AMR -> stack;
 *	if (kuap) {
 *		if (AMR != BLOCKED)
 *			KUAP_BLOCKED -> AMR;
 *	}
 *	if (from_user) {
 *		save IAMR -> stack;
 *		if (kuep) {
 *			KUEP_BLOCKED ->IAMR
 *		}
 *	}
 *	return;
 * }
 *
 * if (kuap) {
 *	if (from_kernel) {
 *		save AMR -> stack;
 *		if (AMR != BLOCKED)
 *			KUAP_BLOCKED -> AMR;
 *	}
 * }
 */
125 .macro kuap_save_amr_and_lock gpr1
, gpr2
, use_cr
, msr_pr_cr
126 #if defined(CONFIG_PPC_PKEY)
129 * if both pkey and kuap is disabled, nothing to do
131 BEGIN_MMU_FTR_SECTION_NESTED(68)
132 b
100f
// skip_save_amr
133 END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY
| MMU_FTR_BOOK3S_KUAP
, 68)
136 * if pkey is disabled and we are entering from userspace
139 BEGIN_MMU_FTR_SECTION_NESTED(67)
142 * Without pkey we are not changing AMR outside the kernel
143 * hence skip this completely.
145 bne \msr_pr_cr
, 100f
// from userspace
147 END_MMU_FTR_SECTION_NESTED_IFCLR(MMU_FTR_PKEY
, 67)
150 * pkey is enabled or pkey is disabled but entering from kernel
152 mfspr \gpr1
, SPRN_AMR
153 std \gpr1
, STACK_REGS_AMR(r1
)
156 * update kernel AMR with AMR_KUAP_BLOCKED only
157 * if KUAP feature is enabled
159 BEGIN_MMU_FTR_SECTION_NESTED(69)
160 LOAD_REG_IMMEDIATE(\gpr2
, AMR_KUAP_BLOCKED
)
161 cmpd \use_cr
, \gpr1
, \gpr2
164 * We don't isync here because we very recently entered via an interrupt
166 mtspr SPRN_AMR
, \gpr2
169 END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUAP
, 69)
172 * if entering from kernel we don't need save IAMR
175 beq \msr_pr_cr
, 100f
// from kernel space
176 mfspr \gpr1
, SPRN_IAMR
177 std \gpr1
, STACK_REGS_IAMR(r1
)
180 * update kernel IAMR with AMR_KUEP_BLOCKED only
181 * if KUEP feature is enabled
183 BEGIN_MMU_FTR_SECTION_NESTED(70)
184 LOAD_REG_IMMEDIATE(\gpr2
, AMR_KUEP_BLOCKED
)
185 mtspr SPRN_IAMR
, \gpr2
187 END_MMU_FTR_SECTION_NESTED_IFSET(MMU_FTR_BOOK3S_KUEP
, 70)
190 100: // skip_save_amr
194 #else /* !__ASSEMBLY__ */
196 #include <linux/jump_label.h>
198 DECLARE_STATIC_KEY_FALSE(uaccess_flush_key
);
200 #ifdef CONFIG_PPC_PKEY
202 extern u64 __ro_after_init default_uamor
;
203 extern u64 __ro_after_init default_amr
;
204 extern u64 __ro_after_init default_iamr
;
207 #include <asm/ptrace.h>
209 /* usage of kthread_use_mm() should inherit the
210 * AMR value of the operating address space. But, the AMR value is
211 * thread-specific and we inherit the address space and not thread
212 * access restrictions. Because of this ignore AMR value when accessing
213 * userspace via kernel thread.
215 static inline u64
current_thread_amr(void)
217 if (current
->thread
.regs
)
218 return current
->thread
.regs
->amr
;
222 static inline u64
current_thread_iamr(void)
224 if (current
->thread
.regs
)
225 return current
->thread
.regs
->iamr
;
228 #endif /* CONFIG_PPC_PKEY */
230 #ifdef CONFIG_PPC_KUAP
232 static inline void kuap_user_restore(struct pt_regs
*regs
)
234 bool restore_amr
= false, restore_iamr
= false;
235 unsigned long amr
, iamr
;
237 if (!mmu_has_feature(MMU_FTR_PKEY
))
240 if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP
)) {
241 amr
= mfspr(SPRN_AMR
);
242 if (amr
!= regs
->amr
)
248 if (!mmu_has_feature(MMU_FTR_BOOK3S_KUEP
)) {
249 iamr
= mfspr(SPRN_IAMR
);
250 if (iamr
!= regs
->iamr
)
257 if (restore_amr
|| restore_iamr
) {
260 mtspr(SPRN_AMR
, regs
->amr
);
262 mtspr(SPRN_IAMR
, regs
->iamr
);
265 * No isync required here because we are about to rfi
266 * back to previous context before any user accesses
267 * would be made, which is a CSI.
271 static inline void kuap_kernel_restore(struct pt_regs
*regs
,
274 if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP
)) {
275 if (unlikely(regs
->amr
!= amr
)) {
277 mtspr(SPRN_AMR
, regs
->amr
);
279 * No isync required here because we are about to rfi
280 * back to previous context before any user accesses
281 * would be made, which is a CSI.
286 * No need to restore IAMR when returning to kernel space.
290 static inline unsigned long kuap_get_and_check_amr(void)
292 if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP
)) {
293 unsigned long amr
= mfspr(SPRN_AMR
);
294 if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG
)) /* kuap_check_amr() */
295 WARN_ON_ONCE(amr
!= AMR_KUAP_BLOCKED
);
301 #else /* CONFIG_PPC_PKEY */
303 static inline void kuap_user_restore(struct pt_regs
*regs
)
307 static inline void kuap_kernel_restore(struct pt_regs
*regs
, unsigned long amr
)
311 static inline unsigned long kuap_get_and_check_amr(void)
316 #endif /* CONFIG_PPC_PKEY */
319 #ifdef CONFIG_PPC_KUAP
321 static inline void kuap_check_amr(void)
323 if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG
) && mmu_has_feature(MMU_FTR_BOOK3S_KUAP
))
324 WARN_ON_ONCE(mfspr(SPRN_AMR
) != AMR_KUAP_BLOCKED
);
328 * We support individually allowing read or write, but we don't support nesting
329 * because that would require an expensive read/modify write of the AMR.
332 static inline unsigned long get_kuap(void)
335 * We return AMR_KUAP_BLOCKED when we don't support KUAP because
336 * prevent_user_access_return needs to return AMR_KUAP_BLOCKED to
337 * cause restore_user_access to do a flush.
339 * This has no effect in terms of actually blocking things on hash,
340 * so it doesn't break anything.
342 if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP
))
343 return AMR_KUAP_BLOCKED
;
345 return mfspr(SPRN_AMR
);
348 static inline void set_kuap(unsigned long value
)
350 if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP
))
354 * ISA v3.0B says we need a CSI (Context Synchronising Instruction) both
355 * before and after the move to AMR. See table 6 on page 1134.
358 mtspr(SPRN_AMR
, value
);
362 static inline bool bad_kuap_fault(struct pt_regs
*regs
, unsigned long address
,
365 if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP
))
368 * For radix this will be a storage protection fault (DSISR_PROTFAULT).
369 * For hash this will be a key fault (DSISR_KEYFAULT)
372 * We do have exception table entry, but accessing the
373 * userspace results in fault. This could be because we
374 * didn't unlock the AMR or access is denied by userspace
375 * using a key value that blocks access. We are only interested
376 * in catching the use case of accessing without unlocking
377 * the AMR. Hence check for BLOCK_WRITE/READ against AMR.
380 return (regs
->amr
& AMR_KUAP_BLOCK_WRITE
) == AMR_KUAP_BLOCK_WRITE
;
382 return (regs
->amr
& AMR_KUAP_BLOCK_READ
) == AMR_KUAP_BLOCK_READ
;
385 static __always_inline
void allow_user_access(void __user
*to
, const void __user
*from
,
386 unsigned long size
, unsigned long dir
)
388 unsigned long thread_amr
= 0;
390 // This is written so we can resolve to a single case at build time
391 BUILD_BUG_ON(!__builtin_constant_p(dir
));
393 if (mmu_has_feature(MMU_FTR_PKEY
))
394 thread_amr
= current_thread_amr();
396 if (dir
== KUAP_READ
)
397 set_kuap(thread_amr
| AMR_KUAP_BLOCK_WRITE
);
398 else if (dir
== KUAP_WRITE
)
399 set_kuap(thread_amr
| AMR_KUAP_BLOCK_READ
);
400 else if (dir
== KUAP_READ_WRITE
)
401 set_kuap(thread_amr
);
406 #else /* CONFIG_PPC_KUAP */
408 static inline unsigned long get_kuap(void)
410 return AMR_KUAP_BLOCKED
;
413 static inline void set_kuap(unsigned long value
) { }
415 static __always_inline
void allow_user_access(void __user
*to
, const void __user
*from
,
416 unsigned long size
, unsigned long dir
)
419 #endif /* !CONFIG_PPC_KUAP */
421 static inline void prevent_user_access(void __user
*to
, const void __user
*from
,
422 unsigned long size
, unsigned long dir
)
424 set_kuap(AMR_KUAP_BLOCKED
);
425 if (static_branch_unlikely(&uaccess_flush_key
))
429 static inline unsigned long prevent_user_access_return(void)
431 unsigned long flags
= get_kuap();
433 set_kuap(AMR_KUAP_BLOCKED
);
434 if (static_branch_unlikely(&uaccess_flush_key
))
440 static inline void restore_user_access(unsigned long flags
)
443 if (static_branch_unlikely(&uaccess_flush_key
) && flags
== AMR_KUAP_BLOCKED
)
446 #endif /* __ASSEMBLY__ */
448 #endif /* _ASM_POWERPC_BOOK3S_64_KUP_H */