// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2017 Arm Ltd.
#define pr_fmt(fmt) "sdei: " fmt

#include <linux/arm_sdei.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>

#include <asm/alternative.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
#include <asm/vmap_stack.h>

unsigned long sdei_exit_mode;

/*
 * VMAP'd stacks check for stack overflow on exception using sp as a scratch
 * register, meaning SDEI has to switch to its own stack. We need two stacks as
 * a critical event may interrupt a normal event that has just taken a
 * synchronous exception, and is using sp as a scratch register. For a critical
 * event interrupting a normal event, we can't reliably tell if we were on the
 * SDEI stack.
 * For now, we allocate stacks when the driver is probed.
 */
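/*
 * Illustrative nesting only (not a code path): a critical event arriving
 * while a normal event handler runs stacks up as:
 *
 *   task/IRQ stack
 *     `-> normal event handler     (on sdei_stack_normal)
 *           `-> critical event handler (on sdei_stack_critical)
 */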
DECLARE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
DECLARE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);

#ifdef CONFIG_VMAP_STACK
DEFINE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
DEFINE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
#endif

static void _free_sdei_stack(unsigned long * __percpu *ptr, int cpu)
{
	unsigned long *p;

	p = per_cpu(*ptr, cpu);
	per_cpu(*ptr, cpu) = NULL;
	vfree(p);
}

static void free_sdei_stacks(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		_free_sdei_stack(&sdei_stack_normal_ptr, cpu);
		_free_sdei_stack(&sdei_stack_critical_ptr, cpu);
	}
}

static int _init_sdei_stack(unsigned long * __percpu *ptr, int cpu)
{
	unsigned long *p;

	p = arch_alloc_vmap_stack(SDEI_STACK_SIZE, cpu_to_node(cpu));
	if (!p)
		return -ENOMEM;
	per_cpu(*ptr, cpu) = p;

	return 0;
}
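
/*
 * Stacks are allocated for each possible CPU, not just those online, so
 * that a CPU brought online later already has its SDEI stacks in place.
 */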
static int init_sdei_stacks(void)
{
	int cpu;
	int err = 0;

	for_each_possible_cpu(cpu) {
		err = _init_sdei_stack(&sdei_stack_normal_ptr, cpu);
		if (err)
			break;
		err = _init_sdei_stack(&sdei_stack_critical_ptr, cpu);
		if (err)
			break;
	}

	if (err)
		free_sdei_stacks();

	return err;
}
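
/*
 * Used by the stack unwinder to tell whether a given sp lies on one of
 * this CPU's SDEI stacks; it only reads the per-cpu stack pointers.
 */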
bool _on_sdei_stack(unsigned long sp)
{
	unsigned long low, high;

	if (!IS_ENABLED(CONFIG_VMAP_STACK))
		return false;

	low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
	high = low + SDEI_STACK_SIZE;

	if (low <= sp && sp < high)
		return true;

	low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
	high = low + SDEI_STACK_SIZE;

	return (low <= sp && sp < high);
}
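
/*
 * The address returned below is registered with firmware by the SDEI
 * driver as the client entry point; __sdei_asm_handler is the assembly
 * entry (in entry.S). Per the SDEI specification, firmware branches to it
 * with the event number and the registered argument in x0/x1.
 */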
unsigned long sdei_arch_get_entry_point(int conduit)
{
	/*
	 * SDEI works between adjacent exception levels. If we booted at EL1 we
	 * assume a hypervisor is marshalling events. If we booted at EL2 and
	 * dropped to EL1 because we don't support VHE, then we can't support
	 * SDEI.
	 */
	if (is_hyp_mode_available() && !is_kernel_in_hyp_mode()) {
		pr_err("Not supported on this hardware/boot configuration\n");
		return 0;
	}

	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
		if (init_sdei_stacks())
			return 0;
	}

	sdei_exit_mode = (conduit == CONDUIT_HVC) ? SDEI_EXIT_HVC : SDEI_EXIT_SMC;
	return (unsigned long)__sdei_asm_handler;
}

/*
 * __sdei_handler() returns one of:
 *
 * SDEI_EV_HANDLED -  success, return to the interrupted context.
 * SDEI_EV_FAILED  -  failure, return this error code to firmware.
 * virtual-address -  success, return to this address.
 */
static __kprobes unsigned long _sdei_handler(struct pt_regs *regs,
					     struct sdei_registered_event *arg)
{
	u32 mode;
	int i, err = 0;
	const int clobbered_registers = 4;
	u64 elr = read_sysreg(elr_el1);
	u32 kernel_mode = read_sysreg(CurrentEL) | 1;	/* +SPSel */
	unsigned long vbar = read_sysreg(vbar_el1);
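
	/*
	 * Note: kernel_mode is the PSTATE.M value the kernel runs with;
	 * CurrentEL | SPSel yields e.g. PSR_MODE_EL1h when running at EL1.
	 */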

	/* Retrieve the missing registers' values */
	for (i = 0; i < clobbered_registers; i++) {
		/* from within the handler, this call always succeeds */
		sdei_api_event_context(i, &regs->regs[i]);
	}

	/*
	 * We didn't take an exception to get here, so set PAN ourselves. UAO
	 * will be cleared by sdei_event_handler()'s set_fs(USER_DS) call.
	 */
	__uaccess_enable_hw_pan();
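	/*
	 * (Taking an exception would normally have set PSTATE.PAN for us;
	 * SDEI events arrive from firmware without taking one.)
	 */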

	err = sdei_event_handler(regs, arg);
	if (err)
		return SDEI_EV_FAILED;

	if (elr != read_sysreg(elr_el1)) {
		/*
		 * We took a synchronous exception from the SDEI handler.
		 * This could deadlock, and if you interrupt KVM it will
		 * hyp-panic instead.
		 */
		pr_warn("unsafe: exception during handler\n");
	}

	mode = regs->pstate & (PSR_MODE32_BIT | PSR_MODE_MASK);

	/*
	 * If we interrupted the kernel with interrupts masked, we always go
	 * back to wherever we came from.
	 */
	if (mode == kernel_mode && !interrupts_enabled(regs))
		return SDEI_EV_HANDLED;

	/*
	 * Otherwise, we pretend this was an IRQ. This lets user space tasks
	 * receive signals before we return to them, and lets KVM invoke its
	 * world switch to do the same.
	 *
	 * See DDI0487B.a Table D1-7 'Vector offsets from vector table base
	 * address'.
	 */
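	/*
	 * IRQ vector offsets from that table: 0x280 is current-EL-with-SPx,
	 * 0x480 is lower-EL-using-AArch64, 0x680 is lower-EL-using-AArch32.
	 */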
	if (mode == kernel_mode)
		return vbar + 0x280;
	else if (mode & PSR_MODE32_BIT)
		return vbar + 0x680;
	else
		return vbar + 0x480;
}

asmlinkage __kprobes notrace unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{
	unsigned long ret;
	bool do_nmi_exit = false;

	/*
	 * nmi_enter() deals with printk() re-entrance and use of RCU when
	 * RCU believed this CPU was idle. Because critical events can
	 * interrupt normal events, we may already be in_nmi().
	 */
	if (!in_nmi()) {
		nmi_enter();
		do_nmi_exit = true;
	}

	ret = _sdei_handler(regs, arg);

	if (do_nmi_exit)
		nmi_exit();

	return ret;
}