/*
 * Provenance (gitweb scrape): git.proxmox.com, mirror_ubuntu-hirsute-kernel.git,
 * blob arch/riscv/kernel/entry.S.
 * (Unrelated commit title shown by the web UI: "powerpc/cacheinfo: Remove double free".)
 */
1 /*
2 * Copyright (C) 2012 Regents of the University of California
3 * Copyright (C) 2017 SiFive
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation, version 2.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15 #include <linux/init.h>
16 #include <linux/linkage.h>
17
18 #include <asm/asm.h>
19 #include <asm/csr.h>
20 #include <asm/unistd.h>
21 #include <asm/thread_info.h>
22 #include <asm/asm-offsets.h>
23
24 .text
25 .altmacro  /* required so the LOCAL directive works for the scoped labels in SAVE_ALL */
26
27 /*
28 * Prepares to enter a system call or exception by saving all registers to the
29 * stack.
30 */
31 .macro SAVE_ALL
32 LOCAL _restore_kernel_tpsp
33 LOCAL _save_context
34
35 /*
36 * If coming from userspace, preserve the user thread pointer and load
37 * the kernel thread pointer. If we came from the kernel, sscratch
38 * will contain 0, and we should continue on the current TP.
39 */
40 csrrw tp, sscratch, tp  /* swap tp <-> sscratch; tp != 0 here means we trapped from user mode */
41 bnez tp, _save_context
42
43 _restore_kernel_tpsp:
44 csrr tp, sscratch  /* kernel-mode trap: recover the task pointer the swap just moved into sscratch */
45 REG_S sp, TASK_TI_KERNEL_SP(tp)  /* record current SP so the common reload below is a no-op */
46 _save_context:
47 REG_S sp, TASK_TI_USER_SP(tp)
48 REG_L sp, TASK_TI_KERNEL_SP(tp)  /* switch to (or stay on) this task's kernel stack */
49 addi sp, sp, -(PT_SIZE_ON_STACK)  /* carve out a struct pt_regs frame */
/* x2(sp) and x4(tp) are live; they are saved below via s0/s5 instead */
50 REG_S x1, PT_RA(sp)
51 REG_S x3, PT_GP(sp)
52 REG_S x5, PT_T0(sp)
53 REG_S x6, PT_T1(sp)
54 REG_S x7, PT_T2(sp)
55 REG_S x8, PT_S0(sp)
56 REG_S x9, PT_S1(sp)
57 REG_S x10, PT_A0(sp)
58 REG_S x11, PT_A1(sp)
59 REG_S x12, PT_A2(sp)
60 REG_S x13, PT_A3(sp)
61 REG_S x14, PT_A4(sp)
62 REG_S x15, PT_A5(sp)
63 REG_S x16, PT_A6(sp)
64 REG_S x17, PT_A7(sp)
65 REG_S x18, PT_S2(sp)
66 REG_S x19, PT_S3(sp)
67 REG_S x20, PT_S4(sp)
68 REG_S x21, PT_S5(sp)
69 REG_S x22, PT_S6(sp)
70 REG_S x23, PT_S7(sp)
71 REG_S x24, PT_S8(sp)
72 REG_S x25, PT_S9(sp)
73 REG_S x26, PT_S10(sp)
74 REG_S x27, PT_S11(sp)
75 REG_S x28, PT_T3(sp)
76 REG_S x29, PT_T4(sp)
77 REG_S x30, PT_T5(sp)
78 REG_S x31, PT_T6(sp)
79
80 /*
81 * Disable user-mode memory access as it should only be set in the
82 * actual user copy routines.
83 *
84 * Disable the FPU to detect illegal usage of floating point in kernel
85 * space.
86 */
87 li t0, SR_SUM | SR_FS
88
89 REG_L s0, TASK_TI_USER_SP(tp)
90 csrrc s1, sstatus, t0  /* clear SUM+FS and capture the pre-trap sstatus in one atomic op */
91 csrr s2, sepc
92 csrr s3, sbadaddr  /* faulting address (renamed stval in later privileged specs) */
93 csrr s4, scause
94 csrr s5, sscratch  /* pre-trap tp (user tp, or the kernel tp on a kernel-mode trap) */
95 REG_S s0, PT_SP(sp)
96 REG_S s1, PT_SSTATUS(sp)
97 REG_S s2, PT_SEPC(sp)
98 REG_S s3, PT_SBADADDR(sp)
99 REG_S s4, PT_SCAUSE(sp)
100 REG_S s5, PT_TP(sp)
/* On exit: s1=sstatus, s2=sepc, s3=sbadaddr, s4=scause, s5=old tp — handle_exception relies on these */
101 .endm
102
103 /*
104 * Prepares to return from a system call or exception by restoring all
105 * registers from the stack.
106 */
107 .macro RESTORE_ALL
/*
 * Restore the trap CSRs first, while a0/a2 are still free scratch
 * registers; their real values are reloaded with the rest of the GPRs.
 */
108 REG_L a0, PT_SSTATUS(sp)
109 REG_L a2, PT_SEPC(sp)
110 csrw sstatus, a0
111 csrw sepc, a2
112
113 REG_L x1, PT_RA(sp)
114 REG_L x3, PT_GP(sp)
115 REG_L x4, PT_TP(sp)
116 REG_L x5, PT_T0(sp)
117 REG_L x6, PT_T1(sp)
118 REG_L x7, PT_T2(sp)
119 REG_L x8, PT_S0(sp)
120 REG_L x9, PT_S1(sp)
121 REG_L x10, PT_A0(sp)
122 REG_L x11, PT_A1(sp)
123 REG_L x12, PT_A2(sp)
124 REG_L x13, PT_A3(sp)
125 REG_L x14, PT_A4(sp)
126 REG_L x15, PT_A5(sp)
127 REG_L x16, PT_A6(sp)
128 REG_L x17, PT_A7(sp)
129 REG_L x18, PT_S2(sp)
130 REG_L x19, PT_S3(sp)
131 REG_L x20, PT_S4(sp)
132 REG_L x21, PT_S5(sp)
133 REG_L x22, PT_S6(sp)
134 REG_L x23, PT_S7(sp)
135 REG_L x24, PT_S8(sp)
136 REG_L x25, PT_S9(sp)
137 REG_L x26, PT_S10(sp)
138 REG_L x27, PT_S11(sp)
139 REG_L x28, PT_T3(sp)
140 REG_L x29, PT_T4(sp)
141 REG_L x30, PT_T5(sp)
142 REG_L x31, PT_T6(sp)
143
/* x2(sp) must be restored last: every load above addresses the frame through it */
144 REG_L x2, PT_SP(sp)
145 .endm
146
/*
 * Without kernel preemption a trap taken from kernel mode never
 * reschedules: returning to the kernel is just restore_all.
 */
147 #if !IS_ENABLED(CONFIG_PREEMPT)
148 .set resume_kernel, restore_all
149 #endif
150
/*
 * Common entry for all supervisor traps (interrupts, exceptions, syscalls).
 * SAVE_ALL builds a struct pt_regs at sp and leaves s1=sstatus, s2=sepc,
 * s3=sbadaddr, s4=scause, s5=pre-trap tp.
 */
151 ENTRY(handle_exception)
152 SAVE_ALL
153
154 /*
155 * Set sscratch register to 0, so that if a recursive exception
156 * occurs, the exception vector knows it came from the kernel
157 */
158 csrw sscratch, x0
159
160 /* Load the global pointer */
161 .option push
162 .option norelax
163 la gp, __global_pointer$
164 .option pop
165
/* All C handlers tail-called below will return through ret_from_exception */
166 la ra, ret_from_exception
167 /*
168 * MSB of cause differentiates between
169 * interrupts and exceptions
170 */
171 bge s4, zero, 1f  /* s4 = scause; negative (MSB set) means interrupt */
172
173 /* Handle interrupts */
174 move a0, sp /* pt_regs */
175 tail do_IRQ
176 1:
177 /* Exceptions run with interrupts enabled */
178 csrs sstatus, SR_SIE
179
180 /* Handle syscalls */
181 li t0, EXC_SYSCALL
182 beq s4, t0, handle_syscall
183
184 /* Handle other exceptions */
185 slli t0, s4, RISCV_LGPTR  /* table index = cause * sizeof(void *) */
186 la t1, excp_vect_table
187 la t2, excp_vect_table_end
188 move a0, sp /* pt_regs */
189 add t0, t1, t0
190 /* Check if exception code lies within bounds */
191 bgeu t0, t2, 1f
192 REG_L t0, 0(t0)
193 jr t0  /* jump to the per-cause handler; it returns via ra = ret_from_exception */
194 1:
195 tail do_trap_unknown
196
197 handle_syscall:
198 /* save the initial A0 value (needed in signal handlers) */
199 REG_S a0, PT_ORIG_A0(sp)
200 /*
201 * Advance SEPC to avoid executing the original
202 * scall instruction on sret
203 */
204 addi s2, s2, 0x4  /* s2 still holds sepc from SAVE_ALL; ecall is a 4-byte insn */
205 REG_S s2, PT_SEPC(sp)
206 /* Trace syscalls, but only if requested by the user. */
207 REG_L t0, TASK_TI_FLAGS(tp)
208 andi t0, t0, _TIF_SYSCALL_WORK
209 bnez t0, handle_syscall_trace_enter
210 check_syscall_nr:
211 /* Check to make sure we don't jump to a bogus syscall number. */
212 li t0, __NR_syscalls
213 la s0, sys_ni_syscall  /* default handler for out-of-range numbers */
214 /* Syscall number held in a7 */
215 bgeu a7, t0, 1f
216 la s0, sys_call_table
217 slli t0, a7, RISCV_LGPTR
218 add s0, s0, t0
219 REG_L s0, 0(s0)
220 1:
221 jalr s0  /* call the handler; falls through to ret_from_syscall */
222
223 ret_from_syscall:
224 /* Set user a0 to kernel a0 */
225 REG_S a0, PT_A0(sp)
226 /* Trace syscalls, but only if requested by the user. */
227 REG_L t0, TASK_TI_FLAGS(tp)
228 andi t0, t0, _TIF_SYSCALL_WORK
229 bnez t0, handle_syscall_trace_exit
230
231 ret_from_exception:
232 REG_L s0, PT_SSTATUS(sp)
233 csrc sstatus, SR_SIE  /* interrupts off for the atomic work-flag checks below */
234 andi s0, s0, SR_SPP
235 bnez s0, resume_kernel  /* SPP set => trap was taken from kernel mode */
236
237 resume_userspace:
238 /* Interrupts must be disabled here so flags are checked atomically */
239 REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
240 andi s1, s0, _TIF_WORK_MASK
241 bnez s1, work_pending
242
243 /* Save unwound kernel stack pointer in thread_info */
244 addi s0, sp, PT_SIZE_ON_STACK
245 REG_S s0, TASK_TI_KERNEL_SP(tp)
246
247 /*
248 * Save TP into sscratch, so we can find the kernel data structures
249 * again.
250 */
251 csrw sscratch, tp  /* nonzero sscratch: the next trap knows it came from user mode */
252
253 restore_all:
254 RESTORE_ALL
255 sret
256
/* With CONFIG_PREEMPT=n, resume_kernel is aliased to restore_all above */
257 #if IS_ENABLED(CONFIG_PREEMPT)
258 resume_kernel:
259 REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
260 bnez s0, restore_all  /* preemption disabled: return without rescheduling */
261 need_resched:
262 REG_L s0, TASK_TI_FLAGS(tp)
263 andi s0, s0, _TIF_NEED_RESCHED
264 beqz s0, restore_all
265 call preempt_schedule_irq
266 j need_resched  /* re-check: the flag may be set again after scheduling */
267 #endif
268
269 work_pending:
270 /* Enter slow path for supplementary processing */
271 la ra, ret_from_exception  /* both tail calls below return here to re-check flags */
272 andi s1, s0, _TIF_NEED_RESCHED
273 bnez s1, work_resched
274 work_notifysig:
275 /* Handle pending signals and notify-resume requests */
276 csrs sstatus, SR_SIE /* Enable interrupts for do_notify_resume() */
277 move a0, sp /* pt_regs */
278 move a1, s0 /* current_thread_info->flags */
279 tail do_notify_resume
280 work_resched:
281 tail schedule
282
283 /* Slow paths for ptrace. */
284 handle_syscall_trace_enter:
285 move a0, sp
286 call do_syscall_trace_enter
/* reload syscall args: the tracer may have modified them in pt_regs */
287 REG_L a0, PT_A0(sp)
288 REG_L a1, PT_A1(sp)
289 REG_L a2, PT_A2(sp)
290 REG_L a3, PT_A3(sp)
291 REG_L a4, PT_A4(sp)
292 REG_L a5, PT_A5(sp)
293 REG_L a6, PT_A6(sp)
294 REG_L a7, PT_A7(sp)
295 j check_syscall_nr
296 handle_syscall_trace_exit:
297 move a0, sp
298 call do_syscall_trace_exit
299 j ret_from_exception
300
301 END(handle_exception)
302
/*
 * First return to userspace for a newly forked task: run schedule_tail()
 * (a0 presumably holds the previous task, set up by the context-switch
 * path outside this file — confirm against copy_thread), then unwind
 * through ret_from_exception via the ra set here.
 */
303 ENTRY(ret_from_fork)
304 la ra, ret_from_exception
305 tail schedule_tail
306 ENDPROC(ret_from_fork)
307
/*
 * Startup path for a new kernel thread: finish the context switch, then
 * invoke fn(arg) with s0 = fn and s1 = arg (register contract established
 * by copy_thread, not visible in this file — confirm there). If fn ever
 * returns, it does so into ret_from_exception via the ra set below.
 */
308 ENTRY(ret_from_kernel_thread)
309 call schedule_tail
310 /* Call fn(arg) */
311 la ra, ret_from_exception
312 move a0, s1
313 jr s0
314 ENDPROC(ret_from_kernel_thread)
315
316
317 /*
318 * Integer register context switch
319 * The callee-saved registers must be saved and restored.
320 *
321 * a0: previous task_struct (must be preserved across the switch)
322 * a1: next task_struct
323 *
324 * The value of a0 and a1 must be preserved by this function, as that's how
325 * arguments are passed to schedule_tail.
326 */
327 ENTRY(__switch_to)
328 /* Save context into prev->thread */
/* Bias both task pointers by TASK_THREAD_RA: the TASK_THREAD_*_RA offsets
 * below appear to be taken relative to thread_struct.ra (per the naming). */
329 li a4, TASK_THREAD_RA
330 add a3, a0, a4  /* a3 = &prev->thread.ra */
331 add a4, a1, a4  /* a4 = &next->thread.ra */
/* Only callee-saved state (ra, sp, s0-s11) needs saving: the caller-saved
 * registers are dead across this call per the C ABI. */
332 REG_S ra, TASK_THREAD_RA_RA(a3)
333 REG_S sp, TASK_THREAD_SP_RA(a3)
334 REG_S s0, TASK_THREAD_S0_RA(a3)
335 REG_S s1, TASK_THREAD_S1_RA(a3)
336 REG_S s2, TASK_THREAD_S2_RA(a3)
337 REG_S s3, TASK_THREAD_S3_RA(a3)
338 REG_S s4, TASK_THREAD_S4_RA(a3)
339 REG_S s5, TASK_THREAD_S5_RA(a3)
340 REG_S s6, TASK_THREAD_S6_RA(a3)
341 REG_S s7, TASK_THREAD_S7_RA(a3)
342 REG_S s8, TASK_THREAD_S8_RA(a3)
343 REG_S s9, TASK_THREAD_S9_RA(a3)
344 REG_S s10, TASK_THREAD_S10_RA(a3)
345 REG_S s11, TASK_THREAD_S11_RA(a3)
346 /* Restore context from next->thread */
/* Loading next's ra switches the return path: the final ret resumes
 * wherever next last called __switch_to (or its thread start stub). */
347 REG_L ra, TASK_THREAD_RA_RA(a4)
348 REG_L sp, TASK_THREAD_SP_RA(a4)
349 REG_L s0, TASK_THREAD_S0_RA(a4)
350 REG_L s1, TASK_THREAD_S1_RA(a4)
351 REG_L s2, TASK_THREAD_S2_RA(a4)
352 REG_L s3, TASK_THREAD_S3_RA(a4)
353 REG_L s4, TASK_THREAD_S4_RA(a4)
354 REG_L s5, TASK_THREAD_S5_RA(a4)
355 REG_L s6, TASK_THREAD_S6_RA(a4)
356 REG_L s7, TASK_THREAD_S7_RA(a4)
357 REG_L s8, TASK_THREAD_S8_RA(a4)
358 REG_L s9, TASK_THREAD_S9_RA(a4)
359 REG_L s10, TASK_THREAD_S10_RA(a4)
360 REG_L s11, TASK_THREAD_S11_RA(a4)
361 /* Swap the CPU entry around. */
362 lw a3, TASK_TI_CPU(a0)
363 lw a4, TASK_TI_CPU(a1)
364 sw a3, TASK_TI_CPU(a1)
365 sw a4, TASK_TI_CPU(a0)
/* Point tp at the incoming task so get_current() sees next */
366 #if TASK_TI != 0
367 #error "TASK_TI != 0: tp will contain a 'struct thread_info', not a 'struct task_struct' so get_current() won't work."
368 addi tp, a1, TASK_TI
369 #else
370 move tp, a1
371 #endif
372 ret
373 ENDPROC(__switch_to)
374
375 .section ".rodata"
376 /* Exception vector table */
/*
 * Indexed in handle_exception by the scause exception code scaled by
 * pointer size; codes at or past excp_vect_table_end fall back to
 * do_trap_unknown there. Entry order therefore mirrors the RISC-V
 * exception-code encoding.
 */
377 ENTRY(excp_vect_table)
378 RISCV_PTR do_trap_insn_misaligned
379 RISCV_PTR do_trap_insn_fault
380 RISCV_PTR do_trap_insn_illegal
381 RISCV_PTR do_trap_break
382 RISCV_PTR do_trap_load_misaligned
383 RISCV_PTR do_trap_load_fault
384 RISCV_PTR do_trap_store_misaligned
385 RISCV_PTR do_trap_store_fault
386 RISCV_PTR do_trap_ecall_u /* system call, gets intercepted */
387 RISCV_PTR do_trap_ecall_s
388 RISCV_PTR do_trap_unknown
389 RISCV_PTR do_trap_ecall_m
390 RISCV_PTR do_page_fault /* instruction page fault */
391 RISCV_PTR do_page_fault /* load page fault */
392 RISCV_PTR do_trap_unknown
393 RISCV_PTR do_page_fault /* store page fault */
394 excp_vect_table_end:
395 END(excp_vect_table)