/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Event entry/exit for Hexagon
 *
 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 */

#include <asm/asm-offsets.h>  /*  assembly-safer versions of C defines */
#include <asm/mem-layout.h>	/*  sigh, except for page_offset  */
#include <asm/hexagon_vm.h>
#include <asm/thread_info.h>

/*
 * Entry into guest-mode Linux under Hexagon Virtual Machine.
 * Stack pointer points to event record - build pt_regs on top of it,
 * set up a plausible C stack frame, and dispatch to the C handler.
 * On return, do vmrte virtual instruction with SP where we started.
 *
 * VM Spec 0.5 uses a trap to fetch HVM record now.
 */

/*
 * Save full register state, while setting up thread_info struct
 * pointer derived from kernel stack pointer in THREADINFO_REG
 * register, putting prior thread_info.regs pointer in a callee-save
 * register (R24, which had better not ever be assigned to THREADINFO_REG),
 * and updating thread_info.regs to point to current stack frame,
 * so as to support nested events in kernel mode.
 *
 * As this is common code, we set the pt_regs system call number
 * to -1 for all events.  It will be replaced with the system call
 * number in the case where we decode a system call (trap0(#1)).
 */

#if CONFIG_HEXAGON_ARCH_VERSION < 4
#define save_pt_regs()\
	memd(R0 + #_PT_R3130) = R31:30; \
	{ memw(R0 + #_PT_R2928) = R28; \
	  R31 = memw(R0 + #_PT_ER_VMPSP); }\
	{ memw(R0 + #(_PT_R2928 + 4)) = R31; \
	  R31 = ugp; } \
	{ memd(R0 + #_PT_R2726) = R27:26; \
	  R30 = gp ; } \
	memd(R0 + #_PT_R2524) = R25:24; \
	memd(R0 + #_PT_R2322) = R23:22; \
	memd(R0 + #_PT_R2120) = R21:20; \
	memd(R0 + #_PT_R1918) = R19:18; \
	memd(R0 + #_PT_R1716) = R17:16; \
	memd(R0 + #_PT_R1514) = R15:14; \
	memd(R0 + #_PT_R1312) = R13:12; \
	{ memd(R0 + #_PT_R1110) = R11:10; \
	  R15 = lc0; } \
	{ memd(R0 + #_PT_R0908) = R9:8; \
	  R14 = sa0; } \
	{ memd(R0 + #_PT_R0706) = R7:6; \
	  R13 = lc1; } \
	{ memd(R0 + #_PT_R0504) = R5:4; \
	  R12 = sa1; } \
	{ memd(R0 + #_PT_GPUGP) = R31:30; \
	  R11 = m1; \
	  R2.H = #HI(_THREAD_SIZE); } \
	{ memd(R0 + #_PT_LC0SA0) = R15:14; \
	  R10 = m0; \
	  R2.L = #LO(_THREAD_SIZE); } \
	{ memd(R0 + #_PT_LC1SA1) = R13:12; \
	  R15 = p3:0; \
	  R2 = neg(R2); } \
	{ memd(R0 + #_PT_M1M0) = R11:10; \
	  R14 = usr; \
	  R2 = and(R0,R2); } \
	{ memd(R0 + #_PT_PREDSUSR) = R15:14; \
	  THREADINFO_REG = R2; } \
	{ r24 = memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS); \
	  memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R0; \
	  R2 = #-1; } \
	{ memw(R0 + #_PT_SYSCALL_NR) = R2; \
	  R30 = #0; }
#else
/* V4+ */
/* the # ## # syntax inserts a literal ## */
#define save_pt_regs()\
	{ memd(R0 + #_PT_R3130) = R31:30; \
	  R30 = memw(R0 + #_PT_ER_VMPSP); }\
	{ memw(R0 + #_PT_R2928) = R28; \
	  memw(R0 + #(_PT_R2928 + 4)) = R30; }\
	{ R31:30 = C11:10; \
	  memd(R0 + #_PT_R2726) = R27:26; \
	  memd(R0 + #_PT_R2524) = R25:24; }\
	{ memd(R0 + #_PT_R2322) = R23:22; \
	  memd(R0 + #_PT_R2120) = R21:20; }\
	{ memd(R0 + #_PT_R1918) = R19:18; \
	  memd(R0 + #_PT_R1716) = R17:16; }\
	{ memd(R0 + #_PT_R1514) = R15:14; \
	  memd(R0 + #_PT_R1312) = R13:12; \
	  R17:16 = C13:12; }\
	{ memd(R0 + #_PT_R1110) = R11:10; \
	  memd(R0 + #_PT_R0908) = R9:8; \
	  R15:14 = C1:0; } \
	{ memd(R0 + #_PT_R0706) = R7:6; \
	  memd(R0 + #_PT_R0504) = R5:4; \
	  R13:12 = C3:2; } \
	{ memd(R0 + #_PT_GPUGP) = R31:30; \
	  memd(R0 + #_PT_LC0SA0) = R15:14; \
	  R11:10 = C7:6; }\
	{ THREADINFO_REG = and(R0, # ## #-_THREAD_SIZE); \
	  memd(R0 + #_PT_LC1SA1) = R13:12; \
	  R15 = p3:0; }\
	{ memd(R0 + #_PT_M1M0) = R11:10; \
	  memw(R0 + #_PT_PREDSUSR + 4) = R15; }\
	{ r24 = memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS); \
	  memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R0; \
	  R2 = #-1; } \
	{ memw(R0 + #_PT_SYSCALL_NR) = R2; \
	  memd(R0 + #_PT_CS1CS0) = R17:16; \
	  R30 = #0; }
#endif

/*
 * Restore registers and thread_info.regs state.  THREADINFO_REG
 * is assumed to still be sane, and R24 to have been correctly
 * preserved.  Don't restore R29 (SP) until later.
 */

#if CONFIG_HEXAGON_ARCH_VERSION < 4
#define restore_pt_regs() \
	{ memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R24; \
	  R15:14 = memd(R0 + #_PT_PREDSUSR); } \
	{ R11:10 = memd(R0 + #_PT_M1M0); \
	  p3:0 = R15; } \
	{ R13:12 = memd(R0 + #_PT_LC1SA1); \
	  usr = R14; } \
	{ R15:14 = memd(R0 + #_PT_LC0SA0); \
	  m1 = R11; } \
	{ R3:2 = memd(R0 + #_PT_R0302); \
	  m0 = R10; } \
	{ R5:4 = memd(R0 + #_PT_R0504); \
	  lc1 = R13; } \
	{ R7:6 = memd(R0 + #_PT_R0706); \
	  sa1 = R12; } \
	{ R9:8 = memd(R0 + #_PT_R0908); \
	  lc0 = R15; } \
	{ R11:10 = memd(R0 + #_PT_R1110); \
	  sa0 = R14; } \
	{ R13:12 = memd(R0 + #_PT_R1312); \
	  R15:14 = memd(R0 + #_PT_R1514); } \
	{ R17:16 = memd(R0 + #_PT_R1716); \
	  R19:18 = memd(R0 + #_PT_R1918); } \
	{ R21:20 = memd(R0 + #_PT_R2120); \
	  R23:22 = memd(R0 + #_PT_R2322); } \
	{ R25:24 = memd(R0 + #_PT_R2524); \
	  R27:26 = memd(R0 + #_PT_R2726); } \
	R31:30 = memd(R0 + #_PT_GPUGP); \
	{ R28 = memw(R0 + #_PT_R2928); \
	  ugp = R31; } \
	{ R31:30 = memd(R0 + #_PT_R3130); \
	  gp = R30; }
#else
/* V4+ */
#define restore_pt_regs() \
	{ memw(THREADINFO_REG + #_THREAD_INFO_PT_REGS) = R24; \
	  R15:14 = memd(R0 + #_PT_PREDSUSR); } \
	{ R11:10 = memd(R0 + #_PT_M1M0); \
	  R13:12 = memd(R0 + #_PT_LC1SA1); \
	  p3:0 = R15; } \
	{ R15:14 = memd(R0 + #_PT_LC0SA0); \
	  R3:2 = memd(R0 + #_PT_R0302); \
	  usr = R14; } \
	{ R5:4 = memd(R0 + #_PT_R0504); \
	  R7:6 = memd(R0 + #_PT_R0706); \
	  C7:6 = R11:10; }\
	{ R9:8 = memd(R0 + #_PT_R0908); \
	  R11:10 = memd(R0 + #_PT_R1110); \
	  C3:2 = R13:12; }\
	{ R13:12 = memd(R0 + #_PT_R1312); \
	  R15:14 = memd(R0 + #_PT_R1514); \
	  C1:0 = R15:14; }\
	{ R17:16 = memd(R0 + #_PT_R1716); \
	  R19:18 = memd(R0 + #_PT_R1918); } \
	{ R21:20 = memd(R0 + #_PT_R2120); \
	  R23:22 = memd(R0 + #_PT_R2322); } \
	{ R25:24 = memd(R0 + #_PT_R2524); \
	  R27:26 = memd(R0 + #_PT_R2726); } \
	R31:30 = memd(R0 + #_PT_CS1CS0); \
	{ C13:12 = R31:30; \
	  R31:30 = memd(R0 + #_PT_GPUGP) ; \
	  R28 = memw(R0 + #_PT_R2928); }\
	{ C11:10 = R31:30; \
	  R31:30 = memd(R0 + #_PT_R3130); }
#endif

/*
 * Clears off enough space for the rest of pt_regs; evrec is a part
 * of pt_regs in HVM mode.  Save R0/R1, set handler's address in R1.
 * R0 is the address of pt_regs and is the parameter to save_pt_regs.
 */

/*
 * Since the HVM isn't automagically pushing the EVREC onto the stack anymore,
 * we'll subtract the entire size out and then fill it in ourselves.
 * Need to save off R0, R1, R2, R3 immediately.
 */

#if CONFIG_HEXAGON_ARCH_VERSION < 4
#define	vm_event_entry(CHandler) \
	{ \
		R29 = add(R29, #-(_PT_REGS_SIZE)); \
		memd(R29 + #(_PT_R0100 + -_PT_REGS_SIZE)) = R1:0; \
	} \
	{ \
		memd(R29 +#_PT_R0302) = R3:2; \
	} \
	trap1(#HVM_TRAP1_VMGETREGS); \
	{ \
		memd(R29 + #_PT_ER_VMEL) = R1:0; \
		R0 = R29; \
		R1.L = #LO(CHandler); \
	} \
	{ \
		memd(R29 + #_PT_ER_VMPSP) = R3:2; \
		R1.H = #HI(CHandler); \
		jump event_dispatch; \
	}
#else
/* V4+ */
/* turn on I$ prefetch early */
/* the # ## # syntax inserts a literal ## */
#define	vm_event_entry(CHandler) \
	{ \
		R29 = add(R29, #-(_PT_REGS_SIZE)); \
		memd(R29 + #(_PT_R0100 + -_PT_REGS_SIZE)) = R1:0; \
		memd(R29 + #(_PT_R0302 + -_PT_REGS_SIZE)) = R3:2; \
		R0 = usr; \
	} \
	{ \
		memw(R29 + #_PT_PREDSUSR) = R0; \
		R0 = setbit(R0, #16); \
	} \
	usr = R0; \
	R1:0 = G1:0; \
	{ \
		memd(R29 + #_PT_ER_VMEL) = R1:0; \
		R1 = # ## #(CHandler); \
		R3:2 = G3:2; \
	} \
	{ \
		R0 = R29; \
		memd(R29 + #_PT_ER_VMPSP) = R3:2; \
		jump event_dispatch; \
	}
#endif

	.text
	/*
	 * Do bulk save/restore in one place.
	 * Adds a jump to dispatch latency, but
	 * saves hundreds of bytes.
	 */

event_dispatch:
	save_pt_regs()
	callr	r1

	/*
	 * Coming back from the C-world, our thread info pointer
	 * should be in the designated register (usually R19)
	 *
	 * If we were in kernel mode, we don't need to check scheduler
	 * or signals if CONFIG_PREEMPT is not set.  If set, then it has
	 * to jump to a need_resched kind of block.
	 * BTW, CONFIG_PREEMPT is not supported yet.
	 */

#ifdef CONFIG_PREEMPT
	R0 = #VM_INT_DISABLE
	trap1(#HVM_TRAP1_VMSETIE)
#endif

	/*  "Nested control path" -- if the previous mode was kernel  */
	{
		R0 = memw(R29 + #_PT_ER_VMEST);
		R26.L = #LO(do_work_pending);
	}
	{
		P0 = tstbit(R0, #HVM_VMEST_UM_SFT);
		if (!P0.new) jump:nt restore_all;
		R26.H = #HI(do_work_pending);
		R0 = #VM_INT_DISABLE;
	}

	/*
	 * Check also the return from fork/system call, normally coming back from
	 * user mode
	 *
	 * R26 needs to have do_work_pending, and R0 should have VM_INT_DISABLE
	 */

check_work_pending:
	/*  Disable interrupts while checking TIF  */
	trap1(#HVM_TRAP1_VMSETIE)
	{
		R0 = R29;  /*  regs should still be at top of stack  */
		R1 = memw(THREADINFO_REG + #_THREAD_INFO_FLAGS);
		callr R26;
	}

	{
		P0 = cmp.eq(R0, #0); if (!P0.new) jump:nt check_work_pending;
		R0 = #VM_INT_DISABLE;
	}

restore_all:
	/*
	 * Disable interrupts, if they weren't already, before reg restore.
	 * R0 gets preloaded with #VM_INT_DISABLE before we get here.
	 */
	trap1(#HVM_TRAP1_VMSETIE)

	/*  do the setregs here for VM 0.5  */
	/*  R29 here should already be pointing at pt_regs  */
	{
		R1:0 = memd(R29 + #_PT_ER_VMEL);
		R3:2 = memd(R29 + #_PT_ER_VMPSP);
	}
#if CONFIG_HEXAGON_ARCH_VERSION < 4
	trap1(#HVM_TRAP1_VMSETREGS);
#else
	G1:0 = R1:0;
	G3:2 = R3:2;
#endif

	R0 = R29
	restore_pt_regs()
	{
		R1:0 = memd(R29 + #_PT_R0100);
		R29 = add(R29, #_PT_REGS_SIZE);
	}
	trap1(#HVM_TRAP1_VMRTE)
	/* Notreached */


	.globl _K_enter_genex
_K_enter_genex:
	vm_event_entry(do_genex)

	.globl _K_enter_interrupt
_K_enter_interrupt:
	vm_event_entry(arch_do_IRQ)

	.globl _K_enter_trap0
_K_enter_trap0:
	vm_event_entry(do_trap0)

	.globl _K_enter_machcheck
_K_enter_machcheck:
	vm_event_entry(do_machcheck)

	.globl _K_enter_debug
_K_enter_debug:
	vm_event_entry(do_debug_exception)

	.globl ret_from_fork
ret_from_fork:
	{
		call schedule_tail
		R26.H = #HI(do_work_pending);
	}
	{
		P0 = cmp.eq(R24, #0);
		R26.L = #LO(do_work_pending);
		R0 = #VM_INT_DISABLE;
	}
	if (P0) jump check_work_pending
	{
		R0 = R25;
		callr R24
	}
	{
		jump check_work_pending
		R0 = #VM_INT_DISABLE;
	}