/*
 * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H

#include <asm-generic/export.h>

#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
	.macro	save_and_disable_daif, flags
	mrs	\flags, daif
	msr	daifset, #0xf
	.endm

	.macro	disable_daif
	msr	daifset, #0xf
	.endm

	.macro	enable_daif
	msr	daifclr, #0xf
	.endm

	.macro	restore_daif, flags:req
	msr	daif, \flags
	.endm
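/*
 * Usage sketch (illustrative, not part of the original header): bracket a
 * section that must run with all exceptions masked. The choice of x4 as
 * the flags register is arbitrary.
 *
 *	save_and_disable_daif x4	// x4 := old PSTATE.DAIF, D+A+I+F masked
 *	...				// code that must not be interrupted
 *	restore_daif x4			// restore the previous mask state
 */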
/* Only valid for the AArch64 PSTATE; PSR_D_BIT differs on AArch32. */
	.macro	inherit_daif, pstate:req, tmp:req
	and	\tmp, \pstate, #(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
	msr	daif, \tmp
	.endm
/* IRQ is the lowest priority flag; unconditionally unmask the rest. */
	.macro	enable_da_f
	msr	daifclr, #(8 | 4 | 1)
	.endm

/*
 * Enable and disable interrupts.
 */
	.macro	disable_irq
	msr	daifset, #2
	.endm

	.macro	enable_irq
	msr	daifclr, #2
	.endm

	.macro	save_and_disable_irq, flags
	mrs	\flags, daif
	msr	daifset, #2
	.endm

	.macro	restore_irq, flags
	msr	daif, \flags
	.endm

	.macro	enable_dbg
	msr	daifclr, #8
	.endm

	.macro	disable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	bic	\tmp, \tmp, #DBG_MDSCR_SS
	msr	mdscr_el1, \tmp
	isb	// Synchronise with enable_dbg
9990:
	.endm

	/* call with daif masked */
	.macro	enable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	orr	\tmp, \tmp, #DBG_MDSCR_SS
	msr	mdscr_el1, \tmp
9990:
	.endm
/*
 * SMP data memory barrier
 */
	.macro	smp_dmb, opt
	dmb	\opt
	.endm

/*
 * RAS Error Synchronization barrier
 */
	.macro	esb
	hint	#16
	.endm

/*
 * Value prediction barrier
 */
	.macro	csdb
	hint	#20
	.endm

/*
 * Speculation barrier
 */
	.macro	sb
alternative_if_not ARM64_HAS_SB
	dsb	nsh
	isb
alternative_else
	SB_BARRIER_INSN
	nop
alternative_endif
	.endm

/*
 * Sanitise a 64-bit bounded index wrt speculation, returning zero if out
 * of bounds.
 */
	.macro	mask_nospec64, idx, limit, tmp
	sub	\tmp, \idx, \limit
	bic	\tmp, \tmp, \idx
	and	\idx, \idx, \tmp, asr #63
	csdb
	.endm
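/*
 * Usage sketch (illustrative, not from the original header): clamp an
 * untrusted index in x0 against a limit in x1 (x2 is scratch) after the
 * architectural bounds check, so that a mispredicted branch cannot use
 * an out-of-range index:
 *
 *	cmp	x0, x1
 *	b.hs	2f			// architecturally out of bounds
 *	mask_nospec64 x0, x1, x2	// x0 forced to 0 if x0 >= x1
 *	ldr	x3, [x4, x0, lsl #3]	// speculation-safe table access
 * 2:
 */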
/*
 * NOP sequence
 */
	.macro	nops, num
	.rept	\num
	nop
	.endr
	.endm

/*
 * Emit an entry into the exception table
 */
	.macro		_asm_extable, from, to
	.pushsection	__ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
	.endm

#define USER(l, x...)				\
9999:	x;					\
	_asm_extable	9999b, l
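/*
 * Usage sketch (illustrative, not from the original header): a fixup
 * label passed through USER() becomes the landing pad if the wrapped
 * access faults, via the exception table entry emitted above.
 *
 *	USER(9f, ldtr x2, [x1])		// unprivileged load from user address
 *	...				// success path
 * 9:	mov	x0, #-14		// -EFAULT on fault
 */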
/*
 * Register aliases.
 */
lr	.req	x30		// link register

/*
 * Vector entry
 */
	.macro	ventry	label
	.align	7
	b	\label
	.endm

/*
 * Select code when configured for BE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_BE(code...) code
#else
#define CPU_BE(code...)
#endif

/*
 * Select code when configured for LE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_LE(code...)
#else
#define CPU_LE(code...) code
#endif

/*
 * Define a macro that constructs a 64-bit value by concatenating two
 * 32-bit registers. Note that on big endian systems the order of the
 * registers is swapped.
 */
#ifndef CONFIG_CPU_BIG_ENDIAN
	.macro	regs_to_64, rd, lbits, hbits
#else
	.macro	regs_to_64, rd, hbits, lbits
#endif
	orr	\rd, \lbits, \hbits, lsl #32
	.endm
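/*
 * Usage sketch (illustrative, not from the original header): w0 holds the
 * low half and w1 the high half of a 64-bit value; on a little-endian
 * kernel this assembles to "orr x0, x0, x1, lsl #32", while on big-endian
 * the swapped parameter order declared above makes the same call site
 * combine the halves the other way round.
 *
 *	regs_to_64 x0, x0, x1
 */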
/*
 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
 * <symbol> is within the range +/- 4 GB of the PC.
 */
/*
 * @dst: destination register (64 bit wide)
 * @sym: name of the symbol
 */
	.macro	adr_l, dst, sym
	adrp	\dst, \sym
	add	\dst, \dst, :lo12:\sym
	.endm

/*
 * @dst: destination register (32 or 64 bit wide)
 * @sym: name of the symbol
 * @tmp: optional 64-bit scratch register to be used if <dst> is a
 *       32-bit wide register, in which case it cannot be used to hold
 *       the address
 */
	.macro	ldr_l, dst, sym, tmp=
	.ifb	\tmp
	adrp	\dst, \sym
	ldr	\dst, [\dst, :lo12:\sym]
	.else
	adrp	\tmp, \sym
	ldr	\dst, [\tmp, :lo12:\sym]
	.endif
	.endm

/*
 * @src: source register (32 or 64 bit wide)
 * @sym: name of the symbol
 * @tmp: mandatory 64-bit scratch register to calculate the address
 *       while <src> needs to be preserved.
 */
	.macro	str_l, src, sym, tmp
	adrp	\tmp, \sym
	str	\src, [\tmp, :lo12:\sym]
	.endm
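/*
 * Usage sketch (illustrative; memstart_addr is just an example symbol):
 * these expand to adrp/add, adrp/ldr and adrp/str pairs, so any symbol
 * within +/- 4 GB of the PC can be reached without a literal pool.
 *
 *	adr_l	x0, memstart_addr	// x0 := &memstart_addr
 *	ldr_l	x1, memstart_addr	// x1 := memstart_addr
 *	str_l	x1, memstart_addr, x2	// memstart_addr := x1, clobbers x2
 */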
/*
 * @dst: Result of per_cpu(sym, smp_processor_id()) (can be SP)
 * @sym: The name of the per-cpu variable
 * @tmp: scratch register
 */
	.macro	adr_this_cpu, dst, sym, tmp
	adrp	\tmp, \sym
	add	\dst, \tmp, #:lo12:\sym
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\tmp, tpidr_el1
alternative_else
	mrs	\tmp, tpidr_el2
alternative_endif
	add	\dst, \dst, \tmp
	.endm

/*
 * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
 * @sym: The name of the per-cpu variable
 * @tmp: scratch register
 */
	.macro	ldr_this_cpu dst, sym, tmp
	adr_l	\dst, \sym
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\tmp, tpidr_el1
alternative_else
	mrs	\tmp, tpidr_el2
alternative_endif
	ldr	\dst, [\dst, \tmp]
	.endm
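/*
 * Usage sketch (illustrative; irq_stack_ptr is just an example per-CPU
 * symbol): both macros resolve the per-CPU offset via TPIDR_EL1 (or
 * TPIDR_EL2 when running at EL2 with VHE).
 *
 *	adr_this_cpu x0, irq_stack_ptr, x1	// x0 := this CPU's &irq_stack_ptr
 *	ldr_this_cpu x2, irq_stack_ptr, x3	// x2 := this CPU's irq_stack_ptr
 */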
/*
 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
 */
	.macro	vma_vm_mm, rd, rn
	ldr	\rd, [\rn, #VMA_VM_MM]
	.endm

/*
 * mmid - get context id from mm pointer (mm->context.id)
 */
	.macro	mmid, rd, rn
	ldr	\rd, [\rn, #MM_CONTEXT_ID]
	.endm

/*
 * read_ctr - read CTR_EL0. If the system has mismatched register fields,
 * provide the system wide safe value from arm64_ftr_reg_ctrel0.sys_val
 */
	.macro	read_ctr, reg
alternative_if_not ARM64_MISMATCHED_CACHE_TYPE
	mrs	\reg, ctr_el0		// read CTR
	nop
alternative_else
	ldr_l	\reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
alternative_endif
	.endm

/*
 * raw_dcache_line_size - get the minimum D-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_dcache_line_size, reg, tmp
	mrs	\tmp, ctr_el0		// read CTR
	ubfm	\tmp, \tmp, #16, #19	// cache line size encoding
	mov	\reg, #4		// bytes per word
	lsl	\reg, \reg, \tmp	// actual cache line size
	.endm

/*
 * dcache_line_size - get the safe D-cache line size across all CPUs
 */
	.macro	dcache_line_size, reg, tmp
	read_ctr	\tmp
	ubfm	\tmp, \tmp, #16, #19	// cache line size encoding
	mov	\reg, #4		// bytes per word
	lsl	\reg, \reg, \tmp	// actual cache line size
	.endm
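/*
 * Worked example (illustrative): CTR_EL0.DminLine (bits [19:16]) encodes
 * log2 of the line size in 4-byte words, so a field value of 4 yields
 * 4 << 4 = 64 bytes, which is what the mov #4 / lsl pair above computes.
 *
 *	dcache_line_size x2, x3		// x2 := safe line size, x3 clobbered
 */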
/*
 * raw_icache_line_size - get the minimum I-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_icache_line_size, reg, tmp
	mrs	\tmp, ctr_el0		// read CTR
	and	\tmp, \tmp, #0xf	// cache line size encoding
	mov	\reg, #4		// bytes per word
	lsl	\reg, \reg, \tmp	// actual cache line size
	.endm

/*
 * icache_line_size - get the safe I-cache line size across all CPUs
 */
	.macro	icache_line_size, reg, tmp
	read_ctr	\tmp
	and	\tmp, \tmp, #0xf	// cache line size encoding
	mov	\reg, #4		// bytes per word
	lsl	\reg, \reg, \tmp	// actual cache line size
	.endm
/*
 * tcr_set_t0sz - update TCR.T0SZ so that we can load the ID map
 */
	.macro	tcr_set_t0sz, valreg, t0sz
	bfi	\valreg, \t0sz, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
	.endm

/*
 * tcr_compute_pa_size - set TCR.(I)PS to the highest supported
 * ID_AA64MMFR0_EL1.PARange value
 *
 *	tcr:		register with the TCR_ELx value to be updated
 *	pos:		IPS or PS bitfield position
 *	tmp{0,1}:	temporary registers
 */
	.macro	tcr_compute_pa_size, tcr, pos, tmp0, tmp1
	mrs	\tmp0, ID_AA64MMFR0_EL1
	// Narrow PARange to fit the PS field in TCR_ELx
	ubfx	\tmp0, \tmp0, #ID_AA64MMFR0_PARANGE_SHIFT, #3
	mov	\tmp1, #ID_AA64MMFR0_PARANGE_MAX
	cmp	\tmp0, \tmp1
	csel	\tmp0, \tmp1, \tmp0, hi
	bfi	\tcr, \tmp0, \pos, #3
	.endm
/*
 * Macro to perform a data cache maintenance for the interval
 * [kaddr, kaddr + size)
 *
 *	op:		operation passed to dc instruction
 *	domain:		domain used in dsb instruction
 *	kaddr:		starting virtual address of the region
 *	size:		size of the region
 *	Corrupts:	kaddr, size, tmp1, tmp2
 */
	.macro __dcache_op_workaround_clean_cache, op, kaddr
alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
	dc	\op, \kaddr
alternative_else
	dc	civac, \kaddr
alternative_endif
	.endm

	.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
	dcache_line_size \tmp1, \tmp2
	add	\size, \kaddr, \size
	sub	\tmp2, \tmp1, #1
	bic	\kaddr, \kaddr, \tmp2
9998:
	.ifc	\op, cvau
	__dcache_op_workaround_clean_cache \op, \kaddr
	.else
	.ifc	\op, cvac
	__dcache_op_workaround_clean_cache \op, \kaddr
	.else
	.ifc	\op, cvap
	sys	3, c7, c12, 1, \kaddr	// dc cvap
	.else
	dc	\op, \kaddr
	.endif
	.endif
	.endif
	add	\kaddr, \kaddr, \tmp1
	cmp	\kaddr, \size
	b.lo	9998b
	dsb	\domain
	.endm
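/*
 * Usage sketch (illustrative, not from the original header): clean a
 * buffer at x0 of length x1 to the point of coherency. Note the macro
 * corrupts kaddr and size, so copy them first if they are still needed.
 *
 *	mov	x2, x0
 *	mov	x3, x1
 *	dcache_by_line_op cvac, sy, x2, x3, x4, x5
 */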
/*
 * Macro to perform an instruction cache maintenance for the interval
 * [start, end)
 *
 *	start, end:	virtual addresses describing the region
 *	label:		A label to branch to on user fault.
 *	Corrupts:	tmp1, tmp2
 */
	.macro invalidate_icache_by_line start, end, tmp1, tmp2, label
	icache_line_size \tmp1, \tmp2
	sub	\tmp2, \tmp1, #1
	bic	\tmp2, \start, \tmp2
9997:
USER(\label, ic	ivau, \tmp2)		// invalidate I line PoU
	add	\tmp2, \tmp2, \tmp1
	cmp	\tmp2, \end
	b.lo	9997b
	dsb	ish
	isb
	.endm
/*
 * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
 */
	.macro	reset_pmuserenr_el0, tmpreg
	mrs	\tmpreg, id_aa64dfr0_el1	// Check ID_AA64DFR0_EL1 PMUVer
	sbfx	\tmpreg, \tmpreg, #8, #4
	cmp	\tmpreg, #1			// Skip if no PMU present
	b.lt	9000f
	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
9000:
	.endm
/*
 * copy_page - copy src to dest using temp registers t1-t8
 */
	.macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
9998:	ldp	\t1, \t2, [\src]
	ldp	\t3, \t4, [\src, #16]
	ldp	\t5, \t6, [\src, #32]
	ldp	\t7, \t8, [\src, #48]
	add	\src, \src, #64
	stnp	\t1, \t2, [\dest]
	stnp	\t3, \t4, [\dest, #16]
	stnp	\t5, \t6, [\dest, #32]
	stnp	\t7, \t8, [\dest, #48]
	add	\dest, \dest, #64
	tst	\src, #(PAGE_SIZE - 1)
	b.ne	9998b
	.endm
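/*
 * Usage sketch (illustrative): copy the page at x1 to x0. Both pointers
 * must be page aligned; the macro advances them past the page and
 * clobbers all eight temporaries.
 *
 *	copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
 */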
/*
 * Annotate a function as position independent, i.e., safe to be called before
 * the kernel virtual mapping is activated.
 */
#define ENDPIPROC(x)			\
	.globl	__pi_##x;		\
	.type	__pi_##x, %function;	\
	.set	__pi_##x, x;		\
	.size	__pi_##x, . - x;	\
	ENDPROC(x)

/*
 * Annotate a function as being unsuitable for kprobes.
 */
#ifdef CONFIG_KPROBES
#define NOKPROBE(x)				\
	.pushsection "_kprobe_blacklist", "aw";	\
	.quad	x;				\
	.popsection;
#else
#define NOKPROBE(x)
#endif

#ifdef CONFIG_KASAN
#define EXPORT_SYMBOL_NOKASAN(name)
#else
#define EXPORT_SYMBOL_NOKASAN(name)	EXPORT_SYMBOL(name)
#endif
/*
 * Emit a 64-bit absolute little endian symbol reference in a way that
 * ensures that it will be resolved at build time, even when building a
 * PIE binary. This requires cooperation from the linker script, which
 * must emit the lo32/hi32 halves individually.
 */
	.macro	le64sym, sym
	.long	\sym\()_lo32
	.long	\sym\()_hi32
	.endm
/*
 * mov_q - move an immediate constant into a 64-bit register using
 *         between 2 and 4 movz/movk instructions (depending on the
 *         magnitude and sign of the operand)
 */
	.macro	mov_q, reg, val
	.if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
	movz	\reg, :abs_g1_s:\val
	.else
	.if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
	movz	\reg, :abs_g2_s:\val
	.else
	movz	\reg, :abs_g3:\val
	movk	\reg, :abs_g2_nc:\val
	.endif
	movk	\reg, :abs_g1_nc:\val
	.endif
	movk	\reg, :abs_g0_nc:\val
	.endm
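/*
 * Usage sketch (illustrative): materialise a 64-bit constant without a
 * literal-pool load. For this value every 16-bit chunk is non-zero and it
 * is not a sign-extended 32- or 48-bit quantity, so mov_q expands to the
 * full movz/movk/movk/movk sequence; smaller constants get 2 or 3
 * instructions.
 *
 *	mov_q	x0, 0xdead4ead55aa55aa
 */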
/*
 * Return the current thread_info.
 */
	.macro	get_thread_info, rd
	mrs	\rd, sp_el0
	.endm
/*
 * Offset ttbr1 to allow for 48-bit kernel VAs set with 52-bit PTRS_PER_PGD.
 * orr is used as it can cover the immediate value (and is idempotent).
 * In future this may be nop'ed out when dealing with 52-bit kernel VAs.
 *	ttbr: Value of ttbr to set, modified.
 */
	.macro	offset_ttbr1, ttbr
#ifdef CONFIG_ARM64_USER_VA_BITS_52
	orr	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
#endif
	.endm

/*
 * Perform the reverse of offset_ttbr1.
 * bic is used as it can cover the immediate value and, in future, won't need
 * to be nop'ed out when dealing with 52-bit kernel VAs.
 */
	.macro	restore_ttbr1, ttbr
#ifdef CONFIG_ARM64_USER_VA_BITS_52
	bic	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
#endif
	.endm
/*
 * Arrange a physical address in a TTBR register, taking care of 52-bit
 * addresses.
 *
 *	phys:	physical address, preserved
 *	ttbr:	returns the TTBR value
 */
	.macro	phys_to_ttbr, ttbr, phys
#ifdef CONFIG_ARM64_PA_BITS_52
	orr	\ttbr, \phys, \phys, lsr #46
	and	\ttbr, \ttbr, #TTBR_BADDR_MASK_52
#else
	mov	\ttbr, \phys
#endif
	.endm

	.macro	phys_to_pte, pte, phys
#ifdef CONFIG_ARM64_PA_BITS_52
	/*
	 * We assume \phys is 64K aligned and this is guaranteed by only
	 * supporting this configuration with 64K pages.
	 */
	orr	\pte, \phys, \phys, lsr #36
	and	\pte, \pte, #PTE_ADDR_MASK
#else
	mov	\pte, \phys
#endif
	.endm

	.macro	pte_to_phys, phys, pte
#ifdef CONFIG_ARM64_PA_BITS_52
	ubfiz	\phys, \pte, #(48 - 16 - 12), #16
	bfxil	\phys, \pte, #16, #32
	lsl	\phys, \phys, #16
#else
	and	\phys, \pte, #PTE_ADDR_MASK
#endif
	.endm
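/*
 * Worked example (illustrative) for the 52-bit case above: with 64K pages,
 * a PTE carries PA[47:16] in bits [47:16] and PA[51:48] in bits [15:12].
 * pte_to_phys reassembles the address as follows:
 *
 *	ubfiz	\phys, \pte, #20, #16	// phys := pte[15:0] << 20
 *	bfxil	\phys, \pte, #16, #32	// phys[31:0] := pte[47:16]
 *	lsl	\phys, \phys, #16	// phys := PA, 64K aligned
 */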
/*
 * Errata workaround prior to disabling the MMU. Insert an ISB immediately
 * prior to executing the MSR that will change SCTLR_ELn[M] from a value
 * of 1 to 0.
 */
	.macro	pre_disable_mmu_workaround
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
	isb
#endif
	.endm
/*
 * frame_push - Push @regcount callee saved registers to the stack,
 *              starting at x19, as well as x29/x30, and set x29 to
 *              the new value of sp. Add @extra bytes of stack space
 *              for locals.
 */
	.macro		frame_push, regcount:req, extra
	__frame		st, \regcount, \extra
	.endm

/*
 * frame_pop  - Pop the callee saved registers from the stack that were
 *              pushed in the most recent call to frame_push, as well
 *              as x29/x30 and any extra stack space that may have been
 *              allocated.
 */
	.macro		frame_pop
	__frame		ld
	.endm

	.macro		__frame_regs, reg1, reg2, op, num
	.if		.Lframe_regcount == \num
	\op\()r		\reg1, [sp, #(\num + 1) * 8]
	.elseif		.Lframe_regcount > \num
	\op\()p		\reg1, \reg2, [sp, #(\num + 1) * 8]
	.endif
	.endm

	.macro		__frame, op, regcount, extra=0
	.ifc		\op, st
	.if		(\regcount) < 0 || (\regcount) > 10
	.error		"regcount should be in the range [0 ... 10]"
	.endif
	.if		((\extra) % 16) != 0
	.error		"extra should be a multiple of 16 bytes"
	.endif
	.ifdef		.Lframe_regcount
	.if		.Lframe_regcount != -1
	.error		"frame_push/frame_pop may not be nested"
	.endif
	.endif
	.set		.Lframe_regcount, \regcount
	.set		.Lframe_extra, \extra
	.set		.Lframe_local_offset, ((\regcount + 3) / 2) * 16
	stp		x29, x30, [sp, #-.Lframe_local_offset - .Lframe_extra]!
	mov		x29, sp
	.endif

	__frame_regs	x19, x20, \op, 1
	__frame_regs	x21, x22, \op, 3
	__frame_regs	x23, x24, \op, 5
	__frame_regs	x25, x26, \op, 7
	__frame_regs	x27, x28, \op, 9

	.ifc		\op, ld
	.if		.Lframe_regcount == -1
	.error		"frame_push/frame_pop may not be nested"
	.endif
	ldp		x29, x30, [sp], #.Lframe_local_offset + .Lframe_extra
	.set		.Lframe_regcount, -1
	.endif
	.endm
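/*
 * Usage sketch (illustrative; example_func is a hypothetical routine):
 * preserve x19-x21 and reserve 16 bytes of locals across the body.
 *
 * ENTRY(example_func)
 *	frame_push	3, 16		// saves x19-x21 + x29/x30, x29 := sp
 *	...
 *	frame_pop			// restores registers and stack space
 *	ret
 * ENDPROC(example_func)
 */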
/*
 * Check whether to yield to another runnable task from kernel mode NEON code
 * (which runs with preemption disabled).
 *
 * if_will_cond_yield_neon
 *        // pre-yield patchup code
 * do_cond_yield_neon
 *        // post-yield patchup code
 * endif_yield_neon    <label>
 *
 * where <label> is optional, and marks the point where execution will resume
 * after a yield has been performed. If omitted, execution resumes right after
 * the endif_yield_neon invocation. Note that the entire sequence, including
 * the provided patchup code, will be omitted from the image if CONFIG_PREEMPT
 * is not defined.
 *
 * As a convenience, in the case where no patchup code is required, the above
 * sequence may be abbreviated to
 *
 * cond_yield_neon <label>
 *
 * Note that the patchup code does not support assembler directives that change
 * the output section; any use of such directives is undefined.
 *
 * The yield itself consists of the following:
 * - Check whether the preempt count is exactly 1, in which case disabling
 *   preemption once will make the task preemptible. If this is not the case,
 *   yielding is pointless.
 * - Check whether TIF_NEED_RESCHED is set, and if so, disable and re-enable
 *   kernel mode NEON (which will trigger a reschedule), and branch to the
 *   yield fixup code.
 *
 * This macro sequence may clobber all CPU state that is not guaranteed by the
 * AAPCS to be preserved across an ordinary function call.
 */

	.macro		cond_yield_neon, lbl
	if_will_cond_yield_neon
	do_cond_yield_neon
	endif_yield_neon	\lbl
	.endm

	.macro		if_will_cond_yield_neon
#ifdef CONFIG_PREEMPT
	get_thread_info	x0
	ldr		x0, [x0, #TSK_TI_PREEMPT]
	sub		x0, x0, #PREEMPT_DISABLE_OFFSET
	cbz		x0, .Lyield_\@
	/* fall through to endif_yield_neon */
	.subsection	1
.Lyield_\@ :
#else
	.section	".discard.cond_yield_neon", "ax"
#endif
	.endm

	.macro		do_cond_yield_neon
	bl		kernel_neon_end
	bl		kernel_neon_begin
	.endm

	.macro		endif_yield_neon, lbl
	.ifnb		\lbl
	b		\lbl
	.else
	b		.Lyield_out_\@
	.endif
	.previous
.Lyield_out_\@ :
	.endm
#endif	/* __ASM_ASSEMBLER_H */