mirror_ubuntu-bionic-kernel.git: arch/arm64/include/asm/assembler.h
Commit: arm64: Add work around for Arm Cortex-A55 Erratum 1024718

/*
 * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H

#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>

	.macro	save_and_disable_daif, flags
	mrs	\flags, daif
	msr	daifset, #0xf
	.endm

	.macro	disable_daif
	msr	daifset, #0xf
	.endm

	.macro	enable_daif
	msr	daifclr, #0xf
	.endm

	.macro	restore_daif, flags:req
	msr	daif, \flags
	.endm

	/* Only on aarch64 pstate, PSR_D_BIT is different for aarch32 */
	.macro	inherit_daif, pstate:req, tmp:req
	and	\tmp, \pstate, #(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
	msr	daif, \tmp
	.endm

	/* IRQ is the lowest priority flag, unconditionally unmask the rest. */
	.macro	enable_da_f
	msr	daifclr, #(8 | 4 | 1)
	.endm

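	/*
	 * Illustrative usage (not part of the original file; the register
	 * choice is arbitrary): a critical section that masks all DAIF
	 * exceptions and restores the previous state afterwards:
	 *
	 *	save_and_disable_daif	x23
	 *	// ... code that must run with D, A, I and F masked ...
	 *	restore_daif	x23
	 */
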
/*
 * Enable and disable interrupts.
 */
	.macro	disable_irq
	msr	daifset, #2
	.endm

	.macro	enable_irq
	msr	daifclr, #2
	.endm

	.macro	save_and_disable_irq, flags
	mrs	\flags, daif
	msr	daifset, #2
	.endm

	.macro	restore_irq, flags
	msr	daif, \flags
	.endm

	.macro	enable_dbg
	msr	daifclr, #8
	.endm

	.macro	disable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	bic	\tmp, \tmp, #DBG_MDSCR_SS
	msr	mdscr_el1, \tmp
	isb	// Synchronise with enable_dbg
9990:
	.endm

	/* call with daif masked */
	.macro	enable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	orr	\tmp, \tmp, #DBG_MDSCR_SS
	msr	mdscr_el1, \tmp
9990:
	.endm

/*
 * SMP data memory barrier
 */
	.macro	smp_dmb, opt
	dmb	\opt
	.endm

/*
 * RAS Error Synchronization barrier
 */
	.macro	esb
	hint	#16
	.endm

/*
 * Value prediction barrier
 */
	.macro	csdb
	hint	#20
	.endm

/*
 * Sanitise a 64-bit bounded index wrt speculation, returning zero if out
 * of bounds.
 */
	.macro	mask_nospec64, idx, limit, tmp
	sub	\tmp, \idx, \limit
	bic	\tmp, \tmp, \idx
	and	\idx, \idx, \tmp, asr #63
	csdb
	.endm

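/*
 * Explanatory note (not in the original file): \idx - \limit has its top
 * bit set only when \idx is below \limit, i.e. in bounds; the "bic" clears
 * that top bit again if \idx itself has its top bit set, and "asr #63"
 * turns the result into an all-ones or all-zeroes mask, so the final "and"
 * either preserves \idx or forces it to zero before the csdb barrier.
 */
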
/*
 * NOP sequence
 */
	.macro	nops, num
	.rept	\num
	nop
	.endr
	.endm

/*
 * Emit an entry into the exception table
 */
	.macro		_asm_extable, from, to
	.pushsection	__ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
	.endm

#define USER(l, x...)				\
9999:	x;					\
	_asm_extable	9999b, l

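/*
 * Illustrative usage (not part of the original file): USER() wraps a
 * single user-space access so that a fault is redirected, via the
 * exception table entry, to the supplied fixup label, e.g.
 *
 *	USER(9f, strb	wzr, [x0])	// on fault, resume at local label 9
 */
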
/*
 * Register aliases.
 */
lr	.req	x30		// link register

/*
 * Vector entry
 */
	.macro	ventry	label
	.align	7
	b	\label
	.endm

/*
 * Select code when configured for BE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_BE(code...) code
#else
#define CPU_BE(code...)
#endif

/*
 * Select code when configured for LE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_LE(code...)
#else
#define CPU_LE(code...) code
#endif

/*
 * Define a macro that constructs a 64-bit value by concatenating two
 * 32-bit registers. Note that on big endian systems the order of the
 * registers is swapped.
 */
#ifndef CONFIG_CPU_BIG_ENDIAN
	.macro	regs_to_64, rd, lbits, hbits
#else
	.macro	regs_to_64, rd, hbits, lbits
#endif
	orr	\rd, \lbits, \hbits, lsl #32
	.endm

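/*
 * Illustrative usage (not part of the original file; register choice is
 * arbitrary): building one 64-bit value from two 32-bit register halves,
 * with the low/high order of the sources handled by the endianness
 * selection above:
 *
 *	regs_to_64	x0, x2, x3
 */
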
/*
 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
 * <symbol> is within the range +/- 4 GB of the PC when running
 * in core kernel context. In module context, a movz/movk sequence
 * is used, since modules may be loaded far away from the kernel
 * when KASLR is in effect.
 */
	/*
	 * @dst: destination register (64 bit wide)
	 * @sym: name of the symbol
	 */
	.macro	adr_l, dst, sym
#ifndef MODULE
	adrp	\dst, \sym
	add	\dst, \dst, :lo12:\sym
#else
	movz	\dst, #:abs_g3:\sym
	movk	\dst, #:abs_g2_nc:\sym
	movk	\dst, #:abs_g1_nc:\sym
	movk	\dst, #:abs_g0_nc:\sym
#endif
	.endm

	/*
	 * @dst: destination register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: optional 64-bit scratch register to be used if <dst> is a
	 *       32-bit wide register, in which case it cannot be used to hold
	 *       the address
	 */
	.macro	ldr_l, dst, sym, tmp=
#ifndef MODULE
	.ifb	\tmp
	adrp	\dst, \sym
	ldr	\dst, [\dst, :lo12:\sym]
	.else
	adrp	\tmp, \sym
	ldr	\dst, [\tmp, :lo12:\sym]
	.endif
#else
	.ifb	\tmp
	adr_l	\dst, \sym
	ldr	\dst, [\dst]
	.else
	adr_l	\tmp, \sym
	ldr	\dst, [\tmp]
	.endif
#endif
	.endm

	/*
	 * @src: source register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: mandatory 64-bit scratch register to calculate the address
	 *       while <src> needs to be preserved.
	 */
	.macro	str_l, src, sym, tmp
#ifndef MODULE
	adrp	\tmp, \sym
	str	\src, [\tmp, :lo12:\sym]
#else
	adr_l	\tmp, \sym
	str	\src, [\tmp]
#endif
	.endm

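	/*
	 * Illustrative usage (not part of the original file; "some_var" is a
	 * hypothetical symbol): PC-relative load and store of a kernel
	 * variable without absolute addressing:
	 *
	 *	ldr_l	x0, some_var		// x0 = some_var
	 *	str_l	x0, some_var, x1	// some_var = x0, x1 is scratch
	 */
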
	/*
	 * @dst: Result of per_cpu(sym, smp_processor_id()), can be SP for
	 *	 non-module code
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro	adr_this_cpu, dst, sym, tmp
#ifndef MODULE
	adrp	\tmp, \sym
	add	\dst, \tmp, #:lo12:\sym
#else
	adr_l	\dst, \sym
#endif
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\tmp, tpidr_el1
alternative_else
	mrs	\tmp, tpidr_el2
alternative_endif
	add	\dst, \dst, \tmp
	.endm

	/*
	 * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro	ldr_this_cpu dst, sym, tmp
	adr_l	\dst, \sym
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\tmp, tpidr_el1
alternative_else
	mrs	\tmp, tpidr_el2
alternative_endif
	ldr	\dst, [\dst, \tmp]
	.endm

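	/*
	 * Illustrative usage (not part of the original file; the symbol name
	 * is only an example): reading the current CPU's copy of a per-CPU
	 * variable into a register:
	 *
	 *	ldr_this_cpu	x25, irq_stack_ptr, x26
	 */
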
/*
 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
 */
	.macro	vma_vm_mm, rd, rn
	ldr	\rd, [\rn, #VMA_VM_MM]
	.endm

/*
 * mmid - get context id from mm pointer (mm->context.id)
 */
	.macro	mmid, rd, rn
	ldr	\rd, [\rn, #MM_CONTEXT_ID]
	.endm
/*
 * read_ctr - read CTR_EL0. If the system has mismatched
 * cache line sizes, provide the system wide safe value
 * from arm64_ftr_reg_ctrel0.sys_val
 */
	.macro	read_ctr, reg
alternative_if_not ARM64_MISMATCHED_CACHE_LINE_SIZE
	mrs	\reg, ctr_el0			// read CTR
	nop
alternative_else
	ldr_l	\reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
alternative_endif
	.endm

/*
 * raw_dcache_line_size - get the minimum D-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_dcache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * dcache_line_size - get the safe D-cache line size across all CPUs
 */
	.macro	dcache_line_size, reg, tmp
	read_ctr	\tmp
	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * raw_icache_line_size - get the minimum I-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_icache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	and	\tmp, \tmp, #0xf		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * icache_line_size - get the safe I-cache line size across all CPUs
 */
	.macro	icache_line_size, reg, tmp
	read_ctr	\tmp
	and	\tmp, \tmp, #0xf		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

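/*
 * Explanatory note (not in the original file): CTR_EL0.DminLine (bits
 * [19:16]) and CTR_EL0.IminLine (bits [3:0]) encode log2 of the line size
 * in words, which is why the macros above compute the size in bytes as
 * 4 << field, i.e. "mov #4" followed by "lsl".
 */
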
/*
 * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map
 */
	.macro	tcr_set_idmap_t0sz, valreg, tmpreg
#ifndef CONFIG_ARM64_VA_BITS_48
	ldr_l	\tmpreg, idmap_t0sz
	bfi	\valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
#endif
	.endm

/*
 * Macro to perform data cache maintenance on the interval
 * [kaddr, kaddr + size)
 *
 *	op:		operation passed to the dc instruction
 *	domain:		domain used in the dsb instruction
 *	kaddr:		starting virtual address of the region
 *	size:		size of the region
 *	Corrupts:	kaddr, size, tmp1, tmp2
 */
	.macro	dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
	dcache_line_size \tmp1, \tmp2
	add	\size, \kaddr, \size
	sub	\tmp2, \tmp1, #1
	bic	\kaddr, \kaddr, \tmp2
9998:
	.if	(\op == cvau || \op == cvac)
alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
	dc	\op, \kaddr
alternative_else
	dc	civac, \kaddr
alternative_endif
	.elseif	(\op == cvap)
alternative_if ARM64_HAS_DCPOP
	sys	3, c7, c12, 1, \kaddr	// dc cvap
alternative_else
	dc	cvac, \kaddr
alternative_endif
	.else
	dc	\op, \kaddr
	.endif
	add	\kaddr, \kaddr, \tmp1
	cmp	\kaddr, \size
	b.lo	9998b
	dsb	\domain
	.endm

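/*
 * Illustrative usage (not part of the original file; register choice is
 * arbitrary): cleaning a buffer of \size bytes starting at \kaddr to the
 * point of coherency:
 *
 *	dcache_by_line_op cvac, sy, x0, x1, x2, x3
 */
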
/*
 * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
 */
	.macro	reset_pmuserenr_el0, tmpreg
	mrs	\tmpreg, id_aa64dfr0_el1	// Check ID_AA64DFR0_EL1 PMUVer
	sbfx	\tmpreg, \tmpreg, #8, #4
	cmp	\tmpreg, #1			// Skip if no PMU present
	b.lt	9000f
	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
9000:
	.endm

/*
 * copy_page - copy src to dest using temp registers t1-t8
 */
	.macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
9998:	ldp	\t1, \t2, [\src]
	ldp	\t3, \t4, [\src, #16]
	ldp	\t5, \t6, [\src, #32]
	ldp	\t7, \t8, [\src, #48]
	add	\src, \src, #64
	stnp	\t1, \t2, [\dest]
	stnp	\t3, \t4, [\dest, #16]
	stnp	\t5, \t6, [\dest, #32]
	stnp	\t7, \t8, [\dest, #48]
	add	\dest, \dest, #64
	tst	\src, #(PAGE_SIZE - 1)
	b.ne	9998b
	.endm

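/*
 * Explanatory note (not in the original file): the stnp (store pair,
 * non-temporal) instructions hint that the destination is unlikely to be
 * read back soon, which helps avoid polluting the cache while the loop
 * copies the page 64 bytes per iteration.
 */
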
/*
 * Annotate a function as position independent, i.e., safe to be called before
 * the kernel virtual mapping is activated.
 */
#define ENDPIPROC(x)			\
	.globl	__pi_##x;		\
	.type	__pi_##x, %function;	\
	.set	__pi_##x, x;		\
	.size	__pi_##x, . - x;	\
	ENDPROC(x)

/*
 * Annotate a function as being unsuitable for kprobes.
 */
#ifdef CONFIG_KPROBES
#define NOKPROBE(x)				\
	.pushsection "_kprobe_blacklist", "aw";	\
	.quad	x;				\
	.popsection;
#else
#define NOKPROBE(x)
#endif
	/*
	 * Emit a 64-bit absolute little endian symbol reference in a way that
	 * ensures that it will be resolved at build time, even when building a
	 * PIE binary. This requires cooperation from the linker script, which
	 * must emit the lo32/hi32 halves individually.
	 */
	.macro	le64sym, sym
	.long	\sym\()_lo32
	.long	\sym\()_hi32
	.endm

	/*
	 * mov_q - move an immediate constant into a 64-bit register using
	 *         between 2 and 4 movz/movk instructions (depending on the
	 *         magnitude and sign of the operand)
	 */
	.macro	mov_q, reg, val
	.if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
	movz	\reg, :abs_g1_s:\val
	.else
	.if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
	movz	\reg, :abs_g2_s:\val
	.else
	movz	\reg, :abs_g3:\val
	movk	\reg, :abs_g2_nc:\val
	.endif
	movk	\reg, :abs_g1_nc:\val
	.endif
	movk	\reg, :abs_g0_nc:\val
	.endm

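	/*
	 * Illustrative usage (not part of the original file): loading a
	 * 64-bit constant; the macro expands to between 2 and 4 movz/movk
	 * instructions depending on the value:
	 *
	 *	mov_q	x9, 0xffff0000ffff0000
	 */
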
/*
 * Return the current thread_info.
 */
	.macro	get_thread_info, rd
	mrs	\rd, sp_el0
	.endm

	.macro	pte_to_phys, phys, pte
	and	\phys, \pte, #(((1 << (48 - PAGE_SHIFT)) - 1) << PAGE_SHIFT)
	.endm

/**
 * Errata workaround prior to disabling the MMU. Insert an ISB immediately
 * prior to executing the MSR that will change SCTLR_ELn[M] from 1 to 0.
 */
	.macro	pre_disable_mmu_workaround
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
	isb
#endif
	.endm

/*
 * Check the MIDR_EL1 of the current CPU for a given model and a range of
 * variant/revision. See asm/cputype.h for the macros used below.
 *
 *	model:		MIDR_CPU_MODEL of CPU
 *	rv_min:		Minimum of MIDR_CPU_VAR_REV()
 *	rv_max:		Maximum of MIDR_CPU_VAR_REV()
 *	res:		Result register.
 *	tmp1, tmp2, tmp3: Temporary registers
 *
 * Corrupts: res, tmp1, tmp2, tmp3
 * Returns:  0, if the CPU id doesn't match. Non-zero otherwise
 */
	.macro	cpu_midr_match model, rv_min, rv_max, res, tmp1, tmp2, tmp3
	mrs		\res, midr_el1
	mov_q		\tmp1, (MIDR_REVISION_MASK | MIDR_VARIANT_MASK)
	mov_q		\tmp2, MIDR_CPU_MODEL_MASK
	and		\tmp3, \res, \tmp2	// Extract model
	and		\tmp1, \res, \tmp1	// rev & variant
	mov_q		\tmp2, \model
	cmp		\tmp3, \tmp2
	cset		\res, eq
	cbz		\res, .Ldone\@		// Model matches ?

	.if (\rv_min != 0)			// Skip min check if rv_min == 0
	mov_q		\tmp3, \rv_min
	cmp		\tmp1, \tmp3
	cset		\res, ge
	.endif					// \rv_min != 0
	/* Skip rv_max check if rv_min == rv_max && rv_min != 0 */
	.if ((\rv_min != \rv_max) || \rv_min == 0)
	mov_q		\tmp2, \rv_max
	cmp		\tmp1, \tmp2
	cset		\tmp2, le
	and		\res, \res, \tmp2
	.endif
.Ldone\@:
	.endm

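/*
 * Illustrative usage (not part of the original file; the exact revision
 * window and register choice are only examples): an erratum workaround
 * such as the Cortex-A55 erratum 1024718 handling might test the current
 * CPU like this and skip the workaround when there is no match:
 *
 *	cpu_midr_match MIDR_CORTEX_A55, MIDR_CPU_VAR_REV(0, 0), \
 *		       MIDR_CPU_VAR_REV(1, 0), x1, x2, x3, x4
 *	cbz	x1, 1f			// not an affected CPU, skip
 */
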
#endif	/* __ASM_ASSEMBLER_H */