#------------------------------------------------------------------------------
#
# Copyright (c) 2020, Arm, Limited. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
#------------------------------------------------------------------------------

/*
 * Provide the GCC intrinsics that are required when using GCC 9 or
 * later with the -moutline-atomics option (which became the default
 * in GCC 10).
 */
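
/*
 * Illustration (hand-sketched compiler output; 'counter' is a
 * hypothetical symbol): with outline atomics in use, a relaxed 32-bit
 * fetch-and-add such as
 *
 *     __atomic_fetch_add (&counter, 1, __ATOMIC_RELAXED)
 *
 * is compiled into a call to the matching helper defined below,
 * roughly:
 *
 *     mov     w0, #1                  // value operand
 *     adrp    x1, counter             // address of the atomic object
 *     add     x1, x1, :lo12:counter
 *     bl      __aarch64_ldadd4_relax  // old value is returned in w0
 *
 * The helpers follow the naming scheme __aarch64_<op><size>_<model>:
 * <op> is the LSE mnemonic, <size> the operand size in bytes, and
 * <model> one of the ordering suffixes emitted at the end of this file.
 */
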
        .arch           armv8-a

        .macro          reg_alias, pfx, sz
        r0_\sz          .req    \pfx\()0
        r1_\sz          .req    \pfx\()1
        tmp0_\sz        .req    \pfx\()16
        tmp1_\sz        .req    \pfx\()17
        .endm

/*
 * Define register aliases of the right type for each size
 * (xN for 8 bytes, wN for everything smaller)
 */
        reg_alias       w, 1
        reg_alias       w, 2
        reg_alias       w, 4
        reg_alias       x, 8

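/*
 * For example, "reg_alias w, 4" above defines (sketched expansion):
 *
 *     r0_4    .req    w0
 *     r1_4    .req    w1
 *     tmp0_4  .req    w16
 *     tmp1_4  .req    w17
 *
 * so the size-parameterized macros below can name the two operand
 * registers and two scratch registers without caring whether a w or
 * an x register is needed.
 */
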
        .macro          fn_start, name:req
        .section        .text.\name
        .globl          \name
        .type           \name, %function
\name\():
        .endm

        .macro          fn_end, name:req
        .size           \name, . - \name
        .endm

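/*
 * fn_start/fn_end give every helper its own section, symbol type and
 * symbol size. For example, "fn_start __aarch64_cas4_acq" expands to:
 *
 *     .section .text.__aarch64_cas4_acq
 *     .globl   __aarch64_cas4_acq
 *     .type    __aarch64_cas4_acq, %function
 *     __aarch64_cas4_acq:
 *
 * Placing each helper in its own .text.<name> section lets the linker
 * drop the ones the final image never references.
 */
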
/*
 * Emit an atomic helper for \model with operands of size \sz, using
 * the operation specified by \insn (which is the LSE name), and which
 * can be implemented using the generic load-locked/store-conditional
 * (LL/SC) sequence below, using the arithmetic operation given by
 * \opc.
 */
        .macro          emit_ld_sz, sz:req, insn:req, opc:req, model:req, s, a, l
        fn_start        __aarch64_\insn\()\sz\()\model
        mov             tmp0_\sz, r0_\sz
0:      ld\a\()xr\s     r0_\sz, [x1]
        .ifnc           \insn, swp
        \opc            tmp1_\sz, r0_\sz, tmp0_\sz
        st\l\()xr\s     w15, tmp1_\sz, [x1]
        .else
        st\l\()xr\s     w15, tmp0_\sz, [x1]
        .endif
        cbnz            w15, 0b
        ret
        fn_end          __aarch64_\insn\()\sz\()\model
        .endm

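/*
 * Sketched expansion of one instance: "emit_ld_sz 4, ldadd, add, _relax"
 * (empty size/acquire/release suffixes) produces the following body,
 * with the section and symbol directives from fn_start/fn_end omitted:
 *
 *     __aarch64_ldadd4_relax:
 *         mov     w16, w0         // keep the addend; w0 becomes the result
 *     0:  ldxr    w0, [x1]        // load-exclusive the old value
 *         add     w17, w0, w16    // compute the new value
 *         stxr    w15, w17, [x1]  // try to store it back
 *         cbnz    w15, 0b         // retry if the exclusive store failed
 *         ret                     // old value is returned in w0
 */
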
/*
 * Emit atomic helpers for \model for operand sizes in the
 * set {1, 2, 4, 8}, for the instruction pattern given by
 * \insn. (This is the LSE name, but this implementation uses
 * the generic LL/SC sequence using \opc as the arithmetic
 * operation on the target.)
 */
        .macro          emit_ld, insn:req, opc:req, model:req, a, l
        emit_ld_sz      1, \insn, \opc, \model, b, \a, \l
        emit_ld_sz      2, \insn, \opc, \model, h, \a, \l
        emit_ld_sz      4, \insn, \opc, \model, , \a, \l
        emit_ld_sz      8, \insn, \opc, \model, , \a, \l
        .endm

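/*
 * Calling convention of the helpers emitted above: the value operand
 * arrives in w0/x0, the pointer in x1, and the value previously held
 * in memory is returned in w0/x0. Only w15, x16 and x17 are used as
 * scratch, all of which are caller-saved temporaries under the AAPCS.
 * Informally, the 4-byte variants behave like a C function such as
 *
 *     uint32_t __aarch64_ldadd4_relax (uint32_t value, uint32_t *ptr);
 */
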
/*
 * Emit the compare and swap helper for \model and size \sz
 * using LL/SC instructions.
 */
        .macro          emit_cas_sz, sz:req, model:req, uxt:req, s, a, l
        fn_start        __aarch64_cas\sz\()\model
        \uxt            tmp0_\sz, r0_\sz
0:      ld\a\()xr\s     r0_\sz, [x2]
        cmp             r0_\sz, tmp0_\sz
        bne             1f
        st\l\()xr\s     w15, r1_\sz, [x2]
        cbnz            w15, 0b
1:      ret
        fn_end          __aarch64_cas\sz\()\model
        .endm

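/*
 * Sketched expansion of one instance: "emit_cas_sz 4, _acq, mov, , a"
 * produces an acquire compare-and-swap on a 32-bit object (expected
 * value in w0, desired value in w1, pointer in x2, observed old value
 * returned in w0). The uxtb/uxth variants zero-extend the expected
 * value so that it compares correctly against the zero-extended
 * byte/halfword loads.
 *
 *     __aarch64_cas4_acq:
 *         mov     w16, w0         // keep the expected value
 *     0:  ldaxr   w0, [x2]        // load-acquire the current value
 *         cmp     w0, w16
 *         bne     1f              // mismatch: return the observed value
 *         stxr    w15, w1, [x2]   // match: try to store the desired value
 *         cbnz    w15, 0b
 *     1:  ret
 */
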
/*
 * Emit compare-and-swap helpers for \model for operand sizes in the
 * set {1, 2, 4, 8, 16}.
 */
        .macro          emit_cas, model:req, a, l
        emit_cas_sz     1, \model, uxtb, b, \a, \l
        emit_cas_sz     2, \model, uxth, h, \a, \l
        emit_cas_sz     4, \model, mov , , \a, \l
        emit_cas_sz     8, \model, mov , , \a, \l

/*
 * We cannot use the parameterized sequence for 16 byte CAS, so we
 * need to define it explicitly.
 */
        fn_start        __aarch64_cas16\model
        mov             x16, x0
        mov             x17, x1
0:      ld\a\()xp       x0, x1, [x4]
        cmp             x0, x16
        ccmp            x1, x17, #0, eq
        bne             1f
        st\l\()xp       w15, x2, x3, [x4]
        cbnz            w15, 0b
1:      ret
        fn_end          __aarch64_cas16\model
        .endm

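/*
 * Register usage of __aarch64_cas16_<model>: the expected value is
 * passed in x0:x1, the desired value in x2:x3 and the pointer in x4;
 * the observed old value is returned in x0:x1. The expected value is
 * copied to x16:x17 up front because the exclusive load pair
 * overwrites x0:x1 with the current memory contents; on a match, the
 * desired value x2:x3 is what gets stored back.
 */
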
/*
 * Emit the set of GCC outline atomic helper functions for
 * the memory ordering model given by \model:
 * - relax      unordered loads and stores
 * - acq        load-acquire, unordered store
 * - rel        unordered load, store-release
 * - acq_rel    load-acquire, store-release
 */
        .macro          emit_model, model:req, a, l
        emit_ld         ldadd, add, \model, \a, \l
        emit_ld         ldclr, bic, \model, \a, \l
        emit_ld         ldeor, eor, \model, \a, \l
        emit_ld         ldset, orr, \model, \a, \l
        emit_ld         swp, mov, \model, \a, \l
        emit_cas        \model, \a, \l
        .endm

        emit_model      _relax
        emit_model      _acq, a
        emit_model      _rel,, l
        emit_model      _acq_rel, a, l
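
/*
 * For reference, a sketch of what one of the lines above generates:
 * "emit_model _acq, a" emits acquire variants built from
 * ldaxr/ldaxrb/ldaxrh/ldaxp loads paired with plain stxr/stxrb/
 * stxrh/stxp stores, i.e. the functions
 *
 *     __aarch64_ldadd{1,2,4,8}_acq    __aarch64_ldclr{1,2,4,8}_acq
 *     __aarch64_ldeor{1,2,4,8}_acq    __aarch64_ldset{1,2,4,8}_acq
 *     __aarch64_swp{1,2,4,8}_acq      __aarch64_cas{1,2,4,8,16}_acq
 */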