use super::{InlineAsmArch, InlineAsmType};
use crate::spec::Target;
use rustc_macros::HashStable_Generic;
use std::fmt;
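// `def_reg_class!` (defined in the parent `asm` module) generates the
// `X86InlineAsmRegClass` enum below along with its name-lookup helpers.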
def_reg_class! {
    X86 X86InlineAsmRegClass {
        reg,
        reg_abcd,
        reg_byte,
        xmm_reg,
        ymm_reg,
        zmm_reg,
        kreg,
    }
}
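// In `asm!` templates, a modifier follows a colon in an operand reference:
// e.g. `{0:e}` selects the 32-bit name of the register backing operand 0.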
impl X86InlineAsmRegClass {
    pub fn valid_modifiers(self, arch: super::InlineAsmArch) -> &'static [char] {
        match self {
            Self::reg => {
                if arch == InlineAsmArch::X86_64 {
                    &['l', 'x', 'e', 'r']
                } else {
                    &['x', 'e']
                }
            }
            Self::reg_abcd => {
                if arch == InlineAsmArch::X86_64 {
                    &['l', 'h', 'x', 'e', 'r']
                } else {
                    &['l', 'h', 'x', 'e']
                }
            }
            Self::reg_byte => &[],
            Self::xmm_reg | Self::ymm_reg | Self::zmm_reg => &['x', 'y', 'z'],
            Self::kreg => &[],
        }
    }
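    /// Suggests a register class better suited to a value of type `ty`:
    /// an 8-bit value held in `reg` or `reg_abcd` fits `reg_byte` instead.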
    pub fn suggest_class(self, _arch: InlineAsmArch, ty: InlineAsmType) -> Option<Self> {
        match self {
            Self::reg | Self::reg_abcd if ty.size().bits() == 8 => Some(Self::reg_byte),
            _ => None,
        }
    }
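    /// Suggests a template modifier when `ty` is narrower than the full
    /// register, so the emitted asm names the matching sub-register
    /// (e.g. `ax` rather than `rax` for a 16-bit value).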
    pub fn suggest_modifier(
        self,
        arch: InlineAsmArch,
        ty: InlineAsmType,
    ) -> Option<(char, &'static str)> {
        match self {
            Self::reg => match ty.size().bits() {
                16 => Some(('x', "ax")),
                32 if arch == InlineAsmArch::X86_64 => Some(('e', "eax")),
                _ => None,
            },
            Self::reg_abcd => match ty.size().bits() {
                16 => Some(('x', "ax")),
                32 if arch == InlineAsmArch::X86_64 => Some(('e', "eax")),
                _ => None,
            },
            Self::reg_byte => None,
            Self::xmm_reg => None,
            Self::ymm_reg => match ty.size().bits() {
                256 => None,
                _ => Some(('x', "xmm0")),
            },
            Self::zmm_reg => match ty.size().bits() {
                512 => None,
                256 => Some(('y', "ymm0")),
                _ => Some(('x', "xmm0")),
            },
            Self::kreg => None,
        }
    }
    pub fn default_modifier(self, arch: InlineAsmArch) -> Option<(char, &'static str)> {
        match self {
            Self::reg | Self::reg_abcd => {
                if arch == InlineAsmArch::X86_64 {
                    Some(('r', "rax"))
                } else {
                    Some(('e', "eax"))
                }
            }
            Self::reg_byte => None,
            Self::xmm_reg => Some(('x', "xmm0")),
            Self::ymm_reg => Some(('y', "ymm0")),
            Self::zmm_reg => Some(('z', "zmm0")),
            Self::kreg => None,
        }
    }
    pub fn supported_types(
        self,
        arch: InlineAsmArch,
    ) -> &'static [(InlineAsmType, Option<&'static str>)] {
        match self {
            Self::reg | Self::reg_abcd => {
                if arch == InlineAsmArch::X86_64 {
                    types! { _: I16, I32, I64, F32, F64; }
                } else {
                    types! { _: I16, I32, F32; }
                }
            }
            Self::reg_byte => types! { _: I8; },
            Self::xmm_reg => types! {
                "sse": I32, I64, F32, F64,
                    VecI8(16), VecI16(8), VecI32(4), VecI64(2), VecF32(4), VecF64(2);
            },
            Self::ymm_reg => types! {
                "avx": I32, I64, F32, F64,
                    VecI8(16), VecI16(8), VecI32(4), VecI64(2), VecF32(4), VecF64(2),
                    VecI8(32), VecI16(16), VecI32(8), VecI64(4), VecF32(8), VecF64(4);
            },
            Self::zmm_reg => types! {
                "avx512f": I32, I64, F32, F64,
                    VecI8(16), VecI16(8), VecI32(4), VecI64(2), VecF32(4), VecF64(2),
                    VecI8(32), VecI16(16), VecI32(8), VecI64(4), VecF32(8), VecF64(4),
                    VecI8(64), VecI16(32), VecI32(16), VecI64(8), VecF32(16), VecF64(8);
            },
            Self::kreg => types! {
                "avx512f": I8, I16;
                "avx512bw": I32, I64;
            },
        }
    }
}
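// Register filters: entries in `def_regs!` below tagged with `% filter` call
// one of these functions to reject registers that are unavailable on the
// current target or that cannot be allocated.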
fn x86_64_only(
    arch: InlineAsmArch,
    _has_feature: impl FnMut(&str) -> bool,
    _target: &Target,
    _allocating: bool,
) -> Result<(), &'static str> {
    match arch {
        InlineAsmArch::X86 => Err("register is only available on x86_64"),
        InlineAsmArch::X86_64 => Ok(()),
        _ => unreachable!(),
    }
}

fn high_byte(
    arch: InlineAsmArch,
    _has_feature: impl FnMut(&str) -> bool,
    _target: &Target,
    allocating: bool,
) -> Result<(), &'static str> {
    match arch {
        InlineAsmArch::X86_64 if allocating => {
            // The error message isn't actually used...
            Err("high byte registers are not allocated by reg_byte")
        }
        _ => Ok(()),
    }
}
def_regs! {
    X86 X86InlineAsmReg X86InlineAsmRegClass {
        ax: reg, reg_abcd = ["ax", "eax", "rax"],
        bx: reg, reg_abcd = ["bx", "ebx", "rbx"],
        cx: reg, reg_abcd = ["cx", "ecx", "rcx"],
        dx: reg, reg_abcd = ["dx", "edx", "rdx"],
        si: reg = ["si", "esi", "rsi"],
        di: reg = ["di", "edi", "rdi"],
        r8: reg = ["r8", "r8w", "r8d"] % x86_64_only,
        r9: reg = ["r9", "r9w", "r9d"] % x86_64_only,
        r10: reg = ["r10", "r10w", "r10d"] % x86_64_only,
        r11: reg = ["r11", "r11w", "r11d"] % x86_64_only,
        r12: reg = ["r12", "r12w", "r12d"] % x86_64_only,
        r13: reg = ["r13", "r13w", "r13d"] % x86_64_only,
        r14: reg = ["r14", "r14w", "r14d"] % x86_64_only,
        r15: reg = ["r15", "r15w", "r15d"] % x86_64_only,
        al: reg_byte = ["al"],
        ah: reg_byte = ["ah"] % high_byte,
        bl: reg_byte = ["bl"],
        bh: reg_byte = ["bh"] % high_byte,
        cl: reg_byte = ["cl"],
        ch: reg_byte = ["ch"] % high_byte,
        dl: reg_byte = ["dl"],
        dh: reg_byte = ["dh"] % high_byte,
        sil: reg_byte = ["sil"] % x86_64_only,
        dil: reg_byte = ["dil"] % x86_64_only,
        r8b: reg_byte = ["r8b"] % x86_64_only,
        r9b: reg_byte = ["r9b"] % x86_64_only,
        r10b: reg_byte = ["r10b"] % x86_64_only,
        r11b: reg_byte = ["r11b"] % x86_64_only,
        r12b: reg_byte = ["r12b"] % x86_64_only,
        r13b: reg_byte = ["r13b"] % x86_64_only,
        r14b: reg_byte = ["r14b"] % x86_64_only,
        r15b: reg_byte = ["r15b"] % x86_64_only,
        xmm0: xmm_reg = ["xmm0"],
        xmm1: xmm_reg = ["xmm1"],
        xmm2: xmm_reg = ["xmm2"],
        xmm3: xmm_reg = ["xmm3"],
        xmm4: xmm_reg = ["xmm4"],
        xmm5: xmm_reg = ["xmm5"],
        xmm6: xmm_reg = ["xmm6"],
        xmm7: xmm_reg = ["xmm7"],
        xmm8: xmm_reg = ["xmm8"] % x86_64_only,
        xmm9: xmm_reg = ["xmm9"] % x86_64_only,
        xmm10: xmm_reg = ["xmm10"] % x86_64_only,
        xmm11: xmm_reg = ["xmm11"] % x86_64_only,
        xmm12: xmm_reg = ["xmm12"] % x86_64_only,
        xmm13: xmm_reg = ["xmm13"] % x86_64_only,
        xmm14: xmm_reg = ["xmm14"] % x86_64_only,
        xmm15: xmm_reg = ["xmm15"] % x86_64_only,
        ymm0: ymm_reg = ["ymm0"],
        ymm1: ymm_reg = ["ymm1"],
        ymm2: ymm_reg = ["ymm2"],
        ymm3: ymm_reg = ["ymm3"],
        ymm4: ymm_reg = ["ymm4"],
        ymm5: ymm_reg = ["ymm5"],
        ymm6: ymm_reg = ["ymm6"],
        ymm7: ymm_reg = ["ymm7"],
        ymm8: ymm_reg = ["ymm8"] % x86_64_only,
        ymm9: ymm_reg = ["ymm9"] % x86_64_only,
        ymm10: ymm_reg = ["ymm10"] % x86_64_only,
        ymm11: ymm_reg = ["ymm11"] % x86_64_only,
        ymm12: ymm_reg = ["ymm12"] % x86_64_only,
        ymm13: ymm_reg = ["ymm13"] % x86_64_only,
        ymm14: ymm_reg = ["ymm14"] % x86_64_only,
        ymm15: ymm_reg = ["ymm15"] % x86_64_only,
        zmm0: zmm_reg = ["zmm0"],
        zmm1: zmm_reg = ["zmm1"],
        zmm2: zmm_reg = ["zmm2"],
        zmm3: zmm_reg = ["zmm3"],
        zmm4: zmm_reg = ["zmm4"],
        zmm5: zmm_reg = ["zmm5"],
        zmm6: zmm_reg = ["zmm6"],
        zmm7: zmm_reg = ["zmm7"],
        zmm8: zmm_reg = ["zmm8"] % x86_64_only,
        zmm9: zmm_reg = ["zmm9"] % x86_64_only,
        zmm10: zmm_reg = ["zmm10"] % x86_64_only,
        zmm11: zmm_reg = ["zmm11"] % x86_64_only,
        zmm12: zmm_reg = ["zmm12"] % x86_64_only,
        zmm13: zmm_reg = ["zmm13"] % x86_64_only,
        zmm14: zmm_reg = ["zmm14"] % x86_64_only,
        zmm15: zmm_reg = ["zmm15"] % x86_64_only,
        zmm16: zmm_reg = ["zmm16", "xmm16", "ymm16"] % x86_64_only,
        zmm17: zmm_reg = ["zmm17", "xmm17", "ymm17"] % x86_64_only,
        zmm18: zmm_reg = ["zmm18", "xmm18", "ymm18"] % x86_64_only,
        zmm19: zmm_reg = ["zmm19", "xmm19", "ymm19"] % x86_64_only,
        zmm20: zmm_reg = ["zmm20", "xmm20", "ymm20"] % x86_64_only,
        zmm21: zmm_reg = ["zmm21", "xmm21", "ymm21"] % x86_64_only,
        zmm22: zmm_reg = ["zmm22", "xmm22", "ymm22"] % x86_64_only,
        zmm23: zmm_reg = ["zmm23", "xmm23", "ymm23"] % x86_64_only,
        zmm24: zmm_reg = ["zmm24", "xmm24", "ymm24"] % x86_64_only,
        zmm25: zmm_reg = ["zmm25", "xmm25", "ymm25"] % x86_64_only,
        zmm26: zmm_reg = ["zmm26", "xmm26", "ymm26"] % x86_64_only,
        zmm27: zmm_reg = ["zmm27", "xmm27", "ymm27"] % x86_64_only,
        zmm28: zmm_reg = ["zmm28", "xmm28", "ymm28"] % x86_64_only,
        zmm29: zmm_reg = ["zmm29", "xmm29", "ymm29"] % x86_64_only,
        zmm30: zmm_reg = ["zmm30", "xmm30", "ymm30"] % x86_64_only,
        zmm31: zmm_reg = ["zmm31", "xmm31", "ymm31"] % x86_64_only,
        k1: kreg = ["k1"],
        k2: kreg = ["k2"],
        k3: kreg = ["k3"],
        k4: kreg = ["k4"],
        k5: kreg = ["k5"],
        k6: kreg = ["k6"],
        k7: kreg = ["k7"],
        #error = ["bp", "bpl", "ebp", "rbp"] =>
            "the frame pointer cannot be used as an operand for inline asm",
        #error = ["sp", "spl", "esp", "rsp"] =>
            "the stack pointer cannot be used as an operand for inline asm",
        #error = ["ip", "eip", "rip"] =>
            "the instruction pointer cannot be used as an operand for inline asm",
        #error = ["st", "st(0)", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)"] =>
            "x87 registers are not currently supported as operands for inline asm",
        #error = ["mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7"] =>
            "MMX registers are not currently supported as operands for inline asm",
        #error = ["k0"] =>
            "the k0 AVX mask register cannot be used as an operand for inline asm",
    }
}
impl X86InlineAsmReg {
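    /// Writes the assembly name of this register, as selected by `modifier`
    /// (or the architecture's default modifier), to `out`.
    ///
    /// A minimal sketch of the expected behavior, using a `String` as the
    /// writer (illustrative, not from the original source):
    ///
    /// ```ignore (illustrative)
    /// let mut s = String::new();
    /// X86InlineAsmReg::ax.emit(&mut s, InlineAsmArch::X86_64, Some('e')).unwrap();
    /// assert_eq!(s, "eax");
    /// ```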
    pub fn emit(
        self,
        out: &mut dyn fmt::Write,
        arch: InlineAsmArch,
        modifier: Option<char>,
    ) -> fmt::Result {
        let reg_default_modifier = match arch {
            InlineAsmArch::X86 => 'e',
            InlineAsmArch::X86_64 => 'r',
            _ => unreachable!(),
        };
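        // The range checks below rely on the declaration order of the
        // registers in `def_regs!`: each comparison covers one block of
        // consecutively declared registers.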
        if self as u32 <= Self::dx as u32 {
            let root = ['a', 'b', 'c', 'd'][self as usize - Self::ax as usize];
            match modifier.unwrap_or(reg_default_modifier) {
                'l' => write!(out, "{}l", root),
                'h' => write!(out, "{}h", root),
                'x' => write!(out, "{}x", root),
                'e' => write!(out, "e{}x", root),
                'r' => write!(out, "r{}x", root),
                _ => unreachable!(),
            }
        } else if self as u32 <= Self::di as u32 {
            let root = self.name();
            match modifier.unwrap_or(reg_default_modifier) {
                'l' => write!(out, "{}l", root),
                'x' => write!(out, "{}", root),
                'e' => write!(out, "e{}", root),
                'r' => write!(out, "r{}", root),
                _ => unreachable!(),
            }
        } else if self as u32 <= Self::r15 as u32 {
            let root = self.name();
            match modifier.unwrap_or(reg_default_modifier) {
                'l' => write!(out, "{}b", root),
                'x' => write!(out, "{}w", root),
                'e' => write!(out, "{}d", root),
                'r' => out.write_str(root),
                _ => unreachable!(),
            }
        } else if self as u32 <= Self::r15b as u32 {
            out.write_str(self.name())
        } else if self as u32 <= Self::xmm15 as u32 {
            let prefix = modifier.unwrap_or('x');
            let index = self as u32 - Self::xmm0 as u32;
            write!(out, "{}{}", prefix, index)
        } else if self as u32 <= Self::ymm15 as u32 {
            let prefix = modifier.unwrap_or('y');
            let index = self as u32 - Self::ymm0 as u32;
            write!(out, "{}{}", prefix, index)
        } else if self as u32 <= Self::zmm31 as u32 {
            let prefix = modifier.unwrap_or('z');
            let index = self as u32 - Self::zmm0 as u32;
            write!(out, "{}{}", prefix, index)
        } else {
            out.write_str(self.name())
        }
    }
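    /// Calls `cb` for every register whose storage overlaps with `self`
    /// (including `self` itself), e.g. `ax` overlaps `al` and `ah`.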
    pub fn overlapping_regs(self, mut cb: impl FnMut(X86InlineAsmReg)) {
        macro_rules! reg_conflicts {
            (
                $(
                    $w:ident : $l:ident $h:ident
                ),*;
                $(
                    $w2:ident : $l2:ident
                ),*;
                $(
                    $x:ident : $y:ident : $z:ident
                ),*;
            ) => {
                match self {
                    $(
                        Self::$w => {
                            cb(Self::$w);
                            cb(Self::$l);
                            cb(Self::$h);
                        }
                        Self::$l => {
                            cb(Self::$w);
                            cb(Self::$l);
                        }
                        Self::$h => {
                            cb(Self::$w);
                            cb(Self::$h);
                        }
                    )*
                    $(
                        Self::$w2 | Self::$l2 => {
                            cb(Self::$w2);
                            cb(Self::$l2);
                        }
                    )*
                    $(
                        Self::$x | Self::$y | Self::$z => {
                            cb(Self::$x);
                            cb(Self::$y);
                            cb(Self::$z);
                        }
                    )*
                    r => cb(r),
                }
            };
        }

        // XMM*, YMM* and ZMM* are all different views of the same register.
        //
        // See section 15.5 of the combined Intel® 64 and IA-32 Architectures
        // Software Developer’s Manual for more details.
        //
        // We don't need to specify conflicts for [x,y,z]mm[16-31] since these
        // registers are only available with AVX-512, so we just specify them
        // as aliases directly.
        reg_conflicts! {
            ax : al ah,
            bx : bl bh,
            cx : cl ch,
            dx : dl dh;
            si : sil,
            di : dil,
            r8 : r8b,
            r9 : r9b,
            r10 : r10b,
            r11 : r11b,
            r12 : r12b,
            r13 : r13b,
            r14 : r14b,
            r15 : r15b;
            xmm0 : ymm0 : zmm0,
            xmm1 : ymm1 : zmm1,
            xmm2 : ymm2 : zmm2,
            xmm3 : ymm3 : zmm3,
            xmm4 : ymm4 : zmm4,
            xmm5 : ymm5 : zmm5,
            xmm6 : ymm6 : zmm6,
            xmm7 : ymm7 : zmm7,
            xmm8 : ymm8 : zmm8,
            xmm9 : ymm9 : zmm9,
            xmm10 : ymm10 : zmm10,
            xmm11 : ymm11 : zmm11,
            xmm12 : ymm12 : zmm12,
            xmm13 : ymm13 : zmm13,
            xmm14 : ymm14 : zmm14,
            xmm15 : ymm15 : zmm15;
        }
    }
}