// src/librustc_target/asm/x86.rs — x86/x86_64 register definitions for inline `asm!`
// (rustc 1.47.0 upstream sources)
1 use super::{InlineAsmArch, InlineAsmType};
2 use crate::spec::Target;
3 use rustc_macros::HashStable_Generic;
4 use std::fmt;
5
// Register classes usable in `asm!` operand specifications on x86/x86_64.
def_reg_class! {
    X86 X86InlineAsmRegClass {
        reg,      // general-purpose registers
        reg_abcd, // ax/bx/cx/dx only — the GP registers that also have high-byte views
        reg_byte, // byte-sized general-purpose registers (al, bl, ...)
        xmm_reg,  // 128-bit SSE vector registers
        ymm_reg,  // 256-bit AVX vector registers
        zmm_reg,  // 512-bit AVX-512 vector registers
        kreg,     // AVX-512 mask registers (k1-k7; k0 is rejected below)
    }
}
17
18 impl X86InlineAsmRegClass {
19 pub fn valid_modifiers(self, arch: super::InlineAsmArch) -> &'static [char] {
20 match self {
21 Self::reg => {
22 if arch == InlineAsmArch::X86_64 {
23 &['l', 'x', 'e', 'r']
24 } else {
25 &['x', 'e']
26 }
27 }
28 Self::reg_abcd => {
29 if arch == InlineAsmArch::X86_64 {
30 &['l', 'h', 'x', 'e', 'r']
31 } else {
32 &['l', 'h', 'x', 'e']
33 }
34 }
35 Self::reg_byte => &[],
36 Self::xmm_reg | Self::ymm_reg | Self::zmm_reg => &['x', 'y', 'z'],
37 Self::kreg => &[],
38 }
39 }
40
41 pub fn suggest_class(self, _arch: InlineAsmArch, ty: InlineAsmType) -> Option<Self> {
42 match self {
43 Self::reg | Self::reg_abcd if ty.size().bits() == 8 => Some(Self::reg_byte),
44 _ => None,
45 }
46 }
47
48 pub fn suggest_modifier(
49 self,
50 arch: InlineAsmArch,
51 ty: InlineAsmType,
52 ) -> Option<(char, &'static str)> {
53 match self {
54 Self::reg => match ty.size().bits() {
55 16 => Some(('x', "ax")),
56 32 if arch == InlineAsmArch::X86_64 => Some(('e', "eax")),
57 _ => None,
58 },
59 Self::reg_abcd => match ty.size().bits() {
60 16 => Some(('x', "ax")),
61 32 if arch == InlineAsmArch::X86_64 => Some(('e', "eax")),
62 _ => None,
63 },
64 Self::reg_byte => None,
65 Self::xmm_reg => None,
66 Self::ymm_reg => match ty.size().bits() {
67 256 => None,
68 _ => Some(('x', "xmm0")),
69 },
70 Self::zmm_reg => match ty.size().bits() {
71 512 => None,
72 256 => Some(('y', "ymm0")),
73 _ => Some(('x', "xmm0")),
74 },
75 Self::kreg => None,
76 }
77 }
78
79 pub fn default_modifier(self, arch: InlineAsmArch) -> Option<(char, &'static str)> {
80 match self {
81 Self::reg | Self::reg_abcd => {
82 if arch == InlineAsmArch::X86_64 {
83 Some(('r', "rax"))
84 } else {
85 Some(('e', "eax"))
86 }
87 }
88 Self::reg_byte => None,
89 Self::xmm_reg => Some(('x', "xmm0")),
90 Self::ymm_reg => Some(('y', "ymm0")),
91 Self::zmm_reg => Some(('z', "zmm0")),
92 Self::kreg => None,
93 }
94 }
95
96 pub fn supported_types(
97 self,
98 arch: InlineAsmArch,
99 ) -> &'static [(InlineAsmType, Option<&'static str>)] {
100 match self {
101 Self::reg | Self::reg_abcd => {
102 if arch == InlineAsmArch::X86_64 {
103 types! { _: I16, I32, I64, F32, F64; }
104 } else {
105 types! { _: I16, I32, F32; }
106 }
107 }
108 Self::reg_byte => types! { _: I8; },
109 Self::xmm_reg => types! {
110 "sse": I32, I64, F32, F64,
111 VecI8(16), VecI16(8), VecI32(4), VecI64(2), VecF32(4), VecF64(2);
112 },
113 Self::ymm_reg => types! {
114 "avx": I32, I64, F32, F64,
115 VecI8(16), VecI16(8), VecI32(4), VecI64(2), VecF32(4), VecF64(2),
116 VecI8(32), VecI16(16), VecI32(8), VecI64(4), VecF32(8), VecF64(4);
117 },
118 Self::zmm_reg => types! {
119 "avx512f": I32, I64, F32, F64,
120 VecI8(16), VecI16(8), VecI32(4), VecI64(2), VecF32(4), VecF64(2),
121 VecI8(32), VecI16(16), VecI32(8), VecI64(4), VecF32(8), VecF64(4),
122 VecI8(64), VecI16(32), VecI32(16), VecI64(8), VecF32(16), VecF64(8);
123 },
124 Self::kreg => types! {
125 "avx512f": I8, I16;
126 "avx512bw": I32, I64;
127 },
128 }
129 }
130 }
131
132 fn x86_64_only(
133 arch: InlineAsmArch,
134 _has_feature: impl FnMut(&str) -> bool,
135 _target: &Target,
136 _allocating: bool,
137 ) -> Result<(), &'static str> {
138 match arch {
139 InlineAsmArch::X86 => Err("register is only available on x86_64"),
140 InlineAsmArch::X86_64 => Ok(()),
141 _ => unreachable!(),
142 }
143 }
144
145 fn high_byte(
146 arch: InlineAsmArch,
147 _has_feature: impl FnMut(&str) -> bool,
148 _target: &Target,
149 allocating: bool,
150 ) -> Result<(), &'static str> {
151 match arch {
152 InlineAsmArch::X86_64 if allocating => {
153 // The error message isn't actually used...
154 Err("high byte registers are not allocated by reg_byte")
155 }
156 _ => Ok(()),
157 }
158 }
159
// Concrete registers: class membership, accepted alternate names, and an
// optional availability filter (`% func`). Registers that must never be
// used as operands are declared with `#error` plus the diagnostic to emit.
//
// NOTE: declaration order matters — `X86InlineAsmReg::emit` below compares
// enum discriminants to pick the name-mangling scheme for each range.
def_regs! {
    X86 X86InlineAsmReg X86InlineAsmRegClass {
        // General-purpose registers. ax-dx are additionally in reg_abcd
        // because they have addressable high-byte halves.
        ax: reg, reg_abcd = ["ax", "eax", "rax"],
        bx: reg, reg_abcd = ["bx", "ebx", "rbx"],
        cx: reg, reg_abcd = ["cx", "ecx", "rcx"],
        dx: reg, reg_abcd = ["dx", "edx", "rdx"],
        si: reg = ["si", "esi", "rsi"],
        di: reg = ["di", "edi", "rdi"],
        r8: reg = ["r8", "r8w", "r8d"] % x86_64_only,
        r9: reg = ["r9", "r9w", "r9d"] % x86_64_only,
        r10: reg = ["r10", "r10w", "r10d"] % x86_64_only,
        r11: reg = ["r11", "r11w", "r11d"] % x86_64_only,
        r12: reg = ["r12", "r12w", "r12d"] % x86_64_only,
        r13: reg = ["r13", "r13w", "r13d"] % x86_64_only,
        r14: reg = ["r14", "r14w", "r14d"] % x86_64_only,
        r15: reg = ["r15", "r15w", "r15d"] % x86_64_only,
        // Byte registers. High-byte halves are excluded from allocation on
        // x86_64 (see high_byte); sil/dil/r8b-r15b only exist on x86_64.
        al: reg_byte = ["al"],
        ah: reg_byte = ["ah"] % high_byte,
        bl: reg_byte = ["bl"],
        bh: reg_byte = ["bh"] % high_byte,
        cl: reg_byte = ["cl"],
        ch: reg_byte = ["ch"] % high_byte,
        dl: reg_byte = ["dl"],
        dh: reg_byte = ["dh"] % high_byte,
        sil: reg_byte = ["sil"] % x86_64_only,
        dil: reg_byte = ["dil"] % x86_64_only,
        r8b: reg_byte = ["r8b"] % x86_64_only,
        r9b: reg_byte = ["r9b"] % x86_64_only,
        r10b: reg_byte = ["r10b"] % x86_64_only,
        r11b: reg_byte = ["r11b"] % x86_64_only,
        r12b: reg_byte = ["r12b"] % x86_64_only,
        r13b: reg_byte = ["r13b"] % x86_64_only,
        r14b: reg_byte = ["r14b"] % x86_64_only,
        r15b: reg_byte = ["r15b"] % x86_64_only,
        // SSE registers; the upper half (8-15) only exists on x86_64.
        xmm0: xmm_reg = ["xmm0"],
        xmm1: xmm_reg = ["xmm1"],
        xmm2: xmm_reg = ["xmm2"],
        xmm3: xmm_reg = ["xmm3"],
        xmm4: xmm_reg = ["xmm4"],
        xmm5: xmm_reg = ["xmm5"],
        xmm6: xmm_reg = ["xmm6"],
        xmm7: xmm_reg = ["xmm7"],
        xmm8: xmm_reg = ["xmm8"] % x86_64_only,
        xmm9: xmm_reg = ["xmm9"] % x86_64_only,
        xmm10: xmm_reg = ["xmm10"] % x86_64_only,
        xmm11: xmm_reg = ["xmm11"] % x86_64_only,
        xmm12: xmm_reg = ["xmm12"] % x86_64_only,
        xmm13: xmm_reg = ["xmm13"] % x86_64_only,
        xmm14: xmm_reg = ["xmm14"] % x86_64_only,
        xmm15: xmm_reg = ["xmm15"] % x86_64_only,
        // AVX registers (256-bit views of the same physical registers;
        // overlap is declared in X86InlineAsmReg::overlapping_regs).
        ymm0: ymm_reg = ["ymm0"],
        ymm1: ymm_reg = ["ymm1"],
        ymm2: ymm_reg = ["ymm2"],
        ymm3: ymm_reg = ["ymm3"],
        ymm4: ymm_reg = ["ymm4"],
        ymm5: ymm_reg = ["ymm5"],
        ymm6: ymm_reg = ["ymm6"],
        ymm7: ymm_reg = ["ymm7"],
        ymm8: ymm_reg = ["ymm8"] % x86_64_only,
        ymm9: ymm_reg = ["ymm9"] % x86_64_only,
        ymm10: ymm_reg = ["ymm10"] % x86_64_only,
        ymm11: ymm_reg = ["ymm11"] % x86_64_only,
        ymm12: ymm_reg = ["ymm12"] % x86_64_only,
        ymm13: ymm_reg = ["ymm13"] % x86_64_only,
        ymm14: ymm_reg = ["ymm14"] % x86_64_only,
        ymm15: ymm_reg = ["ymm15"] % x86_64_only,
        // AVX-512 registers.
        zmm0: zmm_reg = ["zmm0"],
        zmm1: zmm_reg = ["zmm1"],
        zmm2: zmm_reg = ["zmm2"],
        zmm3: zmm_reg = ["zmm3"],
        zmm4: zmm_reg = ["zmm4"],
        zmm5: zmm_reg = ["zmm5"],
        zmm6: zmm_reg = ["zmm6"],
        zmm7: zmm_reg = ["zmm7"],
        zmm8: zmm_reg = ["zmm8"] % x86_64_only,
        zmm9: zmm_reg = ["zmm9"] % x86_64_only,
        zmm10: zmm_reg = ["zmm10"] % x86_64_only,
        zmm11: zmm_reg = ["zmm11"] % x86_64_only,
        zmm12: zmm_reg = ["zmm12"] % x86_64_only,
        zmm13: zmm_reg = ["zmm13"] % x86_64_only,
        zmm14: zmm_reg = ["zmm14"] % x86_64_only,
        zmm15: zmm_reg = ["zmm15"] % x86_64_only,
        // zmm16-31 exist only with AVX-512, so their xmm/ymm views are
        // plain aliases here rather than separately-defined registers.
        zmm16: zmm_reg = ["zmm16", "xmm16", "ymm16"] % x86_64_only,
        zmm17: zmm_reg = ["zmm17", "xmm17", "ymm17"] % x86_64_only,
        zmm18: zmm_reg = ["zmm18", "xmm18", "ymm18"] % x86_64_only,
        zmm19: zmm_reg = ["zmm19", "xmm19", "ymm19"] % x86_64_only,
        zmm20: zmm_reg = ["zmm20", "xmm20", "ymm20"] % x86_64_only,
        zmm21: zmm_reg = ["zmm21", "xmm21", "ymm21"] % x86_64_only,
        zmm22: zmm_reg = ["zmm22", "xmm22", "ymm22"] % x86_64_only,
        zmm23: zmm_reg = ["zmm23", "xmm23", "ymm23"] % x86_64_only,
        zmm24: zmm_reg = ["zmm24", "xmm24", "ymm24"] % x86_64_only,
        zmm25: zmm_reg = ["zmm25", "xmm25", "ymm25"] % x86_64_only,
        zmm26: zmm_reg = ["zmm26", "xmm26", "ymm26"] % x86_64_only,
        zmm27: zmm_reg = ["zmm27", "xmm27", "ymm27"] % x86_64_only,
        zmm28: zmm_reg = ["zmm28", "xmm28", "ymm28"] % x86_64_only,
        zmm29: zmm_reg = ["zmm29", "xmm29", "ymm29"] % x86_64_only,
        zmm30: zmm_reg = ["zmm30", "xmm30", "ymm30"] % x86_64_only,
        zmm31: zmm_reg = ["zmm31", "xmm31", "ymm31"] % x86_64_only,
        // AVX-512 mask registers (k0 is rejected below).
        k1: kreg = ["k1"],
        k2: kreg = ["k2"],
        k3: kreg = ["k3"],
        k4: kreg = ["k4"],
        k5: kreg = ["k5"],
        k6: kreg = ["k6"],
        k7: kreg = ["k7"],
        // Registers that may never be used as `asm!` operands.
        #error = ["bp", "bpl", "ebp", "rbp"] =>
            "the frame pointer cannot be used as an operand for inline asm",
        #error = ["sp", "spl", "esp", "rsp"] =>
            "the stack pointer cannot be used as an operand for inline asm",
        #error = ["ip", "eip", "rip"] =>
            "the instruction pointer cannot be used as an operand for inline asm",
        #error = ["st", "st(0)", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)"] =>
            "x87 registers are not currently supported as operands for inline asm",
        #error = ["mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7"] =>
            "MMX registers are not currently supported as operands for inline asm",
        #error = ["k0"] =>
            "the k0 AVX mask register cannot be used as an operand for inline asm",
    }
}
279
impl X86InlineAsmReg {
    /// Writes the assembly-level name of this register to `out`, honoring
    /// the template `modifier` (e.g. modifier 'e' turns `ax` into `eax`).
    /// Without a modifier, GP registers default to the full native width
    /// for `arch` and vector registers default to their class's own width.
    ///
    /// NOTE: the discriminant range checks below (`self as u32 <= ...`)
    /// rely on the declaration order of registers in the `def_regs!`
    /// invocation above.
    pub fn emit(
        self,
        out: &mut dyn fmt::Write,
        arch: InlineAsmArch,
        modifier: Option<char>,
    ) -> fmt::Result {
        // Default GP-register width: 32-bit ('e') on x86, 64-bit ('r') on
        // x86_64. Other architectures never reach this code.
        let reg_default_modifier = match arch {
            InlineAsmArch::X86 => 'e',
            InlineAsmArch::X86_64 => 'r',
            _ => unreachable!(),
        };
        if self as u32 <= Self::dx as u32 {
            // ax/bx/cx/dx: build the name from the root letter plus the
            // requested width ('l'=al, 'h'=ah, 'x'=ax, 'e'=eax, 'r'=rax).
            let root = ['a', 'b', 'c', 'd'][self as usize - Self::ax as usize];
            match modifier.unwrap_or(reg_default_modifier) {
                'l' => write!(out, "{}l", root),
                'h' => write!(out, "{}h", root),
                'x' => write!(out, "{}x", root),
                'e' => write!(out, "e{}x", root),
                'r' => write!(out, "r{}x", root),
                _ => unreachable!(),
            }
        } else if self as u32 <= Self::di as u32 {
            // si/di: affix the base name ('l'=sil, 'x'=si, 'e'=esi, 'r'=rsi).
            let root = self.name();
            match modifier.unwrap_or(reg_default_modifier) {
                'l' => write!(out, "{}l", root),
                'x' => write!(out, "{}", root),
                'e' => write!(out, "e{}", root),
                'r' => write!(out, "r{}", root),
                _ => unreachable!(),
            }
        } else if self as u32 <= Self::r15 as u32 {
            // r8-r15: sub-register names use b/w/d suffixes.
            let root = self.name();
            match modifier.unwrap_or(reg_default_modifier) {
                'l' => write!(out, "{}b", root),
                'x' => write!(out, "{}w", root),
                'e' => write!(out, "{}d", root),
                'r' => out.write_str(root),
                _ => unreachable!(),
            }
        } else if self as u32 <= Self::r15b as u32 {
            // Byte registers (al..r15b) have exactly one name; modifiers
            // are rejected for reg_byte by valid_modifiers.
            out.write_str(self.name())
        } else if self as u32 <= Self::xmm15 as u32 {
            // Vector registers: the modifier selects the x/y/z view while
            // the numeric index is preserved.
            let prefix = modifier.unwrap_or('x');
            let index = self as u32 - Self::xmm0 as u32;
            write!(out, "{}{}", prefix, index)
        } else if self as u32 <= Self::ymm15 as u32 {
            let prefix = modifier.unwrap_or('y');
            let index = self as u32 - Self::ymm0 as u32;
            write!(out, "{}{}", prefix, index)
        } else if self as u32 <= Self::zmm31 as u32 {
            let prefix = modifier.unwrap_or('z');
            let index = self as u32 - Self::zmm0 as u32;
            write!(out, "{}{}", prefix, index)
        } else {
            // Remaining registers (the k* mask registers) are emitted
            // verbatim by name.
            out.write_str(self.name())
        }
    }

    /// Invokes `cb` for every register whose storage overlaps `self`
    /// (including `self` itself), so aliasing registers can be treated as
    /// conflicting during allocation.
    pub fn overlapping_regs(self, mut cb: impl FnMut(X86InlineAsmReg)) {
        // Three conflict shapes, separated by `;` in the invocation below:
        //   w : l h   — a word register overlapping distinct low and high
        //               byte registers (the two byte regs do NOT conflict
        //               with each other);
        //   w : l     — a word register overlapping a single byte register;
        //   x : y : z — three mutually-overlapping views of one register.
        macro_rules! reg_conflicts {
            (
                $(
                    $w:ident : $l:ident $h:ident
                ),*;
                $(
                    $w2:ident : $l2:ident
                ),*;
                $(
                    $x:ident : $y:ident : $z:ident
                ),*;
            ) => {
                match self {
                    $(
                        Self::$w => {
                            cb(Self::$w);
                            cb(Self::$l);
                            cb(Self::$h);
                        }
                        Self::$l => {
                            cb(Self::$w);
                            cb(Self::$l);
                        }
                        Self::$h => {
                            cb(Self::$w);
                            cb(Self::$h);
                        }
                    )*
                    $(
                        Self::$w2 | Self::$l2 => {
                            cb(Self::$w2);
                            cb(Self::$l2);
                        }
                    )*
                    $(
                        Self::$x | Self::$y | Self::$z => {
                            cb(Self::$x);
                            cb(Self::$y);
                            cb(Self::$z);
                        }
                    )*
                    // Registers not listed (k1-k7, zmm16-31) only overlap
                    // themselves.
                    r => cb(r),
                }
            };
        }

        // XMM*, YMM* and ZMM* are all different views of the same register.
        //
        // See section 15.5 of the combined Intel® 64 and IA-32 Architectures
        // Software Developer’s Manual for more details.
        //
        // We don't need to specify conflicts for [x,y,z]mm[16-31] since these
        // registers are only available with AVX-512, so we just specify them
        // as aliases directly.
        reg_conflicts! {
            ax : al ah,
            bx : bl bh,
            cx : cl ch,
            dx : dl dh;
            si : sil,
            di : dil,
            r8 : r8b,
            r9 : r9b,
            r10 : r10b,
            r11 : r11b,
            r12 : r12b,
            r13 : r13b,
            r14 : r14b,
            r15 : r15b;
            xmm0 : ymm0 : zmm0,
            xmm1 : ymm1 : zmm1,
            xmm2 : ymm2 : zmm2,
            xmm3 : ymm3 : zmm3,
            xmm4 : ymm4 : zmm4,
            xmm5 : ymm5 : zmm5,
            xmm6 : ymm6 : zmm6,
            xmm7 : ymm7 : zmm7,
            xmm8 : ymm8 : zmm8,
            xmm9 : ymm9 : zmm9,
            xmm10 : ymm10 : zmm10,
            xmm11 : ymm11 : zmm11,
            xmm12 : ymm12 : zmm12,
            xmm13 : ymm13 : zmm13,
            xmm14 : ymm14 : zmm14,
            xmm15 : ymm15 : zmm15;
        }
    }
}