// compiler/rustc_ast_lowering/src/asm.rs
// (upstream rustc 1.68.2, via git.proxmox.com rustc.git)
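//! Lowering of inline assembly (`asm!`) from the AST to the HIR: register and
//! register-class parsing, `clobber_abi` expansion, template-modifier
//! validation, and explicit-register conflict detection. (Summary added for
//! readability; not part of the upstream source.)
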
use crate::{ImplTraitContext, ImplTraitPosition, ParamMode, ResolverAstLoweringExt};

use super::errors::{
    AbiSpecifiedMultipleTimes, AttSyntaxOnlyX86, ClobberAbiNotSupported,
    InlineAsmUnsupportedTarget, InvalidAbiClobberAbi, InvalidAsmTemplateModifierConst,
    InvalidAsmTemplateModifierRegClass, InvalidAsmTemplateModifierRegClassSub,
    InvalidAsmTemplateModifierSym, InvalidRegister, InvalidRegisterClass, RegisterClassOnlyClobber,
    RegisterConflict,
};
use super::LoweringContext;

use rustc_ast::ptr::P;
use rustc_ast::*;
use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap};
use rustc_hir as hir;
use rustc_hir::def::{DefKind, Res};
use rustc_hir::definitions::DefPathData;
use rustc_session::parse::feature_err;
use rustc_span::{sym, Span};
use rustc_target::asm;
use std::collections::hash_map::Entry;
use std::fmt::Write;

impl<'a, 'hir> LoweringContext<'a, 'hir> {
    pub(crate) fn lower_inline_asm(
        &mut self,
        sp: Span,
        asm: &InlineAsm,
    ) -> &'hir hir::InlineAsm<'hir> {
        // Rustdoc needs to support asm! from foreign architectures: don't try
        // lowering the register constraints in this case.
        let asm_arch =
            if self.tcx.sess.opts.actually_rustdoc { None } else { self.tcx.sess.asm_arch };
        if asm_arch.is_none() && !self.tcx.sess.opts.actually_rustdoc {
            self.tcx.sess.emit_err(InlineAsmUnsupportedTarget { span: sp });
        }
        if let Some(asm_arch) = asm_arch {
            // Inline assembly is currently only stable for these architectures.
            let is_stable = matches!(
                asm_arch,
                asm::InlineAsmArch::X86
                    | asm::InlineAsmArch::X86_64
                    | asm::InlineAsmArch::Arm
                    | asm::InlineAsmArch::AArch64
                    | asm::InlineAsmArch::RiscV32
                    | asm::InlineAsmArch::RiscV64
            );
            if !is_stable && !self.tcx.features().asm_experimental_arch {
                feature_err(
                    &self.tcx.sess.parse_sess,
                    sym::asm_experimental_arch,
                    sp,
                    "inline assembly is not stable yet on this architecture",
                )
                .emit();
            }
        }
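
        // Illustrative example (not part of the upstream source): on an
        // architecture outside the stable list above, e.g. AVR or MSP430,
        //
        //     asm!("nop");
        //
        // is rejected by the gate above unless
        // `#![feature(asm_experimental_arch)]` is enabled.
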
        if asm.options.contains(InlineAsmOptions::ATT_SYNTAX)
            && !matches!(asm_arch, Some(asm::InlineAsmArch::X86 | asm::InlineAsmArch::X86_64))
            && !self.tcx.sess.opts.actually_rustdoc
        {
            self.tcx.sess.emit_err(AttSyntaxOnlyX86 { span: sp });
        }
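
        // Illustrative: `asm!("mov %eax, %ebx", options(att_syntax))` is only
        // meaningful on x86/x86_64; the check above rejects the option on any
        // other target.
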
        if asm.options.contains(InlineAsmOptions::MAY_UNWIND) && !self.tcx.features().asm_unwind {
            feature_err(
                &self.tcx.sess.parse_sess,
                sym::asm_unwind,
                sp,
                "the `may_unwind` option is unstable",
            )
            .emit();
        }
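
        // Illustrative: `asm!("nop", options(may_unwind))` requires
        // `#![feature(asm_unwind)]` on this compiler version.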

        let mut clobber_abis = FxIndexMap::default();
        if let Some(asm_arch) = asm_arch {
            for (abi_name, abi_span) in &asm.clobber_abis {
                match asm::InlineAsmClobberAbi::parse(asm_arch, &self.tcx.sess.target, *abi_name) {
                    Ok(abi) => {
                        // If the ABI was already in the list, emit an error.
                        match clobber_abis.get(&abi) {
                            Some((prev_name, prev_sp)) => {
                                // Multiple different ABI names may map to the same ABI;
                                // if the specified names differ, tell the user that they
                                // resolve to the same ABI.
                                let source_map = self.tcx.sess.source_map();
                                let equivalent = (source_map.span_to_snippet(*prev_sp)
                                    != source_map.span_to_snippet(*abi_span))
                                .then_some(());

                                self.tcx.sess.emit_err(AbiSpecifiedMultipleTimes {
                                    abi_span: *abi_span,
                                    prev_name: *prev_name,
                                    prev_span: *prev_sp,
                                    equivalent,
                                });
                            }
                            None => {
                                clobber_abis.insert(abi, (*abi_name, *abi_span));
                            }
                        }
                    }
                    Err(&[]) => {
                        self.tcx.sess.emit_err(ClobberAbiNotSupported { abi_span: *abi_span });
                    }
                    Err(supported_abis) => {
                        let mut abis = format!("`{}`", supported_abis[0]);
                        for m in &supported_abis[1..] {
                            let _ = write!(abis, ", `{m}`");
                        }
                        self.tcx.sess.emit_err(InvalidAbiClobberAbi {
                            abi_span: *abi_span,
                            supported_abis: abis,
                        });
                    }
                }
            }
        }
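
        // Illustrative: on x86_64-unknown-linux-gnu, `clobber_abi("C")`
        // together with `clobber_abi("system")` resolves to the same ABI, so
        // the second one hits `AbiSpecifiedMultipleTimes` with a note that the
        // two names are equivalent; an unknown name like `clobber_abi("foo")`
        // produces `InvalidAbiClobberAbi` listing the supported names.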

        // Lower operands to HIR. We use dummy register classes if an error
        // occurs during lowering because we still need to be able to produce a
        // valid HIR.
        let sess = self.tcx.sess;
        let mut operands: Vec<_> = asm
            .operands
            .iter()
            .map(|(op, op_sp)| {
                let lower_reg = |&reg: &_| match reg {
                    InlineAsmRegOrRegClass::Reg(reg) => {
                        asm::InlineAsmRegOrRegClass::Reg(if let Some(asm_arch) = asm_arch {
                            asm::InlineAsmReg::parse(asm_arch, reg).unwrap_or_else(|error| {
                                sess.emit_err(InvalidRegister { op_span: *op_sp, reg, error });
                                asm::InlineAsmReg::Err
                            })
                        } else {
                            asm::InlineAsmReg::Err
                        })
                    }
                    InlineAsmRegOrRegClass::RegClass(reg_class) => {
                        asm::InlineAsmRegOrRegClass::RegClass(if let Some(asm_arch) = asm_arch {
                            asm::InlineAsmRegClass::parse(asm_arch, reg_class).unwrap_or_else(
                                |error| {
                                    sess.emit_err(InvalidRegisterClass {
                                        op_span: *op_sp,
                                        reg_class,
                                        error,
                                    });
                                    asm::InlineAsmRegClass::Err
                                },
                            )
                        } else {
                            asm::InlineAsmRegClass::Err
                        })
                    }
                };

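                // Illustrative (not part of the upstream source): an operand
                // may name either an explicit register or a register class;
                // both forms pass through `lower_reg` above:
                //
                //     asm!("mov {}, rsp", out(reg) x);    // class `reg`
                //     asm!("mov rax, rsp", out("rax") x); // explicit register
                //
                // A name the target does not know, e.g. `out("xyz") x`, is
                // lowered to `InlineAsmReg::Err` after `InvalidRegister`.
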
                let op = match op {
                    InlineAsmOperand::In { reg, expr } => hir::InlineAsmOperand::In {
                        reg: lower_reg(reg),
                        expr: self.lower_expr(expr),
                    },
                    InlineAsmOperand::Out { reg, late, expr } => hir::InlineAsmOperand::Out {
                        reg: lower_reg(reg),
                        late: *late,
                        expr: expr.as_ref().map(|expr| self.lower_expr(expr)),
                    },
                    InlineAsmOperand::InOut { reg, late, expr } => hir::InlineAsmOperand::InOut {
                        reg: lower_reg(reg),
                        late: *late,
                        expr: self.lower_expr(expr),
                    },
                    InlineAsmOperand::SplitInOut { reg, late, in_expr, out_expr } => {
                        hir::InlineAsmOperand::SplitInOut {
                            reg: lower_reg(reg),
                            late: *late,
                            in_expr: self.lower_expr(in_expr),
                            out_expr: out_expr.as_ref().map(|expr| self.lower_expr(expr)),
                        }
                    }
                    InlineAsmOperand::Const { anon_const } => {
                        if !self.tcx.features().asm_const {
                            feature_err(
                                &sess.parse_sess,
                                sym::asm_const,
                                *op_sp,
                                "const operands for inline assembly are unstable",
                            )
                            .emit();
                        }
                        hir::InlineAsmOperand::Const {
                            anon_const: self.lower_anon_const(anon_const),
                        }
                    }
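
                    // Illustrative: a const operand such as
                    //
                    //     asm!("mov {}, {}", out(reg) x, const 5);
                    //
                    // is feature-gated behind `asm_const` by the arm above.
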
                    InlineAsmOperand::Sym { sym } => {
                        let static_def_id = self
                            .resolver
                            .get_partial_res(sym.id)
                            .and_then(|res| res.full_res())
                            .and_then(|res| match res {
                                Res::Def(DefKind::Static(_), def_id) => Some(def_id),
                                _ => None,
                            });

                        if let Some(def_id) = static_def_id {
                            let path = self.lower_qpath(
                                sym.id,
                                &sym.qself,
                                &sym.path,
                                ParamMode::Optional,
                                &ImplTraitContext::Disallowed(ImplTraitPosition::Path),
                            );
                            hir::InlineAsmOperand::SymStatic { path, def_id }
                        } else {
                            // Replace the InlineAsmSym AST node with an
                            // Expr using the name node id.
                            let expr = Expr {
                                id: sym.id,
                                kind: ExprKind::Path(sym.qself.clone(), sym.path.clone()),
                                span: *op_sp,
                                attrs: AttrVec::new(),
                                tokens: None,
                            };

                            // Wrap the expression in an AnonConst.
                            let parent_def_id = self.current_hir_id_owner;
                            let node_id = self.next_node_id();
                            self.create_def(
                                parent_def_id.def_id,
                                node_id,
                                DefPathData::AnonConst,
                                *op_sp,
                            );
                            let anon_const = AnonConst { id: node_id, value: P(expr) };
                            hir::InlineAsmOperand::SymFn {
                                anon_const: self.lower_anon_const(&anon_const),
                            }
                        }
                    }
                };
                (op, self.lower_span(*op_sp))
            })
            .collect();
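
        // Illustrative: a `sym` operand becomes `SymStatic` when it resolves
        // to a `static`, and is otherwise wrapped in an `AnonConst` and
        // lowered to `SymFn`:
        //
        //     static COUNTER: u32 = 0;
        //     fn handler() {}
        //     asm!("lea rax, [rip + {}]", sym COUNTER); // -> SymStatic
        //     asm!("call {}", sym handler);             // -> SymFn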

        // Validate template modifiers against the register classes for the operands
        for p in &asm.template {
            if let InlineAsmTemplatePiece::Placeholder {
                operand_idx,
                modifier: Some(modifier),
                span: placeholder_span,
            } = *p
            {
                let op_sp = asm.operands[operand_idx].1;
                match &operands[operand_idx].0 {
                    hir::InlineAsmOperand::In { reg, .. }
                    | hir::InlineAsmOperand::Out { reg, .. }
                    | hir::InlineAsmOperand::InOut { reg, .. }
                    | hir::InlineAsmOperand::SplitInOut { reg, .. } => {
                        let class = reg.reg_class();
                        if class == asm::InlineAsmRegClass::Err {
                            continue;
                        }
                        let valid_modifiers = class.valid_modifiers(asm_arch.unwrap());
                        if !valid_modifiers.contains(&modifier) {
                            let sub = if !valid_modifiers.is_empty() {
                                let mut mods = format!("`{}`", valid_modifiers[0]);
                                for m in &valid_modifiers[1..] {
                                    let _ = write!(mods, ", `{m}`");
                                }
                                InvalidAsmTemplateModifierRegClassSub::SupportModifier {
                                    class_name: class.name(),
                                    modifiers: mods,
                                }
                            } else {
                                InvalidAsmTemplateModifierRegClassSub::DoesNotSupportModifier {
                                    class_name: class.name(),
                                }
                            };
                            sess.emit_err(InvalidAsmTemplateModifierRegClass {
                                placeholder_span,
                                op_span: op_sp,
                                sub,
                            });
                        }
                    }
                    hir::InlineAsmOperand::Const { .. } => {
                        sess.emit_err(InvalidAsmTemplateModifierConst {
                            placeholder_span,
                            op_span: op_sp,
                        });
                    }
                    hir::InlineAsmOperand::SymFn { .. }
                    | hir::InlineAsmOperand::SymStatic { .. } => {
                        sess.emit_err(InvalidAsmTemplateModifierSym {
                            placeholder_span,
                            op_span: op_sp,
                        });
                    }
                }
            }
        }
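
        // Illustrative: a template modifier selects a view of the operand's
        // register and must be valid for its register class. On x86_64:
        //
        //     asm!("mov {0:e}, {0:e}", inout(reg) x); // ok: `e` selects the
        //                                             // 32-bit register name
        //     asm!("mov {0:y}, {0:y}", inout(reg) x); // error: `y` is not a
        //                                             // modifier of `reg`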

        let mut used_input_regs = FxHashMap::default();
        let mut used_output_regs = FxHashMap::default();

        for (idx, &(ref op, op_sp)) in operands.iter().enumerate() {
            if let Some(reg) = op.reg() {
                let reg_class = reg.reg_class();
                if reg_class == asm::InlineAsmRegClass::Err {
                    continue;
                }

                // Some register classes can only be used as clobbers. This
                // means that we disallow passing a value in/out of the asm and
                // require that the operand name an explicit register, not a
                // register class.
                if reg_class.is_clobber_only(asm_arch.unwrap()) && !op.is_clobber() {
                    sess.emit_err(RegisterClassOnlyClobber {
                        op_span: op_sp,
                        reg_class_name: reg_class.name(),
                    });
                    continue;
                }
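
                // Illustrative: on x86_64 the `x87_reg` and `mmx_reg` classes
                // are clobber-only, so `out("st(1)") _` is accepted while
                // `in(x87_reg) x` is rejected by the check above.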

                // Check for conflicts between explicit register operands.
                if let asm::InlineAsmRegOrRegClass::Reg(reg) = reg {
                    let (input, output) = match op {
                        hir::InlineAsmOperand::In { .. } => (true, false),

                        // Late outputs do not conflict with inputs, but normal outputs do
                        hir::InlineAsmOperand::Out { late, .. } => (!late, true),

                        hir::InlineAsmOperand::InOut { .. }
                        | hir::InlineAsmOperand::SplitInOut { .. } => (true, true),

                        hir::InlineAsmOperand::Const { .. }
                        | hir::InlineAsmOperand::SymFn { .. }
                        | hir::InlineAsmOperand::SymStatic { .. } => {
                            unreachable!()
                        }
                    };

                    // Flag to output the error only once per operand
                    let mut skip = false;
                    reg.overlapping_regs(|r| {
                        let mut check = |used_regs: &mut FxHashMap<asm::InlineAsmReg, usize>,
                                         input| {
                            match used_regs.entry(r) {
                                Entry::Occupied(o) => {
                                    if skip {
                                        return;
                                    }
                                    skip = true;

                                    let idx2 = *o.get();
                                    let (ref op2, op_sp2) = operands[idx2];
                                    let Some(asm::InlineAsmRegOrRegClass::Reg(reg2)) = op2.reg() else {
                                        unreachable!();
                                    };

                                    let in_out = match (op, op2) {
                                        (
                                            hir::InlineAsmOperand::In { .. },
                                            hir::InlineAsmOperand::Out { late, .. },
                                        )
                                        | (
                                            hir::InlineAsmOperand::Out { late, .. },
                                            hir::InlineAsmOperand::In { .. },
                                        ) => {
                                            assert!(!*late);
                                            let out_op_sp = if input { op_sp2 } else { op_sp };
                                            Some(out_op_sp)
                                        }
                                        _ => None,
                                    };

                                    sess.emit_err(RegisterConflict {
                                        op_span1: op_sp,
                                        op_span2: op_sp2,
                                        reg1_name: reg.name(),
                                        reg2_name: reg2.name(),
                                        in_out,
                                    });
                                }
                                Entry::Vacant(v) => {
                                    if r == reg {
                                        v.insert(idx);
                                    }
                                }
                            }
                        };
                        if input {
                            check(&mut used_input_regs, true);
                        }
                        if output {
                            check(&mut used_output_regs, false);
                        }
                    });
                }
            }
        }

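        // Illustrative: two operands naming overlapping explicit registers
        // conflict unless the output is late:
        //
        //     asm!("...", in("eax") a, out("rax") b);     // error: conflict
        //     asm!("...", in("eax") a, lateout("rax") b); // ok: late output
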
        // If a clobber_abi is specified, add the necessary clobbers to the
        // operands list.
        let mut clobbered = FxHashSet::default();
        for (abi, (_, abi_span)) in clobber_abis {
            for &clobber in abi.clobbered_regs() {
                // Don't emit a clobber for a register already clobbered
                if clobbered.contains(&clobber) {
                    continue;
                }

                let mut output_used = false;
                clobber.overlapping_regs(|reg| {
                    if used_output_regs.contains_key(&reg) {
                        output_used = true;
                    }
                });

                if !output_used {
                    operands.push((
                        hir::InlineAsmOperand::Out {
                            reg: asm::InlineAsmRegOrRegClass::Reg(clobber),
                            late: true,
                            expr: None,
                        },
                        self.lower_span(abi_span),
                    ));
                    clobbered.insert(clobber);
                }
            }
        }
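
        // Illustrative: `clobber_abi("C")` on x86_64 behaves as if the caller
        // had written `lateout("rax") _, lateout("rcx") _, ...` for every
        // register the C ABI does not preserve, skipping registers that are
        // already used as outputs above.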

        let operands = self.arena.alloc_from_iter(operands);
        let template = self.arena.alloc_from_iter(asm.template.iter().cloned());
        let template_strs = self.arena.alloc_from_iter(
            asm.template_strs
                .iter()
                .map(|(sym, snippet, span)| (*sym, *snippet, self.lower_span(*span))),
        );
        let line_spans =
            self.arena.alloc_from_iter(asm.line_spans.iter().map(|span| self.lower_span(*span)));
        let hir_asm =
            hir::InlineAsm { template, template_strs, operands, options: asm.options, line_spans };
        self.arena.alloc(hir_asm)
    }
}