//! Codegen of `asm!` invocations.

use crate::prelude::*;

use std::fmt::Write;

use rustc_ast::ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_middle::mir::InlineAsmOperand;
use rustc_span::sym;
use rustc_target::asm::*;

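/// Lower a MIR inline `asm!` block. A few well-known templates are
/// special-cased below; everything else is compiled into a separate wrapper
/// function emitted as global asm and called like a regular function.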
pub(crate) fn codegen_inline_asm<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    _span: Span,
    template: &[InlineAsmTemplatePiece],
    operands: &[InlineAsmOperand<'tcx>],
    options: InlineAsmOptions,
) {
    // FIXME add .eh_frame unwind info directives

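    // `int $$0x29` is the fast-fail sequence (as used on Windows to abort the
    // process); lower it directly to a Cranelift trap instead of assembling it.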
    if template[0] == InlineAsmTemplatePiece::String("int $$0x29".to_string()) {
        let true_ = fx.bcx.ins().iconst(types::I32, 1);
        fx.bcx.ins().trapnz(true_, TrapCode::User(1));
        return;
    } else if template[0] == InlineAsmTemplatePiece::String("movq %rbx, ".to_string())
        && matches!(
            template[1],
            InlineAsmTemplatePiece::Placeholder { operand_idx: 0, modifier: Some('r'), span: _ }
        )
        && template[2] == InlineAsmTemplatePiece::String("\n".to_string())
        && template[3] == InlineAsmTemplatePiece::String("cpuid".to_string())
        && template[4] == InlineAsmTemplatePiece::String("\n".to_string())
        && template[5] == InlineAsmTemplatePiece::String("xchgq %rbx, ".to_string())
        && matches!(
            template[6],
            InlineAsmTemplatePiece::Placeholder { operand_idx: 0, modifier: Some('r'), span: _ }
        )
    {
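        // This matches the template used by the standard library's cpuid
        // wrapper (rbx is saved and restored because it may be reserved);
        // route it to the cpuid shim instead of assembling it.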
        assert_eq!(operands.len(), 4);
        let (leaf, eax_place) = match operands[1] {
            InlineAsmOperand::InOut { reg, late: true, ref in_value, out_place } => {
                assert_eq!(
                    reg,
                    InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::ax))
                );
                (
                    crate::base::codegen_operand(fx, in_value).load_scalar(fx),
                    crate::base::codegen_place(fx, out_place.unwrap()),
                )
            }
            _ => unreachable!(),
        };
        let ebx_place = match operands[0] {
            InlineAsmOperand::Out { reg, late: true, place } => {
                assert_eq!(
                    reg,
                    InlineAsmRegOrRegClass::RegClass(InlineAsmRegClass::X86(
                        X86InlineAsmRegClass::reg
                    ))
                );
                crate::base::codegen_place(fx, place.unwrap())
            }
            _ => unreachable!(),
        };
        let (sub_leaf, ecx_place) = match operands[2] {
            InlineAsmOperand::InOut { reg, late: true, ref in_value, out_place } => {
                assert_eq!(
                    reg,
                    InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::cx))
                );
                (
                    crate::base::codegen_operand(fx, in_value).load_scalar(fx),
                    crate::base::codegen_place(fx, out_place.unwrap()),
                )
            }
            _ => unreachable!(),
        };
        let edx_place = match operands[3] {
            InlineAsmOperand::Out { reg, late: true, place } => {
                assert_eq!(
                    reg,
                    InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::dx))
                );
                crate::base::codegen_place(fx, place.unwrap())
            }
            _ => unreachable!(),
        };

        let (eax, ebx, ecx, edx) = crate::intrinsics::codegen_cpuid_call(fx, leaf, sub_leaf);

        eax_place.write_cvalue(fx, CValue::by_val(eax, fx.layout_of(fx.tcx.types.u32)));
        ebx_place.write_cvalue(fx, CValue::by_val(ebx, fx.layout_of(fx.tcx.types.u32)));
        ecx_place.write_cvalue(fx, CValue::by_val(ecx, fx.layout_of(fx.tcx.types.u32)));
        edx_place.write_cvalue(fx, CValue::by_val(edx, fx.layout_of(fx.tcx.types.u32)));
        return;
    } else if fx.tcx.symbol_name(fx.instance).name.starts_with("___chkstk") {
        // ___chkstk, ___chkstk_ms and __alloca are only used on Windows
        crate::trap::trap_unimplemented(fx, "Stack probes are not supported");
    } else if fx.tcx.symbol_name(fx.instance).name == "__alloca" {
        crate::trap::trap_unimplemented(fx, "Alloca is not supported");
    }

    let mut inputs = Vec::new();
    let mut outputs = Vec::new();

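    // General case: build a wrapper function around the template. Inputs are
    // spilled to a scratch stack slot, the wrapper is called with the slot's
    // address, and outputs are read back from the slot afterwards.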
    let mut asm_gen = InlineAssemblyGenerator {
        tcx: fx.tcx,
        arch: fx.tcx.sess.asm_arch.unwrap(),
        enclosing_def_id: fx.instance.def_id(),
        template,
        operands,
        options,
        registers: Vec::new(),
        stack_slots_clobber: Vec::new(),
        stack_slots_input: Vec::new(),
        stack_slots_output: Vec::new(),
        stack_slot_size: Size::from_bytes(0),
    };
    asm_gen.allocate_registers();
    asm_gen.allocate_stack_slots();

    let inline_asm_index = fx.cx.inline_asm_index.get();
    fx.cx.inline_asm_index.set(inline_asm_index + 1);
    let asm_name = format!(
        "__inline_asm_{}_n{}",
        fx.cx.cgu_name.as_str().replace('.', "__").replace('-', "_"),
        inline_asm_index
    );
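    // e.g. a hypothetical CGU named `example.abc` yields wrappers
    // `__inline_asm_example__abc_n0`, `__inline_asm_example__abc_n1`, ...;
    // `.` and `-` are replaced to keep the name a plain symbol.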

    let generated_asm = asm_gen.generate_asm_wrapper(&asm_name);
    fx.cx.global_asm.push_str(&generated_asm);

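    // Collect (scratch-slot offset, value/place) pairs for the call below.
    // Const, SymFn and SymStatic operands are not implemented yet.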
    for (i, operand) in operands.iter().enumerate() {
        match *operand {
            InlineAsmOperand::In { reg: _, ref value } => {
                inputs.push((
                    asm_gen.stack_slots_input[i].unwrap(),
                    crate::base::codegen_operand(fx, value).load_scalar(fx),
                ));
            }
            InlineAsmOperand::Out { reg: _, late: _, place } => {
                if let Some(place) = place {
                    outputs.push((
                        asm_gen.stack_slots_output[i].unwrap(),
                        crate::base::codegen_place(fx, place),
                    ));
                }
            }
            InlineAsmOperand::InOut { reg: _, late: _, ref in_value, out_place } => {
                inputs.push((
                    asm_gen.stack_slots_input[i].unwrap(),
                    crate::base::codegen_operand(fx, in_value).load_scalar(fx),
                ));
                if let Some(out_place) = out_place {
                    outputs.push((
                        asm_gen.stack_slots_output[i].unwrap(),
                        crate::base::codegen_place(fx, out_place),
                    ));
                }
            }
            InlineAsmOperand::Const { value: _ } => todo!(),
            InlineAsmOperand::SymFn { value: _ } => todo!(),
            InlineAsmOperand::SymStatic { def_id: _ } => todo!(),
        }
    }

    call_inline_asm(fx, &asm_name, asm_gen.stack_slot_size, inputs, outputs);
}

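/// State for lowering a single `asm!` invocation: the registers picked by the
/// simple first-fit allocator in `allocate_registers` and the scratch-slot
/// offsets assigned by `allocate_stack_slots`.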
struct InlineAssemblyGenerator<'a, 'tcx> {
    tcx: TyCtxt<'tcx>,
    arch: InlineAsmArch,
    enclosing_def_id: DefId,
    template: &'a [InlineAsmTemplatePiece],
    operands: &'a [InlineAsmOperand<'tcx>],
    options: InlineAsmOptions,
    registers: Vec<Option<InlineAsmReg>>,
    stack_slots_clobber: Vec<Option<Size>>,
    stack_slots_input: Vec<Option<Size>>,
    stack_slots_output: Vec<Option<Size>>,
    stack_slot_size: Size,
}

impl<'tcx> InlineAssemblyGenerator<'_, 'tcx> {
    fn allocate_registers(&mut self) {
        let sess = self.tcx.sess;
        let map = allocatable_registers(
            self.arch,
            sess.relocation_model(),
            self.tcx.asm_target_features(self.enclosing_def_id),
            &sess.target,
        );
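        // For each allocated register the pair tracks (used as input, used as
        // output), so an `in` and a `lateout` operand may share a register.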
        let mut allocated = FxHashMap::<_, (bool, bool)>::default();
        let mut regs = vec![None; self.operands.len()];

        // Add explicit registers to the allocated set.
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::In { reg: InlineAsmRegOrRegClass::Reg(reg), .. } => {
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().0 = true;
                }
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::Reg(reg), late: true, ..
                } => {
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().1 = true;
                }
                InlineAsmOperand::Out { reg: InlineAsmRegOrRegClass::Reg(reg), .. }
                | InlineAsmOperand::InOut { reg: InlineAsmRegOrRegClass::Reg(reg), .. } => {
                    regs[i] = Some(reg);
                    allocated.insert(reg, (true, true));
                }
                _ => (),
            }
        }

        // Allocate out/inout/inlateout registers first because they are more constrained.
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::RegClass(class),
                    late: false,
                    ..
                }
                | InlineAsmOperand::InOut {
                    reg: InlineAsmRegOrRegClass::RegClass(class), ..
                } => {
                    let mut alloc_reg = None;
                    for &reg in &map[&class] {
                        let mut used = false;
                        reg.overlapping_regs(|r| {
                            if allocated.contains_key(&r) {
                                used = true;
                            }
                        });

                        if !used {
                            alloc_reg = Some(reg);
                            break;
                        }
                    }

                    let reg = alloc_reg.expect("cannot allocate registers");
                    regs[i] = Some(reg);
                    allocated.insert(reg, (true, true));
                }
                _ => (),
            }
        }

        // Allocate in/lateout.
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::In { reg: InlineAsmRegOrRegClass::RegClass(class), .. } => {
                    let mut alloc_reg = None;
                    for &reg in &map[&class] {
                        let mut used = false;
                        reg.overlapping_regs(|r| {
                            if allocated.get(&r).copied().unwrap_or_default().0 {
                                used = true;
                            }
                        });

                        if !used {
                            alloc_reg = Some(reg);
                            break;
                        }
                    }

                    let reg = alloc_reg.expect("cannot allocate registers");
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().0 = true;
                }
                InlineAsmOperand::Out {
                    reg: InlineAsmRegOrRegClass::RegClass(class),
                    late: true,
                    ..
                } => {
                    let mut alloc_reg = None;
                    for &reg in &map[&class] {
                        let mut used = false;
                        reg.overlapping_regs(|r| {
                            if allocated.get(&r).copied().unwrap_or_default().1 {
                                used = true;
                            }
                        });

                        if !used {
                            alloc_reg = Some(reg);
                            break;
                        }
                    }

                    let reg = alloc_reg.expect("cannot allocate registers");
                    regs[i] = Some(reg);
                    allocated.entry(reg).or_default().1 = true;
                }
                _ => (),
            }
        }

        self.registers = regs;
    }

    fn allocate_stack_slots(&mut self) {
        let mut slot_size = Size::from_bytes(0);
        let mut slots_clobber = vec![None; self.operands.len()];
        let mut slots_input = vec![None; self.operands.len()];
        let mut slots_output = vec![None; self.operands.len()];

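        // Bump allocator for scratch-area offsets: each slot is sized and
        // aligned for the largest value its register class can hold.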
        let new_slot_fn = |slot_size: &mut Size, reg_class: InlineAsmRegClass| {
            let reg_size =
                reg_class.supported_types(self.arch).iter().map(|(ty, _)| ty.size()).max().unwrap();
            let align = rustc_target::abi::Align::from_bytes(reg_size.bytes()).unwrap();
            let offset = slot_size.align_to(align);
            *slot_size = offset + reg_size;
            offset
        };
        let mut new_slot = |x| new_slot_fn(&mut slot_size, x);

        // Allocate stack slots for saving clobbered registers.
        let abi_clobber = InlineAsmClobberAbi::parse(self.arch, &self.tcx.sess.target, sym::C)
            .unwrap()
            .clobbered_regs();
        for (i, reg) in self.registers.iter().enumerate().filter_map(|(i, r)| r.map(|r| (i, r))) {
            let mut need_save = true;
            // If the register overlaps with a register clobbered by a function call,
            // then we don't need to save it.
            for r in abi_clobber {
                r.overlapping_regs(|r| {
                    if r == reg {
                        need_save = false;
                    }
                });

                if !need_save {
                    break;
                }
            }

            if need_save {
                slots_clobber[i] = Some(new_slot(reg.reg_class()));
            }
        }

        // Allocate stack slots for inout.
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::InOut { reg, out_place: Some(_), .. } => {
                    let slot = new_slot(reg.reg_class());
                    slots_input[i] = Some(slot);
                    slots_output[i] = Some(slot);
                }
                _ => (),
            }
        }

        let slot_size_before_input = slot_size;
        let mut new_slot = |x| new_slot_fn(&mut slot_size, x);

        // Allocate stack slots for input.
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::In { reg, .. }
                | InlineAsmOperand::InOut { reg, out_place: None, .. } => {
                    slots_input[i] = Some(new_slot(reg.reg_class()));
                }
                _ => (),
            }
        }

        // Reset slot size to before input so that input and output operands can overlap
        // and save some memory.
        let slot_size_after_input = slot_size;
        slot_size = slot_size_before_input;
        let mut new_slot = |x| new_slot_fn(&mut slot_size, x);

        // Allocate stack slots for output.
        for (i, operand) in self.operands.iter().enumerate() {
            match *operand {
                InlineAsmOperand::Out { reg, place: Some(_), .. } => {
                    slots_output[i] = Some(new_slot(reg.reg_class()));
                }
                _ => (),
            }
        }

        slot_size = slot_size.max(slot_size_after_input);

        self.stack_slots_clobber = slots_clobber;
        self.stack_slots_input = slots_input;
        self.stack_slots_output = slots_output;
        self.stack_slot_size = slot_size;
    }

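    /// Render the wrapper as textual assembly. The wrapper receives a single
    /// pointer to the scratch area: it saves clobbered callee-saved registers
    /// into it, loads the inputs, splices in the user template, then stores
    /// the outputs and restores the saved registers.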
    fn generate_asm_wrapper(&self, asm_name: &str) -> String {
        let mut generated_asm = String::new();
        writeln!(generated_asm, ".globl {}", asm_name).unwrap();
        writeln!(generated_asm, ".type {},@function", asm_name).unwrap();
        writeln!(generated_asm, ".section .text.{},\"ax\",@progbits", asm_name).unwrap();
        writeln!(generated_asm, "{}:", asm_name).unwrap();

        let is_x86 = matches!(self.arch, InlineAsmArch::X86 | InlineAsmArch::X86_64);

        if is_x86 {
            generated_asm.push_str(".intel_syntax noprefix\n");
        }
        Self::prologue(&mut generated_asm, self.arch);

        // Save clobbered registers
        if !self.options.contains(InlineAsmOptions::NORETURN) {
            for (reg, slot) in self
                .registers
                .iter()
                .zip(self.stack_slots_clobber.iter().copied())
                .filter_map(|(r, s)| r.zip(s))
            {
                Self::save_register(&mut generated_asm, self.arch, reg, slot);
            }
        }

        // Write input registers
        for (reg, slot) in self
            .registers
            .iter()
            .zip(self.stack_slots_input.iter().copied())
            .filter_map(|(r, s)| r.zip(s))
        {
            Self::restore_register(&mut generated_asm, self.arch, reg, slot);
        }

        if is_x86 && self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
            generated_asm.push_str(".att_syntax\n");
        }

        // The actual inline asm
        for piece in self.template {
            match piece {
                InlineAsmTemplatePiece::String(s) => {
                    generated_asm.push_str(s);
                }
                InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => {
                    if self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
                        generated_asm.push('%');
                    }
                    self.registers[*operand_idx]
                        .unwrap()
                        .emit(&mut generated_asm, self.arch, *modifier)
                        .unwrap();
                }
            }
        }
        generated_asm.push('\n');

        if is_x86 && self.options.contains(InlineAsmOptions::ATT_SYNTAX) {
            generated_asm.push_str(".intel_syntax noprefix\n");
        }

        if !self.options.contains(InlineAsmOptions::NORETURN) {
            // Read output registers
            for (reg, slot) in self
                .registers
                .iter()
                .zip(self.stack_slots_output.iter().copied())
                .filter_map(|(r, s)| r.zip(s))
            {
                Self::save_register(&mut generated_asm, self.arch, reg, slot);
            }

            // Restore clobbered registers
            for (reg, slot) in self
                .registers
                .iter()
                .zip(self.stack_slots_clobber.iter().copied())
                .filter_map(|(r, s)| r.zip(s))
            {
                Self::restore_register(&mut generated_asm, self.arch, reg, slot);
            }

            Self::epilogue(&mut generated_asm, self.arch);
        } else {
            Self::epilogue_noreturn(&mut generated_asm, self.arch);
        }

        if is_x86 {
            generated_asm.push_str(".att_syntax\n");
        }
        writeln!(generated_asm, ".size {name}, .-{name}", name = asm_name).unwrap();
        generated_asm.push_str(".text\n");
        generated_asm.push_str("\n\n");

        generated_asm
    }

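    /// Emit the wrapper prologue: save the old frame register and move the
    /// scratch-area pointer (passed in the first argument register, e.g. rdi
    /// or a0) into it. On 32-bit x86 the argument is loaded from the stack.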
    fn prologue(generated_asm: &mut String, arch: InlineAsmArch) {
        match arch {
            InlineAsmArch::X86 => {
                generated_asm.push_str("    push ebp\n");
                generated_asm.push_str("    mov ebp,[esp+8]\n");
            }
            InlineAsmArch::X86_64 => {
                generated_asm.push_str("    push rbp\n");
                generated_asm.push_str("    mov rbp,rdi\n");
            }
            InlineAsmArch::RiscV32 => {
                generated_asm.push_str("    addi sp, sp, -8\n");
                generated_asm.push_str("    sw ra, 4(sp)\n");
                generated_asm.push_str("    sw s0, 0(sp)\n");
                generated_asm.push_str("    mv s0, a0\n");
            }
            InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    addi sp, sp, -16\n");
                generated_asm.push_str("    sd ra, 8(sp)\n");
                generated_asm.push_str("    sd s0, 0(sp)\n");
                generated_asm.push_str("    mv s0, a0\n");
            }
            _ => unimplemented!("prologue for {:?}", arch),
        }
    }

    fn epilogue(generated_asm: &mut String, arch: InlineAsmArch) {
        match arch {
            InlineAsmArch::X86 => {
                generated_asm.push_str("    pop ebp\n");
                generated_asm.push_str("    ret\n");
            }
            InlineAsmArch::X86_64 => {
                generated_asm.push_str("    pop rbp\n");
                generated_asm.push_str("    ret\n");
            }
            InlineAsmArch::RiscV32 => {
                generated_asm.push_str("    lw s0, 0(sp)\n");
                generated_asm.push_str("    lw ra, 4(sp)\n");
                generated_asm.push_str("    addi sp, sp, 8\n");
                generated_asm.push_str("    ret\n");
            }
            InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    ld s0, 0(sp)\n");
                generated_asm.push_str("    ld ra, 8(sp)\n");
                generated_asm.push_str("    addi sp, sp, 16\n");
                generated_asm.push_str("    ret\n");
            }
            _ => unimplemented!("epilogue for {:?}", arch),
        }
    }

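    /// For `noreturn` asm there is nothing to restore; emit a trapping
    /// instruction so that falling off the end of the template faults
    /// instead of returning.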
    fn epilogue_noreturn(generated_asm: &mut String, arch: InlineAsmArch) {
        match arch {
            InlineAsmArch::X86 | InlineAsmArch::X86_64 => {
                generated_asm.push_str("    ud2\n");
            }
            InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    ebreak\n");
            }
            _ => unimplemented!("epilogue_noreturn for {:?}", arch),
        }
    }

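    /// Store `reg` to its assigned offset in the scratch area, addressed via
    /// the frame register set up by `prologue`.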
    fn save_register(
        generated_asm: &mut String,
        arch: InlineAsmArch,
        reg: InlineAsmReg,
        offset: Size,
    ) {
        match arch {
            InlineAsmArch::X86 => {
                write!(generated_asm, "    mov [ebp+0x{:x}], ", offset.bytes()).unwrap();
                reg.emit(generated_asm, InlineAsmArch::X86, None).unwrap();
                generated_asm.push('\n');
            }
            InlineAsmArch::X86_64 => {
                write!(generated_asm, "    mov [rbp+0x{:x}], ", offset.bytes()).unwrap();
                reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
                generated_asm.push('\n');
            }
            InlineAsmArch::RiscV32 => {
                generated_asm.push_str("    sw ");
                reg.emit(generated_asm, InlineAsmArch::RiscV32, None).unwrap();
                writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
            }
            InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    sd ");
                reg.emit(generated_asm, InlineAsmArch::RiscV64, None).unwrap();
                writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
            }
            _ => unimplemented!("save_register for {:?}", arch),
        }
    }

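    /// Load `reg` back from its assigned offset in the scratch area.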
    fn restore_register(
        generated_asm: &mut String,
        arch: InlineAsmArch,
        reg: InlineAsmReg,
        offset: Size,
    ) {
        match arch {
            InlineAsmArch::X86 => {
                generated_asm.push_str("    mov ");
                reg.emit(generated_asm, InlineAsmArch::X86, None).unwrap();
                writeln!(generated_asm, ", [ebp+0x{:x}]", offset.bytes()).unwrap();
            }
            InlineAsmArch::X86_64 => {
                generated_asm.push_str("    mov ");
                reg.emit(generated_asm, InlineAsmArch::X86_64, None).unwrap();
                writeln!(generated_asm, ", [rbp+0x{:x}]", offset.bytes()).unwrap();
            }
            InlineAsmArch::RiscV32 => {
                generated_asm.push_str("    lw ");
                reg.emit(generated_asm, InlineAsmArch::RiscV32, None).unwrap();
                writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
            }
            InlineAsmArch::RiscV64 => {
                generated_asm.push_str("    ld ");
                reg.emit(generated_asm, InlineAsmArch::RiscV64, None).unwrap();
                writeln!(generated_asm, ", 0x{:x}(s0)", offset.bytes()).unwrap();
            }
            _ => unimplemented!("restore_register for {:?}", arch),
        }
    }
}

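/// Emit the Cranelift IR that invokes a generated `__inline_asm_*` wrapper:
/// allocate the scratch slot, spill the inputs into it, call the wrapper with
/// the slot's address, and read the outputs back out.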
fn call_inline_asm<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    asm_name: &str,
    slot_size: Size,
    inputs: Vec<(Size, Value)>,
    outputs: Vec<(Size, CPlace<'tcx>)>,
) {
    let stack_slot = fx.bcx.func.create_stack_slot(StackSlotData {
        kind: StackSlotKind::ExplicitSlot,
        size: u32::try_from(slot_size.bytes()).unwrap(),
    });
    if fx.clif_comments.enabled() {
        fx.add_comment(stack_slot, "inline asm scratch slot");
    }

    let inline_asm_func = fx
        .module
        .declare_function(
            asm_name,
            Linkage::Import,
            &Signature {
                call_conv: CallConv::SystemV,
                params: vec![AbiParam::new(fx.pointer_type)],
                returns: vec![],
            },
        )
        .unwrap();
    let inline_asm_func = fx.module.declare_func_in_func(inline_asm_func, &mut fx.bcx.func);
    if fx.clif_comments.enabled() {
        fx.add_comment(inline_asm_func, asm_name);
    }

    for (offset, value) in inputs {
        fx.bcx.ins().stack_store(value, stack_slot, i32::try_from(offset.bytes()).unwrap());
    }

    let stack_slot_addr = fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0);
    fx.bcx.ins().call(inline_asm_func, &[stack_slot_addr]);

    for (offset, place) in outputs {
        let ty = fx.clif_type(place.layout().ty).unwrap();
        let value = fx.bcx.ins().stack_load(ty, stack_slot, i32::try_from(offset.bytes()).unwrap());
        place.write_cvalue(fx, CValue::by_val(value, place.layout()));
    }
}