use crate::builder::Builder;
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;
use rustc_codegen_ssa::mir::operand::OperandRef;
use rustc_codegen_ssa::{
    common::IntPredicate,
    traits::{BaseTypeMethods, BuilderMethods, ConstMethods, DerivedTypeMethods},
};
use rustc_middle::ty::layout::HasTyCtxt;
use rustc_middle::ty::Ty;
use rustc_target::abi::{Align, Endian, HasDataLayout, LayoutOf, Size};
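
// Rounds `addr` up to the next multiple of `align` with the usual
// power-of-two trick: (ptr + align - 1) & -align. `Align` guarantees a
// power-of-two value, so the mask is well-formed.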
fn round_pointer_up_to_alignment(
    bx: &mut Builder<'a, 'll, 'tcx>,
    addr: &'ll Value,
    align: Align,
    ptr_ty: &'ll Type,
) -> &'ll Value {
    let mut ptr_as_int = bx.ptrtoint(addr, bx.cx().type_isize());
    ptr_as_int = bx.add(ptr_as_int, bx.cx().const_i32(align.bytes() as i32 - 1));
    ptr_as_int = bx.and(ptr_as_int, bx.cx().const_i32(-(align.bytes() as i32)));
    bx.inttoptr(ptr_as_int, ptr_ty)
}
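
// `emit_direct_ptr_va_arg` implements the common "pointer bump" va_list
// scheme: the va_list holds a single pointer into the argument save area.
// We load it, align it if necessary, advance it past the current slot, and
// store it back, returning the argument's address and alignment.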
fn emit_direct_ptr_va_arg(
    bx: &mut Builder<'a, 'll, 'tcx>,
    list: OperandRef<'tcx, &'ll Value>,
    llty: &'ll Type,
    size: Size,
    align: Align,
    slot_size: Align,
    allow_higher_align: bool,
) -> (&'ll Value, Align) {
    let va_list_ty = bx.type_i8p();
    let va_list_ptr_ty = bx.type_ptr_to(va_list_ty);
    let va_list_addr = if list.layout.llvm_type(bx.cx) != va_list_ptr_ty {
        bx.bitcast(list.immediate(), va_list_ptr_ty)
    } else {
        list.immediate()
    };
    let ptr = bx.load(va_list_ty, va_list_addr, bx.tcx().data_layout.pointer_align.abi);
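
    // If the type needs more alignment than the slot guarantees (and the ABI
    // allows it), round the current pointer up; otherwise the slot alignment
    // is all we can assume.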
    let (addr, addr_align) = if allow_higher_align && align > slot_size {
        (round_pointer_up_to_alignment(bx, ptr, align, bx.cx().type_i8p()), align)
    } else {
        (ptr, slot_size)
    };
    let aligned_size = size.align_to(slot_size).bytes() as i32;
    let full_direct_size = bx.cx().const_i32(aligned_size);
    let next = bx.inbounds_gep(addr, &[full_direct_size]);
    bx.store(next, va_list_addr, bx.tcx().data_layout.pointer_align.abi);
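
    // On big-endian targets a value smaller than its slot is stored in the
    // slot's high-order bytes, so step the pointer forward to where the
    // value actually starts.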
    if size.bytes() < slot_size.bytes() && bx.tcx().sess.target.endian == Endian::Big {
        let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32);
        let adjusted = bx.inbounds_gep(addr, &[adjusted_size]);
        (bx.bitcast(adjusted, bx.cx().type_ptr_to(llty)), addr_align)
    } else {
        (bx.bitcast(addr, bx.cx().type_ptr_to(llty)), addr_align)
    }
}
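
// `emit_ptr_va_arg` wraps the direct scheme above. With `indirect` set, the
// va_list slot holds a pointer to the value rather than the value itself, so
// an extra load is required.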
fn emit_ptr_va_arg(
    bx: &mut Builder<'a, 'll, 'tcx>,
    list: OperandRef<'tcx, &'ll Value>,
    target_ty: Ty<'tcx>,
    indirect: bool,
    slot_size: Align,
    allow_higher_align: bool,
) -> &'ll Value {
    let layout = bx.cx.layout_of(target_ty);
    let (llty, size, align) = if indirect {
        (
            bx.cx.layout_of(bx.cx.tcx.mk_imm_ptr(target_ty)).llvm_type(bx.cx),
            bx.cx.data_layout().pointer_size,
            bx.cx.data_layout().pointer_align,
        )
    } else {
        (layout.llvm_type(bx.cx), layout.size, layout.align)
    };
    let (addr, addr_align) =
        emit_direct_ptr_va_arg(bx, list, llty, size, align.abi, slot_size, allow_higher_align);
    if indirect {
        let tmp_ret = bx.load(llty, addr, addr_align);
        bx.load(bx.cx.layout_of(target_ty).llvm_type(bx.cx), tmp_ret, align.abi)
    } else {
        bx.load(llty, addr, addr_align)
    }
}

fn emit_aapcs_va_arg(
    bx: &mut Builder<'a, 'll, 'tcx>,
    list: OperandRef<'tcx, &'ll Value>,
    target_ty: Ty<'tcx>,
) -> &'ll Value {
    // Implementation of the AAPCS64 calling convention for va_arg; see
    // https://github.com/ARM-software/abi-aa/blob/master/aapcs64/aapcs64.rst
    let va_list_addr = list.immediate();
    let layout = bx.cx.layout_of(target_ty);

    let mut maybe_reg = bx.build_sibling_block("va_arg.maybe_reg");
    let mut in_reg = bx.build_sibling_block("va_arg.in_reg");
    let mut on_stack = bx.build_sibling_block("va_arg.on_stack");
    let mut end = bx.build_sibling_block("va_arg.end");
    let zero = bx.const_i32(0);
    let offset_align = Align::from_bytes(4).unwrap();

    let gr_type = target_ty.is_any_ptr() || target_ty.is_integral();
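    // The AAPCS64 va_list records, per register class, a (negative) offset
    // from the top of the register save area: gr_offs for general-purpose
    // registers and vr_offs for vector registers. Its fields are (stack,
    // gr_top, vr_top, gr_offs, vr_offs); the GEP indices below differ from
    // the field numbers because the LLVM lowering of the struct interleaves
    // explicit padding members, placing source field `i` at LLVM index
    // `2 * i + 1`.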
    let (reg_off, reg_top_index, slot_size) = if gr_type {
        let gr_offs = bx.struct_gep(va_list_addr, 7);
        let nreg = (layout.size.bytes() + 7) / 8;
        (gr_offs, 3, nreg * 8)
    } else {
        let vr_off = bx.struct_gep(va_list_addr, 9);
        let nreg = (layout.size.bytes() + 15) / 16;
        (vr_off, 5, nreg * 16)
    };
    // if the offset >= 0 then the value will be on the stack
    let mut reg_off_v = bx.load(bx.type_i32(), reg_off, offset_align);
    let use_stack = bx.icmp(IntPredicate::IntSGE, reg_off_v, zero);
    bx.cond_br(use_stack, &on_stack.llbb(), &maybe_reg.llbb());

    // The value at this point might be in a register, but there is a chance
    // that it could be on the stack, so we have to update the offset and then
    // check the offset again.
    if gr_type && layout.align.abi.bytes() > 8 {
        reg_off_v = maybe_reg.add(reg_off_v, bx.const_i32(15));
        reg_off_v = maybe_reg.and(reg_off_v, bx.const_i32(-16));
    }
    let new_reg_off_v = maybe_reg.add(reg_off_v, bx.const_i32(slot_size as i32));
    maybe_reg.store(new_reg_off_v, reg_off, offset_align);

    // Check to see if we have overflowed the registers as a result of this.
    // If we have, then we need to use the stack for this value.
    let use_stack = maybe_reg.icmp(IntPredicate::IntSGT, new_reg_off_v, zero);
    maybe_reg.cond_br(use_stack, &on_stack.llbb(), &in_reg.llbb());
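
    // In-register path: the value lives in the register save area, at the
    // (negative) offset we loaded earlier from the top of that area.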
    let top_type = bx.type_i8p();
    let top = in_reg.struct_gep(va_list_addr, reg_top_index);
    let top = in_reg.load(top_type, top, bx.tcx().data_layout.pointer_align.abi);
    // reg_value = *(@top + reg_off_v);
    let mut reg_addr = in_reg.gep(top, &[reg_off_v]);
    if bx.tcx().sess.target.endian == Endian::Big && layout.size.bytes() != slot_size {
        // On big-endian systems the value is right-aligned in its slot.
        let offset = bx.const_i32((slot_size - layout.size.bytes()) as i32);
        reg_addr = in_reg.gep(reg_addr, &[offset]);
    }
    let reg_type = layout.llvm_type(bx);
    let reg_addr = in_reg.bitcast(reg_addr, bx.cx.type_ptr_to(reg_type));
    let reg_value = in_reg.load(reg_type, reg_addr, layout.align.abi);
    in_reg.br(&end.llbb());

    // On-stack path: fall back to the plain pointer-bump scheme with
    // 8-byte slots.
    let stack_value =
        emit_ptr_va_arg(&mut on_stack, list, target_ty, false, Align::from_bytes(8).unwrap(), true);
    on_stack.br(&end.llbb());
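
    // Merge the two paths: a phi in the `end` block selects whichever value
    // was produced, and codegen continues from `end`.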
    let val = end.phi(
        layout.immediate_llvm_type(bx),
        &[reg_value, stack_value],
        &[&in_reg.llbb(), &on_stack.llbb()],
    );

    *bx = end;
    val
}

pub(super) fn emit_va_arg(
    bx: &mut Builder<'a, 'll, 'tcx>,
    addr: OperandRef<'tcx, &'ll Value>,
    target_ty: Ty<'tcx>,
) -> &'ll Value {
    // Determine the va_arg implementation to use. The LLVM va_arg instruction
    // is lacking in some instances, so we should only use it as a fallback.
    let target = &bx.cx.tcx.sess.target;
    let arch = &bx.cx.tcx.sess.target.arch;
185 "x86" if target
.is_like_windows
=> {
186 emit_ptr_va_arg(bx
, addr
, target_ty
, false, Align
::from_bytes(4).unwrap(), false)
189 "x86" => emit_ptr_va_arg(bx
, addr
, target_ty
, false, Align
::from_bytes(4).unwrap(), true),
191 "aarch64" if target
.is_like_windows
=> {
192 emit_ptr_va_arg(bx
, addr
, target_ty
, false, Align
::from_bytes(8).unwrap(), false)
        // macOS / iOS AArch64
        "aarch64" if target.is_like_osx => {
            emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), true)
        }
198 "aarch64" => emit_aapcs_va_arg(bx
, addr
, target_ty
),
200 "x86_64" if target
.is_like_windows
=> {
201 let target_ty_size
= bx
.cx
.size_of(target_ty
).bytes();
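            // The Windows x64 ABI passes arguments larger than 8 bytes, or
            // whose size is not a power of two, by reference.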
            let indirect: bool = target_ty_size > 8 || !target_ty_size.is_power_of_two();
            emit_ptr_va_arg(bx, addr, target_ty, indirect, Align::from_bytes(8).unwrap(), false)
        }
        // For all other architecture/OS combinations fall back to using
        // the LLVM va_arg instruction.
        // https://llvm.org/docs/LangRef.html#va-arg-instruction
        _ => bx.va_arg(addr.immediate(), bx.cx.layout_of(target_ty).llvm_type(bx.cx)),
    }
}