// compiler/rustc_codegen_ssa/src/traits/builder.rs (rustc 1.61.0)
use super::abi::AbiBuilderMethods;
use super::asm::AsmBuilderMethods;
use super::consts::ConstMethods;
use super::coverageinfo::CoverageInfoBuilderMethods;
use super::debuginfo::DebugInfoBuilderMethods;
use super::intrinsic::IntrinsicCallMethods;
use super::misc::MiscMethods;
use super::type_::{ArgAbiMethods, BaseTypeMethods};
use super::{HasCodegen, StaticBuilderMethods};

use crate::common::{
    AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope, TypeKind,
};
use crate::mir::operand::OperandRef;
use crate::mir::place::PlaceRef;
use crate::MemFlags;

use rustc_apfloat::{ieee, Float, Round, Status};
use rustc_middle::ty::layout::{HasParamEnv, TyAndLayout};
use rustc_middle::ty::Ty;
use rustc_span::Span;
use rustc_target::abi::{Abi, Align, Scalar, Size, WrappingRange};
use rustc_target::spec::HasTargetSpec;

#[derive(Copy, Clone)]
pub enum OverflowOp {
    Add,
    Sub,
    Mul,
}

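/// An illustrative sketch (not from this file; `Bx`, `cx`, `llfn`, `lhs`, and
/// `rhs` are hypothetical) of how codegen might drive an implementor:
///
/// ```ignore (illustrative)
/// let llbb = Bx::append_block(cx, llfn, "start");
/// let mut bx = Bx::build(cx, llbb);
/// let sum = bx.add(lhs, rhs);
/// bx.ret(sum);
/// ```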
pub trait BuilderMethods<'a, 'tcx>:
    HasCodegen<'tcx>
    + CoverageInfoBuilderMethods<'tcx>
    + DebugInfoBuilderMethods
    + ArgAbiMethods<'tcx>
    + AbiBuilderMethods<'tcx>
    + IntrinsicCallMethods<'tcx>
    + AsmBuilderMethods<'tcx>
    + StaticBuilderMethods
    + HasParamEnv<'tcx>
    + HasTargetSpec
{
    fn build(cx: &'a Self::CodegenCx, llbb: Self::BasicBlock) -> Self;

    fn cx(&self) -> &Self::CodegenCx;
    fn llbb(&self) -> Self::BasicBlock;

    fn set_span(&mut self, span: Span);

    // FIXME(eddyb) replace uses of this with `append_sibling_block`.
    fn append_block(cx: &'a Self::CodegenCx, llfn: Self::Function, name: &str) -> Self::BasicBlock;

    fn append_sibling_block(&mut self, name: &str) -> Self::BasicBlock;

    fn switch_to_block(&mut self, llbb: Self::BasicBlock);
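
    // A hedged sketch of how a caller typically combines the block-management
    // methods above (identifiers are hypothetical, not from this file):
    //
    //     let then_bb = bx.append_sibling_block("then");
    //     let else_bb = bx.append_sibling_block("else");
    //     bx.cond_br(cond, then_bb, else_bb);
    //     bx.switch_to_block(then_bb); // later instructions land in "then"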

    fn ret_void(&mut self);
    fn ret(&mut self, v: Self::Value);
    fn br(&mut self, dest: Self::BasicBlock);
    fn cond_br(
        &mut self,
        cond: Self::Value,
        then_llbb: Self::BasicBlock,
        else_llbb: Self::BasicBlock,
    );
    fn switch(
        &mut self,
        v: Self::Value,
        else_llbb: Self::BasicBlock,
        cases: impl ExactSizeIterator<Item = (u128, Self::BasicBlock)>,
    );
    fn invoke(
        &mut self,
        llty: Self::Type,
        llfn: Self::Value,
        args: &[Self::Value],
        then: Self::BasicBlock,
        catch: Self::BasicBlock,
        funclet: Option<&Self::Funclet>,
    ) -> Self::Value;
    fn unreachable(&mut self);

    fn add(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fadd_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn sub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fsub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fsub_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn mul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fmul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fmul_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn udiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn exactudiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn sdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn exactsdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fdiv(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fdiv_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn urem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn srem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn frem(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn frem_fast(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn shl(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn lshr(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn ashr(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn unchecked_sadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn unchecked_uadd(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn unchecked_ssub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn unchecked_usub(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn unchecked_smul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn unchecked_umul(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn and(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn or(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn xor(&mut self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn neg(&mut self, v: Self::Value) -> Self::Value;
    fn fneg(&mut self, v: Self::Value) -> Self::Value;
    fn not(&mut self, v: Self::Value) -> Self::Value;

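    /// A hedged usage sketch (the call site shown is hypothetical, not from
    /// this file): lowering a MIR `CheckedBinaryOp(Add, ..)` might look like
    ///
    /// ```ignore (illustrative)
    /// let (val, overflow) = bx.checked_binop(OverflowOp::Add, ty, lhs, rhs);
    /// // `val` is the wrapping result; `overflow` is a boolean flag that the
    /// // caller typically packs into a pair and later branches on to panic.
    /// ```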
    fn checked_binop(
        &mut self,
        oop: OverflowOp,
        ty: Ty<'_>,
        lhs: Self::Value,
        rhs: Self::Value,
    ) -> (Self::Value, Self::Value);

    fn from_immediate(&mut self, val: Self::Value) -> Self::Value;
    fn to_immediate(&mut self, val: Self::Value, layout: TyAndLayout<'_>) -> Self::Value {
        if let Abi::Scalar(scalar) = layout.abi {
            self.to_immediate_scalar(val, scalar)
        } else {
            val
        }
    }
    fn to_immediate_scalar(&mut self, val: Self::Value, scalar: Scalar) -> Self::Value;
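    // E.g. (illustrative, backend-dependent): for `bool`, whose layout is
    // `Abi::Scalar`, the default `to_immediate` above ends up here, and an
    // LLVM-like backend typically truncates the in-memory `i8` to an `i1`.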

    fn alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value;
    fn dynamic_alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value;
    fn array_alloca(&mut self, ty: Self::Type, len: Self::Value, align: Align) -> Self::Value;

    fn load(&mut self, ty: Self::Type, ptr: Self::Value, align: Align) -> Self::Value;
    fn volatile_load(&mut self, ty: Self::Type, ptr: Self::Value) -> Self::Value;
    fn atomic_load(
        &mut self,
        ty: Self::Type,
        ptr: Self::Value,
        order: AtomicOrdering,
        size: Size,
    ) -> Self::Value;
    fn load_operand(&mut self, place: PlaceRef<'tcx, Self::Value>)
        -> OperandRef<'tcx, Self::Value>;

    /// Called for `Rvalue::Repeat` when the element is neither a ZST nor optimizable using `memset`.
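    ///
    /// A hedged sketch of the call site (hypothetical, not from this file):
    ///
    /// ```ignore (illustrative)
    /// // Lowering `[init; N]`: takes the builder by value and returns the
    /// // (possibly repositioned) builder.
    /// bx = bx.write_operand_repeatedly(init_operand, n, dest);
    /// ```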
    fn write_operand_repeatedly(
        self,
        elem: OperandRef<'tcx, Self::Value>,
        count: u64,
        dest: PlaceRef<'tcx, Self::Value>,
    ) -> Self;

    fn range_metadata(&mut self, load: Self::Value, range: WrappingRange);
    fn nonnull_metadata(&mut self, load: Self::Value);
    fn type_metadata(&mut self, function: Self::Function, typeid: String);
    fn typeid_metadata(&mut self, typeid: String) -> Self::Value;

    fn store(&mut self, val: Self::Value, ptr: Self::Value, align: Align) -> Self::Value;
    fn store_with_flags(
        &mut self,
        val: Self::Value,
        ptr: Self::Value,
        align: Align,
        flags: MemFlags,
    ) -> Self::Value;
    fn atomic_store(
        &mut self,
        val: Self::Value,
        ptr: Self::Value,
        order: AtomicOrdering,
        size: Size,
    );

    fn gep(&mut self, ty: Self::Type, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value;
    fn inbounds_gep(
        &mut self,
        ty: Self::Type,
        ptr: Self::Value,
        indices: &[Self::Value],
    ) -> Self::Value;
    fn struct_gep(&mut self, ty: Self::Type, ptr: Self::Value, idx: u64) -> Self::Value;

    fn trunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn sext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn fptoui_sat(&mut self, val: Self::Value, dest_ty: Self::Type) -> Option<Self::Value>;
    fn fptosi_sat(&mut self, val: Self::Value, dest_ty: Self::Type) -> Option<Self::Value>;
    fn fptoui(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn fptosi(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn uitofp(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn sitofp(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn fptrunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn fpext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn ptrtoint(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn inttoptr(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn bitcast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
    fn intcast(&mut self, val: Self::Value, dest_ty: Self::Type, is_signed: bool) -> Self::Value;
    fn pointercast(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;

    fn cast_float_to_int(
        &mut self,
        signed: bool,
        x: Self::Value,
        dest_ty: Self::Type,
    ) -> Self::Value {
        let in_ty = self.cx().val_ty(x);
        let (float_ty, int_ty) = if self.cx().type_kind(dest_ty) == TypeKind::Vector
            && self.cx().type_kind(in_ty) == TypeKind::Vector
        {
            (self.cx().element_type(in_ty), self.cx().element_type(dest_ty))
        } else {
            (in_ty, dest_ty)
        };
        assert!(matches!(self.cx().type_kind(float_ty), TypeKind::Float | TypeKind::Double));
        assert_eq!(self.cx().type_kind(int_ty), TypeKind::Integer);

        if let Some(false) = self.cx().sess().opts.debugging_opts.saturating_float_casts {
            return if signed { self.fptosi(x, dest_ty) } else { self.fptoui(x, dest_ty) };
        }

        let try_sat_result =
            if signed { self.fptosi_sat(x, dest_ty) } else { self.fptoui_sat(x, dest_ty) };
        if let Some(try_sat_result) = try_sat_result {
            return try_sat_result;
        }

        let int_width = self.cx().int_width(int_ty);
        let float_width = self.cx().float_width(float_ty);
        // LLVM's fpto[su]i returns undef when the input x is infinite, NaN, or does not fit into the
        // destination integer type after rounding towards zero. This `undef` value can cause UB in
        // safe code (see issue #10184), so we implement a saturating conversion on top of it:
        // Semantically, the mathematical value of the input is rounded towards zero to the next
        // mathematical integer, and then the result is clamped into the range of the destination
        // integer type. Positive and negative infinity are mapped to the maximum and minimum value of
        // the destination integer type. NaN is mapped to 0.
        //
        // Define f_min and f_max as the smallest and largest (finite) floats that are exactly equal to
        // a value representable in int_ty.
        // They are exactly equal to int_ty::{MIN,MAX} if float_ty has enough significand bits.
        // Otherwise, int_ty::MAX must be rounded towards zero, as it is one less than a power of two.
        // int_ty::MIN, however, is either zero or a negative power of two and is thus exactly
        // representable. Note that this only works if float_ty's exponent range is sufficiently large.
        // f16 or 256-bit integers would break this property. Right now the smallest float type is f32
        // with exponents ranging up to 127, which is barely enough for i128::MIN = -2^127.
        // On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because
        // we're rounding towards zero, we just get float_ty::MAX (which is always an integer).
        // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX.
        let int_max = |signed: bool, int_width: u64| -> u128 {
            let shift_amount = 128 - int_width;
            if signed { i128::MAX as u128 >> shift_amount } else { u128::MAX >> shift_amount }
        };
        let int_min = |signed: bool, int_width: u64| -> i128 {
            if signed { i128::MIN >> (128 - int_width) } else { 0 }
        };
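        // Worked example (a sanity check, not from the original): for i32,
        // int_width = 32, so int_max(true, 32) = i128::MAX >> 96 = 2^31 - 1
        // and int_min(true, 32) = i128::MIN >> 96 = -2^31, i.e. exactly
        // i32::MAX and i32::MIN.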

        let compute_clamp_bounds_single = |signed: bool, int_width: u64| -> (u128, u128) {
            let rounded_min =
                ieee::Single::from_i128_r(int_min(signed, int_width), Round::TowardZero);
            assert_eq!(rounded_min.status, Status::OK);
            let rounded_max =
                ieee::Single::from_u128_r(int_max(signed, int_width), Round::TowardZero);
            assert!(rounded_max.value.is_finite());
            (rounded_min.value.to_bits(), rounded_max.value.to_bits())
        };
        let compute_clamp_bounds_double = |signed: bool, int_width: u64| -> (u128, u128) {
            let rounded_min =
                ieee::Double::from_i128_r(int_min(signed, int_width), Round::TowardZero);
            assert_eq!(rounded_min.status, Status::OK);
            let rounded_max =
                ieee::Double::from_u128_r(int_max(signed, int_width), Round::TowardZero);
            assert!(rounded_max.value.is_finite());
            (rounded_min.value.to_bits(), rounded_max.value.to_bits())
        };
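        // Worked example (illustrative, not from the original): for an
        // f32 -> i32 cast, int_min = -2^31 is a power of two and thus exactly
        // representable, so f_min == -2147483648.0, while int_max = 2^31 - 1
        // exceeds f32's 24-bit significand; rounding toward zero yields
        // f_max == 2147483520.0, the largest f32 below 2^31.
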
        // To implement saturation, we perform the following steps:
        //
        // 1. Cast x to an integer with fpto[su]i. This may result in undef.
        // 2. Compare x to f_min and f_max, and use the comparison results to select:
        //    a) int_ty::MIN if x < f_min or x is NaN
        //    b) int_ty::MAX if x > f_max
        //    c) the result of fpto[su]i otherwise
        // 3. If x is NaN, return 0, otherwise return the result of step 2.
        //
        // This avoids undef becoming the final result, because values in the range
        // [f_min, f_max] by definition fit into the destination type. It creates an undef
        // temporary, but *producing* undef is not UB. Our use of undef does not introduce
        // any non-determinism either.
        // More importantly, the above procedure correctly implements saturating conversion.
        // Proof (sketch):
        // If x is NaN, 0 is returned by definition.
        // Otherwise, x is finite or infinite and thus can be compared with f_min and f_max.
        // This yields three cases to consider:
        // (1) if x in [f_min, f_max], the result of fpto[su]i is returned, which agrees with
        //     saturating conversion for inputs in that range.
        // (2) if x > f_max, then x is larger than int_ty::MAX. This holds even if f_max is rounded
        //     (i.e., if f_max < int_ty::MAX) because in those cases, nextUp(f_max) is already larger
        //     than int_ty::MAX. Because x is larger than int_ty::MAX, the return value of int_ty::MAX
        //     is correct.
        // (3) if x < f_min, then x is smaller than int_ty::MIN. As shown earlier, f_min exactly equals
        //     int_ty::MIN and therefore the return value of int_ty::MIN is correct.
        // QED.

        let float_bits_to_llval = |bx: &mut Self, bits| {
            let bits_llval = match float_width {
                32 => bx.cx().const_u32(bits as u32),
                64 => bx.cx().const_u64(bits as u64),
                n => bug!("unsupported float width {}", n),
            };
            bx.bitcast(bits_llval, float_ty)
        };
        let (f_min, f_max) = match float_width {
            32 => compute_clamp_bounds_single(signed, int_width),
            64 => compute_clamp_bounds_double(signed, int_width),
            n => bug!("unsupported float width {}", n),
        };
        let f_min = float_bits_to_llval(self, f_min);
        let f_max = float_bits_to_llval(self, f_max);
        let int_max = self.cx().const_uint_big(int_ty, int_max(signed, int_width));
        let int_min = self.cx().const_uint_big(int_ty, int_min(signed, int_width) as u128);
        let zero = self.cx().const_uint(int_ty, 0);

        // If we're working with vectors, constants must be "splatted": the constant is duplicated
        // into each lane of the vector. The algorithm stays the same, we are just using the
        // same constant across all lanes.
        let maybe_splat = |bx: &mut Self, val| {
            if bx.cx().type_kind(dest_ty) == TypeKind::Vector {
                bx.vector_splat(bx.vector_length(dest_ty), val)
            } else {
                val
            }
        };
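        // E.g. (illustrative): for a <4 x f32> -> <4 x i32> cast, the scalar
        // bound f_max becomes <f_max, f_max, f_max, f_max>, so the fcmp/select
        // sequence below applies lane-wise without further changes.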
        let f_min = maybe_splat(self, f_min);
        let f_max = maybe_splat(self, f_max);
        let int_max = maybe_splat(self, int_max);
        let int_min = maybe_splat(self, int_min);
        let zero = maybe_splat(self, zero);

        // Step 1 ...
        let fptosui_result = if signed { self.fptosi(x, dest_ty) } else { self.fptoui(x, dest_ty) };
        let less_or_nan = self.fcmp(RealPredicate::RealULT, x, f_min);
        let greater = self.fcmp(RealPredicate::RealOGT, x, f_max);
347
348 // Step 2: We use two comparisons and two selects, with %s1 being the
349 // result:
350 // %less_or_nan = fcmp ult %x, %f_min
351 // %greater = fcmp olt %x, %f_max
352 // %s0 = select %less_or_nan, int_ty::MIN, %fptosi_result
353 // %s1 = select %greater, int_ty::MAX, %s0
354 // Note that %less_or_nan uses an *unordered* comparison. This
355 // comparison is true if the operands are not comparable (i.e., if x is
356 // NaN). The unordered comparison ensures that s1 becomes int_ty::MIN if
357 // x is NaN.
358 //
359 // Performance note: Unordered comparison can be lowered to a "flipped"
360 // comparison and a negation, and the negation can be merged into the
361 // select. Therefore, it not necessarily any more expensive than an
362 // ordered ("normal") comparison. Whether these optimizations will be
363 // performed is ultimately up to the backend, but at least x86 does
364 // perform them.
365 let s0 = self.select(less_or_nan, int_min, fptosui_result);
366 let s1 = self.select(greater, int_max, s0);
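        // Worked trace (illustrative): for f32 -> i32 with x = 3.0e9,
        // `less_or_nan` is false and `greater` is true (3.0e9 > f_max), so
        // s1 = i32::MAX. For x = NaN, the unordered `ult` makes `less_or_nan`
        // true, so s1 = i32::MIN, which step 3 below replaces with 0.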

        // Step 3: NaN replacement.
        // For unsigned types, the above step already yielded int_ty::MIN == 0 if x is NaN.
        // Therefore we only need to execute this step for signed integer types.
        if signed {
            // LLVM has no isNaN predicate, so we use (x == x) instead; the
            // ordered comparison is false if and only if x is NaN.
            let cmp = self.fcmp(RealPredicate::RealOEQ, x, x);
            self.select(cmp, s1, zero)
        } else {
            s1
        }
    }

    fn icmp(&mut self, op: IntPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
    fn fcmp(&mut self, op: RealPredicate, lhs: Self::Value, rhs: Self::Value) -> Self::Value;

    fn memcpy(
        &mut self,
        dst: Self::Value,
        dst_align: Align,
        src: Self::Value,
        src_align: Align,
        size: Self::Value,
        flags: MemFlags,
    );
    fn memmove(
        &mut self,
        dst: Self::Value,
        dst_align: Align,
        src: Self::Value,
        src_align: Align,
        size: Self::Value,
        flags: MemFlags,
    );
    fn memset(
        &mut self,
        ptr: Self::Value,
        fill_byte: Self::Value,
        size: Self::Value,
        align: Align,
        flags: MemFlags,
    );

    fn select(
        &mut self,
        cond: Self::Value,
        then_val: Self::Value,
        else_val: Self::Value,
    ) -> Self::Value;

    fn va_arg(&mut self, list: Self::Value, ty: Self::Type) -> Self::Value;
    fn extract_element(&mut self, vec: Self::Value, idx: Self::Value) -> Self::Value;
    fn vector_splat(&mut self, num_elts: usize, elt: Self::Value) -> Self::Value;
    fn extract_value(&mut self, agg_val: Self::Value, idx: u64) -> Self::Value;
    fn insert_value(&mut self, agg_val: Self::Value, elt: Self::Value, idx: u64) -> Self::Value;

    fn set_personality_fn(&mut self, personality: Self::Value);

    // These are used by everyone except MSVC.
    fn cleanup_landing_pad(&mut self, ty: Self::Type, pers_fn: Self::Value) -> Self::Value;
    fn resume(&mut self, exn: Self::Value);

    // These are used only by MSVC.
    fn cleanup_pad(&mut self, parent: Option<Self::Value>, args: &[Self::Value]) -> Self::Funclet;
    fn cleanup_ret(&mut self, funclet: &Self::Funclet, unwind: Option<Self::BasicBlock>);
    fn catch_pad(&mut self, parent: Self::Value, args: &[Self::Value]) -> Self::Funclet;
    fn catch_switch(
        &mut self,
        parent: Option<Self::Value>,
        unwind: Option<Self::BasicBlock>,
        handlers: &[Self::BasicBlock],
    ) -> Self::Value;

    fn atomic_cmpxchg(
        &mut self,
        dst: Self::Value,
        cmp: Self::Value,
        src: Self::Value,
        order: AtomicOrdering,
        failure_order: AtomicOrdering,
        weak: bool,
    ) -> Self::Value;
    fn atomic_rmw(
        &mut self,
        op: AtomicRmwBinOp,
        dst: Self::Value,
        src: Self::Value,
        order: AtomicOrdering,
    ) -> Self::Value;
    fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope);
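
    // A hedged sketch of a typical call site (hypothetical, not from this
    // file): lowering `AtomicUsize::fetch_add(1, Ordering::SeqCst)` might emit
    //
    //     bx.atomic_rmw(AtomicRmwBinOp::AtomicAdd, dst, one,
    //                   AtomicOrdering::SequentiallyConsistent)
    //
    // where `dst` points at the atomic and `one` is a constant 1.
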
    fn set_invariant_load(&mut self, load: Self::Value);

    /// Called for `StorageLive`
    fn lifetime_start(&mut self, ptr: Self::Value, size: Size);

    /// Called for `StorageDead`
    fn lifetime_end(&mut self, ptr: Self::Value, size: Size);

    fn instrprof_increment(
        &mut self,
        fn_name: Self::Value,
        hash: Self::Value,
        num_counters: Self::Value,
        index: Self::Value,
    );

    fn call(
        &mut self,
        llty: Self::Type,
        llfn: Self::Value,
        args: &[Self::Value],
        funclet: Option<&Self::Funclet>,
    ) -> Self::Value;
    fn zext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;

    fn do_not_inline(&mut self, llret: Self::Value);
}