]>
Commit | Line | Data |
---|---|---|
1b1a35ee XL |
1 | use super::operand::{OperandRef, OperandValue}; |
2 | use super::place::PlaceRef; | |
3 | use super::FunctionCx; | |
9c376795 FG |
4 | use crate::common::IntPredicate; |
5 | use crate::errors; | |
6 | use crate::errors::InvalidMonomorphization; | |
064997fb | 7 | use crate::meth; |
4b012472 | 8 | use crate::size_of_val; |
1b1a35ee XL |
9 | use crate::traits::*; |
10 | use crate::MemFlags; | |
11 | ||
12 | use rustc_middle::ty::{self, Ty, TyCtxt}; | |
13 | use rustc_span::{sym, Span}; | |
064997fb FG |
14 | use rustc_target::abi::{ |
15 | call::{FnAbi, PassMode}, | |
16 | WrappingRange, | |
17 | }; | |
1b1a35ee XL |
18 | |
19 | fn copy_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( | |
20 | bx: &mut Bx, | |
21 | allow_overlap: bool, | |
22 | volatile: bool, | |
23 | ty: Ty<'tcx>, | |
24 | dst: Bx::Value, | |
25 | src: Bx::Value, | |
26 | count: Bx::Value, | |
27 | ) { | |
28 | let layout = bx.layout_of(ty); | |
29 | let size = layout.size; | |
30 | let align = layout.align.abi; | |
31 | let size = bx.mul(bx.const_usize(size.bytes()), count); | |
32 | let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() }; | |
33 | if allow_overlap { | |
34 | bx.memmove(dst, align, src, align, size, flags); | |
35 | } else { | |
36 | bx.memcpy(dst, align, src, align, size, flags); | |
37 | } | |
38 | } | |
39 | ||
40 | fn memset_intrinsic<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( | |
41 | bx: &mut Bx, | |
42 | volatile: bool, | |
43 | ty: Ty<'tcx>, | |
44 | dst: Bx::Value, | |
45 | val: Bx::Value, | |
46 | count: Bx::Value, | |
47 | ) { | |
48 | let layout = bx.layout_of(ty); | |
49 | let size = layout.size; | |
50 | let align = layout.align.abi; | |
51 | let size = bx.mul(bx.const_usize(size.bytes()), count); | |
52 | let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() }; | |
53 | bx.memset(dst, val, size, align, flags); | |
54 | } | |
55 | ||
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    /// Codegens a call to a compiler intrinsic.
    ///
    /// Platform-independent intrinsics are handled directly here; anything
    /// not matched falls through to the backend-specific
    /// `bx.codegen_intrinsic_call` in the final `_` arm.
    ///
    /// `llresult` is the already-allocated destination for the return value.
    /// Arms that store their result themselves (or produce no result)
    /// `return` early; every other arm yields an immediate `llval` which is
    /// written back to `result` after the `match`.
    pub fn codegen_intrinsic_call(
        bx: &mut Bx,
        instance: ty::Instance<'tcx>,
        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[OperandRef<'tcx, Bx::Value>],
        llresult: Bx::Value,
        span: Span,
    ) {
        let callee_ty = instance.ty(bx.tcx(), ty::ParamEnv::reveal_all());

        // Intrinsics are always `FnDef`s; anything else is a compiler bug.
        let ty::FnDef(def_id, fn_args) = *callee_ty.kind() else {
            bug!("expected fn item type, found {}", callee_ty);
        };

        let sig = callee_ty.fn_sig(bx.tcx());
        let sig = bx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig);
        let arg_tys = sig.inputs();
        let ret_ty = sig.output();
        // The intrinsic is identified by its item name (e.g. `size_of_val`).
        let name = bx.tcx().item_name(def_id);
        let name_str = name.as_str();

        let llret_ty = bx.backend_type(bx.layout_of(ret_ty));
        let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);

        let llval = match name {
            sym::abort => {
                bx.abort();
                return;
            }

            sym::va_start => bx.va_start(args[0].immediate()),
            sym::va_end => bx.va_end(args[0].immediate()),
            sym::size_of_val => {
                let tp_ty = fn_args.type_at(0);
                // A `Pair` operand carries the pointer metadata (e.g. slice
                // length or vtable) needed to size an unsized value.
                let meta =
                    if let OperandValue::Pair(_, meta) = args[0].val { Some(meta) } else { None };
                let (llsize, _) = size_of_val::size_and_align_of_dst(bx, tp_ty, meta);
                llsize
            }
            sym::min_align_of_val => {
                let tp_ty = fn_args.type_at(0);
                let meta =
                    if let OperandValue::Pair(_, meta) = args[0].val { Some(meta) } else { None };
                let (_, llalign) = size_of_val::size_and_align_of_dst(bx, tp_ty, meta);
                llalign
            }
            sym::vtable_size | sym::vtable_align => {
                let vtable = args[0].immediate();
                // Size and alignment live at fixed slots in every vtable.
                let idx = match name {
                    sym::vtable_size => ty::COMMON_VTABLE_ENTRIES_SIZE,
                    sym::vtable_align => ty::COMMON_VTABLE_ENTRIES_ALIGN,
                    _ => bug!(),
                };
                let value = meth::VirtualIndex::from_index(idx).get_usize(bx, vtable);
                // Attach range metadata so the backend can optimize on the
                // known bounds of the loaded value.
                match name {
                    // Size is always <= isize::MAX.
                    sym::vtable_size => {
                        let size_bound = bx.data_layout().ptr_sized_integer().signed_max() as u128;
                        bx.range_metadata(value, WrappingRange { start: 0, end: size_bound });
                    }
                    // Alignment is always nonzero.
                    sym::vtable_align => {
                        bx.range_metadata(value, WrappingRange { start: 1, end: !0 })
                    }
                    _ => {}
                }
                value
            }
            // These intrinsics are all compile-time constants for a given
            // monomorphization, so just const-evaluate the instance.
            sym::pref_align_of
            | sym::needs_drop
            | sym::type_id
            | sym::type_name
            | sym::variant_count => {
                let value = bx
                    .tcx()
                    .const_eval_instance(ty::ParamEnv::reveal_all(), instance, None)
                    .unwrap();
                OperandRef::from_const(bx, value, ret_ty).immediate_or_packed_pair(bx)
            }
            sym::arith_offset => {
                let ty = fn_args.type_at(0);
                let layout = bx.layout_of(ty);
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                // Plain (wrapping) GEP — no inbounds guarantee.
                bx.gep(bx.backend_type(layout), ptr, &[offset])
            }
            sym::copy => {
                // NOTE: `copy(src, dst, count)` — source is args[0], so the
                // dst/src operands are swapped relative to the other copy
                // intrinsics below.
                copy_intrinsic(
                    bx,
                    true,
                    false,
                    fn_args.type_at(0),
                    args[1].immediate(),
                    args[0].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::write_bytes => {
                memset_intrinsic(
                    bx,
                    false,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }

            sym::volatile_copy_nonoverlapping_memory => {
                copy_intrinsic(
                    bx,
                    false,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::volatile_copy_memory => {
                copy_intrinsic(
                    bx,
                    true,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::volatile_set_memory => {
                memset_intrinsic(
                    bx,
                    true,
                    fn_args.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            sym::volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.volatile_store(bx, dst);
                return;
            }
            sym::unaligned_volatile_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.unaligned_volatile_store(bx, dst);
                return;
            }
            sym::exact_div => {
                let ty = arg_tys[0];
                // Only valid on integer types; emit an error (not an ICE)
                // for invalid monomorphizations.
                match int_type_width_signed(ty, bx.tcx()) {
                    Some((_width, signed)) => {
                        if signed {
                            bx.exactsdiv(args[0].immediate(), args[1].immediate())
                        } else {
                            bx.exactudiv(args[0].immediate(), args[1].immediate())
                        }
                    }
                    None => {
                        bx.tcx().sess.emit_err(InvalidMonomorphization::BasicIntegerType {
                            span,
                            name,
                            ty,
                        });
                        return;
                    }
                }
            }
            sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
                // Fast-math float ops; only valid on float types.
                match float_type_width(arg_tys[0]) {
                    Some(_width) => match name {
                        sym::fadd_fast => bx.fadd_fast(args[0].immediate(), args[1].immediate()),
                        sym::fsub_fast => bx.fsub_fast(args[0].immediate(), args[1].immediate()),
                        sym::fmul_fast => bx.fmul_fast(args[0].immediate(), args[1].immediate()),
                        sym::fdiv_fast => bx.fdiv_fast(args[0].immediate(), args[1].immediate()),
                        sym::frem_fast => bx.frem_fast(args[0].immediate(), args[1].immediate()),
                        _ => bug!(),
                    },
                    None => {
                        bx.tcx().sess.emit_err(InvalidMonomorphization::BasicFloatType {
                            span,
                            name,
                            ty: arg_tys[0],
                        });
                        return;
                    }
                }
            }

            sym::float_to_int_unchecked => {
                // Argument must be a float and the return type an integer;
                // each is validated separately with its own error.
                if float_type_width(arg_tys[0]).is_none() {
                    bx.tcx().sess.emit_err(InvalidMonomorphization::FloatToIntUnchecked {
                        span,
                        ty: arg_tys[0],
                    });
                    return;
                }
                let Some((_width, signed)) = int_type_width_signed(ret_ty, bx.tcx()) else {
                    bx.tcx().sess.emit_err(InvalidMonomorphization::FloatToIntUnchecked {
                        span,
                        ty: ret_ty,
                    });
                    return;
                };
                if signed {
                    bx.fptosi(args[0].immediate(), llret_ty)
                } else {
                    bx.fptoui(args[0].immediate(), llret_ty)
                }
            }

            sym::discriminant_value => {
                if ret_ty.is_integral() {
                    args[0].deref(bx.cx()).codegen_get_discr(bx, ret_ty)
                } else {
                    span_bug!(span, "Invalid discriminant type for `{:?}`", arg_tys[0])
                }
            }

            sym::const_allocate => {
                // returns a null pointer at runtime.
                bx.const_null(bx.type_ptr())
            }

            sym::const_deallocate => {
                // nop at runtime.
                return;
            }

            // This requires that atomic intrinsics follow a specific naming pattern:
            // "atomic_<operation>[_<ordering>]"
            name if let Some(atomic) = name_str.strip_prefix("atomic_") => {
                use crate::common::AtomicOrdering::*;
                use crate::common::{AtomicRmwBinOp, SynchronizationScope};

                // Everything after `atomic_` splits into operation + ordering.
                let Some((instruction, ordering)) = atomic.split_once('_') else {
                    bx.sess().emit_fatal(errors::MissingMemoryOrdering);
                };

                // Maps the ordering suffix of the intrinsic name to the
                // backend-agnostic `AtomicOrdering`; unknown suffixes are a
                // fatal error.
                let parse_ordering = |bx: &Bx, s| match s {
                    "unordered" => Unordered,
                    "relaxed" => Relaxed,
                    "acquire" => Acquire,
                    "release" => Release,
                    "acqrel" => AcquireRelease,
                    "seqcst" => SequentiallyConsistent,
                    _ => bx.sess().emit_fatal(errors::UnknownAtomicOrdering),
                };

                // Shared error path: atomics are only valid on integer (or
                // raw pointer) types.
                let invalid_monomorphization = |ty| {
                    bx.tcx().sess.emit_err(InvalidMonomorphization::BasicIntegerType {
                        span,
                        name,
                        ty,
                    });
                };

                match instruction {
                    "cxchg" | "cxchgweak" => {
                        // Compare-exchange encodes two orderings:
                        // "<success>_<failure>".
                        let Some((success, failure)) = ordering.split_once('_') else {
                            bx.sess().emit_fatal(errors::AtomicCompareExchange);
                        };
                        let ty = fn_args.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                            let weak = instruction == "cxchgweak";
                            let dst = args[0].immediate();
                            let mut cmp = args[1].immediate();
                            let mut src = args[2].immediate();
                            if ty.is_unsafe_ptr() {
                                // Some platforms do not support atomic operations on pointers,
                                // so we cast to integer first.
                                cmp = bx.ptrtoint(cmp, bx.type_isize());
                                src = bx.ptrtoint(src, bx.type_isize());
                            }
                            let pair = bx.atomic_cmpxchg(
                                dst,
                                cmp,
                                src,
                                parse_ordering(bx, success),
                                parse_ordering(bx, failure),
                                weak,
                            );
                            // The result is a (value, success-flag) pair;
                            // store both fields into the destination place.
                            let val = bx.extract_value(pair, 0);
                            let success = bx.extract_value(pair, 1);
                            let val = bx.from_immediate(val);
                            let success = bx.from_immediate(success);

                            let dest = result.project_field(bx, 0);
                            bx.store(val, dest.llval, dest.align);
                            let dest = result.project_field(bx, 1);
                            bx.store(success, dest.llval, dest.align);
                            return;
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }

                    "load" => {
                        let ty = fn_args.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                            let layout = bx.layout_of(ty);
                            let size = layout.size;
                            let source = args[0].immediate();
                            if ty.is_unsafe_ptr() {
                                // Some platforms do not support atomic operations on pointers,
                                // so we cast to integer first...
                                let llty = bx.type_isize();
                                let result = bx.atomic_load(
                                    llty,
                                    source,
                                    parse_ordering(bx, ordering),
                                    size,
                                );
                                // ... and then cast the result back to a pointer
                                bx.inttoptr(result, bx.backend_type(layout))
                            } else {
                                bx.atomic_load(
                                    bx.backend_type(layout),
                                    source,
                                    parse_ordering(bx, ordering),
                                    size,
                                )
                            }
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }

                    "store" => {
                        let ty = fn_args.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                            let size = bx.layout_of(ty).size;
                            let mut val = args[1].immediate();
                            let ptr = args[0].immediate();
                            if ty.is_unsafe_ptr() {
                                // Some platforms do not support atomic operations on pointers,
                                // so we cast to integer first.
                                val = bx.ptrtoint(val, bx.type_isize());
                            }
                            bx.atomic_store(val, ptr, parse_ordering(bx, ordering), size);
                            return;
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }

                    "fence" => {
                        bx.atomic_fence(
                            parse_ordering(bx, ordering),
                            SynchronizationScope::CrossThread,
                        );
                        return;
                    }

                    "singlethreadfence" => {
                        bx.atomic_fence(
                            parse_ordering(bx, ordering),
                            SynchronizationScope::SingleThread,
                        );
                        return;
                    }

                    // These are all AtomicRMW ops
                    op => {
                        let atom_op = match op {
                            "xchg" => AtomicRmwBinOp::AtomicXchg,
                            "xadd" => AtomicRmwBinOp::AtomicAdd,
                            "xsub" => AtomicRmwBinOp::AtomicSub,
                            "and" => AtomicRmwBinOp::AtomicAnd,
                            "nand" => AtomicRmwBinOp::AtomicNand,
                            "or" => AtomicRmwBinOp::AtomicOr,
                            "xor" => AtomicRmwBinOp::AtomicXor,
                            "max" => AtomicRmwBinOp::AtomicMax,
                            "min" => AtomicRmwBinOp::AtomicMin,
                            "umax" => AtomicRmwBinOp::AtomicUMax,
                            "umin" => AtomicRmwBinOp::AtomicUMin,
                            _ => bx.sess().emit_fatal(errors::UnknownAtomicOperation),
                        };

                        let ty = fn_args.type_at(0);
                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                            let ptr = args[0].immediate();
                            let mut val = args[1].immediate();
                            if ty.is_unsafe_ptr() {
                                // Some platforms do not support atomic operations on pointers,
                                // so we cast to integer first.
                                val = bx.ptrtoint(val, bx.type_isize());
                            }
                            bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }
                }
            }

            sym::nontemporal_store => {
                let dst = args[0].deref(bx.cx());
                args[1].val.nontemporal_store(bx, dst);
                return;
            }

            sym::ptr_guaranteed_cmp => {
                let a = args[0].immediate();
                let b = args[1].immediate();
                bx.icmp(IntPredicate::IntEQ, a, b)
            }

            sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
                let ty = fn_args.type_at(0);
                let pointee_size = bx.layout_of(ty).size;

                let a = args[0].immediate();
                let b = args[1].immediate();
                let a = bx.ptrtoint(a, bx.type_isize());
                let b = bx.ptrtoint(b, bx.type_isize());
                let pointee_size = bx.const_usize(pointee_size.bytes());
                if name == sym::ptr_offset_from {
                    // This is the same sequence that Clang emits for pointer subtraction.
                    // It can be neither `nsw` nor `nuw` because the input is treated as
                    // unsigned but then the output is treated as signed, so neither works.
                    let d = bx.sub(a, b);
                    // this is where the signed magic happens (notice the `s` in `exactsdiv`)
                    bx.exactsdiv(d, pointee_size)
                } else {
                    // The `_unsigned` version knows the relative ordering of the pointers,
                    // so can use `sub nuw` and `udiv exact` instead of dealing in signed.
                    let d = bx.unchecked_usub(a, b);
                    bx.exactudiv(d, pointee_size)
                }
            }

            _ => {
                // Need to use backend-specific things in the implementation.
                bx.codegen_intrinsic_call(instance, fn_abi, args, llresult, span);
                return;
            }
        };

        // Write the computed immediate back to the return place, honoring
        // the ABI's cast mode if one applies.
        if !fn_abi.ret.is_ignore() {
            if let PassMode::Cast { .. } = &fn_abi.ret.mode {
                bx.store(llval, result.llval, result.align);
            } else {
                OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
                    .val
                    .store(bx, result);
            }
        }
    }
}
514 | ||
515 | // Returns the width of an int Ty, and if it's signed or not | |
516 | // Returns None if the type is not an integer | |
517 | // FIXME: there’s multiple of this functions, investigate using some of the already existing | |
518 | // stuffs. | |
519 | fn int_type_width_signed(ty: Ty<'_>, tcx: TyCtxt<'_>) -> Option<(u64, bool)> { | |
520 | match ty.kind() { | |
29967ef6 XL |
521 | ty::Int(t) => { |
522 | Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), true)) | |
523 | } | |
524 | ty::Uint(t) => { | |
525 | Some((t.bit_width().unwrap_or(u64::from(tcx.sess.target.pointer_width)), false)) | |
526 | } | |
1b1a35ee XL |
527 | _ => None, |
528 | } | |
529 | } | |
530 | ||
531 | // Returns the width of a float Ty | |
532 | // Returns None if the type is not a float | |
533 | fn float_type_width(ty: Ty<'_>) -> Option<u64> { | |
534 | match ty.kind() { | |
535 | ty::Float(t) => Some(t.bit_width()), | |
536 | _ => None, | |
537 | } | |
538 | } |