// src/librustc_codegen_llvm/intrinsic.rs (upstream rustc 1.44.1)
use crate::abi::{Abi, FnAbi, LlvmType, PassMode};
use crate::builder::Builder;
use crate::context::CodegenCx;
use crate::llvm;
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::va_arg::emit_va_arg;
use crate::value::Value;

use rustc_ast::ast;
use rustc_codegen_ssa::base::{compare_simd_types, to_immediate, wants_msvc_seh};
use rustc_codegen_ssa::common::span_invalid_monomorphization_error;
use rustc_codegen_ssa::common::TypeKind;
use rustc_codegen_ssa::glue;
use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::MemFlags;
use rustc_hir as hir;
use rustc_middle::ty::layout::{FnAbiExt, HasTyCtxt};
use rustc_middle::ty::{self, Ty};
use rustc_middle::{bug, span_bug};
use rustc_span::Span;
use rustc_target::abi::{self, HasDataLayout, LayoutOf, Primitive};

use std::cmp::Ordering;
use std::iter;

fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_>, name: &str) -> Option<&'ll Value> {
    let llvm_name = match name {
        "sqrtf32" => "llvm.sqrt.f32",
        "sqrtf64" => "llvm.sqrt.f64",
        "powif32" => "llvm.powi.f32",
        "powif64" => "llvm.powi.f64",
        "sinf32" => "llvm.sin.f32",
        "sinf64" => "llvm.sin.f64",
        "cosf32" => "llvm.cos.f32",
        "cosf64" => "llvm.cos.f64",
        "powf32" => "llvm.pow.f32",
        "powf64" => "llvm.pow.f64",
        "expf32" => "llvm.exp.f32",
        "expf64" => "llvm.exp.f64",
        "exp2f32" => "llvm.exp2.f32",
        "exp2f64" => "llvm.exp2.f64",
        "logf32" => "llvm.log.f32",
        "logf64" => "llvm.log.f64",
        "log10f32" => "llvm.log10.f32",
        "log10f64" => "llvm.log10.f64",
        "log2f32" => "llvm.log2.f32",
        "log2f64" => "llvm.log2.f64",
        "fmaf32" => "llvm.fma.f32",
        "fmaf64" => "llvm.fma.f64",
        "fabsf32" => "llvm.fabs.f32",
        "fabsf64" => "llvm.fabs.f64",
        "minnumf32" => "llvm.minnum.f32",
        "minnumf64" => "llvm.minnum.f64",
        "maxnumf32" => "llvm.maxnum.f32",
        "maxnumf64" => "llvm.maxnum.f64",
        "copysignf32" => "llvm.copysign.f32",
        "copysignf64" => "llvm.copysign.f64",
        "floorf32" => "llvm.floor.f32",
        "floorf64" => "llvm.floor.f64",
        "ceilf32" => "llvm.ceil.f32",
        "ceilf64" => "llvm.ceil.f64",
        "truncf32" => "llvm.trunc.f32",
        "truncf64" => "llvm.trunc.f64",
        "rintf32" => "llvm.rint.f32",
        "rintf64" => "llvm.rint.f64",
        "nearbyintf32" => "llvm.nearbyint.f32",
        "nearbyintf64" => "llvm.nearbyint.f64",
        "roundf32" => "llvm.round.f32",
        "roundf64" => "llvm.round.f64",
        "assume" => "llvm.assume",
        "abort" => "llvm.trap",
        _ => return None,
    };
    Some(cx.get_intrinsic(&llvm_name))
}
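
// Illustrative only (not part of the lowering itself): a call such as
//
//     #![feature(core_intrinsics)]
//     let y = unsafe { std::intrinsics::sqrtf32(2.0) };
//
// reaches the table above under the name "sqrtf32" and is emitted as a direct
// call of the LLVM intrinsic `llvm.sqrt.f32`.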

impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
    fn codegen_intrinsic_call(
        &mut self,
        instance: ty::Instance<'tcx>,
        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
        args: &[OperandRef<'tcx, &'ll Value>],
        llresult: &'ll Value,
        span: Span,
    ) {
        let tcx = self.tcx;
        let callee_ty = instance.monomorphic_ty(tcx);

        let (def_id, substs) = match callee_ty.kind {
            ty::FnDef(def_id, substs) => (def_id, substs),
            _ => bug!("expected fn item type, found {}", callee_ty),
        };

        let sig = callee_ty.fn_sig(tcx);
        let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
        let arg_tys = sig.inputs();
        let ret_ty = sig.output();
        let name = &*tcx.item_name(def_id).as_str();

        let llret_ty = self.layout_of(ret_ty).llvm_type(self);
        let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);

        let simple = get_simple_intrinsic(self, name);
        let llval = match name {
            _ if simple.is_some() => self.call(
                simple.unwrap(),
                &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
                None,
            ),
            "unreachable" => {
                return;
            }
            "likely" => {
                let expect = self.get_intrinsic(&("llvm.expect.i1"));
                self.call(expect, &[args[0].immediate(), self.const_bool(true)], None)
            }
            "unlikely" => {
                let expect = self.get_intrinsic(&("llvm.expect.i1"));
                self.call(expect, &[args[0].immediate(), self.const_bool(false)], None)
            }
            "try" => {
                try_intrinsic(
                    self,
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                    llresult,
                );
                return;
            }
            "breakpoint" => {
                let llfn = self.get_intrinsic(&("llvm.debugtrap"));
                self.call(llfn, &[], None)
            }
            "va_start" => self.va_start(args[0].immediate()),
            "va_end" => self.va_end(args[0].immediate()),
            "va_copy" => {
                let intrinsic = self.cx().get_intrinsic(&("llvm.va_copy"));
                self.call(intrinsic, &[args[0].immediate(), args[1].immediate()], None)
            }
            "va_arg" => {
                match fn_abi.ret.layout.abi {
                    abi::Abi::Scalar(ref scalar) => {
                        match scalar.value {
                            Primitive::Int(..) => {
                                if self.cx().size_of(ret_ty).bytes() < 4 {
                                    // `va_arg` should not be called on an integer type
                                    // less than 4 bytes in length. If it is, promote
                                    // the integer to an `i32` and truncate the result
                                    // back to the smaller type.
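                                    // For example, `va_arg::<u8>(ap)` conceptually
                                    // reads the argument at `i32` width and then
                                    // truncates the loaded value back down to `i8`.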
                                    let promoted_result = emit_va_arg(self, args[0], tcx.types.i32);
                                    self.trunc(promoted_result, llret_ty)
                                } else {
                                    emit_va_arg(self, args[0], ret_ty)
                                }
                            }
                            Primitive::F64 | Primitive::Pointer => {
                                emit_va_arg(self, args[0], ret_ty)
                            }
                            // `va_arg` should never be used with the return type `f32`.
                            Primitive::F32 => bug!("the va_arg intrinsic does not work with `f32`"),
                        }
                    }
                    _ => bug!("the va_arg intrinsic does not work with non-scalar types"),
                }
            }
            "size_of_val" => {
                let tp_ty = substs.type_at(0);
                if let OperandValue::Pair(_, meta) = args[0].val {
                    let (llsize, _) = glue::size_and_align_of_dst(self, tp_ty, Some(meta));
                    llsize
                } else {
                    self.const_usize(self.size_of(tp_ty).bytes())
                }
            }
            "min_align_of_val" => {
                let tp_ty = substs.type_at(0);
                if let OperandValue::Pair(_, meta) = args[0].val {
                    let (_, llalign) = glue::size_and_align_of_dst(self, tp_ty, Some(meta));
                    llalign
                } else {
                    self.const_usize(self.align_of(tp_ty).bytes())
                }
            }
            "size_of" | "pref_align_of" | "min_align_of" | "needs_drop" | "type_id"
            | "type_name" => {
                let ty_name = self
                    .tcx
                    .const_eval_instance(ty::ParamEnv::reveal_all(), instance, None)
                    .unwrap();
                OperandRef::from_const(self, ty_name, ret_ty).immediate_or_packed_pair(self)
            }
            // Effectively a no-op.
            "forget" => {
                return;
            }
            "offset" => {
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                self.inbounds_gep(ptr, &[offset])
            }
            "arith_offset" => {
                let ptr = args[0].immediate();
                let offset = args[1].immediate();
                self.gep(ptr, &[offset])
            }

            "copy_nonoverlapping" => {
                copy_intrinsic(
                    self,
                    false,
                    false,
                    substs.type_at(0),
                    args[1].immediate(),
                    args[0].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            "copy" => {
                copy_intrinsic(
                    self,
                    true,
                    false,
                    substs.type_at(0),
                    args[1].immediate(),
                    args[0].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            "write_bytes" => {
                memset_intrinsic(
                    self,
                    false,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }

            "volatile_copy_nonoverlapping_memory" => {
                copy_intrinsic(
                    self,
                    false,
                    true,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            "volatile_copy_memory" => {
                copy_intrinsic(
                    self,
                    true,
                    true,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            "volatile_set_memory" => {
                memset_intrinsic(
                    self,
                    true,
                    substs.type_at(0),
                    args[0].immediate(),
                    args[1].immediate(),
                    args[2].immediate(),
                );
                return;
            }
            "volatile_load" | "unaligned_volatile_load" => {
                let tp_ty = substs.type_at(0);
                let mut ptr = args[0].immediate();
                if let PassMode::Cast(ty) = fn_abi.ret.mode {
                    ptr = self.pointercast(ptr, self.type_ptr_to(ty.llvm_type(self)));
                }
                let load = self.volatile_load(ptr);
                let align = if name == "unaligned_volatile_load" {
                    1
                } else {
                    self.align_of(tp_ty).bytes() as u32
                };
                unsafe {
                    llvm::LLVMSetAlignment(load, align);
                }
                to_immediate(self, load, self.layout_of(tp_ty))
            }
            "volatile_store" => {
                let dst = args[0].deref(self.cx());
                args[1].val.volatile_store(self, dst);
                return;
            }
            "unaligned_volatile_store" => {
                let dst = args[0].deref(self.cx());
                args[1].val.unaligned_volatile_store(self, dst);
                return;
            }
            "prefetch_read_data"
            | "prefetch_write_data"
            | "prefetch_read_instruction"
            | "prefetch_write_instruction" => {
                let expect = self.get_intrinsic(&("llvm.prefetch"));
                let (rw, cache_type) = match name {
                    "prefetch_read_data" => (0, 1),
                    "prefetch_write_data" => (1, 1),
                    "prefetch_read_instruction" => (0, 0),
                    "prefetch_write_instruction" => (1, 0),
                    _ => bug!(),
                };
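                // The call below matches LLVM's
                //     declare void @llvm.prefetch(i8* %addr, i32 %rw, i32 %locality, i32 %cache_type)
                // where `rw` is 0 for read / 1 for write, `cache_type` is
                // 1 for data / 0 for instruction, and `args[1]` supplies the
                // locality hint.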
                self.call(
                    expect,
                    &[
                        args[0].immediate(),
                        self.const_i32(rw),
                        args[1].immediate(),
                        self.const_i32(cache_type),
                    ],
                    None,
                )
            }
            "ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" | "ctpop" | "bswap"
            | "bitreverse" | "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow"
            | "wrapping_add" | "wrapping_sub" | "wrapping_mul" | "unchecked_div"
            | "unchecked_rem" | "unchecked_shl" | "unchecked_shr" | "unchecked_add"
            | "unchecked_sub" | "unchecked_mul" | "exact_div" | "rotate_left" | "rotate_right"
            | "saturating_add" | "saturating_sub" => {
                let ty = arg_tys[0];
                match int_type_width_signed(ty, self) {
                    Some((width, signed)) => match name {
                        "ctlz" | "cttz" => {
                            let y = self.const_bool(false);
                            let llfn = self.get_intrinsic(&format!("llvm.{}.i{}", name, width));
                            self.call(llfn, &[args[0].immediate(), y], None)
                        }
                        "ctlz_nonzero" | "cttz_nonzero" => {
                            let y = self.const_bool(true);
                            let llvm_name = &format!("llvm.{}.i{}", &name[..4], width);
                            let llfn = self.get_intrinsic(llvm_name);
                            self.call(llfn, &[args[0].immediate(), y], None)
                        }
                        "ctpop" => self.call(
                            self.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
                            &[args[0].immediate()],
                            None,
                        ),
                        "bswap" => {
                            if width == 8 {
                                args[0].immediate() // byte swapping a u8/i8 is a no-op
                            } else {
                                self.call(
                                    self.get_intrinsic(&format!("llvm.bswap.i{}", width)),
                                    &[args[0].immediate()],
                                    None,
                                )
                            }
                        }
                        "bitreverse" => self.call(
                            self.get_intrinsic(&format!("llvm.bitreverse.i{}", width)),
                            &[args[0].immediate()],
                            None,
                        ),
                        "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => {
                            let intrinsic = format!(
                                "llvm.{}{}.with.overflow.i{}",
                                if signed { 's' } else { 'u' },
                                &name[..3],
                                width
                            );
                            let llfn = self.get_intrinsic(&intrinsic);

                            // Convert `i1` to a `bool`, and write it to the out parameter.
                            let pair =
                                self.call(llfn, &[args[0].immediate(), args[1].immediate()], None);
                            let val = self.extract_value(pair, 0);
                            let overflow = self.extract_value(pair, 1);
                            let overflow = self.zext(overflow, self.type_bool());

                            let dest = result.project_field(self, 0);
                            self.store(val, dest.llval, dest.align);
                            let dest = result.project_field(self, 1);
                            self.store(overflow, dest.llval, dest.align);

                            return;
                        }
                        "wrapping_add" => self.add(args[0].immediate(), args[1].immediate()),
                        "wrapping_sub" => self.sub(args[0].immediate(), args[1].immediate()),
                        "wrapping_mul" => self.mul(args[0].immediate(), args[1].immediate()),
                        "exact_div" => {
                            if signed {
                                self.exactsdiv(args[0].immediate(), args[1].immediate())
                            } else {
                                self.exactudiv(args[0].immediate(), args[1].immediate())
                            }
                        }
                        "unchecked_div" => {
                            if signed {
                                self.sdiv(args[0].immediate(), args[1].immediate())
                            } else {
                                self.udiv(args[0].immediate(), args[1].immediate())
                            }
                        }
                        "unchecked_rem" => {
                            if signed {
                                self.srem(args[0].immediate(), args[1].immediate())
                            } else {
                                self.urem(args[0].immediate(), args[1].immediate())
                            }
                        }
                        "unchecked_shl" => self.shl(args[0].immediate(), args[1].immediate()),
                        "unchecked_shr" => {
                            if signed {
                                self.ashr(args[0].immediate(), args[1].immediate())
                            } else {
                                self.lshr(args[0].immediate(), args[1].immediate())
                            }
                        }
                        "unchecked_add" => {
                            if signed {
                                self.unchecked_sadd(args[0].immediate(), args[1].immediate())
                            } else {
                                self.unchecked_uadd(args[0].immediate(), args[1].immediate())
                            }
                        }
                        "unchecked_sub" => {
                            if signed {
                                self.unchecked_ssub(args[0].immediate(), args[1].immediate())
                            } else {
                                self.unchecked_usub(args[0].immediate(), args[1].immediate())
                            }
                        }
                        "unchecked_mul" => {
                            if signed {
                                self.unchecked_smul(args[0].immediate(), args[1].immediate())
                            } else {
                                self.unchecked_umul(args[0].immediate(), args[1].immediate())
                            }
                        }
                        "rotate_left" | "rotate_right" => {
                            let is_left = name == "rotate_left";
                            let val = args[0].immediate();
                            let raw_shift = args[1].immediate();
                            // rotate = funnel shift with the first two args the same
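                            // (illustrative): for a width-w integer,
                            //     fshl(x, x, s) == rotate_left(x, s % w)
                            //     fshr(x, x, s) == rotate_right(x, s % w)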
                            let llvm_name =
                                &format!("llvm.fsh{}.i{}", if is_left { 'l' } else { 'r' }, width);
                            let llfn = self.get_intrinsic(llvm_name);
                            self.call(llfn, &[val, val, raw_shift], None)
                        }
                        "saturating_add" | "saturating_sub" => {
                            let is_add = name == "saturating_add";
                            let lhs = args[0].immediate();
                            let rhs = args[1].immediate();
                            let llvm_name = &format!(
                                "llvm.{}{}.sat.i{}",
                                if signed { 's' } else { 'u' },
                                if is_add { "add" } else { "sub" },
                                width
                            );
                            let llfn = self.get_intrinsic(llvm_name);
                            self.call(llfn, &[lhs, rhs], None)
                        }
                        _ => bug!(),
                    },
                    None => {
                        span_invalid_monomorphization_error(
                            tcx.sess,
                            span,
                            &format!(
                                "invalid monomorphization of `{}` intrinsic: \
                                 expected basic integer type, found `{}`",
                                name, ty
                            ),
                        );
                        return;
                    }
                }
            }
            "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => {
                match float_type_width(arg_tys[0]) {
                    Some(_width) => match name {
                        "fadd_fast" => self.fadd_fast(args[0].immediate(), args[1].immediate()),
                        "fsub_fast" => self.fsub_fast(args[0].immediate(), args[1].immediate()),
                        "fmul_fast" => self.fmul_fast(args[0].immediate(), args[1].immediate()),
                        "fdiv_fast" => self.fdiv_fast(args[0].immediate(), args[1].immediate()),
                        "frem_fast" => self.frem_fast(args[0].immediate(), args[1].immediate()),
                        _ => bug!(),
                    },
                    None => {
                        span_invalid_monomorphization_error(
                            tcx.sess,
                            span,
                            &format!(
                                "invalid monomorphization of `{}` intrinsic: \
                                 expected basic float type, found `{}`",
                                name, arg_tys[0]
                            ),
                        );
                        return;
                    }
                }
            }

            "float_to_int_unchecked" => {
                if float_type_width(arg_tys[0]).is_none() {
                    span_invalid_monomorphization_error(
                        tcx.sess,
                        span,
                        &format!(
                            "invalid monomorphization of `float_to_int_unchecked` \
                             intrinsic: expected basic float type, \
                             found `{}`",
                            arg_tys[0]
                        ),
                    );
                    return;
                }
                match int_type_width_signed(ret_ty, self.cx) {
                    Some((width, signed)) => {
                        if signed {
                            self.fptosi(args[0].immediate(), self.cx.type_ix(width))
                        } else {
                            self.fptoui(args[0].immediate(), self.cx.type_ix(width))
                        }
                    }
                    None => {
                        span_invalid_monomorphization_error(
                            tcx.sess,
                            span,
                            &format!(
                                "invalid monomorphization of `float_to_int_unchecked` \
                                 intrinsic: expected basic integer type, \
                                 found `{}`",
                                ret_ty
                            ),
                        );
                        return;
                    }
                }
            }

            "discriminant_value" => args[0].deref(self.cx()).codegen_get_discr(self, ret_ty),

            name if name.starts_with("simd_") => {
                match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
                    Ok(llval) => llval,
                    Err(()) => return,
                }
            }
            // This requires that atomic intrinsics follow a specific naming pattern:
            // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
            name if name.starts_with("atomic_") => {
                use rustc_codegen_ssa::common::AtomicOrdering::*;
                use rustc_codegen_ssa::common::{AtomicRmwBinOp, SynchronizationScope};

                let split: Vec<&str> = name.split('_').collect();

                let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak";
                let (order, failorder) = match split.len() {
                    2 => (SequentiallyConsistent, SequentiallyConsistent),
                    3 => match split[2] {
                        "unordered" => (Unordered, Unordered),
                        "relaxed" => (Monotonic, Monotonic),
                        "acq" => (Acquire, Acquire),
                        "rel" => (Release, Monotonic),
                        "acqrel" => (AcquireRelease, Acquire),
                        "failrelaxed" if is_cxchg => (SequentiallyConsistent, Monotonic),
                        "failacq" if is_cxchg => (SequentiallyConsistent, Acquire),
                        _ => self.sess().fatal("unknown ordering in atomic intrinsic"),
                    },
                    4 => match (split[2], split[3]) {
                        ("acq", "failrelaxed") if is_cxchg => (Acquire, Monotonic),
                        ("acqrel", "failrelaxed") if is_cxchg => (AcquireRelease, Monotonic),
                        _ => self.sess().fatal("unknown ordering in atomic intrinsic"),
                    },
                    _ => self.sess().fatal("Atomic intrinsic not in correct format"),
                };

                let invalid_monomorphization = |ty| {
                    span_invalid_monomorphization_error(
                        tcx.sess,
                        span,
                        &format!(
                            "invalid monomorphization of `{}` intrinsic: \
                             expected basic integer type, found `{}`",
                            name, ty
                        ),
                    );
                };

                match split[1] {
                    "cxchg" | "cxchgweak" => {
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, self).is_some() {
                            let weak = split[1] == "cxchgweak";
                            let pair = self.atomic_cmpxchg(
                                args[0].immediate(),
                                args[1].immediate(),
                                args[2].immediate(),
                                order,
                                failorder,
                                weak,
                            );
                            let val = self.extract_value(pair, 0);
                            let success = self.extract_value(pair, 1);
                            let success = self.zext(success, self.type_bool());

                            let dest = result.project_field(self, 0);
                            self.store(val, dest.llval, dest.align);
                            let dest = result.project_field(self, 1);
                            self.store(success, dest.llval, dest.align);
                            return;
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }

                    "load" => {
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, self).is_some() {
                            let size = self.size_of(ty);
                            self.atomic_load(args[0].immediate(), order, size)
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }

                    "store" => {
                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, self).is_some() {
                            let size = self.size_of(ty);
                            self.atomic_store(
                                args[1].immediate(),
                                args[0].immediate(),
                                order,
                                size,
                            );
                            return;
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }

                    "fence" => {
                        self.atomic_fence(order, SynchronizationScope::CrossThread);
                        return;
                    }

                    "singlethreadfence" => {
                        self.atomic_fence(order, SynchronizationScope::SingleThread);
                        return;
                    }

                    // These are all AtomicRMW ops
                    op => {
                        let atom_op = match op {
                            "xchg" => AtomicRmwBinOp::AtomicXchg,
                            "xadd" => AtomicRmwBinOp::AtomicAdd,
                            "xsub" => AtomicRmwBinOp::AtomicSub,
                            "and" => AtomicRmwBinOp::AtomicAnd,
                            "nand" => AtomicRmwBinOp::AtomicNand,
                            "or" => AtomicRmwBinOp::AtomicOr,
                            "xor" => AtomicRmwBinOp::AtomicXor,
                            "max" => AtomicRmwBinOp::AtomicMax,
                            "min" => AtomicRmwBinOp::AtomicMin,
                            "umax" => AtomicRmwBinOp::AtomicUMax,
                            "umin" => AtomicRmwBinOp::AtomicUMin,
                            _ => self.sess().fatal("unknown atomic operation"),
                        };

                        let ty = substs.type_at(0);
                        if int_type_width_signed(ty, self).is_some() {
                            self.atomic_rmw(
                                atom_op,
                                args[0].immediate(),
                                args[1].immediate(),
                                order,
                            )
                        } else {
                            return invalid_monomorphization(ty);
                        }
                    }
                }
            }

            "nontemporal_store" => {
                let dst = args[0].deref(self.cx());
                args[1].val.nontemporal_store(self, dst);
                return;
            }

            "ptr_offset_from" => {
                let ty = substs.type_at(0);
                let pointee_size = self.size_of(ty);

                // This is the same sequence that Clang emits for pointer subtraction.
                // It can be neither `nsw` nor `nuw` because the input is treated as
                // unsigned but then the output is treated as signed, so neither works.
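                // Illustrative IR (an assumption, on a 64-bit target) for
                // `ptr_offset_from::<u32>(a, b)`:
                //     %a = ptrtoint i32* %a.ptr to i64
                //     %b = ptrtoint i32* %b.ptr to i64
                //     %d = sub i64 %a, %b
                //     %r = sdiv exact i64 %d, 4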
                let a = args[0].immediate();
                let b = args[1].immediate();
                let a = self.ptrtoint(a, self.type_isize());
                let b = self.ptrtoint(b, self.type_isize());
                let d = self.sub(a, b);
                let pointee_size = self.const_usize(pointee_size.bytes());
                // this is where the signed magic happens (notice the `s` in `exactsdiv`)
                self.exactsdiv(d, pointee_size)
            }

            _ => bug!("unknown intrinsic '{}'", name),
        };

        if !fn_abi.ret.is_ignore() {
            if let PassMode::Cast(ty) = fn_abi.ret.mode {
                let ptr_llty = self.type_ptr_to(ty.llvm_type(self));
                let ptr = self.pointercast(result.llval, ptr_llty);
                self.store(llval, ptr, result.align);
            } else {
                OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
                    .val
                    .store(self, result);
            }
        }
    }

    fn abort(&mut self) {
        let fnname = self.get_intrinsic(&("llvm.trap"));
        self.call(fnname, &[], None);
    }

    fn assume(&mut self, val: Self::Value) {
        let assume_intrinsic = self.get_intrinsic("llvm.assume");
        self.call(assume_intrinsic, &[val], None);
    }

    fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value {
        let expect = self.get_intrinsic(&"llvm.expect.i1");
        self.call(expect, &[cond, self.const_bool(expected)], None)
    }

    fn sideeffect(&mut self) {
        if self.tcx.sess.opts.debugging_opts.insert_sideeffect {
            let fnname = self.get_intrinsic(&("llvm.sideeffect"));
            self.call(fnname, &[], None);
        }
    }

    fn va_start(&mut self, va_list: &'ll Value) -> &'ll Value {
        let intrinsic = self.cx().get_intrinsic("llvm.va_start");
        self.call(intrinsic, &[va_list], None)
    }

    fn va_end(&mut self, va_list: &'ll Value) -> &'ll Value {
        let intrinsic = self.cx().get_intrinsic("llvm.va_end");
        self.call(intrinsic, &[va_list], None)
    }
}

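// For example (illustrative): `copy_nonoverlapping::<u64>(src, dst, n)` reaches
// this helper with `allow_overlap == false` and lowers to a `memcpy` of
// `n * size_of::<u64>()` = `n * 8` bytes, while `copy` sets `allow_overlap`
// and lowers to `memmove` so the two ranges may overlap.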
fn copy_intrinsic(
    bx: &mut Builder<'a, 'll, 'tcx>,
    allow_overlap: bool,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: &'ll Value,
    src: &'ll Value,
    count: &'ll Value,
) {
    let (size, align) = bx.size_and_align_of(ty);
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    if allow_overlap {
        bx.memmove(dst, align, src, align, size, flags);
    } else {
        bx.memcpy(dst, align, src, align, size, flags);
    }
}

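// For example (illustrative): `write_bytes::<u16>(dst, 0xAB, n)` lowers to a
// `memset` that fills `n * 2` bytes at `dst` with the byte value 0xAB.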
fn memset_intrinsic(
    bx: &mut Builder<'a, 'll, 'tcx>,
    volatile: bool,
    ty: Ty<'tcx>,
    dst: &'ll Value,
    val: &'ll Value,
    count: &'ll Value,
) {
    let (size, align) = bx.size_and_align_of(ty);
    let size = bx.mul(bx.const_usize(size.bytes()), count);
    let flags = if volatile { MemFlags::VOLATILE } else { MemFlags::empty() };
    bx.memset(dst, val, size, align, flags);
}

fn try_intrinsic(
    bx: &mut Builder<'a, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: &'ll Value,
) {
    if bx.sess().no_landing_pads() {
        bx.call(try_func, &[data], None);
        // Return 0 unconditionally from the intrinsic call;
        // we can never unwind.
        let ret_align = bx.tcx().data_layout.i32_align.abi;
        bx.store(bx.const_i32(0), dest, ret_align);
    } else if wants_msvc_seh(bx.sess()) {
        codegen_msvc_try(bx, try_func, data, catch_func, dest);
    } else {
        codegen_gnu_try(bx, try_func, data, catch_func, dest);
    }
}

// MSVC's definition of the `rust_try` function.
//
// This implementation uses the new exception handling instructions in LLVM
// that support SEH on MSVC targets. Although these instructions are meant to
// work for all targets, as of this writing LLVM does not recommend using them
// everywhere, as the old instructions are still better optimized.
fn codegen_msvc_try(
    bx: &mut Builder<'a, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: &'ll Value,
) {
    let llfn = get_rust_try_fn(bx, &mut |mut bx| {
        bx.set_personality_fn(bx.eh_personality());
        bx.sideeffect();

        let mut normal = bx.build_sibling_block("normal");
        let mut catchswitch = bx.build_sibling_block("catchswitch");
        let mut catchpad = bx.build_sibling_block("catchpad");
        let mut caught = bx.build_sibling_block("caught");

        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);

        // We're generating an IR snippet that looks like:
        //
        //   declare i32 @rust_try(%try_func, %data, %catch_func) {
        //      %slot = alloca u8*
        //      invoke %try_func(%data) to label %normal unwind label %catchswitch
        //
        //   normal:
        //      ret i32 0
        //
        //   catchswitch:
        //      %cs = catchswitch within none [%catchpad] unwind to caller
        //
        //   catchpad:
        //      %tok = catchpad within %cs [%type_descriptor, 0, %slot]
        //      %ptr = load %slot
        //      call %catch_func(%data, %ptr)
        //      catchret from %tok to label %caught
        //
        //   caught:
        //      ret i32 1
        //   }
        //
        // This structure follows the basic usage of throw/try/catch in LLVM.
        // For example, compile this C++ snippet to see what LLVM generates:
        //
        //      #include <stdint.h>
        //
        //      struct rust_panic {
        //          rust_panic(const rust_panic&);
        //          ~rust_panic();
        //
        //          uint64_t x[2];
        //      };
        //
        //      int __rust_try(
        //          void (*try_func)(void*),
        //          void *data,
        //          void (*catch_func)(void*, void*) noexcept
        //      ) {
        //          try {
        //              try_func(data);
        //              return 0;
        //          } catch(rust_panic& a) {
        //              catch_func(data, &a);
        //              return 1;
        //          }
        //      }
        //
        // More information can be found in libstd's seh.rs implementation.
        let ptr_align = bx.tcx().data_layout.pointer_align.abi;
        let slot = bx.alloca(bx.type_i8p(), ptr_align);
        bx.invoke(try_func, &[data], normal.llbb(), catchswitch.llbb(), None);

        normal.ret(bx.const_i32(0));

        let cs = catchswitch.catch_switch(None, None, 1);
        catchswitch.add_handler(cs, catchpad.llbb());

        // We can't use the TypeDescriptor defined in libpanic_unwind because it
        // might be in another DLL and the SEH encoding only supports specifying
        // a TypeDescriptor from the current module.
        //
        // However this isn't an issue since the MSVC runtime uses string
        // comparison on the type name to match TypeDescriptors rather than
        // pointer equality.
        //
        // So instead we generate a new TypeDescriptor in each module that uses
        // `try` and let the linker merge the duplicate definitions across
        // modules.
        //
        // When modifying, make sure that the type_name string exactly matches
        // the one used in src/libpanic_unwind/seh.rs.
        let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_i8p());
        let type_name = bx.const_bytes(b"rust_panic\0");
        let type_info =
            bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_i8p()), type_name], false);
        let tydesc = bx.declare_global("__rust_panic_type_info", bx.val_ty(type_info));
        unsafe {
            llvm::LLVMRustSetLinkage(tydesc, llvm::Linkage::LinkOnceODRLinkage);
            llvm::SetUniqueComdat(bx.llmod, tydesc);
            llvm::LLVMSetInitializer(tydesc, type_info);
        }

        // The flag value of 8 indicates that we are catching the exception by
        // reference instead of by value. We can't use catch by value because
        // that requires copying the exception object, which we don't support
        // since our exception object effectively contains a Box.
        //
        // Source: MicrosoftCXXABI::getAddrOfCXXCatchHandlerType in clang
        let flags = bx.const_i32(8);
        let funclet = catchpad.catch_pad(cs, &[tydesc, flags, slot]);
        let ptr = catchpad.load(slot, ptr_align);
        catchpad.call(catch_func, &[data, ptr], Some(&funclet));

        catchpad.catch_ret(&funclet, caught.llbb());

        caught.ret(bx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llfn, &[try_func, data, catch_func], None);
    let i32_align = bx.tcx().data_layout.i32_align.abi;
    bx.store(ret, dest, i32_align);
}

// Definition of the standard `try` function for Rust using the GNU-like model
// of exceptions (e.g., the normal semantics of LLVM's `landingpad` and `invoke`
// instructions).
//
// This codegen is a little surprising because we always call a shim
// function instead of emitting the `invoke` inline here. This is done
// because in LLVM we're only allowed to have one personality per function
// definition. The call to the `try` intrinsic is being inlined into the
// function calling it, and that function may already have other personality
// functions in play. By calling a shim we're guaranteed that our shim will have
// the right personality function.
fn codegen_gnu_try(
    bx: &mut Builder<'a, 'll, 'tcx>,
    try_func: &'ll Value,
    data: &'ll Value,
    catch_func: &'ll Value,
    dest: &'ll Value,
) {
    let llfn = get_rust_try_fn(bx, &mut |mut bx| {
        // Codegens the shims described above:
        //
        //   bx:
        //      invoke %try_func(%data) normal %normal unwind %catch
        //
        //   normal:
        //      ret 0
        //
        //   catch:
        //      (%ptr, _) = landingpad
        //      call %catch_func(%data, %ptr)
        //      ret 1

        bx.sideeffect();

        let mut then = bx.build_sibling_block("then");
        let mut catch = bx.build_sibling_block("catch");

        let try_func = llvm::get_param(bx.llfn(), 0);
        let data = llvm::get_param(bx.llfn(), 1);
        let catch_func = llvm::get_param(bx.llfn(), 2);
        bx.invoke(try_func, &[data], then.llbb(), catch.llbb(), None);
        then.ret(bx.const_i32(0));

        // Type indicator for the exception being thrown.
        //
        // The first value in this tuple is a pointer to the exception object
        // being thrown. The second value is a "selector" indicating which of
        // the landing pad clauses the exception's type had been matched to.
        // rust_try ignores the selector.
        let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
        let vals = catch.landing_pad(lpad_ty, bx.eh_personality(), 1);
        let tydesc = match bx.tcx().lang_items().eh_catch_typeinfo() {
            Some(tydesc) => {
                let tydesc = bx.get_static(tydesc);
                bx.bitcast(tydesc, bx.type_i8p())
            }
            None => bx.const_null(bx.type_i8p()),
        };
        catch.add_clause(vals, tydesc);
        let ptr = catch.extract_value(vals, 0);
        catch.call(catch_func, &[data, ptr], None);
        catch.ret(bx.const_i32(1));
    });

    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llfn, &[try_func, data, catch_func], None);
    let i32_align = bx.tcx().data_layout.i32_align.abi;
    bx.store(ret, dest, i32_align);
}

// Helper function to give a `Builder` to a closure to codegen a shim function.
// This is currently primarily used for the `try` intrinsic functions above.
fn gen_fn<'ll, 'tcx>(
    cx: &CodegenCx<'ll, 'tcx>,
    name: &str,
    inputs: Vec<Ty<'tcx>>,
    output: Ty<'tcx>,
    codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
) -> &'ll Value {
    let rust_fn_sig = ty::Binder::bind(cx.tcx.mk_fn_sig(
        inputs.into_iter(),
        output,
        false,
        hir::Unsafety::Unsafe,
        Abi::Rust,
    ));
    let fn_abi = FnAbi::of_fn_ptr(cx, rust_fn_sig, &[]);
    let llfn = cx.declare_fn(name, &fn_abi);
    cx.set_frame_pointer_elimination(llfn);
    cx.apply_target_cpu_attr(llfn);
    // FIXME(eddyb) find a nicer way to do this.
    unsafe { llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::InternalLinkage) };
    let bx = Builder::new_block(cx, llfn, "entry-block");
    codegen(bx);
    llfn
}

// Helper function used to get a handle to the `__rust_try` function used to
// catch exceptions.
//
// This function is only generated once and is then cached.
fn get_rust_try_fn<'ll, 'tcx>(
    cx: &CodegenCx<'ll, 'tcx>,
    codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
) -> &'ll Value {
    if let Some(llfn) = cx.rust_try_fn.get() {
        return llfn;
    }

    // Define the type up front for the signature of the rust_try function.
    let tcx = cx.tcx;
    let i8p = tcx.mk_mut_ptr(tcx.types.i8);
    let try_fn_ty = tcx.mk_fn_ptr(ty::Binder::bind(tcx.mk_fn_sig(
        iter::once(i8p),
        tcx.mk_unit(),
        false,
        hir::Unsafety::Unsafe,
        Abi::Rust,
    )));
    let catch_fn_ty = tcx.mk_fn_ptr(ty::Binder::bind(tcx.mk_fn_sig(
        [i8p, i8p].iter().cloned(),
        tcx.mk_unit(),
        false,
        hir::Unsafety::Unsafe,
        Abi::Rust,
    )));
    let output = tcx.types.i32;
    let rust_try = gen_fn(cx, "__rust_try", vec![try_fn_ty, i8p, catch_fn_ty], output, codegen);
    cx.rust_try_fn.set(Some(rust_try));
    rust_try
}

fn generic_simd_intrinsic(
    bx: &mut Builder<'a, 'll, 'tcx>,
    name: &str,
    callee_ty: Ty<'tcx>,
    args: &[OperandRef<'tcx, &'ll Value>],
    ret_ty: Ty<'tcx>,
    llret_ty: &'ll Type,
    span: Span,
) -> Result<&'ll Value, ()> {
    // macros for error handling:
    macro_rules! emit_error {
        ($msg: tt) => {
            emit_error!($msg, )
        };
        ($msg: tt, $($fmt: tt)*) => {
            span_invalid_monomorphization_error(
                bx.sess(), span,
                &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
                         name, $($fmt)*));
        }
    }

    macro_rules! return_error {
        ($($fmt: tt)*) => {
            {
                emit_error!($($fmt)*);
                return Err(());
            }
        }
    }

    macro_rules! require {
        ($cond: expr, $($fmt: tt)*) => {
            if !$cond {
                return_error!($($fmt)*);
            }
        };
    }

    macro_rules! require_simd {
        ($ty: expr, $position: expr) => {
            require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
        };
    }

    let tcx = bx.tcx();
    let sig = tcx
        .normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &callee_ty.fn_sig(tcx));
    let arg_tys = sig.inputs();

    if name == "simd_select_bitmask" {
        let in_ty = arg_tys[0];
        let m_len = match in_ty.kind {
            // Note that this `.unwrap()` panics for isize/usize; that is
            // intentional, as there is currently no use case for those types.
            ty::Int(i) => i.bit_width().unwrap(),
            ty::Uint(i) => i.bit_width().unwrap(),
            _ => return_error!("`{}` is not an integral type", in_ty),
        };
        require_simd!(arg_tys[1], "argument");
        let v_len = arg_tys[1].simd_size(tcx);
        require!(
            m_len == v_len,
            "mismatched lengths: mask length `{}` != other vector length `{}`",
            m_len,
            v_len
        );
        let i1 = bx.type_i1();
        let i1xn = bx.type_vector(i1, m_len);
        let m_i1s = bx.bitcast(args[0].immediate(), i1xn);
        return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
    }

    // Every intrinsic below takes a SIMD vector as its first argument.
    require_simd!(arg_tys[0], "input");
    let in_ty = arg_tys[0];
    let in_elem = arg_tys[0].simd_type(tcx);
    let in_len = arg_tys[0].simd_size(tcx);

    let comparison = match name {
        "simd_eq" => Some(hir::BinOpKind::Eq),
        "simd_ne" => Some(hir::BinOpKind::Ne),
        "simd_lt" => Some(hir::BinOpKind::Lt),
        "simd_le" => Some(hir::BinOpKind::Le),
        "simd_gt" => Some(hir::BinOpKind::Gt),
        "simd_ge" => Some(hir::BinOpKind::Ge),
        _ => None,
    };

    if let Some(cmp_op) = comparison {
        require_simd!(ret_ty, "return");

        let out_len = ret_ty.simd_size(tcx);
        require!(
            in_len == out_len,
            "expected return type with length {} (same as input type `{}`), \
             found `{}` with length {}",
            in_len,
            in_ty,
            ret_ty,
            out_len
        );
        require!(
            bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
            "expected return type with integer elements, found `{}` with non-integer `{}`",
            ret_ty,
            ret_ty.simd_type(tcx)
        );

        return Ok(compare_simd_types(
            bx,
            args[0].immediate(),
            args[1].immediate(),
            in_elem,
            llret_ty,
            cmp_op,
        ));
    }

    if name.starts_with("simd_shuffle") {
        let n: u64 = name["simd_shuffle".len()..].parse().unwrap_or_else(|_| {
            span_bug!(span, "bad `simd_shuffle` instruction only caught in codegen?")
        });

        require_simd!(ret_ty, "return");

        let out_len = ret_ty.simd_size(tcx);
        require!(
            out_len == n,
            "expected return type of length {}, found `{}` with length {}",
            n,
            ret_ty,
            out_len
        );
        require!(
            in_elem == ret_ty.simd_type(tcx),
            "expected return element type `{}` (element of input `{}`), \
             found `{}` with element type `{}`",
            in_elem,
            in_ty,
            ret_ty,
            ret_ty.simd_type(tcx)
        );

        let total_len = u128::from(in_len) * 2;
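        // The index vector selects lanes from the concatenation of the two
        // inputs, hence the limit of `2 * in_len`: e.g. with `a = [a0, a1]`
        // and `b = [b0, b1]`, the index vector [0, 2, 1, 3] yields
        // [a0, b0, a1, b1].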

        let vector = args[2].immediate();

        let indices: Option<Vec<_>> = (0..n)
            .map(|i| {
                let arg_idx = i;
                let val = bx.const_get_elt(vector, i as u64);
                match bx.const_to_opt_u128(val, true) {
                    None => {
                        emit_error!("shuffle index #{} is not a constant", arg_idx);
                        None
                    }
                    Some(idx) if idx >= total_len => {
                        emit_error!(
                            "shuffle index #{} is out of bounds (limit {})",
                            arg_idx,
                            total_len
                        );
                        None
                    }
                    Some(idx) => Some(bx.const_i32(idx as i32)),
                }
            })
            .collect();
        let indices = match indices {
            Some(i) => i,
            None => return Ok(bx.const_null(llret_ty)),
        };

        return Ok(bx.shuffle_vector(
            args[0].immediate(),
            args[1].immediate(),
            bx.const_vector(&indices),
        ));
    }

    if name == "simd_insert" {
        require!(
            in_elem == arg_tys[2],
            "expected inserted type `{}` (element of input `{}`), found `{}`",
            in_elem,
            in_ty,
            arg_tys[2]
        );
        return Ok(bx.insert_element(
            args[0].immediate(),
            args[2].immediate(),
            args[1].immediate(),
        ));
    }
    if name == "simd_extract" {
        require!(
            ret_ty == in_elem,
            "expected return type `{}` (element of input `{}`), found `{}`",
            in_elem,
            in_ty,
            ret_ty
        );
        return Ok(bx.extract_element(args[0].immediate(), args[1].immediate()));
    }

    if name == "simd_select" {
        let m_elem_ty = in_elem;
        let m_len = in_len;
        require_simd!(arg_tys[1], "argument");
        let v_len = arg_tys[1].simd_size(tcx);
        require!(
            m_len == v_len,
            "mismatched lengths: mask length `{}` != other vector length `{}`",
            m_len,
            v_len
        );
        match m_elem_ty.kind {
            ty::Int(_) => {}
            _ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty),
        }
        // truncate the mask to a vector of i1s
        let i1 = bx.type_i1();
        let i1xn = bx.type_vector(i1, m_len as u64);
        let m_i1s = bx.trunc(args[0].immediate(), i1xn);
        return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
    }

    if name == "simd_bitmask" {
        // The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a
        // vector mask and returns an unsigned integer containing the most
        // significant bit (MSB) of each lane.

        // If the vector has fewer than 8 lanes, a u8 is returned with zeroed
        // trailing bits.
        let expected_int_bits = in_len.max(8);
        match ret_ty.kind {
            ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => (),
            _ => return_error!("bitmask `{}`, expected `u{}`", ret_ty, expected_int_bits),
        }

        // Integer vector <i{in_bitwidth} x in_len>:
        let (i_xn, in_elem_bitwidth) = match in_elem.kind {
            ty::Int(i) => (
                args[0].immediate(),
                i.bit_width().unwrap_or(bx.data_layout().pointer_size.bits()),
            ),
            ty::Uint(i) => (
                args[0].immediate(),
                i.bit_width().unwrap_or(bx.data_layout().pointer_size.bits()),
            ),
            _ => return_error!(
                "vector argument `{}`'s element type `{}`, expected integer element type",
                in_ty,
                in_elem
            ),
        };

        // Shift the MSB to the right by "in_elem_bitwidth - 1" into the first bit position.
        let shift_indices =
            vec![
                bx.cx.const_int(bx.type_ix(in_elem_bitwidth), (in_elem_bitwidth - 1) as _);
                in_len as _
            ];
        let i_xn_msb = bx.lshr(i_xn, bx.const_vector(shift_indices.as_slice()));
        // Truncate vector to an <i1 x N>
        let i1xn = bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len));
        // Bitcast <i1 x N> to iN:
        let i_ = bx.bitcast(i1xn, bx.type_ix(in_len));
        // Zero-extend iN to the bitmask type:
        return Ok(bx.zext(i_, bx.type_ix(expected_int_bits)));
    }
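
    // A scalar model of the `simd_bitmask` lowering above (illustrative only;
    // assumes a little-endian target, so lane 0 maps to bit 0):
    //
    //     fn bitmask4(mask: [i32; 4]) -> u8 {
    //         let mut r = 0u8;
    //         for (i, lane) in mask.iter().enumerate() {
    //             // keep only the most significant bit of each lane
    //             r |= ((((*lane as u32) >> 31) & 1) as u8) << i;
    //         }
    //         r
    //     }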

    fn simd_simple_float_intrinsic(
        name: &str,
        in_elem: &::rustc_middle::ty::TyS<'_>,
        in_ty: &::rustc_middle::ty::TyS<'_>,
        in_len: u64,
        bx: &mut Builder<'a, 'll, 'tcx>,
        span: Span,
        args: &[OperandRef<'tcx, &'ll Value>],
    ) -> Result<&'ll Value, ()> {
        macro_rules! emit_error {
            ($msg: tt) => {
                emit_error!($msg, )
            };
            ($msg: tt, $($fmt: tt)*) => {
                span_invalid_monomorphization_error(
                    bx.sess(), span,
                    &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
                             name, $($fmt)*));
            }
        }
        macro_rules! return_error {
            ($($fmt: tt)*) => {
                {
                    emit_error!($($fmt)*);
                    return Err(());
                }
            }
        }
        let ety = match in_elem.kind {
            ty::Float(f) if f.bit_width() == 32 => {
                if in_len < 2 || in_len > 16 {
                    return_error!(
                        "unsupported floating-point vector `{}` with length `{}` \
                         out-of-range [2, 16]",
                        in_ty,
                        in_len
                    );
                }
                "f32"
            }
            ty::Float(f) if f.bit_width() == 64 => {
                if in_len < 2 || in_len > 8 {
                    return_error!(
                        "unsupported floating-point vector `{}` with length `{}` \
                         out-of-range [2, 8]",
                        in_ty,
                        in_len
                    );
                }
                "f64"
            }
            ty::Float(f) => {
                return_error!(
                    "unsupported element type `{}` of floating-point vector `{}`",
                    f.name_str(),
                    in_ty
                );
            }
            _ => {
                return_error!("`{}` is not a floating-point type", in_ty);
            }
        };

        let llvm_name = &format!("llvm.{0}.v{1}{2}", name, in_len, ety);
        let intrinsic = bx.get_intrinsic(&llvm_name);
        let c =
            bx.call(intrinsic, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None);
        unsafe { llvm::LLVMRustSetHasUnsafeAlgebra(c) };
        Ok(c)
    }

    match name {
        "simd_fsqrt" => {
            return simd_simple_float_intrinsic("sqrt", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_fsin" => {
            return simd_simple_float_intrinsic("sin", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_fcos" => {
            return simd_simple_float_intrinsic("cos", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_fabs" => {
            return simd_simple_float_intrinsic("fabs", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_floor" => {
            return simd_simple_float_intrinsic("floor", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_ceil" => {
            return simd_simple_float_intrinsic("ceil", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_fexp" => {
            return simd_simple_float_intrinsic("exp", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_fexp2" => {
            return simd_simple_float_intrinsic("exp2", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_flog10" => {
            return simd_simple_float_intrinsic("log10", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_flog2" => {
            return simd_simple_float_intrinsic("log2", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_flog" => {
            return simd_simple_float_intrinsic("log", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_fpowi" => {
            return simd_simple_float_intrinsic("powi", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_fpow" => {
            return simd_simple_float_intrinsic("pow", in_elem, in_ty, in_len, bx, span, args);
        }
        "simd_fma" => {
            return simd_simple_float_intrinsic("fma", in_elem, in_ty, in_len, bx, span, args);
        }
        _ => { /* fallthrough */ }
    }

    // FIXME: use:
    //  https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Function.h#L182
    //  https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Intrinsics.h#L81
    fn llvm_vector_str(elem_ty: Ty<'_>, vec_len: u64, no_pointers: usize) -> String {
        let p0s: String = "p0".repeat(no_pointers);
        match elem_ty.kind {
            ty::Int(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
            ty::Uint(v) => format!("v{}{}i{}", vec_len, p0s, v.bit_width().unwrap()),
            ty::Float(v) => format!("v{}{}f{}", vec_len, p0s, v.bit_width()),
            _ => unreachable!(),
        }
    }

    fn llvm_vector_ty(
        cx: &CodegenCx<'ll, '_>,
        elem_ty: Ty<'_>,
        vec_len: u64,
        mut no_pointers: usize,
    ) -> &'ll Type {
        // FIXME: use cx.layout_of(ty).llvm_type() ?
        let mut elem_ty = match elem_ty.kind {
            ty::Int(v) => cx.type_int_from_ty(v),
            ty::Uint(v) => cx.type_uint_from_ty(v),
            ty::Float(v) => cx.type_float_from_ty(v),
            _ => unreachable!(),
        };
        while no_pointers > 0 {
            elem_ty = cx.type_ptr_to(elem_ty);
            no_pointers -= 1;
        }
        cx.type_vector(elem_ty, vec_len)
    }
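
    // For example (illustrative): `llvm_vector_str(u32, 4, 1)` produces
    // "v4p0i32" (a vector of four `i32*`s in LLVM's overload mangling), and a
    // gather of `u32x4` through such pointers uses the intrinsic name
    // "llvm.masked.gather.v4i32.v4p0i32".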

    if name == "simd_gather" {
        // simd_gather(values: <N x T>, pointers: <N x *_ T>,
        //             mask: <N x i{M}>) -> <N x T>
        // * N: number of elements in the input vectors
        // * T: type of the element to load
        // * M: any integer width is supported, will be truncated to i1

        // All types must be simd vector types
        require_simd!(in_ty, "first");
        require_simd!(arg_tys[1], "second");
        require_simd!(arg_tys[2], "third");
        require_simd!(ret_ty, "return");

        // Of the same length:
        require!(
            in_len == arg_tys[1].simd_size(tcx),
            "expected {} argument with length {} (same as input type `{}`), \
             found `{}` with length {}",
            "second",
            in_len,
            in_ty,
            arg_tys[1],
            arg_tys[1].simd_size(tcx)
        );
        require!(
            in_len == arg_tys[2].simd_size(tcx),
            "expected {} argument with length {} (same as input type `{}`), \
             found `{}` with length {}",
            "third",
            in_len,
            in_ty,
            arg_tys[2],
            arg_tys[2].simd_size(tcx)
        );

        // The return type must match the first argument type
        require!(ret_ty == in_ty, "expected return type `{}`, found `{}`", in_ty, ret_ty);

        // This counts how many pointers
        fn ptr_count(t: Ty<'_>) -> usize {
            match t.kind {
                ty::RawPtr(p) => 1 + ptr_count(p.ty),
                _ => 0,
            }
        }

        // Non-ptr type
        fn non_ptr(t: Ty<'_>) -> Ty<'_> {
            match t.kind {
                ty::RawPtr(p) => non_ptr(p.ty),
                _ => t,
            }
        }

        // The second argument must be a simd vector with an element type that's a pointer
        // to the element type of the first argument
        let (pointer_count, underlying_ty) = match arg_tys[1].simd_type(tcx).kind {
            ty::RawPtr(p) if p.ty == in_elem => {
                (ptr_count(arg_tys[1].simd_type(tcx)), non_ptr(arg_tys[1].simd_type(tcx)))
            }
            _ => {
                require!(
                    false,
                    "expected element type `{}` of second argument `{}` \
                     to be a pointer to the element type `{}` of the first \
                     argument `{}`, found `{}` != `*_ {}`",
                    arg_tys[1].simd_type(tcx),
                    arg_tys[1],
                    in_elem,
                    in_ty,
                    arg_tys[1].simd_type(tcx),
                    in_elem
                );
                unreachable!();
            }
        };
        assert!(pointer_count > 0);
        assert_eq!(pointer_count - 1, ptr_count(arg_tys[0].simd_type(tcx)));
        assert_eq!(underlying_ty, non_ptr(arg_tys[0].simd_type(tcx)));

        // The element type of the third argument must be a signed integer type of any width:
        match arg_tys[2].simd_type(tcx).kind {
            ty::Int(_) => (),
            _ => {
                require!(
                    false,
                    "expected element type `{}` of third argument `{}` \
                     to be a signed integer type",
                    arg_tys[2].simd_type(tcx),
                    arg_tys[2]
                );
            }
        }

        // Alignment of T, must be a constant integer value:
        let alignment_ty = bx.type_i32();
        let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);

        // Truncate the mask vector to a vector of i1s:
        let (mask, mask_ty) = {
            let i1 = bx.type_i1();
            let i1xn = bx.type_vector(i1, in_len);
            (bx.trunc(args[2].immediate(), i1xn), i1xn)
        };

        // Type of the vector of pointers:
        let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count);
        let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);

        // Type of the vector of elements:
        let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
        let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);

        let llvm_intrinsic =
            format!("llvm.masked.gather.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
        let f = bx.declare_cfn(
            &llvm_intrinsic,
            bx.type_func(
                &[llvm_pointer_vec_ty, alignment_ty, mask_ty, llvm_elem_vec_ty],
                llvm_elem_vec_ty,
            ),
        );
        llvm::SetUnnamedAddress(f, llvm::UnnamedAddr::No);
        let v = bx.call(f, &[args[1].immediate(), alignment, mask, args[0].immediate()], None);
        return Ok(v);
    }

    if name == "simd_scatter" {
        // simd_scatter(values: <N x T>, pointers: <N x *mut T>,
        //              mask: <N x i{M}>) -> ()
        // * N: number of elements in the input vectors
        // * T: type of the element to load
        // * M: any integer width is supported, will be truncated to i1

        // All types must be simd vector types
        require_simd!(in_ty, "first");
        require_simd!(arg_tys[1], "second");
        require_simd!(arg_tys[2], "third");

        // Of the same length:
        require!(
            in_len == arg_tys[1].simd_size(tcx),
            "expected {} argument with length {} (same as input type `{}`), \
             found `{}` with length {}",
            "second",
            in_len,
            in_ty,
            arg_tys[1],
            arg_tys[1].simd_size(tcx)
        );
        require!(
            in_len == arg_tys[2].simd_size(tcx),
            "expected {} argument with length {} (same as input type `{}`), \
             found `{}` with length {}",
            "third",
            in_len,
            in_ty,
            arg_tys[2],
            arg_tys[2].simd_size(tcx)
        );

        // This counts how many pointers
        fn ptr_count(t: Ty<'_>) -> usize {
            match t.kind {
                ty::RawPtr(p) => 1 + ptr_count(p.ty),
                _ => 0,
            }
        }

        // Non-ptr type
        fn non_ptr(t: Ty<'_>) -> Ty<'_> {
            match t.kind {
                ty::RawPtr(p) => non_ptr(p.ty),
                _ => t,
            }
        }

        // The second argument must be a simd vector with an element type that's a pointer
        // to the element type of the first argument
        let (pointer_count, underlying_ty) = match arg_tys[1].simd_type(tcx).kind {
            ty::RawPtr(p) if p.ty == in_elem && p.mutbl == hir::Mutability::Mut => {
                (ptr_count(arg_tys[1].simd_type(tcx)), non_ptr(arg_tys[1].simd_type(tcx)))
            }
            _ => {
                require!(
                    false,
                    "expected element type `{}` of second argument `{}` \
                     to be a pointer to the element type `{}` of the first \
                     argument `{}`, found `{}` != `*mut {}`",
                    arg_tys[1].simd_type(tcx),
                    arg_tys[1],
                    in_elem,
                    in_ty,
                    arg_tys[1].simd_type(tcx),
                    in_elem
                );
                unreachable!();
            }
        };
        assert!(pointer_count > 0);
        assert_eq!(pointer_count - 1, ptr_count(arg_tys[0].simd_type(tcx)));
        assert_eq!(underlying_ty, non_ptr(arg_tys[0].simd_type(tcx)));

        // The element type of the third argument must be a signed integer type of any width:
        match arg_tys[2].simd_type(tcx).kind {
            ty::Int(_) => (),
            _ => {
                require!(
                    false,
                    "expected element type `{}` of third argument `{}` \
                     to be a signed integer type",
                    arg_tys[2].simd_type(tcx),
                    arg_tys[2]
                );
            }
        }

        // Alignment of T, must be a constant integer value:
        let alignment_ty = bx.type_i32();
        let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);

        // Truncate the mask vector to a vector of i1s:
        let (mask, mask_ty) = {
            let i1 = bx.type_i1();
            let i1xn = bx.type_vector(i1, in_len);
            (bx.trunc(args[2].immediate(), i1xn), i1xn)
        };

        let ret_t = bx.type_void();

        // Type of the vector of pointers:
        let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count);
        let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);

        // Type of the vector of elements:
        let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
        let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);

        let llvm_intrinsic =
            format!("llvm.masked.scatter.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str);
        let f = bx.declare_cfn(
            &llvm_intrinsic,
            bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t),
        );
        llvm::SetUnnamedAddress(f, llvm::UnnamedAddr::No);
        let v = bx.call(f, &[args[0].immediate(), args[1].immediate(), alignment, mask], None);
        return Ok(v);
    }

    macro_rules! arith_red {
        ($name:tt : $integer_reduce:ident, $float_reduce:ident, $ordered:expr) => {
            if name == $name {
                require!(
                    ret_ty == in_elem,
                    "expected return type `{}` (element of input `{}`), found `{}`",
                    in_elem,
                    in_ty,
                    ret_ty
                );
                return match in_elem.kind {
                    ty::Int(_) | ty::Uint(_) => {
                        let r = bx.$integer_reduce(args[0].immediate());
                        if $ordered {
                            // if overflow occurs, the result is the
                            // mathematical result modulo 2^n:
                            if name.contains("mul") {
                                Ok(bx.mul(args[1].immediate(), r))
                            } else {
                                Ok(bx.add(args[1].immediate(), r))
                            }
                        } else {
                            Ok(bx.$integer_reduce(args[0].immediate()))
                        }
                    }
                    ty::Float(f) => {
                        let acc = if $ordered {
                            // ordered arithmetic reductions take an accumulator
                            args[1].immediate()
                        } else {
                            // unordered arithmetic reductions use the identity accumulator
                            let identity_acc = if $name.contains("mul") { 1.0 } else { 0.0 };
                            match f.bit_width() {
                                32 => bx.const_real(bx.type_f32(), identity_acc),
                                64 => bx.const_real(bx.type_f64(), identity_acc),
                                v => return_error!(
                                    r#"
unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
                                    $name,
                                    in_ty,
                                    in_elem,
                                    v,
                                    ret_ty
                                ),
                            }
                        };
                        Ok(bx.$float_reduce(acc, args[0].immediate()))
                    }
                    _ => return_error!(
                        "unsupported {} from `{}` with element `{}` to `{}`",
                        $name,
                        in_ty,
                        in_elem,
                        ret_ty
                    ),
                };
            }
        };
    }

    arith_red!("simd_reduce_add_ordered": vector_reduce_add, vector_reduce_fadd, true);
    arith_red!("simd_reduce_mul_ordered": vector_reduce_mul, vector_reduce_fmul, true);
    arith_red!("simd_reduce_add_unordered": vector_reduce_add, vector_reduce_fadd_fast, false);
    arith_red!("simd_reduce_mul_unordered": vector_reduce_mul, vector_reduce_fmul_fast, false);
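
    // For example (illustrative): `simd_reduce_add_ordered(v, acc)` computes
    // `acc + v[0] + v[1] + ...` (floats thread the accumulator through
    // `vector_reduce_fadd`), while the unordered forms start from the identity
    // (0.0 for add, 1.0 for mul) and are allowed to reassociate.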

    macro_rules! minmax_red {
        ($name:tt: $int_red:ident, $float_red:ident) => {
            if name == $name {
                require!(
                    ret_ty == in_elem,
                    "expected return type `{}` (element of input `{}`), found `{}`",
                    in_elem,
                    in_ty,
                    ret_ty
                );
                return match in_elem.kind {
                    ty::Int(_i) => Ok(bx.$int_red(args[0].immediate(), true)),
                    ty::Uint(_u) => Ok(bx.$int_red(args[0].immediate(), false)),
                    ty::Float(_f) => Ok(bx.$float_red(args[0].immediate())),
                    _ => return_error!(
                        "unsupported {} from `{}` with element `{}` to `{}`",
                        $name,
                        in_ty,
                        in_elem,
                        ret_ty
                    ),
                };
            }
        };
    }

    minmax_red!("simd_reduce_min": vector_reduce_min, vector_reduce_fmin);
    minmax_red!("simd_reduce_max": vector_reduce_max, vector_reduce_fmax);

    minmax_red!("simd_reduce_min_nanless": vector_reduce_min, vector_reduce_fmin_fast);
    minmax_red!("simd_reduce_max_nanless": vector_reduce_max, vector_reduce_fmax_fast);
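    // Illustrative sketch (hypothetical user-side declarations): the min/max
    // reductions return a single element; the `_nanless` variants map to the
    // `fast` LLVM reductions and assume no NaN lanes.
    //
    //     extern "platform-intrinsic" {
    //         fn simd_reduce_min<T, U>(x: T) -> U;
    //         fn simd_reduce_max<T, U>(x: T) -> U;
    //     }
    //     let m: f32 = unsafe { simd_reduce_min(f32x4(3.0, 1.0, 4.0, 1.5)) }; // 1.0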

    macro_rules! bitwise_red {
        ($name:tt : $red:ident, $boolean:expr) => {
            if name == $name {
                let input = if !$boolean {
                    require!(
                        ret_ty == in_elem,
                        "expected return type `{}` (element of input `{}`), found `{}`",
                        in_elem,
                        in_ty,
                        ret_ty
                    );
                    args[0].immediate()
                } else {
                    match in_elem.kind {
                        ty::Int(_) | ty::Uint(_) => {}
                        _ => return_error!(
                            "unsupported {} from `{}` with element `{}` to `{}`",
                            $name,
                            in_ty,
                            in_elem,
                            ret_ty
                        ),
                    }

                    // boolean reductions operate on vectors of i1s:
                    let i1 = bx.type_i1();
                    let i1xn = bx.type_vector(i1, in_len as u64);
                    bx.trunc(args[0].immediate(), i1xn)
                };
                return match in_elem.kind {
                    ty::Int(_) | ty::Uint(_) => {
                        let r = bx.$red(input);
                        Ok(if !$boolean { r } else { bx.zext(r, bx.type_bool()) })
                    }
                    _ => return_error!(
                        "unsupported {} from `{}` with element `{}` to `{}`",
                        $name,
                        in_ty,
                        in_elem,
                        ret_ty
                    ),
                };
            }
        };
    }

    bitwise_red!("simd_reduce_and": vector_reduce_and, false);
    bitwise_red!("simd_reduce_or": vector_reduce_or, false);
    bitwise_red!("simd_reduce_xor": vector_reduce_xor, false);
    bitwise_red!("simd_reduce_all": vector_reduce_and, true);
    bitwise_red!("simd_reduce_any": vector_reduce_or, true);
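    // Illustrative sketch (hypothetical user-side declarations): `and`/`or`/`xor`
    // reduce integer vectors lane-wise, while `all`/`any` first truncate each lane
    // to i1 and zero-extend the reduced bit to a bool, as in the macro above.
    //
    //     extern "platform-intrinsic" {
    //         fn simd_reduce_and<T, U>(x: T) -> U;
    //         fn simd_reduce_all<T>(x: T) -> bool;
    //     }
    //     // every lane of the mask set (-1 in each lane) => true:
    //     let ok: bool = unsafe { simd_reduce_all(i32x4(-1, -1, -1, -1)) };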

    if name == "simd_cast" {
        require_simd!(ret_ty, "return");
        let out_len = ret_ty.simd_size(tcx);
        require!(
            in_len == out_len,
            "expected return type with length {} (same as input type `{}`), \
             found `{}` with length {}",
            in_len,
            in_ty,
            ret_ty,
            out_len
        );
        // casting cares about nominal type, not just structural type
        let out_elem = ret_ty.simd_type(tcx);

        if in_elem == out_elem {
            return Ok(args[0].immediate());
        }

        enum Style {
            Float,
            Int(/* is signed? */ bool),
            Unsupported,
        }

        let (in_style, in_width) = match in_elem.kind {
            // vectors of pointer-sized integers should've been
            // disallowed before here, so this unwrap is safe.
            ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()),
            ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()),
            ty::Float(f) => (Style::Float, f.bit_width()),
            _ => (Style::Unsupported, 0),
        };
        let (out_style, out_width) = match out_elem.kind {
            ty::Int(i) => (Style::Int(true), i.bit_width().unwrap()),
            ty::Uint(u) => (Style::Int(false), u.bit_width().unwrap()),
            ty::Float(f) => (Style::Float, f.bit_width()),
            _ => (Style::Unsupported, 0),
        };

        match (in_style, out_style) {
            (Style::Int(in_is_signed), Style::Int(_)) => {
                return Ok(match in_width.cmp(&out_width) {
                    Ordering::Greater => bx.trunc(args[0].immediate(), llret_ty),
                    Ordering::Equal => args[0].immediate(),
                    Ordering::Less => {
                        if in_is_signed {
                            bx.sext(args[0].immediate(), llret_ty)
                        } else {
                            bx.zext(args[0].immediate(), llret_ty)
                        }
                    }
                });
            }
            (Style::Int(in_is_signed), Style::Float) => {
                return Ok(if in_is_signed {
                    bx.sitofp(args[0].immediate(), llret_ty)
                } else {
                    bx.uitofp(args[0].immediate(), llret_ty)
                });
            }
            (Style::Float, Style::Int(out_is_signed)) => {
                return Ok(if out_is_signed {
                    bx.fptosi(args[0].immediate(), llret_ty)
                } else {
                    bx.fptoui(args[0].immediate(), llret_ty)
                });
            }
            (Style::Float, Style::Float) => {
                return Ok(match in_width.cmp(&out_width) {
                    Ordering::Greater => bx.fptrunc(args[0].immediate(), llret_ty),
                    Ordering::Equal => args[0].immediate(),
                    Ordering::Less => bx.fpext(args[0].immediate(), llret_ty),
                });
            }
            _ => { /* Unsupported. Fallthrough. */ }
        }
        require!(
            false,
            "unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
            in_ty,
            in_elem,
            ret_ty,
            out_elem
        );
    }
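    // Illustrative sketch (not part of this file): `simd_cast` requires equal lane
    // counts and picks trunc/zext/sext, fptrunc/fpext, or an int<->float conversion
    // from the (in_style, out_style) match above. Hypothetical usage:
    //
    //     extern "platform-intrinsic" {
    //         fn simd_cast<T, U>(x: T) -> U;
    //     }
    //     // i32x4 -> f32x4 lowers to a lane-wise `sitofp`:
    //     let f: f32x4 = unsafe { simd_cast(i32x4(1, 2, 3, 4)) };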
    macro_rules! arith {
        ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
            $(if name == stringify!($name) {
                match in_elem.kind {
                    $($(ty::$p(_))|* => {
                        return Ok(bx.$call(args[0].immediate(), args[1].immediate()))
                    })*
                    _ => {},
                }
                require!(
                    false,
                    "unsupported operation on `{}` with element `{}`",
                    in_ty,
                    in_elem
                )
            })*
        }
    }
    arith! {
        simd_add: Uint, Int => add, Float => fadd;
        simd_sub: Uint, Int => sub, Float => fsub;
        simd_mul: Uint, Int => mul, Float => fmul;
        simd_div: Uint => udiv, Int => sdiv, Float => fdiv;
        simd_rem: Uint => urem, Int => srem, Float => frem;
        simd_shl: Uint, Int => shl;
        simd_shr: Uint => lshr, Int => ashr;
        simd_and: Uint, Int => and;
        simd_or: Uint, Int => or;
        simd_xor: Uint, Int => xor;
        simd_fmax: Float => maxnum;
        simd_fmin: Float => minnum;
    }
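    // Illustrative sketch (hypothetical declarations): each `arith!` row maps one
    // intrinsic to the matching builder call per element kind, e.g. `simd_div`
    // becomes `udiv`/`sdiv`/`fdiv` for unsigned, signed, and float lanes:
    //
    //     extern "platform-intrinsic" {
    //         fn simd_add<T>(x: T, y: T) -> T;
    //         fn simd_div<T>(x: T, y: T) -> T;
    //     }
    //     let s = unsafe { simd_add(f32x4(1.0, 2.0, 3.0, 4.0), f32x4(0.5, 0.5, 0.5, 0.5)) };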

    if name == "simd_saturating_add" || name == "simd_saturating_sub" {
        let lhs = args[0].immediate();
        let rhs = args[1].immediate();
        let is_add = name == "simd_saturating_add";
        let ptr_bits = bx.tcx().data_layout.pointer_size.bits() as _;
        let (signed, elem_width, elem_ty) = match in_elem.kind {
            ty::Int(i) => (true, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_int_from_ty(i)),
            ty::Uint(i) => (false, i.bit_width().unwrap_or(ptr_bits), bx.cx.type_uint_from_ty(i)),
            _ => {
                return_error!(
                    "expected element type `{}` of vector type `{}` \
                     to be a signed or unsigned integer type",
                    arg_tys[0].simd_type(tcx),
                    arg_tys[0]
                );
            }
        };
        let llvm_intrinsic = &format!(
            "llvm.{}{}.sat.v{}i{}",
            if signed { 's' } else { 'u' },
            if is_add { "add" } else { "sub" },
            in_len,
            elem_width
        );
        let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64);

        let f = bx.declare_cfn(&llvm_intrinsic, bx.type_func(&[vec_ty, vec_ty], vec_ty));
        llvm::SetUnnamedAddress(f, llvm::UnnamedAddr::No);
        let v = bx.call(f, &[lhs, rhs], None);
        return Ok(v);
    }
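    // Illustrative sketch: for a 4-lane `u8` vector the `format!` above yields
    // `llvm.uadd.sat.v4i8`, so a hypothetical caller sees lane-wise clamping
    // instead of wrapping:
    //
    //     extern "platform-intrinsic" {
    //         fn simd_saturating_add<T>(x: T, y: T) -> T;
    //     }
    //     // u8x4(250, ..) + u8x4(10, ..) => u8x4(255, 255, 255, 255)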

    span_bug!(span, "unknown SIMD intrinsic");
}

// Returns the width of an int Ty, and whether it is signed.
// Returns None if the type is not an integer.
// FIXME: there are multiple versions of this function; investigate reusing
// one of the existing ones.
fn int_type_width_signed(ty: Ty<'_>, cx: &CodegenCx<'_, '_>) -> Option<(u64, bool)> {
    match ty.kind {
        ty::Int(t) => Some((
            match t {
                ast::IntTy::Isize => u64::from(cx.tcx.sess.target.ptr_width),
                ast::IntTy::I8 => 8,
                ast::IntTy::I16 => 16,
                ast::IntTy::I32 => 32,
                ast::IntTy::I64 => 64,
                ast::IntTy::I128 => 128,
            },
            true,
        )),
        ty::Uint(t) => Some((
            match t {
                ast::UintTy::Usize => u64::from(cx.tcx.sess.target.ptr_width),
                ast::UintTy::U8 => 8,
                ast::UintTy::U16 => 16,
                ast::UintTy::U32 => 32,
                ast::UintTy::U64 => 64,
                ast::UintTy::U128 => 128,
            },
            false,
        )),
        _ => None,
    }
}
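// For example (assuming a 64-bit target, where `ptr_width` is 64):
//     int_type_width_signed(tcx.types.i32, cx)   == Some((32, true))
//     int_type_width_signed(tcx.types.usize, cx) == Some((64, false))
//     int_type_width_signed(tcx.types.f32, cx)   == None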

// Returns the width of a float Ty.
// Returns None if the type is not a float.
fn float_type_width(ty: Ty<'_>) -> Option<u64> {
    match ty.kind {
        ty::Float(t) => Some(t.bit_width()),
        _ => None,
    }
}
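// For example: float_type_width(tcx.types.f64) == Some(64), while
// float_type_width(tcx.types.i32) == None.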