compiler/rustc_codegen_gcc/src/builder.rs
1use std::borrow::Cow;
2use std::cell::Cell;
3use std::convert::TryFrom;
4use std::ops::Deref;
5
6use gccjit::{
7 BinaryOp,
8 Block,
9 ComparisonOp,
10 Context,
11 Function,
12 LValue,
13 RValue,
14 ToRValue,
15 Type,
16 UnaryOp,
17};
18use rustc_codegen_ssa::MemFlags;
19use rustc_codegen_ssa::common::{AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope};
20use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
21use rustc_codegen_ssa::mir::place::PlaceRef;
22use rustc_codegen_ssa::traits::{
23 BackendTypes,
24 BaseTypeMethods,
25 BuilderMethods,
26 ConstMethods,
27 DerivedTypeMethods,
28 LayoutTypeMethods,
29 HasCodegen,
30 OverflowOp,
31 StaticBuilderMethods,
32};
33use rustc_data_structures::fx::FxHashSet;
34use rustc_middle::ty::{ParamEnv, Ty, TyCtxt};
35use rustc_middle::ty::layout::{FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, LayoutOfHelpers, TyAndLayout};
36use rustc_span::Span;
37use rustc_span::def_id::DefId;
38use rustc_target::abi::{
39 self,
40 call::FnAbi,
41 Align,
42 HasDataLayout,
43 Size,
44 TargetDataLayout,
45 WrappingRange,
46};
47use rustc_target::spec::{HasTargetSpec, Target};
48
49use crate::common::{SignType, TypeReflection, type_is_pointer};
50use crate::context::CodegenCx;
51use crate::intrinsic::llvm;
52use crate::type_of::LayoutGccExt;
53
54// TODO(antoyo)
55type Funclet = ();
56
57// TODO(antoyo): remove this variable.
58static mut RETURN_VALUE_COUNT: usize = 0;
59
60enum ExtremumOperation {
61 Max,
62 Min,
63}
64
65pub struct Builder<'a: 'gcc, 'gcc, 'tcx> {
66 pub cx: &'a CodegenCx<'gcc, 'tcx>,
67 pub block: Block<'gcc>,
68 stack_var_count: Cell<usize>,
69}
70
71impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
72 fn with_cx(cx: &'a CodegenCx<'gcc, 'tcx>, block: Block<'gcc>) -> Self {
73 Builder {
74 cx,
75 block,
76 stack_var_count: Cell::new(0),
77 }
78 }
79
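// A rough sketch of what atomic_extremum below emits (illustrative Rust-like pseudocode, not
// the literal generated code): the atomic maximum/minimum is emulated with a compare-exchange
// loop, relying on the GCC builtin writing the current value back into `previous` on failure.
//
//     let mut previous = atomic_load(dst);
//     while previous < src /* `>` for Min */ && !compare_exchange(dst, &mut previous, src) {}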
80 fn atomic_extremum(&mut self, operation: ExtremumOperation, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
81 let size = src.get_type().get_size();
82
83 let func = self.current_func();
84
85 let load_ordering =
86 match order {
87 // TODO(antoyo): does this make sense?
88 AtomicOrdering::AcquireRelease | AtomicOrdering::Release => AtomicOrdering::Acquire,
89 _ => order,
90 };
91 let previous_value = self.atomic_load(dst.get_type(), dst, load_ordering, Size::from_bytes(size));
92 let previous_var = func.new_local(None, previous_value.get_type(), "previous_value");
93 let return_value = func.new_local(None, previous_value.get_type(), "return_value");
94 self.llbb().add_assignment(None, previous_var, previous_value);
95 self.llbb().add_assignment(None, return_value, previous_var.to_rvalue());
96
97 let while_block = func.new_block("while");
98 let after_block = func.new_block("after_while");
99 self.llbb().end_with_jump(None, while_block);
100
101 // NOTE: since jumps were added and compare_exchange doesn't expect this, the current block in the
102 // state needs to be updated.
103 self.switch_to_block(while_block);
104
105 let comparison_operator =
106 match operation {
107 ExtremumOperation::Max => ComparisonOp::LessThan,
108 ExtremumOperation::Min => ComparisonOp::GreaterThan,
109 };
110
111 let cond1 = self.context.new_comparison(None, comparison_operator, previous_var.to_rvalue(), self.context.new_cast(None, src, previous_value.get_type()));
112 let compare_exchange = self.compare_exchange(dst, previous_var, src, order, load_ordering, false);
113 let cond2 = self.cx.context.new_unary_op(None, UnaryOp::LogicalNegate, compare_exchange.get_type(), compare_exchange);
114 let cond = self.cx.context.new_binary_op(None, BinaryOp::LogicalAnd, self.cx.bool_type, cond1, cond2);
115
116 while_block.end_with_conditional(None, cond, while_block, after_block);
117
118 // NOTE: since jumps were added in a place rustc does not expect, the current block in the
119 // state needs to be updated.
120 self.switch_to_block(after_block);
121
122 return_value.to_rvalue()
123 }
124
125 fn compare_exchange(&self, dst: RValue<'gcc>, cmp: LValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> {
126 let size = src.get_type().get_size();
127 let compare_exchange = self.context.get_builtin_function(&format!("__atomic_compare_exchange_{}", size));
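// For reference, the size-suffixed builtin fetched above is assumed to follow the usual GCC
// prototype (see the GCC atomics documentation):
//     bool __atomic_compare_exchange_N(type *ptr, type *expected, type desired,
//                                      bool weak, int success_order, int failure_order);
// which is why the arguments are cast to pointer and memory-order values below.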
128 let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
129 let failure_order = self.context.new_rvalue_from_int(self.i32_type, failure_order.to_gcc());
130 let weak = self.context.new_rvalue_from_int(self.bool_type, weak as i32);
131
132 let void_ptr_type = self.context.new_type::<*mut ()>();
133 let volatile_void_ptr_type = void_ptr_type.make_volatile();
134 let dst = self.context.new_cast(None, dst, volatile_void_ptr_type);
135 let expected = self.context.new_cast(None, cmp.get_address(None), void_ptr_type);
136
137 // NOTE: not sure why, but we have the wrong type here.
138 let int_type = compare_exchange.get_param(2).to_rvalue().get_type();
139 let src = self.context.new_cast(None, src, int_type);
140 self.context.new_call(None, compare_exchange, &[dst, expected, src, weak, order, failure_order])
141 }
142
143 pub fn assign(&self, lvalue: LValue<'gcc>, value: RValue<'gcc>) {
144 self.llbb().add_assignment(None, lvalue, value);
145 }
146
147 fn check_call<'b>(&mut self, _typ: &str, func: Function<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> {
148 let mut all_args_match = true;
149 let mut param_types = vec![];
150 let param_count = func.get_param_count();
151 for (index, arg) in args.iter().enumerate().take(param_count) {
152 let param = func.get_param(index as i32);
153 let param = param.to_rvalue().get_type();
154 if param != arg.get_type() {
155 all_args_match = false;
156 }
157 param_types.push(param);
158 }
159
160 if all_args_match {
161 return Cow::Borrowed(args);
162 }
163
164 let casted_args: Vec<_> = param_types
165 .into_iter()
166 .zip(args.iter())
167 .enumerate()
168 .map(|(_i, (expected_ty, &actual_val))| {
169 let actual_ty = actual_val.get_type();
170 if expected_ty != actual_ty {
171 self.bitcast(actual_val, expected_ty)
172 }
173 else {
174 actual_val
175 }
176 })
177 .collect();
178
179 Cow::Owned(casted_args)
180 }
181
182 fn check_ptr_call<'b>(&mut self, _typ: &str, func_ptr: RValue<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> {
183 let mut all_args_match = true;
184 let mut param_types = vec![];
185 let gcc_func = func_ptr.get_type().dyncast_function_ptr_type().expect("function ptr");
186 for (index, arg) in args.iter().enumerate().take(gcc_func.get_param_count()) {
187 let param = gcc_func.get_param_type(index);
188 if param != arg.get_type() {
189 all_args_match = false;
190 }
191 param_types.push(param);
192 }
193
194 let mut on_stack_param_indices = FxHashSet::default();
195 if let Some(indices) = self.on_stack_params.borrow().get(&gcc_func) {
196 on_stack_param_indices = indices.clone();
197 }
198
199 if all_args_match {
200 return Cow::Borrowed(args);
201 }
202
203 let func_name = format!("{:?}", func_ptr);
204
205 let casted_args: Vec<_> = param_types
206 .into_iter()
207 .zip(args.iter())
208 .enumerate()
209 .map(|(index, (expected_ty, &actual_val))| {
210 if llvm::ignore_arg_cast(&func_name, index, args.len()) {
211 return actual_val;
212 }
213
214 let actual_ty = actual_val.get_type();
215 if expected_ty != actual_ty {
216 if !actual_ty.is_vector() && !expected_ty.is_vector() && actual_ty.is_integral() && expected_ty.is_integral() && actual_ty.get_size() != expected_ty.get_size() {
217 self.context.new_cast(None, actual_val, expected_ty)
218 }
219 else if on_stack_param_indices.contains(&index) {
220 actual_val.dereference(None).to_rvalue()
221 }
222 else {
223 assert!(!((actual_ty.is_vector() && !expected_ty.is_vector()) || (!actual_ty.is_vector() && expected_ty.is_vector())), "{:?} ({}) -> {:?} ({}), index: {:?}[{}]", actual_ty, actual_ty.is_vector(), expected_ty, expected_ty.is_vector(), func_ptr, index);
224 // TODO(antoyo): perhaps use __builtin_convertvector for vector casting.
225 self.bitcast(actual_val, expected_ty)
226 }
227 }
228 else {
229 actual_val
230 }
231 })
232 .collect();
233
234 Cow::Owned(casted_args)
235 }
236
237 fn check_store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> {
238 let dest_ptr_ty = self.cx.val_ty(ptr).make_pointer(); // TODO(antoyo): make sure make_pointer() is okay here.
239 let stored_ty = self.cx.val_ty(val);
240 let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);
241
242 if dest_ptr_ty == stored_ptr_ty {
243 ptr
244 }
245 else {
246 self.bitcast(ptr, stored_ptr_ty)
247 }
248 }
249
250 pub fn current_func(&self) -> Function<'gcc> {
251 self.block.get_function()
252 }
253
254 fn function_call(&mut self, func: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
255 // TODO(antoyo): remove when the API supports a different type for functions.
256 let func: Function<'gcc> = self.cx.rvalue_as_function(func);
257 let args = self.check_call("call", func, args);
258
259 // gccjit requires the result of a function call to be used, even when the caller does not use it.
260 // That's why we assign the result to a local or call add_eval().
261 let return_type = func.get_return_type();
262 let void_type = self.context.new_type::<()>();
263 let current_func = self.block.get_function();
264 if return_type != void_type {
265 unsafe { RETURN_VALUE_COUNT += 1 };
266 let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
267 self.block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
268 result.to_rvalue()
269 }
270 else {
271 self.block.add_eval(None, self.cx.context.new_call(None, func, &args));
272 // Return a dummy value when there is no return value.
273 self.context.new_rvalue_from_long(self.isize_type, 0)
274 }
275 }
276
277 fn function_ptr_call(&mut self, func_ptr: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
278 let args = self.check_ptr_call("call", func_ptr, args);
279
280 // gccjit requires the result of a function call to be used, even when the caller does not use it.
281 // That's why we assign the result to a local or call add_eval().
282 let gcc_func = func_ptr.get_type().dyncast_function_ptr_type().expect("function ptr");
283 let return_type = gcc_func.get_return_type();
284 let void_type = self.context.new_type::<()>();
285 let current_func = self.block.get_function();
286
287 if return_type != void_type {
288 unsafe { RETURN_VALUE_COUNT += 1 };
289 let result = current_func.new_local(None, return_type, &format!("ptrReturnValue{}", unsafe { RETURN_VALUE_COUNT }));
290 let func_name = format!("{:?}", func_ptr);
291 let args = llvm::adjust_intrinsic_arguments(&self, gcc_func, args, &func_name);
292 self.block.add_assignment(None, result, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
293 result.to_rvalue()
294 }
295 else {
296 #[cfg(not(feature="master"))]
297 if gcc_func.get_param_count() == 0 {
298 // FIXME(antoyo): As a temporary workaround for unsupported LLVM intrinsics.
299 self.block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &[]));
300 }
301 else {
302 self.block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
303 }
304 #[cfg(feature="master")]
305 self.block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
306 // Return a dummy value when there is no return value.
307 let result = current_func.new_local(None, self.isize_type, "dummyValueThatShouldNeverBeUsed");
308 self.block.add_assignment(None, result, self.context.new_rvalue_from_long(self.isize_type, 0));
309 result.to_rvalue()
310 }
311 }
312
313 pub fn overflow_call(&self, func: Function<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
314 // gccjit requires the result of a function call to be used, even when the caller does not use it.
315 // That's why we assign the result to a local.
316 let return_type = self.context.new_type::<bool>();
317 let current_func = self.block.get_function();
318 // TODO(antoyo): return the new_call() directly? Since the overflow function has no side-effects.
319 unsafe { RETURN_VALUE_COUNT += 1 };
320 let result = current_func.new_local(None, return_type, &format!("overflowReturnValue{}", unsafe { RETURN_VALUE_COUNT }));
321 self.block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
322 result.to_rvalue()
323 }
324}
325
326impl<'gcc, 'tcx> HasCodegen<'tcx> for Builder<'_, 'gcc, 'tcx> {
327 type CodegenCx = CodegenCx<'gcc, 'tcx>;
328}
329
330impl<'tcx> HasTyCtxt<'tcx> for Builder<'_, '_, 'tcx> {
331 fn tcx(&self) -> TyCtxt<'tcx> {
332 self.cx.tcx()
333 }
334}
335
336impl HasDataLayout for Builder<'_, '_, '_> {
337 fn data_layout(&self) -> &TargetDataLayout {
338 self.cx.data_layout()
339 }
340}
341
342impl<'tcx> LayoutOfHelpers<'tcx> for Builder<'_, '_, 'tcx> {
343 type LayoutOfResult = TyAndLayout<'tcx>;
344
345 #[inline]
346 fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
347 self.cx.handle_layout_err(err, span, ty)
348 }
349}
350
351impl<'tcx> FnAbiOfHelpers<'tcx> for Builder<'_, '_, 'tcx> {
352 type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>;
353
354 #[inline]
355 fn handle_fn_abi_err(
356 &self,
357 err: FnAbiError<'tcx>,
358 span: Span,
359 fn_abi_request: FnAbiRequest<'tcx>,
360 ) -> ! {
361 self.cx.handle_fn_abi_err(err, span, fn_abi_request)
362 }
363}
364
365impl<'gcc, 'tcx> Deref for Builder<'_, 'gcc, 'tcx> {
366 type Target = CodegenCx<'gcc, 'tcx>;
367
368 fn deref(&self) -> &Self::Target {
369 self.cx
370 }
371}
372
373impl<'gcc, 'tcx> BackendTypes for Builder<'_, 'gcc, 'tcx> {
374 type Value = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Value;
375 type Function = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Function;
376 type BasicBlock = <CodegenCx<'gcc, 'tcx> as BackendTypes>::BasicBlock;
377 type Type = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Type;
378 type Funclet = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Funclet;
379
380 type DIScope = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DIScope;
381 type DILocation = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DILocation;
382 type DIVariable = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DIVariable;
383}
384
385impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
386 fn build(cx: &'a CodegenCx<'gcc, 'tcx>, block: Block<'gcc>) -> Self {
387 Builder::with_cx(cx, block)
388 }
389
390 fn llbb(&self) -> Block<'gcc> {
391 self.block
392 }
393
394 fn append_block(cx: &'a CodegenCx<'gcc, 'tcx>, func: RValue<'gcc>, name: &str) -> Block<'gcc> {
395 let func = cx.rvalue_as_function(func);
396 func.new_block(name)
397 }
398
399 fn append_sibling_block(&mut self, name: &str) -> Block<'gcc> {
400 let func = self.current_func();
401 func.new_block(name)
402 }
403
404 fn switch_to_block(&mut self, block: Self::BasicBlock) {
405 self.block = block;
406 }
407
408 fn ret_void(&mut self) {
409 self.llbb().end_with_void_return(None)
410 }
411
412 fn ret(&mut self, value: RValue<'gcc>) {
413 let value =
414 if self.structs_as_pointer.borrow().contains(&value) {
415 // NOTE: hack to workaround a limitation of the rustc API: see comment on
416 // CodegenCx.structs_as_pointer
417 value.dereference(None).to_rvalue()
418 }
419 else {
420 value
421 };
422 self.llbb().end_with_return(None, value);
423 }
424
425 fn br(&mut self, dest: Block<'gcc>) {
426 self.llbb().end_with_jump(None, dest)
427 }
428
429 fn cond_br(&mut self, cond: RValue<'gcc>, then_block: Block<'gcc>, else_block: Block<'gcc>) {
430 self.llbb().end_with_conditional(None, cond, then_block, else_block)
431 }
432
433 fn switch(&mut self, value: RValue<'gcc>, default_block: Block<'gcc>, cases: impl ExactSizeIterator<Item = (u128, Block<'gcc>)>) {
434 let mut gcc_cases = vec![];
435 let typ = self.val_ty(value);
436 for (on_val, dest) in cases {
437 let on_val = self.const_uint_big(typ, on_val);
438 gcc_cases.push(self.context.new_case(on_val, on_val, dest));
439 }
440 self.block.end_with_switch(None, value, default_block, &gcc_cases);
441 }
442
443 fn invoke(&mut self, typ: Type<'gcc>, func: RValue<'gcc>, args: &[RValue<'gcc>], then: Block<'gcc>, catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> {
444 // TODO(bjorn3): Properly implement unwinding.
445 let call_site = self.call(typ, func, args, None);
446 let condition = self.context.new_rvalue_from_int(self.bool_type, 1);
447 self.llbb().end_with_conditional(None, condition, then, catch);
448 call_site
449 }
450
451 fn unreachable(&mut self) {
452 let func = self.context.get_builtin_function("__builtin_unreachable");
453 self.block.add_eval(None, self.context.new_call(None, func, &[]));
454 let return_type = self.block.get_function().get_return_type();
455 let void_type = self.context.new_type::<()>();
456 if return_type == void_type {
457 self.block.end_with_void_return(None)
458 }
459 else {
460 let return_value = self.current_func()
461 .new_local(None, return_type, "unreachableReturn");
462 self.block.end_with_return(None, return_value)
463 }
464 }
465
466 fn add(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
467 self.gcc_add(a, b)
468 }
469
470 fn fadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
471 a + b
472 }
473
474 fn sub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
475 self.gcc_sub(a, b)
476 }
477
478 fn fsub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
479 a - b
480 }
481
482 fn mul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
483 self.gcc_mul(a, b)
484 }
485
486 fn fmul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
487 a * b
488 }
489
490 fn udiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
491 self.gcc_udiv(a, b)
492 }
493
494 fn exactudiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
495 // TODO(antoyo): poison if not exact.
496 let a_type = a.get_type().to_unsigned(self);
497 let a = self.gcc_int_cast(a, a_type);
498 let b_type = b.get_type().to_unsigned(self);
499 let b = self.gcc_int_cast(b, b_type);
500 a / b
501 }
502
503 fn sdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
504 self.gcc_sdiv(a, b)
505 }
506
507 fn exactsdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
508 // TODO(antoyo): poison if not exact.
509 // FIXME(antoyo): rustc_codegen_ssa::mir::intrinsic uses different types for a and b but they
510 // should be the same.
511 let typ = a.get_type().to_signed(self);
512 let b = self.context.new_cast(None, b, typ);
513 a / b
514 }
515
516 fn fdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
517 a / b
518 }
519
520 fn urem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
521 self.gcc_urem(a, b)
522 }
523
524 fn srem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
525 self.gcc_srem(a, b)
526 }
527
528 fn frem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
529 if a.get_type().is_compatible_with(self.cx.float_type) {
530 let fmodf = self.context.get_builtin_function("fmodf");
531 // FIXME(antoyo): this seems to produce the wrong result.
532 return self.context.new_call(None, fmodf, &[a, b]);
533 }
534 assert_eq!(a.get_type().unqualified(), self.cx.double_type);
535
536 let fmod = self.context.get_builtin_function("fmod");
537 return self.context.new_call(None, fmod, &[a, b]);
538 }
539
540 fn shl(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
541 self.gcc_shl(a, b)
542 }
543
544 fn lshr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
545 self.gcc_lshr(a, b)
546 }
547
548 fn ashr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
549 // TODO(antoyo): check whether the behavior of >> is an arithmetic shift here.
550 // It seems to be an arithmetic shift when the value is signed.
551 self.gcc_lshr(a, b)
552 }
553
554 fn and(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
555 self.gcc_and(a, b)
556 }
557
558 fn or(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
559 self.cx.gcc_or(a, b)
560 }
561
562 fn xor(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
563 self.gcc_xor(a, b)
564 }
565
566 fn neg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
567 self.gcc_neg(a)
568 }
569
570 fn fneg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
571 self.cx.context.new_unary_op(None, UnaryOp::Minus, a.get_type(), a)
572 }
573
574 fn not(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
575 self.gcc_not(a)
576 }
577
578 fn unchecked_sadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
579 a + b
580 }
581
582 fn unchecked_uadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
583 self.gcc_add(a, b)
584 }
585
586 fn unchecked_ssub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
587 a - b
588 }
589
590 fn unchecked_usub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
591 // TODO(antoyo): should generate poison value?
592 self.gcc_sub(a, b)
593 }
594
595 fn unchecked_smul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
596 a * b
597 }
598
599 fn unchecked_umul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
600 a * b
601 }
602
603 fn fadd_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
604 unimplemented!();
605 }
606
607 fn fsub_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
608 unimplemented!();
609 }
610
611 fn fmul_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
612 unimplemented!();
613 }
614
615 fn fdiv_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
616 unimplemented!();
617 }
618
619 fn frem_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
620 unimplemented!();
621 }
622
623 fn checked_binop(&mut self, oop: OverflowOp, typ: Ty<'_>, lhs: Self::Value, rhs: Self::Value) -> (Self::Value, Self::Value) {
624 self.gcc_checked_binop(oop, typ, lhs, rhs)
625 }
626
627 fn alloca(&mut self, ty: Type<'gcc>, align: Align) -> RValue<'gcc> {
628 // FIXME(antoyo): this checks that we don't call get_aligned() a second time on a type.
629 // Ideally, we shouldn't need to do this check.
630 let aligned_type =
631 if ty == self.cx.u128_type || ty == self.cx.i128_type {
632 ty
633 }
634 else {
635 ty.get_aligned(align.bytes())
636 };
637 // TODO(antoyo): It might be better to return a LValue, but fixing the rustc API is non-trivial.
638 self.stack_var_count.set(self.stack_var_count.get() + 1);
639 self.current_func().new_local(None, aligned_type, &format!("stack_var_{}", self.stack_var_count.get())).get_address(None)
640 }
641
642 fn dynamic_alloca(&mut self, _ty: Type<'gcc>, _align: Align) -> RValue<'gcc> {
643 unimplemented!();
644 }
645
646 fn array_alloca(&mut self, _ty: Type<'gcc>, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
647 unimplemented!();
648 }
649
650 fn load(&mut self, pointee_ty: Type<'gcc>, ptr: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
651 let block = self.llbb();
652 let function = block.get_function();
653 // NOTE: instead of returning the dereference here, we have to assign it to a variable in
654 // the current basic block. Otherwise, it could be used in another basic block, causing a
655 // dereference after a drop, for instance.
656 // TODO(antoyo): handle align of the load instruction.
657 let ptr = self.context.new_cast(None, ptr, pointee_ty.make_pointer());
658 let deref = ptr.dereference(None).to_rvalue();
659 unsafe { RETURN_VALUE_COUNT += 1 };
660 let loaded_value = function.new_local(None, pointee_ty, &format!("loadedValue{}", unsafe { RETURN_VALUE_COUNT }));
661 block.add_assignment(None, loaded_value, deref);
662 loaded_value.to_rvalue()
663 }
664
665 fn volatile_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> {
666 // TODO(antoyo): use ty.
667 let ptr = self.context.new_cast(None, ptr, ptr.get_type().make_volatile());
668 ptr.dereference(None).to_rvalue()
669 }
670
671 fn atomic_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) -> RValue<'gcc> {
672 // TODO(antoyo): use ty.
673 // TODO(antoyo): handle alignment.
674 let atomic_load = self.context.get_builtin_function(&format!("__atomic_load_{}", size.bytes()));
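// The builtin fetched above is assumed to have the GCC prototype
//     type __atomic_load_N(const type *ptr, int memorder);
// hence the cast of `ptr` to a volatile const void pointer below.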
675 let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
676
677 let volatile_const_void_ptr_type = self.context.new_type::<()>()
678 .make_const()
679 .make_volatile()
680 .make_pointer();
681 let ptr = self.context.new_cast(None, ptr, volatile_const_void_ptr_type);
682 self.context.new_call(None, atomic_load, &[ptr, ordering])
683 }
684
685 fn load_operand(&mut self, place: PlaceRef<'tcx, RValue<'gcc>>) -> OperandRef<'tcx, RValue<'gcc>> {
686 assert_eq!(place.llextra.is_some(), place.layout.is_unsized());
687
688 if place.layout.is_zst() {
689 return OperandRef::new_zst(self, place.layout);
690 }
691
692 fn scalar_load_metadata<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, load: RValue<'gcc>, scalar: &abi::Scalar) {
693 let vr = scalar.valid_range(bx);
694 match scalar.primitive() {
695 abi::Int(..) => {
696 if !scalar.is_always_valid(bx) {
697 bx.range_metadata(load, vr);
698 }
699 }
700 abi::Pointer if vr.start < vr.end && !vr.contains(0) => {
701 bx.nonnull_metadata(load);
702 }
703 _ => {}
704 }
705 }
706
707 let val =
708 if let Some(llextra) = place.llextra {
709 OperandValue::Ref(place.llval, Some(llextra), place.align)
710 }
711 else if place.layout.is_gcc_immediate() {
712 let load = self.load(
713 place.layout.gcc_type(self, false),
714 place.llval,
715 place.align,
716 );
717 if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
718 scalar_load_metadata(self, load, scalar);
719 }
720 OperandValue::Immediate(self.to_immediate(load, place.layout))
721 }
722 else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
723 let b_offset = a.size(self).align_to(b.align(self).abi);
724 let pair_type = place.layout.gcc_type(self, false);
725
726 let mut load = |i, scalar: &abi::Scalar, align| {
727 let llptr = self.struct_gep(pair_type, place.llval, i as u64);
728 let llty = place.layout.scalar_pair_element_gcc_type(self, i, false);
729 let load = self.load(llty, llptr, align);
730 scalar_load_metadata(self, load, scalar);
731 if scalar.is_bool() { self.trunc(load, self.type_i1()) } else { load }
732 };
733
734 OperandValue::Pair(
735 load(0, a, place.align),
736 load(1, b, place.align.restrict_for_offset(b_offset)),
737 )
738 }
739 else {
740 OperandValue::Ref(place.llval, None, place.align)
741 };
742
743 OperandRef { val, layout: place.layout }
744 }
745
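// The blocks built in write_operand_repeatedly below form a plain counted loop; schematically
// (a sketch, not the literal generated code):
//
//     let mut current = start;
//     while current != end {            // repeat_loop_header
//         *current = cg_elem;           // repeat_loop_body
//         current = current.offset(1);
//     }
//     // repeat_loop_next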
746 fn write_operand_repeatedly(mut self, cg_elem: OperandRef<'tcx, RValue<'gcc>>, count: u64, dest: PlaceRef<'tcx, RValue<'gcc>>) -> Self {
747 let zero = self.const_usize(0);
748 let count = self.const_usize(count);
749 let start = dest.project_index(&mut self, zero).llval;
750 let end = dest.project_index(&mut self, count).llval;
751
752 let header_bb = self.append_sibling_block("repeat_loop_header");
753 let body_bb = self.append_sibling_block("repeat_loop_body");
754 let next_bb = self.append_sibling_block("repeat_loop_next");
755
756 let ptr_type = start.get_type();
757 let current = self.llbb().get_function().new_local(None, ptr_type, "loop_var");
758 let current_val = current.to_rvalue();
759 self.assign(current, start);
760
761 self.br(header_bb);
762
763 self.switch_to_block(header_bb);
764 let keep_going = self.icmp(IntPredicate::IntNE, current_val, end);
765 self.cond_br(keep_going, body_bb, next_bb);
766
767 self.switch_to_block(body_bb);
768 let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
769 cg_elem.val.store(&mut self, PlaceRef::new_sized_aligned(current_val, cg_elem.layout, align));
770
771 let next = self.inbounds_gep(self.backend_type(cg_elem.layout), current.to_rvalue(), &[self.const_usize(1)]);
772 self.llbb().add_assignment(None, current, next);
773 self.br(header_bb);
774
775 self.switch_to_block(next_bb);
776 self
777 }
778
779 fn range_metadata(&mut self, _load: RValue<'gcc>, _range: WrappingRange) {
780 // TODO(antoyo)
781 }
782
783 fn nonnull_metadata(&mut self, _load: RValue<'gcc>) {
784 // TODO(antoyo)
785 }
786
787 fn store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, align: Align) -> RValue<'gcc> {
788 self.store_with_flags(val, ptr, align, MemFlags::empty())
789 }
790
791 fn store_with_flags(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, align: Align, _flags: MemFlags) -> RValue<'gcc> {
792 let ptr = self.check_store(val, ptr);
793 let destination = ptr.dereference(None);
794 // NOTE: libgccjit does not support specifying the alignment on the assignment, so we cast
795 // the destination pointer to an aligned type so that the store gets the proper alignment.
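// As an illustration (an assumption, not generated verbatim): a store through an `int *` with a
// required alignment of 2 is instead emitted through an `int __attribute__((aligned(2))) *`.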
796 let destination_type = destination.to_rvalue().get_type().unqualified();
797 let aligned_type = destination_type.get_aligned(align.bytes()).make_pointer();
798 let aligned_destination = self.cx.context.new_bitcast(None, ptr, aligned_type);
799 let aligned_destination = aligned_destination.dereference(None);
800 self.llbb().add_assignment(None, aligned_destination, val);
801 // TODO(antoyo): handle align and flags.
802 // NOTE: dummy value here since it's never used. FIXME(antoyo): API should not return a value here?
803 self.cx.context.new_rvalue_zero(self.type_i32())
804 }
805
806 fn atomic_store(&mut self, value: RValue<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) {
807 // TODO(antoyo): handle alignment.
808 let atomic_store = self.context.get_builtin_function(&format!("__atomic_store_{}", size.bytes()));
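// Likewise, the store builtin is assumed to follow the GCC prototype
//     void __atomic_store_N(type *ptr, type val, int memorder);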
809 let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
810 let volatile_const_void_ptr_type = self.context.new_type::<()>()
811 .make_volatile()
812 .make_pointer();
813 let ptr = self.context.new_cast(None, ptr, volatile_const_void_ptr_type);
814
815 // FIXME(antoyo): fix libgccjit to allow comparing an integer type with an aligned integer type because
816 // the following cast is required to avoid this error:
817 // gcc_jit_context_new_call: mismatching types for argument 2 of function "__atomic_store_4": assignment to param arg1 (type: int) from loadedValue3577 (type: unsigned int __attribute__((aligned(4))))
818 let int_type = atomic_store.get_param(1).to_rvalue().get_type();
819 let value = self.context.new_cast(None, value, int_type);
820 self.llbb()
821 .add_eval(None, self.context.new_call(None, atomic_store, &[ptr, value, ordering]));
822 }
823
824 fn gep(&mut self, _typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
825 let mut result = ptr;
826 for index in indices {
827 result = self.context.new_array_access(None, result, *index).get_address(None).to_rvalue();
828 }
829 result
830 }
831
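// In the method below, a single-index GEP is lowered to `&ptr[i]` and a two-index GEP to
// `&(*ptr)[j]` (the first index is assumed to be zero); deeper index lists are not handled.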
832 fn inbounds_gep(&mut self, _typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
833 // FIXME(antoyo): it would be safer to do the same thing (a loop) as gep.
834 // TODO(antoyo): specify inbounds somehow.
835 match indices.len() {
836 1 => {
837 self.context.new_array_access(None, ptr, indices[0]).get_address(None)
838 },
839 2 => {
840 let array = ptr.dereference(None); // TODO(antoyo): assert that first index is 0?
841 self.context.new_array_access(None, array, indices[1]).get_address(None)
842 },
843 _ => unimplemented!(),
844 }
845 }
846
847 fn struct_gep(&mut self, value_type: Type<'gcc>, ptr: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
848 // FIXME(antoyo): it would be better if the API only called this on struct, not on arrays.
849 assert_eq!(idx as usize as u64, idx);
850 let value = ptr.dereference(None).to_rvalue();
851
852 if value_type.dyncast_array().is_some() {
853 let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
854 let element = self.context.new_array_access(None, value, index);
855 element.get_address(None)
856 }
857 else if let Some(vector_type) = value_type.dyncast_vector() {
858 let array_type = vector_type.get_element_type().make_pointer();
859 let array = self.bitcast(ptr, array_type);
860 let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
861 let element = self.context.new_array_access(None, array, index);
862 element.get_address(None)
863 }
864 else if let Some(struct_type) = value_type.is_struct() {
865 ptr.dereference_field(None, struct_type.get_field(idx as i32)).get_address(None)
866 }
867 else {
868 panic!("Unexpected type {:?}", value_type);
869 }
870 }
871
872 /* Casts */
873 fn trunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
874 // TODO(antoyo): check that it indeed truncates the value.
875 self.gcc_int_cast(value, dest_ty)
876 }
877
878 fn sext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
879 // TODO(antoyo): check that it indeed sign extends the value.
880 if dest_ty.dyncast_vector().is_some() {
881 // TODO(antoyo): nothing to do as it is only for LLVM?
882 return value;
883 }
884 self.context.new_cast(None, value, dest_ty)
885 }
886
887 fn fptoui(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
888 self.gcc_float_to_uint_cast(value, dest_ty)
889 }
890
891 fn fptosi(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
892 self.gcc_float_to_int_cast(value, dest_ty)
893 }
894
895 fn uitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
896 self.gcc_uint_to_float_cast(value, dest_ty)
897 }
898
899 fn sitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
900 self.gcc_int_to_float_cast(value, dest_ty)
901 }
902
903 fn fptrunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
904 // TODO(antoyo): make sure it truncates.
905 self.context.new_cast(None, value, dest_ty)
906 }
907
908 fn fpext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
909 self.context.new_cast(None, value, dest_ty)
910 }
911
912 fn ptrtoint(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
913 let usize_value = self.cx.const_bitcast(value, self.cx.type_isize());
914 self.intcast(usize_value, dest_ty, false)
915 }
916
917 fn inttoptr(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
918 let usize_value = self.intcast(value, self.cx.type_isize(), false);
919 self.cx.const_bitcast(usize_value, dest_ty)
920 }
921
922 fn bitcast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
923 self.cx.const_bitcast(value, dest_ty)
924 }
925
926 fn intcast(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>, _is_signed: bool) -> RValue<'gcc> {
927 // NOTE: is_signed is for value, not dest_typ.
928 self.gcc_int_cast(value, dest_typ)
929 }
930
931 fn pointercast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
932 let val_type = value.get_type();
933 match (type_is_pointer(val_type), type_is_pointer(dest_ty)) {
934 (false, true) => {
935 // NOTE: Projecting a field of a pointer type will attempt a cast from a signed char to
936 // a pointer, which is not supported by gccjit.
937 return self.cx.context.new_cast(None, self.inttoptr(value, val_type.make_pointer()), dest_ty);
938 },
939 (false, false) => {
940 // When they are not pointers, we want a transmute (or reinterpret_cast).
941 self.bitcast(value, dest_ty)
942 },
943 (true, true) => self.cx.context.new_cast(None, value, dest_ty),
944 (true, false) => unimplemented!(),
945 }
946 }
947
948 /* Comparisons */
949 fn icmp(&mut self, op: IntPredicate, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
950 self.gcc_icmp(op, lhs, rhs)
951 }
952
953 fn fcmp(&mut self, op: RealPredicate, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
954 self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs)
955 }
956
957 /* Miscellaneous instructions */
958 fn memcpy(&mut self, dst: RValue<'gcc>, _dst_align: Align, src: RValue<'gcc>, _src_align: Align, size: RValue<'gcc>, flags: MemFlags) {
959 assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memcpy not supported");
960 let size = self.intcast(size, self.type_size_t(), false);
961 let _is_volatile = flags.contains(MemFlags::VOLATILE);
962 let dst = self.pointercast(dst, self.type_i8p());
963 let src = self.pointercast(src, self.type_ptr_to(self.type_void()));
964 let memcpy = self.context.get_builtin_function("memcpy");
c295e0f8 965 // TODO(antoyo): handle aligns and is_volatile.
966 self.block.add_eval(None, self.context.new_call(None, memcpy, &[dst, src, size]));
967 }
968
969 fn memmove(&mut self, dst: RValue<'gcc>, dst_align: Align, src: RValue<'gcc>, src_align: Align, size: RValue<'gcc>, flags: MemFlags) {
970 if flags.contains(MemFlags::NONTEMPORAL) {
971 // HACK(nox): This is inefficient but there is no nontemporal memmove.
972 let val = self.load(src.get_type().get_pointee().expect("get_pointee"), src, src_align);
973 let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
974 self.store_with_flags(val, ptr, dst_align, flags);
975 return;
976 }
977 let size = self.intcast(size, self.type_size_t(), false);
978 let _is_volatile = flags.contains(MemFlags::VOLATILE);
979 let dst = self.pointercast(dst, self.type_i8p());
980 let src = self.pointercast(src, self.type_ptr_to(self.type_void()));
981
982 let memmove = self.context.get_builtin_function("memmove");
983 // TODO(antoyo): handle is_volatile.
984 self.block.add_eval(None, self.context.new_call(None, memmove, &[dst, src, size]));
985 }
986
987 fn memset(&mut self, ptr: RValue<'gcc>, fill_byte: RValue<'gcc>, size: RValue<'gcc>, _align: Align, flags: MemFlags) {
988 let _is_volatile = flags.contains(MemFlags::VOLATILE);
989 let ptr = self.pointercast(ptr, self.type_i8p());
990 let memset = self.context.get_builtin_function("memset");
991 // TODO(antoyo): handle align and is_volatile.
992 let fill_byte = self.context.new_cast(None, fill_byte, self.i32_type);
993 let size = self.intcast(size, self.type_size_t(), false);
994 self.block.add_eval(None, self.context.new_call(None, memset, &[ptr, fill_byte, size]));
995 }
996
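// A select is lowered to a fresh local plus an if/else diamond; roughly (a sketch, not the
// literal generated code):
//
//     let select_var;
//     if cond { select_var = then_val } else { select_var = else_val }
//     // `after` block: use select_var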
997 fn select(&mut self, cond: RValue<'gcc>, then_val: RValue<'gcc>, mut else_val: RValue<'gcc>) -> RValue<'gcc> {
998 let func = self.current_func();
999 let variable = func.new_local(None, then_val.get_type(), "selectVar");
1000 let then_block = func.new_block("then");
1001 let else_block = func.new_block("else");
1002 let after_block = func.new_block("after");
1003 self.llbb().end_with_conditional(None, cond, then_block, else_block);
1004
1005 then_block.add_assignment(None, variable, then_val);
1006 then_block.end_with_jump(None, after_block);
1007
1008 if !then_val.get_type().is_compatible_with(else_val.get_type()) {
1009 else_val = self.context.new_cast(None, else_val, then_val.get_type());
1010 }
1011 else_block.add_assignment(None, variable, else_val);
1012 else_block.end_with_jump(None, after_block);
1013
1014 // NOTE: since jumps were added in a place rustc does not expect, the current block in the
1015 // state needs to be updated.
1016 self.switch_to_block(after_block);
1017
1018 variable.to_rvalue()
1019 }
1020
1021 #[allow(dead_code)]
1022 fn va_arg(&mut self, _list: RValue<'gcc>, _ty: Type<'gcc>) -> RValue<'gcc> {
1023 unimplemented!();
1024 }
1025
1026 fn extract_element(&mut self, _vec: RValue<'gcc>, _idx: RValue<'gcc>) -> RValue<'gcc> {
1027 unimplemented!();
1028 }
1029
1030 fn vector_splat(&mut self, _num_elts: usize, _elt: RValue<'gcc>) -> RValue<'gcc> {
1031 unimplemented!();
1032 }
1033
1034 fn extract_value(&mut self, aggregate_value: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
1035 // FIXME(antoyo): it would be better if the API only called this on struct, not on arrays.
1036 assert_eq!(idx as usize as u64, idx);
1037 let value_type = aggregate_value.get_type();
1038
1039 if value_type.dyncast_array().is_some() {
1040 let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
1041 let element = self.context.new_array_access(None, aggregate_value, index);
1042 element.get_address(None)
1043 }
1044 else if value_type.dyncast_vector().is_some() {
1045 panic!();
1046 }
1047 else if let Some(pointer_type) = value_type.get_pointee() {
1048 if let Some(struct_type) = pointer_type.is_struct() {
1049 // NOTE: hack to workaround a limitation of the rustc API: see comment on
1050 // CodegenCx.structs_as_pointer
1051 aggregate_value.dereference_field(None, struct_type.get_field(idx as i32)).to_rvalue()
1052 }
1053 else {
1054 panic!("Unexpected type {:?}", value_type);
1055 }
1056 }
1057 else if let Some(struct_type) = value_type.is_struct() {
1058 aggregate_value.access_field(None, struct_type.get_field(idx as i32)).to_rvalue()
1059 }
1060 else {
1061 panic!("Unexpected type {:?}", value_type);
1062 }
1063 }
1064
1065 fn insert_value(&mut self, aggregate_value: RValue<'gcc>, value: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
1066 // FIXME(antoyo): it would be better if the API only called this on struct, not on arrays.
1067 assert_eq!(idx as usize as u64, idx);
1068 let value_type = aggregate_value.get_type();
1069
1070 let lvalue =
1071 if value_type.dyncast_array().is_some() {
1072 let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
1073 self.context.new_array_access(None, aggregate_value, index)
1074 }
1075 else if value_type.dyncast_vector().is_some() {
1076 panic!();
1077 }
1078 else if let Some(pointer_type) = value_type.get_pointee() {
1079 if let Some(struct_type) = pointer_type.is_struct() {
1080 // NOTE: hack to workaround a limitation of the rustc API: see comment on
1081 // CodegenCx.structs_as_pointer
1082 aggregate_value.dereference_field(None, struct_type.get_field(idx as i32))
1083 }
1084 else {
1085 panic!("Unexpected type {:?}", value_type);
1086 }
1087 }
1088 else {
1089 panic!("Unexpected type {:?}", value_type);
1090 };
1091
1092 let lvalue_type = lvalue.to_rvalue().get_type();
1093 let value =
1094 // NOTE: sometimes, rustc will create a value with the wrong type.
1095 if lvalue_type != value.get_type() {
1096 self.context.new_cast(None, value, lvalue_type)
1097 }
1098 else {
1099 value
1100 };
1101
1102 self.llbb().add_assignment(None, lvalue, value);
1103
1104 aggregate_value
1105 }
1106
1107 fn set_personality_fn(&mut self, _personality: RValue<'gcc>) {
1108 // TODO(antoyo)
1109 }
1110
1111 fn cleanup_landing_pad(&mut self, _ty: Type<'gcc>, _pers_fn: RValue<'gcc>) -> RValue<'gcc> {
1112 let field1 = self.context.new_field(None, self.u8_type.make_pointer(), "landing_pad_field_1");
1113 let field2 = self.context.new_field(None, self.i32_type, "landing_pad_field_1");
1114 let struct_type = self.context.new_struct_type(None, "landing_pad", &[field1, field2]);
1115 self.current_func().new_local(None, struct_type.as_type(), "landing_pad")
1116 .to_rvalue()
1117 // TODO(antoyo): Properly implement unwinding.
1118 // the above is just to make the compilation work as it seems
1119 // rustc_codegen_ssa now calls the unwinding builder methods even on panic=abort.
1120 }
1121
1122 fn resume(&mut self, _exn: RValue<'gcc>) {
1123 // TODO(bjorn3): Properly implement unwinding.
1124 self.unreachable();
1125 }
1126
1127 fn cleanup_pad(&mut self, _parent: Option<RValue<'gcc>>, _args: &[RValue<'gcc>]) -> Funclet {
1128 unimplemented!();
1129 }
1130
1131 fn cleanup_ret(&mut self, _funclet: &Funclet, _unwind: Option<Block<'gcc>>) {
1132 unimplemented!();
1133 }
1134
1135 fn catch_pad(&mut self, _parent: RValue<'gcc>, _args: &[RValue<'gcc>]) -> Funclet {
1136 unimplemented!();
1137 }
1138
1139 fn catch_switch(
1140 &mut self,
1141 _parent: Option<RValue<'gcc>>,
1142 _unwind: Option<Block<'gcc>>,
1143 _handlers: &[Block<'gcc>],
1144 ) -> RValue<'gcc> {
1145 unimplemented!();
1146 }
1147
1148 // Atomic Operations
1149 fn atomic_cmpxchg(&mut self, dst: RValue<'gcc>, cmp: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> {
1150 let expected = self.current_func().new_local(None, cmp.get_type(), "expected");
1151 self.llbb().add_assignment(None, expected, cmp);
1152 let success = self.compare_exchange(dst, expected, src, order, failure_order, weak);
1153
1154 let pair_type = self.cx.type_struct(&[src.get_type(), self.bool_type], false);
1155 let result = self.current_func().new_local(None, pair_type, "atomic_cmpxchg_result");
1156 let align = Align::from_bits(64).expect("align"); // TODO(antoyo): use good align.
1157
1158 let value_type = result.to_rvalue().get_type();
1159 if let Some(struct_type) = value_type.is_struct() {
1160 self.store(success, result.access_field(None, struct_type.get_field(1)).get_address(None), align);
1161 // NOTE: since success contains the call to the intrinsic, it must be stored before
1162 // expected so that we store expected after the call.
1163 self.store(expected.to_rvalue(), result.access_field(None, struct_type.get_field(0)).get_address(None), align);
1164 }
1165 // TODO(antoyo): handle when value is not a struct.
1166
1167 result.to_rvalue()
1168 }
1169
1170 fn atomic_rmw(&mut self, op: AtomicRmwBinOp, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
1171 let size = src.get_type().get_size();
1172 let name =
1173 match op {
1174 AtomicRmwBinOp::AtomicXchg => format!("__atomic_exchange_{}", size),
1175 AtomicRmwBinOp::AtomicAdd => format!("__atomic_fetch_add_{}", size),
1176 AtomicRmwBinOp::AtomicSub => format!("__atomic_fetch_sub_{}", size),
1177 AtomicRmwBinOp::AtomicAnd => format!("__atomic_fetch_and_{}", size),
1178 AtomicRmwBinOp::AtomicNand => format!("__atomic_fetch_nand_{}", size),
1179 AtomicRmwBinOp::AtomicOr => format!("__atomic_fetch_or_{}", size),
1180 AtomicRmwBinOp::AtomicXor => format!("__atomic_fetch_xor_{}", size),
1181 AtomicRmwBinOp::AtomicMax => return self.atomic_extremum(ExtremumOperation::Max, dst, src, order),
1182 AtomicRmwBinOp::AtomicMin => return self.atomic_extremum(ExtremumOperation::Min, dst, src, order),
1183 AtomicRmwBinOp::AtomicUMax => return self.atomic_extremum(ExtremumOperation::Max, dst, src, order),
1184 AtomicRmwBinOp::AtomicUMin => return self.atomic_extremum(ExtremumOperation::Min, dst, src, order),
1185 };
1186
1187
1188 let atomic_function = self.context.get_builtin_function(name);
1189 let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
1190
1191 let void_ptr_type = self.context.new_type::<*mut ()>();
1192 let volatile_void_ptr_type = void_ptr_type.make_volatile();
1193 let dst = self.context.new_cast(None, dst, volatile_void_ptr_type);
1194 // FIXME(antoyo): not sure why, but we have the wrong type here.
1195 let new_src_type = atomic_function.get_param(1).to_rvalue().get_type();
1196 let src = self.context.new_cast(None, src, new_src_type);
1197 let res = self.context.new_call(None, atomic_function, &[dst, src, order]);
1198 self.context.new_cast(None, res, src.get_type())
1199 }
1200
1201 fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope) {
1202 let name =
1203 match scope {
1204 SynchronizationScope::SingleThread => "__atomic_signal_fence",
1205 SynchronizationScope::CrossThread => "__atomic_thread_fence",
1206 };
1207 let thread_fence = self.context.get_builtin_function(name);
1208 let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
1209 self.llbb().add_eval(None, self.context.new_call(None, thread_fence, &[order]));
1210 }
1211
1212 fn set_invariant_load(&mut self, load: RValue<'gcc>) {
1213 // NOTE: Hack to consider vtable function pointer as non-global-variable function pointer.
1214 self.normal_function_addresses.borrow_mut().insert(load);
1215 // TODO(antoyo)
1216 }
1217
1218 fn lifetime_start(&mut self, _ptr: RValue<'gcc>, _size: Size) {
1219 // TODO(antoyo)
1220 }
1221
1222 fn lifetime_end(&mut self, _ptr: RValue<'gcc>, _size: Size) {
1223 // TODO(antoyo)
1224 }
1225
1226 fn call(&mut self, _typ: Type<'gcc>, func: RValue<'gcc>, args: &[RValue<'gcc>], funclet: Option<&Funclet>) -> RValue<'gcc> {
1227 // FIXME(antoyo): remove when having a proper API.
1228 let gcc_func = unsafe { std::mem::transmute(func) };
1229 if self.functions.borrow().values().find(|value| **value == gcc_func).is_some() {
1230 self.function_call(func, args, funclet)
1231 }
1232 else {
1233 // If it's not a function that was defined, it's a function pointer.
1234 self.function_ptr_call(func, args, funclet)
1235 }
1236 }
1237
1238 fn zext(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
1239 // FIXME(antoyo): this does not zero-extend.
1240 if value.get_type().is_bool() && dest_typ.is_i8(&self.cx) {
1241 // FIXME(antoyo): hack because base::from_immediate converts i1 to i8.
1242 // Fix the code in codegen_ssa::base::from_immediate.
1243 return value;
1244 }
1245 self.gcc_int_cast(value, dest_typ)
1246 }
1247
1248 fn cx(&self) -> &CodegenCx<'gcc, 'tcx> {
1249 self.cx
1250 }
1251
1252 fn do_not_inline(&mut self, _llret: RValue<'gcc>) {
1253 // FIXME(bjorn3): implement
1254 }
1255
1256 fn set_span(&mut self, _span: Span) {}
1257
1258 fn from_immediate(&mut self, val: Self::Value) -> Self::Value {
1259 if self.cx().val_ty(val) == self.cx().type_i1() {
1260 self.zext(val, self.cx().type_i8())
1261 }
1262 else {
1263 val
1264 }
1265 }
1266
1267 fn to_immediate_scalar(&mut self, val: Self::Value, scalar: abi::Scalar) -> Self::Value {
1268 if scalar.is_bool() {
1269 return self.trunc(val, self.cx().type_i1());
1270 }
1271 val
1272 }
1273
1274 fn fptoui_sat(&mut self, _val: RValue<'gcc>, _dest_ty: Type<'gcc>) -> Option<RValue<'gcc>> {
1275 None
1276 }
1277
1278 fn fptosi_sat(&mut self, _val: RValue<'gcc>, _dest_ty: Type<'gcc>) -> Option<RValue<'gcc>> {
1279 None
1280 }
1281
1282 fn instrprof_increment(&mut self, _fn_name: RValue<'gcc>, _hash: RValue<'gcc>, _num_counters: RValue<'gcc>, _index: RValue<'gcc>) {
1283 unimplemented!();
1284 }
1285}
1286
1287impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
1288 #[cfg(feature="master")]
1289 pub fn shuffle_vector(&mut self, v1: RValue<'gcc>, v2: RValue<'gcc>, mask: RValue<'gcc>) -> RValue<'gcc> {
1290 let struct_type = mask.get_type().is_struct().expect("mask of struct type");
1291
1292 // TODO(antoyo): use a recursive unqualified() here.
1293 let vector_type = v1.get_type().unqualified().dyncast_vector().expect("vector type");
1294 let element_type = vector_type.get_element_type();
1295 let vec_num_units = vector_type.get_num_units();
1296
1297 let mask_num_units = struct_type.get_field_count();
1298 let mut vector_elements = vec![];
1299 let mask_element_type =
1300 if element_type.is_integral() {
1301 element_type
1302 }
1303 else {
1304 #[cfg(feature="master")]
1305 {
1306 self.cx.type_ix(element_type.get_size() as u64 * 8)
1307 }
1308 #[cfg(not(feature="master"))]
1309 self.int_type
1310 };
1311 for i in 0..mask_num_units {
1312 let field = struct_type.get_field(i as i32);
1313 vector_elements.push(self.context.new_cast(None, mask.access_field(None, field).to_rvalue(), mask_element_type));
1314 }
1315
1316 // NOTE: the mask needs to be the same length as the input vectors, so add the missing
1317 // elements in the mask if needed.
1318 for _ in mask_num_units..vec_num_units {
1319 vector_elements.push(self.context.new_rvalue_zero(mask_element_type));
1320 }
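// Worked example (illustrative): with 4-element input vectors and a 2-element mask [1, 2], the
// mask is padded to [1, 2, 0, 0] so the permutation sees equal lengths; the extra lanes are
// stripped again at the end of this function.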
1321
1322 let array_type = self.context.new_array_type(None, element_type, vec_num_units as i32);
1323 let result_type = self.context.new_vector_type(element_type, mask_num_units as u64);
1324 let (v1, v2) =
1325 if vec_num_units < mask_num_units {
1326 // NOTE: the mask needs to be the same length as the input vectors, so join the 2
1327 // vectors and create a dummy second vector.
1328 // TODO(antoyo): switch to using new_vector_access.
1329 let array = self.context.new_bitcast(None, v1, array_type);
1330 let mut elements = vec![];
1331 for i in 0..vec_num_units {
1332 elements.push(self.context.new_array_access(None, array, self.context.new_rvalue_from_int(self.int_type, i as i32)).to_rvalue());
1333 }
1334 // TODO(antoyo): switch to using new_vector_access.
1335 let array = self.context.new_bitcast(None, v2, array_type);
1336 for i in 0..(mask_num_units - vec_num_units) {
1337 elements.push(self.context.new_array_access(None, array, self.context.new_rvalue_from_int(self.int_type, i as i32)).to_rvalue());
1338 }
1339 let v1 = self.context.new_rvalue_from_vector(None, result_type, &elements);
1340 let zero = self.context.new_rvalue_zero(element_type);
1341 let v2 = self.context.new_rvalue_from_vector(None, result_type, &vec![zero; mask_num_units]);
1342 (v1, v2)
1343 }
1344 else {
1345 (v1, v2)
1346 };
1347
1348 let new_mask_num_units = std::cmp::max(mask_num_units, vec_num_units);
1349 let mask_type = self.context.new_vector_type(mask_element_type, new_mask_num_units as u64);
1350 let mask = self.context.new_rvalue_from_vector(None, mask_type, &vector_elements);
1351 let result = self.context.new_rvalue_vector_perm(None, v1, v2, mask);
1352
1353 if vec_num_units != mask_num_units {
1354 // NOTE: if padding was added, only select the number of elements of the masks to
1355 // remove that padding in the result.
1356 let mut elements = vec![];
1357 // TODO(antoyo): switch to using new_vector_access.
1358 let array = self.context.new_bitcast(None, result, array_type);
1359 for i in 0..mask_num_units {
1360 elements.push(self.context.new_array_access(None, array, self.context.new_rvalue_from_int(self.int_type, i as i32)).to_rvalue());
1361 }
1362 self.context.new_rvalue_from_vector(None, result_type, &elements)
1363 }
1364 else {
1365 result
1366 }
1367 }
1368
1369 #[cfg(not(feature="master"))]
1370 pub fn shuffle_vector(&mut self, _v1: RValue<'gcc>, _v2: RValue<'gcc>, _mask: RValue<'gcc>) -> RValue<'gcc> {
1371 unimplemented!();
1372 }
1373
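// The reduction below works by repeatedly rotating the vector by a power-of-two stride and
// combining it with itself, e.g. for [a, b, c, d] (a sketch, not the literal generated code):
//     shift 1: op([a, b, c, d], [b, c, d, a])         -> [ab, bc, cd, da]
//     shift 2: op([ab, bc, cd, da], [cd, da, ab, bc]) -> [abcd, abcd, abcd, abcd]
// after which element 0 holds the reduced value.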
1374 #[cfg(feature="master")]
1375 pub fn vector_reduce<F>(&mut self, src: RValue<'gcc>, op: F) -> RValue<'gcc>
1376 where F: Fn(RValue<'gcc>, RValue<'gcc>, &'gcc Context<'gcc>) -> RValue<'gcc>
1377 {
1378 let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type");
1379 let element_count = vector_type.get_num_units();
1380 let mut vector_elements = vec![];
1381 for i in 0..element_count {
1382 vector_elements.push(i);
1383 }
1384 let mask_type = self.context.new_vector_type(self.int_type, element_count as u64);
1385 let mut shift = 1;
1386 let mut res = src;
1387 while shift < element_count {
1388 let vector_elements: Vec<_> =
1389 vector_elements.iter()
1390 .map(|i| self.context.new_rvalue_from_int(self.int_type, ((i + shift) % element_count) as i32))
1391 .collect();
1392 let mask = self.context.new_rvalue_from_vector(None, mask_type, &vector_elements);
1393 let shifted = self.context.new_rvalue_vector_perm(None, res, res, mask);
1394 shift *= 2;
1395 res = op(res, shifted, &self.context);
1396 }
1397 self.context.new_vector_access(None, res, self.context.new_rvalue_zero(self.int_type))
1398 .to_rvalue()
1399 }
1400
1401 #[cfg(not(feature="master"))]
1402 pub fn vector_reduce<F>(&mut self, _src: RValue<'gcc>, _op: F) -> RValue<'gcc>
1403 where F: Fn(RValue<'gcc>, RValue<'gcc>, &'gcc Context<'gcc>) -> RValue<'gcc>
1404 {
1405 unimplemented!();
c295e0f8 1406 }
1407
1408 pub fn vector_reduce_op(&mut self, src: RValue<'gcc>, op: BinaryOp) -> RValue<'gcc> {
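// For example, `op == BinaryOp::Plus` reduces `src` to the horizontal sum of its lanes.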
1409 self.vector_reduce(src, |a, b, context| context.new_binary_op(None, op, a.get_type(), a, b))
1410 }
1411
1412 pub fn vector_reduce_fadd_fast(&mut self, _acc: RValue<'gcc>, _src: RValue<'gcc>) -> RValue<'gcc> {
1413 unimplemented!();
1414 }
1415
1416 pub fn vector_reduce_fmul_fast(&mut self, _acc: RValue<'gcc>, _src: RValue<'gcc>) -> RValue<'gcc> {
1417 unimplemented!();
1418 }
1419
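// NOTE: the two reductions below rely on the "difference or zero" identity from Hacker's
// Delight: doz(a, b) = (a - b) & -(a >= b) = max(a - b, 0), so that min(a, b) = a - doz(a, b)
// and max(a, b) = b + doz(a, b) (see `difference_or_zero` below).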
1420 // Inspired by Hacker's Delight min implementation.
1421 pub fn vector_reduce_min(&mut self, src: RValue<'gcc>) -> RValue<'gcc> {
1422 self.vector_reduce(src, |a, b, context| {
1423 let differences_or_zeros = difference_or_zero(a, b, context);
1424 context.new_binary_op(None, BinaryOp::Minus, a.get_type(), a, differences_or_zeros)
1425 })
1426 }
1427
1428 // Inspired by Hacker's Delight max implementation.
1429 pub fn vector_reduce_max(&mut self, src: RValue<'gcc>) -> RValue<'gcc> {
1430 self.vector_reduce(src, |a, b, context| {
1431 let differences_or_zeros = difference_or_zero(a, b, context);
1432 context.new_binary_op(None, BinaryOp::Plus, b.get_type(), b, differences_or_zeros)
1433 })
1434 }
1435
1436 pub fn vector_select(&mut self, cond: RValue<'gcc>, then_val: RValue<'gcc>, else_val: RValue<'gcc>) -> RValue<'gcc> {
1437 // cond is a vector of integers, not of bools.
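// For example, cond lanes [3, 0, 7, 0] select then_val in lanes 0 and 2 and else_val in
// lanes 1 and 3; the selection is done with bit masks rather than branches.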
1438 let cond_type = cond.get_type();
1439 let vector_type = cond_type.unqualified().dyncast_vector().expect("vector type");
1440 let num_units = vector_type.get_num_units();
1441 let element_type = vector_type.get_element_type();
1442 let zeros = vec![self.context.new_rvalue_zero(element_type); num_units];
1443 let zeros = self.context.new_rvalue_from_vector(None, cond_type, &zeros);
1444
1445 let masks = self.context.new_comparison(None, ComparisonOp::NotEquals, cond, zeros);
1446 let then_vals = masks & then_val;
1447
// NOTE: `masks` has all-ones (-1) lanes where cond is non-zero and zero lanes elsewhere, so
// the inverted mask is the opposite comparison (adding 1 to each lane would give 0/1 lanes
// and only keep the lowest bit of else_val).
1450 let inverted_masks = self.context.new_comparison(None, ComparisonOp::Equals, cond, zeros);
1451 // NOTE: sometimes, the type of else_val can be different than the type of then_val in
1452 // libgccjit (vector of int vs vector of int32_t), but they should be the same for the AND
1453 // operation to work.
1454 let else_val = self.context.new_bitcast(None, else_val, then_val.get_type());
1455 let else_vals = inverted_masks & else_val;
1456
1457 then_vals | else_vals
1458 }
1459}
1460
1461fn difference_or_zero<'gcc>(a: RValue<'gcc>, b: RValue<'gcc>, context: &'gcc Context<'gcc>) -> RValue<'gcc> {
1462 let difference = a - b;
// NOTE: vector comparisons produce all-ones (-1) lanes where the comparison holds, so this
// computes doz(a, b) = max(a - b, 0) from Hacker's Delight, as used by the reductions above.
1463 let masks = context.new_comparison(None, ComparisonOp::GreaterThanEquals, a, b);
1464 difference & masks
1465}
1466
1467impl<'a, 'gcc, 'tcx> StaticBuilderMethods for Builder<'a, 'gcc, 'tcx> {
1468 fn get_static(&mut self, def_id: DefId) -> RValue<'gcc> {
1469 // Forward to the `get_static` method of `CodegenCx`
1470 self.cx().get_static(def_id).get_address(None)
1471 }
1472}
1473
1474impl<'tcx> HasParamEnv<'tcx> for Builder<'_, '_, 'tcx> {
1475 fn param_env(&self) -> ParamEnv<'tcx> {
1476 self.cx.param_env()
1477 }
1478}
1479
1480impl<'tcx> HasTargetSpec for Builder<'_, '_, 'tcx> {
1481 fn target_spec(&self) -> &Target {
1482 &self.cx.target_spec()
1483 }
1484}
1485
5e7ed085 1486pub trait ToGccComp {
1487 fn to_gcc_comparison(&self) -> ComparisonOp;
1488}
1489
1490impl ToGccComp for IntPredicate {
1491 fn to_gcc_comparison(&self) -> ComparisonOp {
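// NOTE: signed and unsigned predicates map to the same GCC comparison because in gccjit
// the signedness of a comparison is determined by the operand types.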
1492 match *self {
1493 IntPredicate::IntEQ => ComparisonOp::Equals,
1494 IntPredicate::IntNE => ComparisonOp::NotEquals,
1495 IntPredicate::IntUGT => ComparisonOp::GreaterThan,
1496 IntPredicate::IntUGE => ComparisonOp::GreaterThanEquals,
1497 IntPredicate::IntULT => ComparisonOp::LessThan,
1498 IntPredicate::IntULE => ComparisonOp::LessThanEquals,
1499 IntPredicate::IntSGT => ComparisonOp::GreaterThan,
1500 IntPredicate::IntSGE => ComparisonOp::GreaterThanEquals,
1501 IntPredicate::IntSLT => ComparisonOp::LessThan,
1502 IntPredicate::IntSLE => ComparisonOp::LessThanEquals,
1503 }
1504 }
1505}
1506
1507impl ToGccComp for RealPredicate {
1508 fn to_gcc_comparison(&self) -> ComparisonOp {
1509 // TODO(antoyo): check that ordered vs non-ordered is respected.
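// NOTE: GCC's plain comparisons are the ordered ones, so the unordered predicates below are
// only approximations: they may not treat NaN operands the way LLVM's fcmp would.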
1510 match *self {
1511 RealPredicate::RealPredicateFalse => unreachable!(),
1512 RealPredicate::RealOEQ => ComparisonOp::Equals,
1513 RealPredicate::RealOGT => ComparisonOp::GreaterThan,
1514 RealPredicate::RealOGE => ComparisonOp::GreaterThanEquals,
1515 RealPredicate::RealOLT => ComparisonOp::LessThan,
1516 RealPredicate::RealOLE => ComparisonOp::LessThanEquals,
1517 RealPredicate::RealONE => ComparisonOp::NotEquals,
1518 RealPredicate::RealORD => unreachable!(),
1519 RealPredicate::RealUNO => unreachable!(),
1520 RealPredicate::RealUEQ => ComparisonOp::Equals,
1521 RealPredicate::RealUGT => ComparisonOp::GreaterThan,
1522 RealPredicate::RealUGE => ComparisonOp::GreaterThanEquals,
1523 RealPredicate::RealULT => ComparisonOp::LessThan,
1524 RealPredicate::RealULE => ComparisonOp::LessThanEquals,
1525 RealPredicate::RealUNE => ComparisonOp::NotEquals,
1526 RealPredicate::RealPredicateTrue => unreachable!(),
1527 }
1528 }
1529}
1530
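// NOTE: the variants below mirror GCC's __ATOMIC_RELAXED..__ATOMIC_SEQ_CST memory model
// constants (0 through 5): their declaration order matters, since the discriminants are
// passed as integers to the atomic built-ins via `to_gcc` below.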
1531#[repr(C)]
1532#[allow(non_camel_case_types)]
1533enum MemOrdering {
1534 __ATOMIC_RELAXED,
1535 __ATOMIC_CONSUME,
1536 __ATOMIC_ACQUIRE,
1537 __ATOMIC_RELEASE,
1538 __ATOMIC_ACQ_REL,
1539 __ATOMIC_SEQ_CST,
1540}
1541
1542trait ToGccOrdering {
1543 fn to_gcc(self) -> i32;
1544}
1545
1546impl ToGccOrdering for AtomicOrdering {
1547 fn to_gcc(self) -> i32 {
1548 use MemOrdering::*;
1549
1550 let ordering =
1551 match self {
c295e0f8 1552 AtomicOrdering::Unordered => __ATOMIC_RELAXED,
923072b8 1553 AtomicOrdering::Relaxed => __ATOMIC_RELAXED, // TODO(antoyo): check if that's the same.
1554 AtomicOrdering::Acquire => __ATOMIC_ACQUIRE,
1555 AtomicOrdering::Release => __ATOMIC_RELEASE,
1556 AtomicOrdering::AcquireRelease => __ATOMIC_ACQ_REL,
1557 AtomicOrdering::SequentiallyConsistent => __ATOMIC_SEQ_CST,
1558 };
1559 ordering as i32
1560 }
1561}