// compiler/rustc_codegen_gcc/src/builder.rs (upstream version 1.59.0+dfsg1)
1use std::borrow::Cow;
2use std::cell::Cell;
3use std::convert::TryFrom;
4use std::ops::Deref;
5
6use gccjit::FunctionType;
7use gccjit::{
8 BinaryOp,
9 Block,
10 ComparisonOp,
11 Function,
12 LValue,
13 RValue,
14 ToRValue,
15 Type,
16 UnaryOp,
17};
18use rustc_codegen_ssa::MemFlags;
19use rustc_codegen_ssa::common::{AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope};
20use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
21use rustc_codegen_ssa::mir::place::PlaceRef;
22use rustc_codegen_ssa::traits::{
23 BackendTypes,
24 BaseTypeMethods,
25 BuilderMethods,
26 ConstMethods,
27 DerivedTypeMethods,
28 LayoutTypeMethods,
29 HasCodegen,
30 OverflowOp,
31 StaticBuilderMethods,
32};
33use rustc_middle::ty::{ParamEnv, Ty, TyCtxt};
34use rustc_middle::ty::layout::{FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, LayoutOfHelpers, TyAndLayout};
35use rustc_span::Span;
36use rustc_span::def_id::DefId;
37use rustc_target::abi::{
38 self,
39 call::FnAbi,
40 Align,
41 HasDataLayout,
42 Size,
43 TargetDataLayout,
44 WrappingRange,
45};
46use rustc_target::spec::{HasTargetSpec, Target};
47
48use crate::common::{SignType, TypeReflection, type_is_pointer};
49use crate::context::CodegenCx;
50use crate::type_of::LayoutGccExt;
51
52// TODO(antoyo)
53type Funclet = ();
54
55// TODO(antoyo): remove this variable.
56static mut RETURN_VALUE_COUNT: usize = 0;
57
58enum ExtremumOperation {
59 Max,
60 Min,
61}
62
63trait EnumClone {
64 fn clone(&self) -> Self;
65}
66
67impl EnumClone for AtomicOrdering {
68 fn clone(&self) -> Self {
69 match *self {
70 AtomicOrdering::NotAtomic => AtomicOrdering::NotAtomic,
71 AtomicOrdering::Unordered => AtomicOrdering::Unordered,
72 AtomicOrdering::Monotonic => AtomicOrdering::Monotonic,
73 AtomicOrdering::Acquire => AtomicOrdering::Acquire,
74 AtomicOrdering::Release => AtomicOrdering::Release,
75 AtomicOrdering::AcquireRelease => AtomicOrdering::AcquireRelease,
76 AtomicOrdering::SequentiallyConsistent => AtomicOrdering::SequentiallyConsistent,
77 }
78 }
79}
80
81pub struct Builder<'a: 'gcc, 'gcc, 'tcx> {
82 pub cx: &'a CodegenCx<'gcc, 'tcx>,
83 pub block: Option<Block<'gcc>>,
84 stack_var_count: Cell<usize>,
85}
86
87impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
88 fn with_cx(cx: &'a CodegenCx<'gcc, 'tcx>) -> Self {
89 Builder {
90 cx,
91 block: None,
92 stack_var_count: Cell::new(0),
93 }
94 }
95
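// Emulates an atomic max/min operation with a compare-and-exchange loop and returns the
// previously loaded value.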
96 fn atomic_extremum(&mut self, operation: ExtremumOperation, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
97 let size = self.cx.int_width(src.get_type()) / 8;
98
99 let func = self.current_func();
100
101 let load_ordering =
102 match order {
103 // TODO(antoyo): does this make sense?
104 AtomicOrdering::AcquireRelease | AtomicOrdering::Release => AtomicOrdering::Acquire,
105 _ => order.clone(),
106 };
107 let previous_value = self.atomic_load(dst.get_type(), dst, load_ordering.clone(), Size::from_bytes(size));
108 let previous_var = func.new_local(None, previous_value.get_type(), "previous_value");
109 let return_value = func.new_local(None, previous_value.get_type(), "return_value");
110 self.llbb().add_assignment(None, previous_var, previous_value);
111 self.llbb().add_assignment(None, return_value, previous_var.to_rvalue());
112
113 let while_block = func.new_block("while");
114 let after_block = func.new_block("after_while");
115 self.llbb().end_with_jump(None, while_block);
116
117 // NOTE: jumps were added where compare_exchange does not expect them, so the current block
118 // tracked in the builder state needs to be updated.
119 self.block = Some(while_block);
120 *self.cx.current_block.borrow_mut() = Some(while_block);
121
122 let comparison_operator =
123 match operation {
124 ExtremumOperation::Max => ComparisonOp::LessThan,
125 ExtremumOperation::Min => ComparisonOp::GreaterThan,
126 };
127
128 let cond1 = self.context.new_comparison(None, comparison_operator, previous_var.to_rvalue(), self.context.new_cast(None, src, previous_value.get_type()));
129 let compare_exchange = self.compare_exchange(dst, previous_var, src, order, load_ordering, false);
130 let cond2 = self.cx.context.new_unary_op(None, UnaryOp::LogicalNegate, compare_exchange.get_type(), compare_exchange);
131 let cond = self.cx.context.new_binary_op(None, BinaryOp::LogicalAnd, self.cx.bool_type, cond1, cond2);
132
133 while_block.end_with_conditional(None, cond, while_block, after_block);
134
135 // NOTE: since jumps were added in a place rustc does not expect, the current blocks in the
136 // state need to be updated.
137 self.block = Some(after_block);
138 *self.cx.current_block.borrow_mut() = Some(after_block);
139
140 return_value.to_rvalue()
141 }
142
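// Calls the sized `__atomic_compare_exchange_N` builtin. `cmp` is passed by address as the
// expected value, so it is updated with the observed value when the exchange fails.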
143 fn compare_exchange(&self, dst: RValue<'gcc>, cmp: LValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> {
144 let size = self.cx.int_width(src.get_type());
145 let compare_exchange = self.context.get_builtin_function(&format!("__atomic_compare_exchange_{}", size / 8));
146 let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
147 let failure_order = self.context.new_rvalue_from_int(self.i32_type, failure_order.to_gcc());
148 let weak = self.context.new_rvalue_from_int(self.bool_type, weak as i32);
149
150 let void_ptr_type = self.context.new_type::<*mut ()>();
151 let volatile_void_ptr_type = void_ptr_type.make_volatile();
152 let dst = self.context.new_cast(None, dst, volatile_void_ptr_type);
153 let expected = self.context.new_cast(None, cmp.get_address(None), void_ptr_type);
154
155 // NOTE: not sure why, but we have the wrong type here.
156 let int_type = compare_exchange.get_param(2).to_rvalue().get_type();
157 let src = self.context.new_cast(None, src, int_type);
158 self.context.new_call(None, compare_exchange, &[dst, expected, src, weak, order, failure_order])
159 }
160
161 pub fn assign(&self, lvalue: LValue<'gcc>, value: RValue<'gcc>) {
162 self.llbb().add_assignment(None, lvalue, value);
163 }
164
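// Checks the arguments of a direct call against the parameters of `func` and bitcasts any
// argument whose type does not match, since libgccjit requires exact argument types.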
165 fn check_call<'b>(&mut self, _typ: &str, func: Function<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> {
166 let mut all_args_match = true;
167 let mut param_types = vec![];
168 let param_count = func.get_param_count();
169 for (index, arg) in args.iter().enumerate().take(param_count) {
170 let param = func.get_param(index as i32);
171 let param = param.to_rvalue().get_type();
172 if param != arg.get_type() {
173 all_args_match = false;
174 }
175 param_types.push(param);
176 }
177
178 if all_args_match {
179 return Cow::Borrowed(args);
180 }
181
182 let casted_args: Vec<_> = param_types
183 .into_iter()
184 .zip(args.iter())
185 .enumerate()
186 .map(|(_i, (expected_ty, &actual_val))| {
187 let actual_ty = actual_val.get_type();
188 if expected_ty != actual_ty {
189 self.bitcast(actual_val, expected_ty)
190 }
191 else {
192 actual_val
193 }
194 })
195 .collect();
196
197 Cow::Owned(casted_args)
198 }
199
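// Same as check_call, but for a call through a function pointer: the parameter types are
// taken from the function pointer type.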
200 fn check_ptr_call<'b>(&mut self, _typ: &str, func_ptr: RValue<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> {
201 let mut all_args_match = true;
202 let mut param_types = vec![];
203 let gcc_func = func_ptr.get_type().dyncast_function_ptr_type().expect("function ptr");
204 for (index, arg) in args.iter().enumerate().take(gcc_func.get_param_count()) {
205 let param = gcc_func.get_param_type(index);
206 if param != arg.get_type() {
207 all_args_match = false;
208 }
209 param_types.push(param);
210 }
211
212 if all_args_match {
213 return Cow::Borrowed(args);
214 }
215
216 let casted_args: Vec<_> = param_types
217 .into_iter()
218 .zip(args.iter())
219 .enumerate()
220 .map(|(_i, (expected_ty, &actual_val))| {
221 let actual_ty = actual_val.get_type();
222 if expected_ty != actual_ty {
223 self.bitcast(actual_val, expected_ty)
224 }
225 else {
226 actual_val
227 }
228 })
229 .collect();
230
231 Cow::Owned(casted_args)
232 }
233
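// Returns `ptr` bitcast to a pointer to the type of `val`, so that the store type-checks.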
234 fn check_store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> {
235 let dest_ptr_ty = self.cx.val_ty(ptr).make_pointer(); // TODO(antoyo): make sure make_pointer() is okay here.
236 let stored_ty = self.cx.val_ty(val);
237 let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);
238
239 if dest_ptr_ty == stored_ptr_ty {
240 ptr
241 }
242 else {
243 self.bitcast(ptr, stored_ptr_ty)
244 }
245 }
246
247 pub fn current_func(&self) -> Function<'gcc> {
248 self.block.expect("block").get_function()
249 }
250
251 fn function_call(&mut self, func: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
252 // TODO(antoyo): remove when the API supports a different type for functions.
253 let func: Function<'gcc> = self.cx.rvalue_as_function(func);
254 let args = self.check_call("call", func, args);
255
256 // gccjit requires the result of a function call to be used even when it is unused,
257 // so we either assign the result to a local or call add_eval().
258 let return_type = func.get_return_type();
259 let current_block = self.current_block.borrow().expect("block");
260 let void_type = self.context.new_type::<()>();
261 let current_func = current_block.get_function();
262 if return_type != void_type {
263 unsafe { RETURN_VALUE_COUNT += 1 };
264 let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
265 current_block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
266 result.to_rvalue()
267 }
268 else {
269 current_block.add_eval(None, self.cx.context.new_call(None, func, &args));
270 // Return a dummy value when there is no return value.
271 self.context.new_rvalue_from_long(self.isize_type, 0)
272 }
273 }
274
275 fn function_ptr_call(&mut self, func_ptr: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
276 let args = self.check_ptr_call("call", func_ptr, args);
277
278 // gccjit requires the result of a function call to be used even when it is unused,
279 // so we either assign the result to a local or call add_eval().
280 let gcc_func = func_ptr.get_type().dyncast_function_ptr_type().expect("function ptr");
281 let mut return_type = gcc_func.get_return_type();
282 let current_block = self.current_block.borrow().expect("block");
283 let void_type = self.context.new_type::<()>();
284 let current_func = current_block.get_function();
285
286 // FIXME(antoyo): As a temporary workaround for unsupported LLVM intrinsics.
287 if gcc_func.get_param_count() == 0 && format!("{:?}", func_ptr) == "__builtin_ia32_pmovmskb128" {
288 return_type = self.int_type;
289 }
290
291 if return_type != void_type {
292 unsafe { RETURN_VALUE_COUNT += 1 };
293 let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
294 current_block.add_assignment(None, result, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
295 result.to_rvalue()
296 }
297 else {
298 if gcc_func.get_param_count() == 0 {
299 // FIXME(antoyo): As a temporary workaround for unsupported LLVM intrinsics.
300 current_block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &[]));
301 }
302 else {
303 current_block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
304 }
305 // Return a dummy value when there is no return value.
306 let result = current_func.new_local(None, self.isize_type, "dummyValueThatShouldNeverBeUsed");
307 current_block.add_assignment(None, result, self.context.new_rvalue_from_long(self.isize_type, 0));
308 result.to_rvalue()
309 }
310 }
311
312 pub fn overflow_call(&mut self, func: Function<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
313 // gccjit requires the result of a function call to be used even when it is unused,
314 // so we assign the result to a local.
315 let return_type = self.context.new_type::<bool>();
316 let current_block = self.current_block.borrow().expect("block");
317 let current_func = current_block.get_function();
318 // TODO(antoyo): return the new_call() directly? Since the overflow function has no side-effects.
319 unsafe { RETURN_VALUE_COUNT += 1 };
320 let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
321 current_block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
322 result.to_rvalue()
323 }
324}
325
326impl<'gcc, 'tcx> HasCodegen<'tcx> for Builder<'_, 'gcc, 'tcx> {
327 type CodegenCx = CodegenCx<'gcc, 'tcx>;
328}
329
330impl<'tcx> HasTyCtxt<'tcx> for Builder<'_, '_, 'tcx> {
331 fn tcx(&self) -> TyCtxt<'tcx> {
332 self.cx.tcx()
333 }
334}
335
336impl HasDataLayout for Builder<'_, '_, '_> {
337 fn data_layout(&self) -> &TargetDataLayout {
338 self.cx.data_layout()
339 }
340}
341
342impl<'tcx> LayoutOfHelpers<'tcx> for Builder<'_, '_, 'tcx> {
343 type LayoutOfResult = TyAndLayout<'tcx>;
344
345 #[inline]
346 fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
347 self.cx.handle_layout_err(err, span, ty)
348 }
349}
350
351impl<'tcx> FnAbiOfHelpers<'tcx> for Builder<'_, '_, 'tcx> {
352 type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>;
353
354 #[inline]
355 fn handle_fn_abi_err(
356 &self,
357 err: FnAbiError<'tcx>,
358 span: Span,
359 fn_abi_request: FnAbiRequest<'tcx>,
360 ) -> ! {
361 self.cx.handle_fn_abi_err(err, span, fn_abi_request)
362 }
363}
364
365impl<'gcc, 'tcx> Deref for Builder<'_, 'gcc, 'tcx> {
366 type Target = CodegenCx<'gcc, 'tcx>;
367
368 fn deref(&self) -> &Self::Target {
369 self.cx
370 }
371}
372
373impl<'gcc, 'tcx> BackendTypes for Builder<'_, 'gcc, 'tcx> {
374 type Value = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Value;
375 type Function = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Function;
376 type BasicBlock = <CodegenCx<'gcc, 'tcx> as BackendTypes>::BasicBlock;
377 type Type = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Type;
378 type Funclet = <CodegenCx<'gcc, 'tcx> as BackendTypes>::Funclet;
379
380 type DIScope = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DIScope;
381 type DILocation = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DILocation;
382 type DIVariable = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DIVariable;
383}
384
385impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
386 fn build(cx: &'a CodegenCx<'gcc, 'tcx>, block: Block<'gcc>) -> Self {
387 let mut bx = Builder::with_cx(cx);
388 *cx.current_block.borrow_mut() = Some(block);
389 bx.block = Some(block);
390 bx
391 }
392
393 fn build_sibling_block(&mut self, name: &str) -> Self {
394 let block = self.append_sibling_block(name);
395 Self::build(self.cx, block)
396 }
397
398 fn llbb(&self) -> Block<'gcc> {
399 self.block.expect("block")
400 }
401
402 fn append_block(cx: &'a CodegenCx<'gcc, 'tcx>, func: RValue<'gcc>, name: &str) -> Block<'gcc> {
403 let func = cx.rvalue_as_function(func);
404 func.new_block(name)
405 }
406
407 fn append_sibling_block(&mut self, name: &str) -> Block<'gcc> {
408 let func = self.current_func();
409 func.new_block(name)
410 }
411
412 fn ret_void(&mut self) {
413 self.llbb().end_with_void_return(None)
414 }
415
416 fn ret(&mut self, value: RValue<'gcc>) {
417 let value =
418 if self.structs_as_pointer.borrow().contains(&value) {
419 // NOTE: hack to work around a limitation of the rustc API: see the comment on
420 // CodegenCx.structs_as_pointer
421 value.dereference(None).to_rvalue()
422 }
423 else {
424 value
425 };
426 self.llbb().end_with_return(None, value);
427 }
428
429 fn br(&mut self, dest: Block<'gcc>) {
430 self.llbb().end_with_jump(None, dest)
431 }
432
433 fn cond_br(&mut self, cond: RValue<'gcc>, then_block: Block<'gcc>, else_block: Block<'gcc>) {
434 self.llbb().end_with_conditional(None, cond, then_block, else_block)
435 }
436
437 fn switch(&mut self, value: RValue<'gcc>, default_block: Block<'gcc>, cases: impl ExactSizeIterator<Item = (u128, Block<'gcc>)>) {
438 let mut gcc_cases = vec![];
439 let typ = self.val_ty(value);
440 for (on_val, dest) in cases {
441 let on_val = self.const_uint_big(typ, on_val);
442 gcc_cases.push(self.context.new_case(on_val, on_val, dest));
443 }
444 self.block.expect("block").end_with_switch(None, value, default_block, &gcc_cases);
445 }
446
447 fn invoke(&mut self, _typ: Type<'gcc>, _func: RValue<'gcc>, _args: &[RValue<'gcc>], then: Block<'gcc>, catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> {
448 let condition = self.context.new_rvalue_from_int(self.bool_type, 0);
449 self.llbb().end_with_conditional(None, condition, then, catch);
450 self.context.new_rvalue_from_int(self.int_type, 0)
451
452 // TODO(antoyo)
453 }
454
455 fn unreachable(&mut self) {
456 let func = self.context.get_builtin_function("__builtin_unreachable");
457 let block = self.block.expect("block");
458 block.add_eval(None, self.context.new_call(None, func, &[]));
459 let return_type = block.get_function().get_return_type();
460 let void_type = self.context.new_type::<()>();
461 if return_type == void_type {
462 block.end_with_void_return(None)
463 }
464 else {
465 let return_value = self.current_func()
466 .new_local(None, return_type, "unreachableReturn");
467 block.end_with_return(None, return_value)
468 }
469 }
470
471 fn add(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
472 // FIXME(antoyo): this should not be required.
473 if format!("{:?}", a.get_type()) != format!("{:?}", b.get_type()) {
474 b = self.context.new_cast(None, b, a.get_type());
475 }
476 a + b
477 }
478
479 fn fadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
480 a + b
481 }
482
483 fn sub(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
484 if a.get_type() != b.get_type() {
485 b = self.context.new_cast(None, b, a.get_type());
486 }
487 a - b
488 }
489
490 fn fsub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
491 a - b
492 }
493
494 fn mul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
495 a * b
496 }
497
498 fn fmul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
499 a * b
500 }
501
502 fn udiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
503 // TODO(antoyo): convert the arguments to unsigned?
504 a / b
505 }
506
507 fn exactudiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
508 // TODO(antoyo): convert the arguments to unsigned?
509 // TODO(antoyo): poison if not exact.
510 a / b
511 }
512
513 fn sdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
514 // TODO(antoyo): convert the arguments to signed?
515 a / b
516 }
517
518 fn exactsdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
519 // TODO(antoyo): poison if not exact.
520 // FIXME(antoyo): rustc_codegen_ssa::mir::intrinsic uses different types for a and b but they
521 // should be the same.
522 let typ = a.get_type().to_signed(self);
523 let b = self.context.new_cast(None, b, typ);
524 a / b
525 }
526
527 fn fdiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
528 a / b
529 }
530
531 fn urem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
532 a % b
533 }
534
535 fn srem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
536 a % b
537 }
538
539 fn frem(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
540 if a.get_type() == self.cx.float_type {
541 let fmodf = self.context.get_builtin_function("fmodf");
542 // FIXME(antoyo): this seems to produce the wrong result.
543 return self.context.new_call(None, fmodf, &[a, b]);
544 }
545 assert_eq!(a.get_type(), self.cx.double_type);
546
547 let fmod = self.context.get_builtin_function("fmod");
548 return self.context.new_call(None, fmod, &[a, b]);
549 }
550
551 fn shl(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
552 // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number.
553 let a_type = a.get_type();
554 let b_type = b.get_type();
555 if a_type.is_unsigned(self) && b_type.is_signed(self) {
556 let a = self.context.new_cast(None, a, b_type);
557 let result = a << b;
558 self.context.new_cast(None, result, a_type)
559 }
560 else if a_type.is_signed(self) && b_type.is_unsigned(self) {
561 let b = self.context.new_cast(None, b, a_type);
562 a << b
563 }
564 else {
565 a << b
566 }
567 }
568
569 fn lshr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
570 // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number.
571 // TODO(antoyo): cast to unsigned to do a logical shift if that does not work.
572 let a_type = a.get_type();
573 let b_type = b.get_type();
574 if a_type.is_unsigned(self) && b_type.is_signed(self) {
575 let a = self.context.new_cast(None, a, b_type);
576 let result = a >> b;
577 self.context.new_cast(None, result, a_type)
578 }
579 else if a_type.is_signed(self) && b_type.is_unsigned(self) {
580 let b = self.context.new_cast(None, b, a_type);
581 a >> b
582 }
583 else {
584 a >> b
585 }
586 }
587
588 fn ashr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
589 // TODO(antoyo): check whether the behavior of >> here is an arithmetic shift.
590 // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number.
591 let a_type = a.get_type();
592 let b_type = b.get_type();
593 if a_type.is_unsigned(self) && b_type.is_signed(self) {
594 let a = self.context.new_cast(None, a, b_type);
595 let result = a >> b;
596 self.context.new_cast(None, result, a_type)
597 }
598 else if a_type.is_signed(self) && b_type.is_unsigned(self) {
599 let b = self.context.new_cast(None, b, a_type);
600 a >> b
601 }
602 else {
603 a >> b
604 }
605 }
606
607 fn and(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
608 if a.get_type() != b.get_type() {
609 b = self.context.new_cast(None, b, a.get_type());
610 }
611 a & b
612 }
613
614 fn or(&mut self, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
615 if a.get_type() != b.get_type() {
616 b = self.context.new_cast(None, b, a.get_type());
617 }
618 a | b
619 }
620
621 fn xor(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
622 a ^ b
623 }
624
625 fn neg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
626 self.cx.context.new_unary_op(None, UnaryOp::Minus, a.get_type(), a)
627 }
628
629 fn fneg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
630 self.cx.context.new_unary_op(None, UnaryOp::Minus, a.get_type(), a)
631 }
632
633 fn not(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
634 let operation =
635 if a.get_type().is_bool() {
636 UnaryOp::LogicalNegate
637 }
638 else {
639 UnaryOp::BitwiseNegate
640 };
641 self.cx.context.new_unary_op(None, operation, a.get_type(), a)
642 }
643
644 fn unchecked_sadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
645 a + b
646 }
647
648 fn unchecked_uadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
649 a + b
650 }
651
652 fn unchecked_ssub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
653 a - b
654 }
655
656 fn unchecked_usub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
657 // TODO(antoyo): should generate poison value?
658 a - b
659 }
660
661 fn unchecked_smul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
662 a * b
663 }
664
665 fn unchecked_umul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
666 a * b
667 }
668
669 fn fadd_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
670 unimplemented!();
671 }
672
673 fn fsub_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
674 unimplemented!();
675 }
676
677 fn fmul_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
678 unimplemented!();
679 }
680
681 fn fdiv_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
682 unimplemented!();
683 }
684
685 fn frem_fast(&mut self, _lhs: RValue<'gcc>, _rhs: RValue<'gcc>) -> RValue<'gcc> {
686 unimplemented!();
687 }
688
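// Lowers checked add/sub/mul to the matching GCC `__builtin_*_overflow` builtin and returns
// the (result, overflow flag) pair expected by rustc.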
689 fn checked_binop(&mut self, oop: OverflowOp, typ: Ty<'_>, lhs: Self::Value, rhs: Self::Value) -> (Self::Value, Self::Value) {
690 use rustc_middle::ty::{Int, IntTy::*, Uint, UintTy::*};
691
692 let new_kind =
693 match typ.kind() {
694 Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)),
695 Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)),
696 t @ (Uint(_) | Int(_)) => t.clone(),
697 _ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
698 };
699
700 // TODO(antoyo): remove duplication with intrinsic?
701 let name =
702 match oop {
703 OverflowOp::Add =>
704 match new_kind {
705 Int(I8) => "__builtin_add_overflow",
706 Int(I16) => "__builtin_add_overflow",
707 Int(I32) => "__builtin_sadd_overflow",
708 Int(I64) => "__builtin_saddll_overflow",
709 Int(I128) => "__builtin_add_overflow",
710
711 Uint(U8) => "__builtin_add_overflow",
712 Uint(U16) => "__builtin_add_overflow",
713 Uint(U32) => "__builtin_uadd_overflow",
714 Uint(U64) => "__builtin_uaddll_overflow",
715 Uint(U128) => "__builtin_add_overflow",
716
717 _ => unreachable!(),
718 },
719 OverflowOp::Sub =>
720 match new_kind {
721 Int(I8) => "__builtin_sub_overflow",
722 Int(I16) => "__builtin_sub_overflow",
723 Int(I32) => "__builtin_ssub_overflow",
724 Int(I64) => "__builtin_ssubll_overflow",
725 Int(I128) => "__builtin_sub_overflow",
726
727 Uint(U8) => "__builtin_sub_overflow",
728 Uint(U16) => "__builtin_sub_overflow",
729 Uint(U32) => "__builtin_usub_overflow",
730 Uint(U64) => "__builtin_usubll_overflow",
731 Uint(U128) => "__builtin_sub_overflow",
732
733 _ => unreachable!(),
734 },
735 OverflowOp::Mul =>
736 match new_kind {
737 Int(I8) => "__builtin_mul_overflow",
738 Int(I16) => "__builtin_mul_overflow",
739 Int(I32) => "__builtin_smul_overflow",
740 Int(I64) => "__builtin_smulll_overflow",
741 Int(I128) => "__builtin_mul_overflow",
742
743 Uint(U8) => "__builtin_mul_overflow",
744 Uint(U16) => "__builtin_mul_overflow",
745 Uint(U32) => "__builtin_umul_overflow",
746 Uint(U64) => "__builtin_umulll_overflow",
747 Uint(U128) => "__builtin_mul_overflow",
748
749 _ => unreachable!(),
750 },
751 };
752
753 let intrinsic = self.context.get_builtin_function(&name);
754 let res = self.current_func()
755 // TODO(antoyo): is it correct to use rhs type instead of the parameter typ?
756 .new_local(None, rhs.get_type(), "binopResult")
757 .get_address(None);
758 let overflow = self.overflow_call(intrinsic, &[lhs, rhs, res], None);
759 (res.dereference(None).to_rvalue(), overflow)
760 }
761
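// Stack allocation: declares a new local of the (aligned) type and returns its address.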
762 fn alloca(&mut self, ty: Type<'gcc>, align: Align) -> RValue<'gcc> {
763 // FIXME(antoyo): this checks that we don't call get_aligned() a second time on a type.
764 // Ideally, we shouldn't need to do this check.
765 let aligned_type =
766 if ty == self.cx.u128_type || ty == self.cx.i128_type {
767 ty
768 }
769 else {
770 ty.get_aligned(align.bytes())
771 };
772 // TODO(antoyo): It might be better to return an LValue, but fixing the rustc API is non-trivial.
773 self.stack_var_count.set(self.stack_var_count.get() + 1);
774 self.current_func().new_local(None, aligned_type, &format!("stack_var_{}", self.stack_var_count.get())).get_address(None)
775 }
776
777 fn dynamic_alloca(&mut self, _ty: Type<'gcc>, _align: Align) -> RValue<'gcc> {
778 unimplemented!();
779 }
780
781 fn array_alloca(&mut self, _ty: Type<'gcc>, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
782 unimplemented!();
783 }
784
785 fn load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
786 // TODO(antoyo): use ty.
787 let block = self.llbb();
788 let function = block.get_function();
789 // NOTE: instead of returning the dereference here, we have to assign it to a variable in
790 // the current basic block. Otherwise, it could be used in another basic block, causing a
791 // dereference after a drop, for instance.
792 // TODO(antoyo): handle align.
793 let deref = ptr.dereference(None).to_rvalue();
794 let value_type = deref.get_type();
795 unsafe { RETURN_VALUE_COUNT += 1 };
796 let loaded_value = function.new_local(None, value_type, &format!("loadedValue{}", unsafe { RETURN_VALUE_COUNT }));
797 block.add_assignment(None, loaded_value, deref);
798 loaded_value.to_rvalue()
799 }
800
801 fn volatile_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> {
802 // TODO(antoyo): use ty.
803 let ptr = self.context.new_cast(None, ptr, ptr.get_type().make_volatile());
804 ptr.dereference(None).to_rvalue()
805 }
806
807 fn atomic_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) -> RValue<'gcc> {
808 // TODO(antoyo): use ty.
809 // TODO(antoyo): handle alignment.
810 let atomic_load = self.context.get_builtin_function(&format!("__atomic_load_{}", size.bytes()));
811 let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
812
813 let volatile_const_void_ptr_type = self.context.new_type::<()>()
814 .make_const()
815 .make_volatile()
816 .make_pointer();
817 let ptr = self.context.new_cast(None, ptr, volatile_const_void_ptr_type);
818 self.context.new_call(None, atomic_load, &[ptr, ordering])
819 }
820
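// Loads a place into an OperandRef: ZSTs and unsized places are handled specially, immediate
// scalars become a single value, and scalar pairs are loaded field by field.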
821 fn load_operand(&mut self, place: PlaceRef<'tcx, RValue<'gcc>>) -> OperandRef<'tcx, RValue<'gcc>> {
822 assert_eq!(place.llextra.is_some(), place.layout.is_unsized());
823
824 if place.layout.is_zst() {
825 return OperandRef::new_zst(self, place.layout);
826 }
827
828 fn scalar_load_metadata<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, load: RValue<'gcc>, scalar: &abi::Scalar) {
829 let vr = scalar.valid_range.clone();
830 match scalar.value {
831 abi::Int(..) => {
832 if !scalar.is_always_valid(bx) {
833 bx.range_metadata(load, scalar.valid_range);
834 }
835 }
836 abi::Pointer if vr.start < vr.end && !vr.contains(0) => {
837 bx.nonnull_metadata(load);
838 }
839 _ => {}
840 }
841 }
842
843 let val =
844 if let Some(llextra) = place.llextra {
845 OperandValue::Ref(place.llval, Some(llextra), place.align)
846 }
847 else if place.layout.is_gcc_immediate() {
848 let load = self.load(place.llval.get_type(), place.llval, place.align);
849 if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
850 scalar_load_metadata(self, load, scalar);
851 }
852 OperandValue::Immediate(self.to_immediate(load, place.layout))
853 }
854 else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
855 let b_offset = a.value.size(self).align_to(b.value.align(self).abi);
856 let pair_type = place.layout.gcc_type(self, false);
857
858 let mut load = |i, scalar: &abi::Scalar, align| {
859 let llptr = self.struct_gep(pair_type, place.llval, i as u64);
860 let load = self.load(llptr.get_type(), llptr, align);
861 scalar_load_metadata(self, load, scalar);
862 if scalar.is_bool() { self.trunc(load, self.type_i1()) } else { load }
863 };
864
865 OperandValue::Pair(
866 load(0, a, place.align),
867 load(1, b, place.align.restrict_for_offset(b_offset)),
868 )
869 }
870 else {
871 OperandValue::Ref(place.llval, None, place.align)
872 };
873
874 OperandRef { val, layout: place.layout }
875 }
876
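// Emits a header/body loop that stores `cg_elem` into each of the `count` consecutive
// elements of `dest`, returning the builder positioned on the block after the loop.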
877 fn write_operand_repeatedly(mut self, cg_elem: OperandRef<'tcx, RValue<'gcc>>, count: u64, dest: PlaceRef<'tcx, RValue<'gcc>>) -> Self {
878 let zero = self.const_usize(0);
879 let count = self.const_usize(count);
880 let start = dest.project_index(&mut self, zero).llval;
881 let end = dest.project_index(&mut self, count).llval;
882
883 let mut header_bx = self.build_sibling_block("repeat_loop_header");
884 let mut body_bx = self.build_sibling_block("repeat_loop_body");
885 let next_bx = self.build_sibling_block("repeat_loop_next");
886
887 let ptr_type = start.get_type();
888 let current = self.llbb().get_function().new_local(None, ptr_type, "loop_var");
889 let current_val = current.to_rvalue();
890 self.assign(current, start);
891
892 self.br(header_bx.llbb());
893
894 let keep_going = header_bx.icmp(IntPredicate::IntNE, current_val, end);
895 header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb());
896
897 let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
898 cg_elem.val.store(&mut body_bx, PlaceRef::new_sized_aligned(current_val, cg_elem.layout, align));
899
900 let next = body_bx.inbounds_gep(self.backend_type(cg_elem.layout), current.to_rvalue(), &[self.const_usize(1)]);
901 body_bx.llbb().add_assignment(None, current, next);
902 body_bx.br(header_bx.llbb());
903
904 next_bx
905 }
906
907 fn range_metadata(&mut self, _load: RValue<'gcc>, _range: WrappingRange) {
908 // TODO(antoyo)
909 }
910
911 fn nonnull_metadata(&mut self, _load: RValue<'gcc>) {
912 // TODO(antoyo)
913 }
914
915 fn type_metadata(&mut self, _function: RValue<'gcc>, _typeid: String) {
916 // Unsupported.
917 }
918
919 fn typeid_metadata(&mut self, _typeid: String) -> RValue<'gcc> {
920 // Unsupported.
921 self.context.new_rvalue_from_int(self.int_type, 0)
922 }
923
924
925 fn store(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, align: Align) -> RValue<'gcc> {
926 self.store_with_flags(val, ptr, align, MemFlags::empty())
927 }
928
929 fn store_with_flags(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, _align: Align, _flags: MemFlags) -> RValue<'gcc> {
930 let ptr = self.check_store(val, ptr);
931 self.llbb().add_assignment(None, ptr.dereference(None), val);
932 // TODO(antoyo): handle align and flags.
933 // NOTE: dummy value here since it's never used. FIXME(antoyo): API should not return a value here?
934 self.cx.context.new_rvalue_zero(self.type_i32())
935 }
936
937 fn atomic_store(&mut self, value: RValue<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) {
938 // TODO(antoyo): handle alignment.
939 let atomic_store = self.context.get_builtin_function(&format!("__atomic_store_{}", size.bytes()));
940 let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
941 let volatile_const_void_ptr_type = self.context.new_type::<()>()
942 .make_volatile()
943 .make_pointer();
944 let ptr = self.context.new_cast(None, ptr, volatile_const_void_ptr_type);
945
946 // FIXME(antoyo): fix libgccjit to allow comparing an integer type with an aligned integer type because
947 // the following cast is required to avoid this error:
948 // gcc_jit_context_new_call: mismatching types for argument 2 of function "__atomic_store_4": assignment to param arg1 (type: int) from loadedValue3577 (type: unsigned int __attribute__((aligned(4))))
949 let int_type = atomic_store.get_param(1).to_rvalue().get_type();
950 let value = self.context.new_cast(None, value, int_type);
951 self.llbb()
952 .add_eval(None, self.context.new_call(None, atomic_store, &[ptr, value, ordering]));
953 }
954
955 fn gep(&mut self, _typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
956 let mut result = ptr;
957 for index in indices {
958 result = self.context.new_array_access(None, result, *index).get_address(None).to_rvalue();
959 }
960 result
961 }
962
963 fn inbounds_gep(&mut self, _typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
964 // FIXME(antoyo): it would be safer to do the same thing (a loop) as gep.
965 // TODO(antoyo): specify inbounds somehow.
966 match indices.len() {
967 1 => {
968 self.context.new_array_access(None, ptr, indices[0]).get_address(None)
969 },
970 2 => {
971 let array = ptr.dereference(None); // TODO(antoyo): assert that first index is 0?
972 self.context.new_array_access(None, array, indices[1]).get_address(None)
973 },
974 _ => unimplemented!(),
975 }
976 }
977
978 fn struct_gep(&mut self, value_type: Type<'gcc>, ptr: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
979 // FIXME(antoyo): it would be better if the API only called this on struct, not on arrays.
980 assert_eq!(idx as usize as u64, idx);
981 let value = ptr.dereference(None).to_rvalue();
982
983 if value_type.dyncast_array().is_some() {
984 let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
985 let element = self.context.new_array_access(None, value, index);
986 element.get_address(None)
987 }
988 else if let Some(vector_type) = value_type.dyncast_vector() {
989 let array_type = vector_type.get_element_type().make_pointer();
990 let array = self.bitcast(ptr, array_type);
991 let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
992 let element = self.context.new_array_access(None, array, index);
993 element.get_address(None)
994 }
995 else if let Some(struct_type) = value_type.is_struct() {
996 ptr.dereference_field(None, struct_type.get_field(idx as i32)).get_address(None)
997 }
998 else {
999 panic!("Unexpected type {:?}", value_type);
1000 }
1001 }
1002
1003 /* Casts */
1004 fn trunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
1005 // TODO(antoyo): check that it indeed truncate the value.
1006 self.context.new_cast(None, value, dest_ty)
1007 }
1008
1009 fn sext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
1010 // TODO(antoyo): check that it indeed sign extend the value.
1011 if dest_ty.dyncast_vector().is_some() {
1012 // TODO(antoyo): nothing to do as it is only for LLVM?
1013 return value;
1014 }
1015 self.context.new_cast(None, value, dest_ty)
1016 }
1017
1018 fn fptoui(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
1019 self.context.new_cast(None, value, dest_ty)
1020 }
1021
1022 fn fptosi(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
1023 self.context.new_cast(None, value, dest_ty)
1024 }
1025
1026 fn uitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
1027 self.context.new_cast(None, value, dest_ty)
1028 }
1029
1030 fn sitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
1031 self.context.new_cast(None, value, dest_ty)
1032 }
1033
1034 fn fptrunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
1035 // TODO(antoyo): make sure it truncates.
1036 self.context.new_cast(None, value, dest_ty)
1037 }
1038
1039 fn fpext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
1040 self.context.new_cast(None, value, dest_ty)
1041 }
1042
1043 fn ptrtoint(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
1044 self.cx.ptrtoint(self.block.expect("block"), value, dest_ty)
1045 }
1046
1047 fn inttoptr(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
1048 self.cx.inttoptr(self.block.expect("block"), value, dest_ty)
1049 }
1050
1051 fn bitcast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
1052 self.cx.const_bitcast(value, dest_ty)
1053 }
1054
1055 fn intcast(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>, _is_signed: bool) -> RValue<'gcc> {
1056 // NOTE: is_signed is for value, not dest_typ.
1057 self.cx.context.new_cast(None, value, dest_typ)
1058 }
1059
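// int-to-pointer and pointer-to-pointer conversions go through real casts, while casts between
// two non-pointer types fall back to a bitcast (transmute).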
1060 fn pointercast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
1061 let val_type = value.get_type();
1062 match (type_is_pointer(val_type), type_is_pointer(dest_ty)) {
1063 (false, true) => {
1064 // NOTE: Projecting a field of a pointer type will attempt a cast from a signed char to
1065 // a pointer, which is not supported by gccjit.
1066 return self.cx.context.new_cast(None, self.inttoptr(value, val_type.make_pointer()), dest_ty);
1067 },
1068 (false, false) => {
1069 // When they are not pointers, we want a transmute (or reinterpret_cast).
1070 self.bitcast(value, dest_ty)
1071 },
1072 (true, true) => self.cx.context.new_cast(None, value, dest_ty),
1073 (true, false) => unimplemented!(),
1074 }
1075 }
1076
1077 /* Comparisons */
1078 fn icmp(&mut self, op: IntPredicate, mut lhs: RValue<'gcc>, mut rhs: RValue<'gcc>) -> RValue<'gcc> {
1079 let left_type = lhs.get_type();
1080 let right_type = rhs.get_type();
1081 if left_type != right_type {
1082 // NOTE: because libgccjit cannot compare function pointers.
1083 if left_type.dyncast_function_ptr_type().is_some() && right_type.dyncast_function_ptr_type().is_some() {
1084 lhs = self.context.new_cast(None, lhs, self.usize_type.make_pointer());
1085 rhs = self.context.new_cast(None, rhs, self.usize_type.make_pointer());
1086 }
1087 // NOTE: hack because we try to cast a vector type to the same vector type.
1088 else if format!("{:?}", left_type) != format!("{:?}", right_type) {
1089 rhs = self.context.new_cast(None, rhs, left_type);
1090 }
1091 }
1092 self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs)
1093 }
1094
1095 fn fcmp(&mut self, op: RealPredicate, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
1096 self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs)
1097 }
1098
1099 /* Miscellaneous instructions */
1100 fn memcpy(&mut self, dst: RValue<'gcc>, dst_align: Align, src: RValue<'gcc>, src_align: Align, size: RValue<'gcc>, flags: MemFlags) {
1101 if flags.contains(MemFlags::NONTEMPORAL) {
1102 // HACK(nox): This is inefficient but there is no nontemporal memcpy.
1103 let val = self.load(src.get_type(), src, src_align);
1104 let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
1105 self.store_with_flags(val, ptr, dst_align, flags);
1106 return;
1107 }
1108 let size = self.intcast(size, self.type_size_t(), false);
1109 let _is_volatile = flags.contains(MemFlags::VOLATILE);
1110 let dst = self.pointercast(dst, self.type_i8p());
1111 let src = self.pointercast(src, self.type_ptr_to(self.type_void()));
1112 let memcpy = self.context.get_builtin_function("memcpy");
1113 let block = self.block.expect("block");
1114 // TODO(antoyo): handle aligns and is_volatile.
1115 block.add_eval(None, self.context.new_call(None, memcpy, &[dst, src, size]));
1116 }
1117
1118 fn memmove(&mut self, dst: RValue<'gcc>, dst_align: Align, src: RValue<'gcc>, src_align: Align, size: RValue<'gcc>, flags: MemFlags) {
1119 if flags.contains(MemFlags::NONTEMPORAL) {
1120 // HACK(nox): This is inefficient but there is no nontemporal memmove.
1121 let val = self.load(src.get_type(), src, src_align);
1122 let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
1123 self.store_with_flags(val, ptr, dst_align, flags);
1124 return;
1125 }
1126 let size = self.intcast(size, self.type_size_t(), false);
1127 let _is_volatile = flags.contains(MemFlags::VOLATILE);
1128 let dst = self.pointercast(dst, self.type_i8p());
1129 let src = self.pointercast(src, self.type_ptr_to(self.type_void()));
1130
1131 let memmove = self.context.get_builtin_function("memmove");
1132 let block = self.block.expect("block");
1133 // TODO(antoyo): handle is_volatile.
1134 block.add_eval(None, self.context.new_call(None, memmove, &[dst, src, size]));
1135 }
1136
1137 fn memset(&mut self, ptr: RValue<'gcc>, fill_byte: RValue<'gcc>, size: RValue<'gcc>, _align: Align, flags: MemFlags) {
1138 let _is_volatile = flags.contains(MemFlags::VOLATILE);
1139 let ptr = self.pointercast(ptr, self.type_i8p());
1140 let memset = self.context.get_builtin_function("memset");
1141 let block = self.block.expect("block");
1142 // TODO(antoyo): handle align and is_volatile.
1143 let fill_byte = self.context.new_cast(None, fill_byte, self.i32_type);
1144 let size = self.intcast(size, self.type_size_t(), false);
1145 block.add_eval(None, self.context.new_call(None, memset, &[ptr, fill_byte, size]));
1146 }
1147
1148 fn select(&mut self, cond: RValue<'gcc>, then_val: RValue<'gcc>, mut else_val: RValue<'gcc>) -> RValue<'gcc> {
1149 let func = self.current_func();
1150 let variable = func.new_local(None, then_val.get_type(), "selectVar");
1151 let then_block = func.new_block("then");
1152 let else_block = func.new_block("else");
1153 let after_block = func.new_block("after");
1154 self.llbb().end_with_conditional(None, cond, then_block, else_block);
1155
1156 then_block.add_assignment(None, variable, then_val);
1157 then_block.end_with_jump(None, after_block);
1158
1159 if then_val.get_type() != else_val.get_type() {
1160 else_val = self.context.new_cast(None, else_val, then_val.get_type());
1161 }
1162 else_block.add_assignment(None, variable, else_val);
1163 else_block.end_with_jump(None, after_block);
1164
1165 // NOTE: since jumps were added in a place rustc does not expect, the current blocks in the
1166 // state need to be updated.
1167 self.block = Some(after_block);
1168 *self.cx.current_block.borrow_mut() = Some(after_block);
1169
1170 variable.to_rvalue()
1171 }
1172
1173 #[allow(dead_code)]
1174 fn va_arg(&mut self, _list: RValue<'gcc>, _ty: Type<'gcc>) -> RValue<'gcc> {
1175 unimplemented!();
1176 }
1177
1178 fn extract_element(&mut self, _vec: RValue<'gcc>, _idx: RValue<'gcc>) -> RValue<'gcc> {
1179 unimplemented!();
1180 }
1181
1182 fn vector_splat(&mut self, _num_elts: usize, _elt: RValue<'gcc>) -> RValue<'gcc> {
1183 unimplemented!();
1184 }
1185
1186 fn extract_value(&mut self, aggregate_value: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
1187 // FIXME(antoyo): it would be better if the API only called this on struct, not on arrays.
1188 assert_eq!(idx as usize as u64, idx);
1189 let value_type = aggregate_value.get_type();
1190
1191 if value_type.dyncast_array().is_some() {
1192 let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
1193 let element = self.context.new_array_access(None, aggregate_value, index);
1194 element.get_address(None)
1195 }
1196 else if value_type.dyncast_vector().is_some() {
1197 panic!();
1198 }
1199 else if let Some(pointer_type) = value_type.get_pointee() {
1200 if let Some(struct_type) = pointer_type.is_struct() {
1201 // NOTE: hack to work around a limitation of the rustc API: see the comment on
1202 // CodegenCx.structs_as_pointer
1203 aggregate_value.dereference_field(None, struct_type.get_field(idx as i32)).to_rvalue()
1204 }
1205 else {
1206 panic!("Unexpected type {:?}", value_type);
1207 }
1208 }
1209 else if let Some(struct_type) = value_type.is_struct() {
1210 aggregate_value.access_field(None, struct_type.get_field(idx as i32)).to_rvalue()
1211 }
1212 else {
1213 panic!("Unexpected type {:?}", value_type);
1214 }
1215 }
1216
1217 fn insert_value(&mut self, aggregate_value: RValue<'gcc>, value: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
1218 // FIXME(antoyo): it would be better if the API only called this on struct, not on arrays.
1219 assert_eq!(idx as usize as u64, idx);
1220 let value_type = aggregate_value.get_type();
1221
1222 let lvalue =
1223 if value_type.dyncast_array().is_some() {
1224 let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
1225 self.context.new_array_access(None, aggregate_value, index)
1226 }
1227 else if value_type.dyncast_vector().is_some() {
1228 panic!();
1229 }
1230 else if let Some(pointer_type) = value_type.get_pointee() {
1231 if let Some(struct_type) = pointer_type.is_struct() {
1232 // NOTE: hack to work around a limitation of the rustc API: see the comment on
1233 // CodegenCx.structs_as_pointer
1234 aggregate_value.dereference_field(None, struct_type.get_field(idx as i32))
1235 }
1236 else {
1237 panic!("Unexpected type {:?}", value_type);
1238 }
1239 }
1240 else {
1241 panic!("Unexpected type {:?}", value_type);
1242 };
1243
1244 let lvalue_type = lvalue.to_rvalue().get_type();
1245 let value =
1246 // NOTE: sometimes, rustc will create a value with the wrong type.
1247 if lvalue_type != value.get_type() {
1248 self.context.new_cast(None, value, lvalue_type)
1249 }
1250 else {
1251 value
1252 };
1253
1254 self.llbb().add_assignment(None, lvalue, value);
1255
1256 aggregate_value
1257 }
1258
1259 fn landing_pad(&mut self, _ty: Type<'gcc>, _pers_fn: RValue<'gcc>, _num_clauses: usize) -> RValue<'gcc> {
1260 let field1 = self.context.new_field(None, self.u8_type, "landing_pad_field_1");
1261 let field2 = self.context.new_field(None, self.i32_type, "landing_pad_field_2");
1262 let struct_type = self.context.new_struct_type(None, "landing_pad", &[field1, field2]);
1263 self.current_func().new_local(None, struct_type.as_type(), "landing_pad")
1264 .to_rvalue()
1265 // TODO(antoyo): Properly implement unwinding.
1266 // the above is just to make the compilation work as it seems
1267 // rustc_codegen_ssa now calls the unwinding builder methods even on panic=abort.
1268 }
1269
1270 fn set_cleanup(&mut self, _landing_pad: RValue<'gcc>) {
1271 // TODO(antoyo)
1272 }
1273
1274 fn resume(&mut self, _exn: RValue<'gcc>) -> RValue<'gcc> {
1275 unimplemented!();
1276 }
1277
1278 fn cleanup_pad(&mut self, _parent: Option<RValue<'gcc>>, _args: &[RValue<'gcc>]) -> Funclet {
1279 unimplemented!();
1280 }
1281
1282 fn cleanup_ret(&mut self, _funclet: &Funclet, _unwind: Option<Block<'gcc>>) -> RValue<'gcc> {
1283 unimplemented!();
1284 }
1285
1286 fn catch_pad(&mut self, _parent: RValue<'gcc>, _args: &[RValue<'gcc>]) -> Funclet {
1287 unimplemented!();
1288 }
1289
1290 fn catch_switch(&mut self, _parent: Option<RValue<'gcc>>, _unwind: Option<Block<'gcc>>, _num_handlers: usize) -> RValue<'gcc> {
1291 unimplemented!();
1292 }
1293
1294 fn add_handler(&mut self, _catch_switch: RValue<'gcc>, _handler: Block<'gcc>) {
1295 unimplemented!();
1296 }
1297
1298 fn set_personality_fn(&mut self, _personality: RValue<'gcc>) {
1299 // TODO(antoyo)
1300 }
1301
1302 // Atomic Operations
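// Builds the { value, success } pair that rustc expects from cmpxchg, using compare_exchange
// for the actual atomic operation.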
1303 fn atomic_cmpxchg(&mut self, dst: RValue<'gcc>, cmp: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> {
1304 let expected = self.current_func().new_local(None, cmp.get_type(), "expected");
1305 self.llbb().add_assignment(None, expected, cmp);
1306 let success = self.compare_exchange(dst, expected, src, order, failure_order, weak);
1307
1308 let pair_type = self.cx.type_struct(&[src.get_type(), self.bool_type], false);
1309 let result = self.current_func().new_local(None, pair_type, "atomic_cmpxchg_result");
1310 let align = Align::from_bits(64).expect("align"); // TODO(antoyo): use good align.
1311
1312 let value_type = result.to_rvalue().get_type();
1313 if let Some(struct_type) = value_type.is_struct() {
1314 self.store(success, result.access_field(None, struct_type.get_field(1)).get_address(None), align);
1315 // NOTE: since success contains the call to the intrinsic, it must be stored before
1316 // expected so that we store expected after the call.
1317 self.store(expected.to_rvalue(), result.access_field(None, struct_type.get_field(0)).get_address(None), align);
1318 }
1319 // TODO(antoyo): handle when value is not a struct.
1320
1321 result.to_rvalue()
1322 }
1323
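// Maps rustc's atomic RMW operations to the sized `__atomic_fetch_*` / `__atomic_exchange_*`
// builtins; max and min are emulated via atomic_extremum above.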
1324 fn atomic_rmw(&mut self, op: AtomicRmwBinOp, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
1325 let size = self.cx.int_width(src.get_type()) / 8;
1326 let name =
1327 match op {
1328 AtomicRmwBinOp::AtomicXchg => format!("__atomic_exchange_{}", size),
1329 AtomicRmwBinOp::AtomicAdd => format!("__atomic_fetch_add_{}", size),
1330 AtomicRmwBinOp::AtomicSub => format!("__atomic_fetch_sub_{}", size),
1331 AtomicRmwBinOp::AtomicAnd => format!("__atomic_fetch_and_{}", size),
1332 AtomicRmwBinOp::AtomicNand => format!("__atomic_fetch_nand_{}", size),
1333 AtomicRmwBinOp::AtomicOr => format!("__atomic_fetch_or_{}", size),
1334 AtomicRmwBinOp::AtomicXor => format!("__atomic_fetch_xor_{}", size),
1335 AtomicRmwBinOp::AtomicMax => return self.atomic_extremum(ExtremumOperation::Max, dst, src, order),
1336 AtomicRmwBinOp::AtomicMin => return self.atomic_extremum(ExtremumOperation::Min, dst, src, order),
1337 AtomicRmwBinOp::AtomicUMax => return self.atomic_extremum(ExtremumOperation::Max, dst, src, order),
1338 AtomicRmwBinOp::AtomicUMin => return self.atomic_extremum(ExtremumOperation::Min, dst, src, order),
1339 };
1340
1341
1342 let atomic_function = self.context.get_builtin_function(name);
1343 let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
1344
1345 let void_ptr_type = self.context.new_type::<*mut ()>();
1346 let volatile_void_ptr_type = void_ptr_type.make_volatile();
1347 let dst = self.context.new_cast(None, dst, volatile_void_ptr_type);
1348 // FIXME(antoyo): not sure why, but we have the wrong type here.
1349 let new_src_type = atomic_function.get_param(1).to_rvalue().get_type();
1350 let src = self.context.new_cast(None, src, new_src_type);
1351 let res = self.context.new_call(None, atomic_function, &[dst, src, order]);
1352 self.context.new_cast(None, res, src.get_type())
1353 }
1354
1355 fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope) {
1356 let name =
1357 match scope {
1358 SynchronizationScope::SingleThread => "__atomic_signal_fence",
1359 SynchronizationScope::CrossThread => "__atomic_thread_fence",
1360 };
1361 let thread_fence = self.context.get_builtin_function(name);
1362 let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
1363 self.llbb().add_eval(None, self.context.new_call(None, thread_fence, &[order]));
1364 }
1365
1366 fn set_invariant_load(&mut self, load: RValue<'gcc>) {
1367 // NOTE: Hack to consider vtable function pointer as non-global-variable function pointer.
1368 self.normal_function_addresses.borrow_mut().insert(load);
1369 // TODO(antoyo)
1370 }
1371
1372 fn lifetime_start(&mut self, _ptr: RValue<'gcc>, _size: Size) {
1373 // TODO(antoyo)
1374 }
1375
1376 fn lifetime_end(&mut self, _ptr: RValue<'gcc>, _size: Size) {
1377 // TODO(antoyo)
1378 }
1379
1380 fn call(&mut self, _typ: Type<'gcc>, func: RValue<'gcc>, args: &[RValue<'gcc>], funclet: Option<&Funclet>) -> RValue<'gcc> {
1381 // FIXME(antoyo): remove when having a proper API.
1382 let gcc_func = unsafe { std::mem::transmute(func) };
1383 if self.functions.borrow().values().find(|value| **value == gcc_func).is_some() {
1384 self.function_call(func, args, funclet)
1385 }
1386 else {
1387 // If it's not a function that was defined, it's a function pointer.
1388 self.function_ptr_call(func, args, funclet)
1389 }
1390 }
1391
1392 fn zext(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
1393 // FIXME(antoyo): this does not zero-extend.
1394 if value.get_type().is_bool() && dest_typ.is_i8(&self.cx) {
1395 // FIXME(antoyo): hack because base::from_immediate converts i1 to i8.
1396 // Fix the code in codegen_ssa::base::from_immediate.
1397 return value;
1398 }
1399 self.context.new_cast(None, value, dest_typ)
1400 }
1401
1402 fn cx(&self) -> &CodegenCx<'gcc, 'tcx> {
1403 self.cx
1404 }
1405
1406 fn apply_attrs_to_cleanup_callsite(&mut self, _llret: RValue<'gcc>) {
1407 unimplemented!();
1408 }
1409
1410 fn set_span(&mut self, _span: Span) {}
1411
1412 fn from_immediate(&mut self, val: Self::Value) -> Self::Value {
1413 if self.cx().val_ty(val) == self.cx().type_i1() {
1414 self.zext(val, self.cx().type_i8())
1415 }
1416 else {
1417 val
1418 }
1419 }
1420
1421 fn to_immediate_scalar(&mut self, val: Self::Value, scalar: abi::Scalar) -> Self::Value {
1422 if scalar.is_bool() {
1423 return self.trunc(val, self.cx().type_i1());
1424 }
1425 val
1426 }
1427
1428 fn fptoui_sat(&mut self, _val: RValue<'gcc>, _dest_ty: Type<'gcc>) -> Option<RValue<'gcc>> {
1429 None
1430 }
1431
1432 fn fptosi_sat(&mut self, _val: RValue<'gcc>, _dest_ty: Type<'gcc>) -> Option<RValue<'gcc>> {
1433 None
1434 }
1435
1436 fn instrprof_increment(&mut self, _fn_name: RValue<'gcc>, _hash: RValue<'gcc>, _num_counters: RValue<'gcc>, _index: RValue<'gcc>) {
1437 unimplemented!();
1438 }
1439}
1440
1441impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
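// NOTE: implemented by declaring and calling the x86 `_mm_shuffle_epi8` intrinsic as an extern
// function, so this is currently x86-specific.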
1442 pub fn shuffle_vector(&mut self, v1: RValue<'gcc>, v2: RValue<'gcc>, mask: RValue<'gcc>) -> RValue<'gcc> {
1443 let return_type = v1.get_type();
1444 let params = [
1445 self.context.new_parameter(None, return_type, "v1"),
1446 self.context.new_parameter(None, return_type, "v2"),
1447 self.context.new_parameter(None, mask.get_type(), "mask"),
1448 ];
1449 let shuffle = self.context.new_function(None, FunctionType::Extern, return_type, &params, "_mm_shuffle_epi8", false);
1450 self.context.new_call(None, shuffle, &[v1, v2, mask])
1451 }
1452}
1453
1454impl<'a, 'gcc, 'tcx> StaticBuilderMethods for Builder<'a, 'gcc, 'tcx> {
1455 fn get_static(&mut self, def_id: DefId) -> RValue<'gcc> {
1456 // Forward to the `get_static` method of `CodegenCx`
1457 self.cx().get_static(def_id).get_address(None)
1458 }
1459}
1460
1461impl<'tcx> HasParamEnv<'tcx> for Builder<'_, '_, 'tcx> {
1462 fn param_env(&self) -> ParamEnv<'tcx> {
1463 self.cx.param_env()
1464 }
1465}
1466
1467impl<'tcx> HasTargetSpec for Builder<'_, '_, 'tcx> {
1468 fn target_spec(&self) -> &Target {
1469 &self.cx.target_spec()
1470 }
1471}
1472
1473trait ToGccComp {
1474 fn to_gcc_comparison(&self) -> ComparisonOp;
1475}
1476
1477impl ToGccComp for IntPredicate {
1478 fn to_gcc_comparison(&self) -> ComparisonOp {
1479 match *self {
1480 IntPredicate::IntEQ => ComparisonOp::Equals,
1481 IntPredicate::IntNE => ComparisonOp::NotEquals,
1482 IntPredicate::IntUGT => ComparisonOp::GreaterThan,
1483 IntPredicate::IntUGE => ComparisonOp::GreaterThanEquals,
1484 IntPredicate::IntULT => ComparisonOp::LessThan,
1485 IntPredicate::IntULE => ComparisonOp::LessThanEquals,
1486 IntPredicate::IntSGT => ComparisonOp::GreaterThan,
1487 IntPredicate::IntSGE => ComparisonOp::GreaterThanEquals,
1488 IntPredicate::IntSLT => ComparisonOp::LessThan,
1489 IntPredicate::IntSLE => ComparisonOp::LessThanEquals,
1490 }
1491 }
1492}
1493
1494impl ToGccComp for RealPredicate {
1495 fn to_gcc_comparison(&self) -> ComparisonOp {
1496 // TODO(antoyo): check that ordered vs non-ordered is respected.
1497 match *self {
1498 RealPredicate::RealPredicateFalse => unreachable!(),
1499 RealPredicate::RealOEQ => ComparisonOp::Equals,
1500 RealPredicate::RealOGT => ComparisonOp::GreaterThan,
1501 RealPredicate::RealOGE => ComparisonOp::GreaterThanEquals,
1502 RealPredicate::RealOLT => ComparisonOp::LessThan,
1503 RealPredicate::RealOLE => ComparisonOp::LessThanEquals,
1504 RealPredicate::RealONE => ComparisonOp::NotEquals,
1505 RealPredicate::RealORD => unreachable!(),
1506 RealPredicate::RealUNO => unreachable!(),
1507 RealPredicate::RealUEQ => ComparisonOp::Equals,
1508 RealPredicate::RealUGT => ComparisonOp::GreaterThan,
1509 RealPredicate::RealUGE => ComparisonOp::GreaterThanEquals,
1510 RealPredicate::RealULT => ComparisonOp::LessThan,
1511 RealPredicate::RealULE => ComparisonOp::LessThanEquals,
1512 RealPredicate::RealUNE => ComparisonOp::NotEquals,
1513 RealPredicate::RealPredicateTrue => unreachable!(),
1514 }
1515 }
1516}
1517
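// Mirror of GCC's __ATOMIC_* memory-order constants, so rustc orderings can be passed to the
// atomic builtins as plain integers.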
1518#[repr(C)]
1519#[allow(non_camel_case_types)]
1520enum MemOrdering {
1521 __ATOMIC_RELAXED,
1522 __ATOMIC_CONSUME,
1523 __ATOMIC_ACQUIRE,
1524 __ATOMIC_RELEASE,
1525 __ATOMIC_ACQ_REL,
1526 __ATOMIC_SEQ_CST,
1527}
1528
1529trait ToGccOrdering {
1530 fn to_gcc(self) -> i32;
1531}
1532
1533impl ToGccOrdering for AtomicOrdering {
1534 fn to_gcc(self) -> i32 {
1535 use MemOrdering::*;
1536
1537 let ordering =
1538 match self {
1539 AtomicOrdering::NotAtomic => __ATOMIC_RELAXED, // TODO(antoyo): check if that's the same.
1540 AtomicOrdering::Unordered => __ATOMIC_RELAXED,
1541 AtomicOrdering::Monotonic => __ATOMIC_RELAXED, // TODO(antoyo): check if that's the same.
1542 AtomicOrdering::Acquire => __ATOMIC_ACQUIRE,
1543 AtomicOrdering::Release => __ATOMIC_RELEASE,
1544 AtomicOrdering::AcquireRelease => __ATOMIC_ACQ_REL,
1545 AtomicOrdering::SequentiallyConsistent => __ATOMIC_SEQ_CST,
1546 };
1547 ordering as i32
1548 }
1549}