//! Definition of [`CValue`] and [`CPlace`]

use crate::prelude::*;

use cranelift_codegen::ir::immediates::Offset32;

fn codegen_field<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    base: Pointer,
    extra: Option<Value>,
    layout: TyAndLayout<'tcx>,
    field: mir::Field,
) -> (Pointer, TyAndLayout<'tcx>) {
    let field_offset = layout.fields.offset(field.index());
    let field_layout = layout.field(&*fx, field.index());

    let simple = |fx: &mut FunctionCx<'_, '_, '_>| {
        (base.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap()), field_layout)
    };

    if let Some(extra) = extra {
        if !field_layout.is_unsized() {
            return simple(fx);
        }
        match field_layout.ty.kind() {
            ty::Slice(..) | ty::Str | ty::Foreign(..) => simple(fx),
            ty::Adt(def, _) if def.repr.packed() => {
                assert_eq!(layout.align.abi.bytes(), 1);
                simple(fx)
            }
            _ => {
                // We have to align the offset for DSTs, whose alignment is only known at runtime
                let unaligned_offset = field_offset.bytes();
                let (_, unsized_align) =
                    crate::unsize::size_and_align_of_dst(fx, field_layout, extra);

                let one = fx.bcx.ins().iconst(pointer_ty(fx.tcx), 1);
                let align_sub_1 = fx.bcx.ins().isub(unsized_align, one);
                let and_lhs = fx.bcx.ins().iadd_imm(align_sub_1, unaligned_offset as i64);
                let zero = fx.bcx.ins().iconst(pointer_ty(fx.tcx), 0);
                let and_rhs = fx.bcx.ins().isub(zero, unsized_align);
                let offset = fx.bcx.ins().band(and_lhs, and_rhs);

                (base.offset_value(fx, offset), field_layout)
            }
        }
    } else {
        simple(fx)
    }
}
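
// Illustrative sketch (not part of the upstream file): the iconst/isub/
// iadd_imm/band sequence above is a branchless align-up, computing
// `(unaligned_offset + align - 1) & -align` with an alignment that is only
// known at runtime. The same arithmetic on plain integers:
#[cfg(test)]
mod dst_align_up_example {
    /// Mirrors the instruction sequence emitted in `codegen_field`.
    fn align_up(offset: u64, align: u64) -> u64 {
        assert!(align.is_power_of_two());
        (offset + align - 1) & align.wrapping_neg()
    }

    #[test]
    fn rounds_offsets_up_to_dynamic_alignment() {
        assert_eq!(align_up(5, 8), 8);
        assert_eq!(align_up(8, 8), 8);
        assert_eq!(align_up(0, 16), 0);
    }
}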

fn scalar_pair_calculate_b_offset(
    tcx: TyCtxt<'_>,
    a_scalar: &Scalar,
    b_scalar: &Scalar,
) -> Offset32 {
    let b_offset = a_scalar.value.size(&tcx).align_to(b_scalar.value.align(&tcx).abi);
    Offset32::new(b_offset.bytes().try_into().unwrap())
}
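
// Worked example (illustrative): for a `ScalarPair` of `(u8, u64)` on a
// 64-bit target, `a` occupies 1 byte but `b` needs 8-byte alignment, so
// `b_offset` is 1 rounded up to 8 = 8.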

/// A read-only value
#[derive(Debug, Copy, Clone)]
pub(crate) struct CValue<'tcx>(CValueInner, TyAndLayout<'tcx>);

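// Which representation a `CValue` uses follows the layout's `Abi` (see
// `load_scalar` and `load_scalar_pair` below): `ByVal` holds an
// `Abi::Scalar` or `Abi::Vector` value directly, `ByValPair` an
// `Abi::ScalarPair`, and `ByRef` keeps the value in memory, with
// `Some(meta)` carrying the pointer metadata of an unsized value.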
#[derive(Debug, Copy, Clone)]
enum CValueInner {
    ByRef(Pointer, Option<Value>),
    ByVal(Value),
    ByValPair(Value, Value),
}

impl<'tcx> CValue<'tcx> {
    pub(crate) fn by_ref(ptr: Pointer, layout: TyAndLayout<'tcx>) -> CValue<'tcx> {
        CValue(CValueInner::ByRef(ptr, None), layout)
    }

    pub(crate) fn by_ref_unsized(
        ptr: Pointer,
        meta: Value,
        layout: TyAndLayout<'tcx>,
    ) -> CValue<'tcx> {
        CValue(CValueInner::ByRef(ptr, Some(meta)), layout)
    }

    pub(crate) fn by_val(value: Value, layout: TyAndLayout<'tcx>) -> CValue<'tcx> {
        CValue(CValueInner::ByVal(value), layout)
    }

    pub(crate) fn by_val_pair(
        value: Value,
        extra: Value,
        layout: TyAndLayout<'tcx>,
    ) -> CValue<'tcx> {
        CValue(CValueInner::ByValPair(value, extra), layout)
    }

    pub(crate) fn layout(&self) -> TyAndLayout<'tcx> {
        self.1
    }

    // FIXME remove
    pub(crate) fn force_stack(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Pointer, Option<Value>) {
        let layout = self.1;
        match self.0 {
            CValueInner::ByRef(ptr, meta) => (ptr, meta),
            CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => {
                let cplace = CPlace::new_stack_slot(fx, layout);
                cplace.write_cvalue(fx, self);
                (cplace.to_ptr(), None)
            }
        }
    }

    pub(crate) fn try_to_ptr(self) -> Option<(Pointer, Option<Value>)> {
        match self.0 {
            CValueInner::ByRef(ptr, meta) => Some((ptr, meta)),
            CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => None,
        }
    }

    /// Load a value whose `layout.abi` is `Abi::Scalar` or `Abi::Vector`
    pub(crate) fn load_scalar(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> Value {
        let layout = self.1;
        match self.0 {
            CValueInner::ByRef(ptr, None) => {
                let clif_ty = match layout.abi {
                    Abi::Scalar(ref scalar) => scalar_to_clif_type(fx.tcx, scalar.clone()),
                    Abi::Vector { ref element, count } => {
                        scalar_to_clif_type(fx.tcx, element.clone())
                            .by(u16::try_from(count).unwrap())
                            .unwrap()
                    }
                    _ => unreachable!("{:?}", layout.ty),
                };
                let mut flags = MemFlags::new();
                flags.set_notrap();
                ptr.load(fx, clif_ty, flags)
            }
            CValueInner::ByVal(value) => value,
            CValueInner::ByRef(_, Some(_)) => bug!("load_scalar for unsized value not allowed"),
            CValueInner::ByValPair(_, _) => bug!("Please use load_scalar_pair for ByValPair"),
        }
    }

    /// Load a value pair whose `layout.abi` is `Abi::ScalarPair`
    pub(crate) fn load_scalar_pair(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Value, Value) {
        let layout = self.1;
        match self.0 {
            CValueInner::ByRef(ptr, None) => {
                let (a_scalar, b_scalar) = match &layout.abi {
                    Abi::ScalarPair(a, b) => (a, b),
                    _ => unreachable!("load_scalar_pair({:?})", self),
                };
                let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
                let clif_ty1 = scalar_to_clif_type(fx.tcx, a_scalar.clone());
                let clif_ty2 = scalar_to_clif_type(fx.tcx, b_scalar.clone());
                let mut flags = MemFlags::new();
                flags.set_notrap();
                let val1 = ptr.load(fx, clif_ty1, flags);
                let val2 = ptr.offset(fx, b_offset).load(fx, clif_ty2, flags);
                (val1, val2)
            }
            CValueInner::ByRef(_, Some(_)) => {
                bug!("load_scalar_pair for unsized value not allowed")
            }
            CValueInner::ByVal(_) => bug!("Please use load_scalar for ByVal"),
            CValueInner::ByValPair(val1, val2) => (val1, val2),
        }
    }

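    // Projecting a field out of an immediate: vector `ByVal`s use
    // `extractlane`, `ByValPair`s simply pick one half; only by-ref values
    // need the address computation in `codegen_field`.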
    pub(crate) fn value_field(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        field: mir::Field,
    ) -> CValue<'tcx> {
        let layout = self.1;
        match self.0 {
            CValueInner::ByVal(val) => match layout.abi {
                Abi::Vector { element: _, count } => {
                    let count = u8::try_from(count).expect("SIMD type with more than 255 lanes???");
                    let field = u8::try_from(field.index()).unwrap();
                    assert!(field < count);
                    let lane = fx.bcx.ins().extractlane(val, field);
                    let field_layout = layout.field(&*fx, usize::from(field));
                    CValue::by_val(lane, field_layout)
                }
                _ => unreachable!("value_field for ByVal with abi {:?}", layout.abi),
            },
            CValueInner::ByValPair(val1, val2) => match layout.abi {
                Abi::ScalarPair(_, _) => {
                    let val = match field.as_u32() {
                        0 => val1,
                        1 => val2,
                        _ => bug!("field should be 0 or 1"),
                    };
                    let field_layout = layout.field(&*fx, usize::from(field));
                    CValue::by_val(val, field_layout)
                }
                _ => unreachable!("value_field for ByValPair with abi {:?}", layout.abi),
            },
            CValueInner::ByRef(ptr, None) => {
                let (field_ptr, field_layout) = codegen_field(fx, ptr, None, layout, field);
                CValue::by_ref(field_ptr, field_layout)
            }
            CValueInner::ByRef(_, Some(_)) => todo!(),
        }
    }

    pub(crate) fn unsize_value(self, fx: &mut FunctionCx<'_, '_, 'tcx>, dest: CPlace<'tcx>) {
        crate::unsize::coerce_unsized_into(fx, self, dest);
    }

    /// If `ty` is signed, `const_val` must already be sign extended.
    pub(crate) fn const_val(
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        layout: TyAndLayout<'tcx>,
        const_val: ty::ScalarInt,
    ) -> CValue<'tcx> {
        assert_eq!(const_val.size(), layout.size, "{:#?}: {:?}", const_val, layout);
        use cranelift_codegen::ir::immediates::{Ieee32, Ieee64};

        let clif_ty = fx.clif_type(layout.ty).unwrap();

        if let ty::Bool = layout.ty.kind() {
            assert!(
                const_val == ty::ScalarInt::FALSE || const_val == ty::ScalarInt::TRUE,
                "Invalid bool 0x{:032X}",
                const_val
            );
        }

        let val = match layout.ty.kind() {
            ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
                let const_val = const_val.to_bits(layout.size).unwrap();
                let lsb = fx.bcx.ins().iconst(types::I64, const_val as u64 as i64);
                let msb = fx.bcx.ins().iconst(types::I64, (const_val >> 64) as u64 as i64);
                fx.bcx.ins().iconcat(lsb, msb)
            }
            ty::Bool | ty::Char | ty::Uint(_) | ty::Int(_) | ty::Ref(..) | ty::RawPtr(..) => {
                fx.bcx.ins().iconst(clif_ty, const_val.to_bits(layout.size).unwrap() as i64)
            }
            ty::Float(FloatTy::F32) => {
                fx.bcx.ins().f32const(Ieee32::with_bits(u32::try_from(const_val).unwrap()))
            }
            ty::Float(FloatTy::F64) => {
                fx.bcx.ins().f64const(Ieee64::with_bits(u64::try_from(const_val).unwrap()))
            }
            _ => panic!(
                "CValue::const_val for non bool/char/float/integer/pointer type {:?} is not allowed",
                layout.ty
            ),
        };

        CValue::by_val(val, layout)
    }

    pub(crate) fn cast_pointer_to(self, layout: TyAndLayout<'tcx>) -> Self {
        assert!(matches!(self.layout().ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
        assert!(matches!(layout.ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
        assert_eq!(self.layout().abi, layout.abi);
        CValue(self.0, layout)
    }
}
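
// Illustrative sketch (not part of the upstream file): `const_val` above
// materializes 128-bit constants as two 64-bit `iconst` halves joined with
// `iconcat`. The split itself is ordinary integer arithmetic:
#[cfg(test)]
mod i128_const_split_example {
    #[test]
    fn halves_roundtrip() {
        let v: u128 = 0x1122_3344_5566_7788_99AA_BBCC_DDEE_FF00;
        let lsb = v as u64; // low half, first `iconst`
        let msb = (v >> 64) as u64; // high half, second `iconst`
        assert_eq!((u128::from(msb) << 64) | u128::from(lsb), v);
    }
}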

/// A place a value can be written to or read from
#[derive(Debug, Copy, Clone)]
pub(crate) struct CPlace<'tcx> {
    inner: CPlaceInner,
    layout: TyAndLayout<'tcx>,
}

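// The `Var*` variants are places promoted to Cranelift SSA variables (no
// stack slot); `Addr` is a real memory location, with the optional `Value`
// again holding the metadata of an unsized place.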
#[derive(Debug, Copy, Clone)]
pub(crate) enum CPlaceInner {
    Var(Local, Variable),
    VarPair(Local, Variable, Variable),
    VarLane(Local, Variable, u8),
    Addr(Pointer, Option<Value>),
}

impl<'tcx> CPlace<'tcx> {
    pub(crate) fn layout(&self) -> TyAndLayout<'tcx> {
        self.layout
    }

    pub(crate) fn inner(&self) -> &CPlaceInner {
        &self.inner
    }

    pub(crate) fn no_place(layout: TyAndLayout<'tcx>) -> CPlace<'tcx> {
        CPlace { inner: CPlaceInner::Addr(Pointer::dangling(layout.align.pref), None), layout }
    }

    pub(crate) fn new_stack_slot(
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        layout: TyAndLayout<'tcx>,
    ) -> CPlace<'tcx> {
        assert!(!layout.is_unsized());
        if layout.size.bytes() == 0 {
            return CPlace::no_place(layout);
        }

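        // Illustrative arithmetic: `(size + 15) / 16 * 16` below rounds the
        // slot size up to a multiple of 16, e.g. a 20-byte layout gets a
        // 32-byte slot.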
        let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
            kind: StackSlotKind::ExplicitSlot,
            // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
            // specify stack slot alignment.
            size: (u32::try_from(layout.size.bytes()).unwrap() + 15) / 16 * 16,
            offset: None,
        });
        CPlace { inner: CPlaceInner::Addr(Pointer::stack_slot(stack_slot), None), layout }
    }

    pub(crate) fn new_var(
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        local: Local,
        layout: TyAndLayout<'tcx>,
    ) -> CPlace<'tcx> {
        let var = Variable::with_u32(fx.next_ssa_var);
        fx.next_ssa_var += 1;
        fx.bcx.declare_var(var, fx.clif_type(layout.ty).unwrap());
        CPlace { inner: CPlaceInner::Var(local, var), layout }
    }

    pub(crate) fn new_var_pair(
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        local: Local,
        layout: TyAndLayout<'tcx>,
    ) -> CPlace<'tcx> {
        let var1 = Variable::with_u32(fx.next_ssa_var);
        fx.next_ssa_var += 1;
        let var2 = Variable::with_u32(fx.next_ssa_var);
        fx.next_ssa_var += 1;

        let (ty1, ty2) = fx.clif_pair_type(layout.ty).unwrap();
        fx.bcx.declare_var(var1, ty1);
        fx.bcx.declare_var(var2, ty2);
        CPlace { inner: CPlaceInner::VarPair(local, var1, var2), layout }
    }

    pub(crate) fn for_ptr(ptr: Pointer, layout: TyAndLayout<'tcx>) -> CPlace<'tcx> {
        CPlace { inner: CPlaceInner::Addr(ptr, None), layout }
    }

    pub(crate) fn for_ptr_with_extra(
        ptr: Pointer,
        extra: Value,
        layout: TyAndLayout<'tcx>,
    ) -> CPlace<'tcx> {
        CPlace { inner: CPlaceInner::Addr(ptr, Some(extra)), layout }
    }

    pub(crate) fn to_cvalue(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> CValue<'tcx> {
        let layout = self.layout();
        match self.inner {
            CPlaceInner::Var(_local, var) => {
                let val = fx.bcx.use_var(var);
                //fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
                CValue::by_val(val, layout)
            }
            CPlaceInner::VarPair(_local, var1, var2) => {
                let val1 = fx.bcx.use_var(var1);
                //fx.bcx.set_val_label(val1, cranelift_codegen::ir::ValueLabel::new(var1.index()));
                let val2 = fx.bcx.use_var(var2);
                //fx.bcx.set_val_label(val2, cranelift_codegen::ir::ValueLabel::new(var2.index()));
                CValue::by_val_pair(val1, val2, layout)
            }
            CPlaceInner::VarLane(_local, var, lane) => {
                let val = fx.bcx.use_var(var);
                //fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
                let val = fx.bcx.ins().extractlane(val, lane);
                CValue::by_val(val, layout)
            }
            CPlaceInner::Addr(ptr, extra) => {
                if let Some(extra) = extra {
                    CValue::by_ref_unsized(ptr, extra, layout)
                } else {
                    CValue::by_ref(ptr, layout)
                }
            }
        }
    }

    pub(crate) fn to_ptr(self) -> Pointer {
        match self.to_ptr_maybe_unsized() {
            (ptr, None) => ptr,
            (_, Some(_)) => bug!("Expected sized cplace, found {:?}", self),
        }
    }

    pub(crate) fn to_ptr_maybe_unsized(self) -> (Pointer, Option<Value>) {
        match self.inner {
            CPlaceInner::Addr(ptr, extra) => (ptr, extra),
            CPlaceInner::Var(_, _)
            | CPlaceInner::VarPair(_, _, _)
            | CPlaceInner::VarLane(_, _, _) => bug!("Expected CPlace::Addr, found {:?}", self),
        }
    }

    pub(crate) fn write_cvalue(self, fx: &mut FunctionCx<'_, '_, 'tcx>, from: CValue<'tcx>) {
        assert_assignable(fx, from.layout().ty, self.layout().ty);

        self.write_cvalue_maybe_transmute(fx, from, "write_cvalue");
    }

    pub(crate) fn write_cvalue_transmute(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        from: CValue<'tcx>,
    ) {
        self.write_cvalue_maybe_transmute(fx, from, "write_cvalue_transmute");
    }

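    // Writing splits into three cases: SSA destinations (`Var*`) are
    // redefined with `def_var` (after a same-size transmute if needed),
    // sized `Addr` destinations are stored through according to the source
    // `Abi`, and by-ref sources fall back to a small memory copy.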
    fn write_cvalue_maybe_transmute(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        from: CValue<'tcx>,
        method: &'static str,
    ) {
        fn transmute_value<'tcx>(
            fx: &mut FunctionCx<'_, '_, 'tcx>,
            var: Variable,
            data: Value,
            dst_ty: Type,
        ) {
            let src_ty = fx.bcx.func.dfg.value_type(data);
            assert_eq!(
                src_ty.bytes(),
                dst_ty.bytes(),
                "write_cvalue_transmute: {:?} -> {:?}",
                src_ty,
                dst_ty,
            );
            let data = match (src_ty, dst_ty) {
                (_, _) if src_ty == dst_ty => data,

                // This is a `write_cvalue_transmute`.
                (types::I32, types::F32)
                | (types::F32, types::I32)
                | (types::I64, types::F64)
                | (types::F64, types::I64) => fx.bcx.ins().bitcast(dst_ty, data),
                _ if src_ty.is_vector() && dst_ty.is_vector() => {
                    fx.bcx.ins().raw_bitcast(dst_ty, data)
                }
                _ if src_ty.is_vector() || dst_ty.is_vector() => {
                    // FIXME do something more efficient for transmutes between vectors and integers.
                    let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
                        kind: StackSlotKind::ExplicitSlot,
                        // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
                        // specify stack slot alignment.
                        size: (src_ty.bytes() + 15) / 16 * 16,
                        offset: None,
                    });
                    let ptr = Pointer::stack_slot(stack_slot);
                    ptr.store(fx, data, MemFlags::trusted());
                    ptr.load(fx, dst_ty, MemFlags::trusted())
                }

                // `CValue`s should never contain SSA-only types, so if you ended
                // up here having seen an error like `B1 -> I8`, then before
                // calling `write_cvalue` you need to add a `bint` instruction.
                _ => unreachable!("write_cvalue_transmute: {:?} -> {:?}", src_ty, dst_ty),
            };
            //fx.bcx.set_val_label(data, cranelift_codegen::ir::ValueLabel::new(var.index()));
            fx.bcx.def_var(var, data);
        }

        assert_eq!(self.layout().size, from.layout().size);

        if fx.clif_comments.enabled() {
            use cranelift_codegen::cursor::{Cursor, CursorPosition};
            let cur_block = match fx.bcx.cursor().position() {
                CursorPosition::After(block) => block,
                _ => unreachable!(),
            };
            fx.add_comment(
                fx.bcx.func.layout.last_inst(cur_block).unwrap(),
                format!(
                    "{}: {:?}: {:?} <- {:?}: {:?}",
                    method,
                    self.inner(),
                    self.layout().ty,
                    from.0,
                    from.layout().ty
                ),
            );
        }

        let dst_layout = self.layout();
        let to_ptr = match self.inner {
            CPlaceInner::Var(_local, var) => {
                let data = CValue(from.0, dst_layout).load_scalar(fx);
                let dst_ty = fx.clif_type(self.layout().ty).unwrap();
                transmute_value(fx, var, data, dst_ty);
                return;
            }
            CPlaceInner::VarPair(_local, var1, var2) => {
                let (data1, data2) = CValue(from.0, dst_layout).load_scalar_pair(fx);
                let (dst_ty1, dst_ty2) = fx.clif_pair_type(self.layout().ty).unwrap();
                transmute_value(fx, var1, data1, dst_ty1);
                transmute_value(fx, var2, data2, dst_ty2);
                return;
            }
            CPlaceInner::VarLane(_local, var, lane) => {
                let data = from.load_scalar(fx);

                // First get the old vector
                let vector = fx.bcx.use_var(var);
                //fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));

                // Next insert the written lane into the vector
                let vector = fx.bcx.ins().insertlane(vector, data, lane);

                // Finally write the new vector
                //fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
                fx.bcx.def_var(var, vector);

                return;
            }
            CPlaceInner::Addr(ptr, None) => {
                if dst_layout.size == Size::ZERO || dst_layout.abi == Abi::Uninhabited {
                    return;
                }
                ptr
            }
            CPlaceInner::Addr(_, Some(_)) => bug!("Can't write value to unsized place {:?}", self),
        };

        let mut flags = MemFlags::new();
        flags.set_notrap();
        match from.layout().abi {
            // FIXME make Abi::Vector work too
            Abi::Scalar(_) => {
                let val = from.load_scalar(fx);
                to_ptr.store(fx, val, flags);
                return;
            }
            Abi::ScalarPair(ref a_scalar, ref b_scalar) => {
                let (value, extra) = from.load_scalar_pair(fx);
                let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
                to_ptr.store(fx, value, flags);
                to_ptr.offset(fx, b_offset).store(fx, extra, flags);
                return;
            }
            _ => {}
        }

        match from.0 {
            CValueInner::ByVal(val) => {
                to_ptr.store(fx, val, flags);
            }
            CValueInner::ByValPair(_, _) => {
                bug!("Non ScalarPair abi {:?} for ByValPair CValue", dst_layout.abi);
            }
            CValueInner::ByRef(from_ptr, None) => {
                let from_addr = from_ptr.get_addr(fx);
                let to_addr = to_ptr.get_addr(fx);
                let src_layout = from.1;
                let size = dst_layout.size.bytes();
                let src_align = src_layout.align.abi.bytes() as u8;
                let dst_align = dst_layout.align.abi.bytes() as u8;
                fx.bcx.emit_small_memory_copy(
                    fx.module.target_config(),
                    to_addr,
                    from_addr,
                    size,
                    dst_align,
                    src_align,
                    true,
                    MemFlags::trusted(),
                );
            }
            CValueInner::ByRef(_, Some(_)) => todo!(),
        }
    }

    pub(crate) fn place_field(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        field: mir::Field,
    ) -> CPlace<'tcx> {
        let layout = self.layout();

        match self.inner {
            CPlaceInner::Var(local, var) => {
                if let Abi::Vector { .. } = layout.abi {
                    return CPlace {
                        inner: CPlaceInner::VarLane(local, var, field.as_u32().try_into().unwrap()),
                        layout: layout.field(fx, field.as_u32().try_into().unwrap()),
                    };
                }
            }
            CPlaceInner::VarPair(local, var1, var2) => {
                let layout = layout.field(&*fx, field.index());

                match field.as_u32() {
                    0 => return CPlace { inner: CPlaceInner::Var(local, var1), layout },
                    1 => return CPlace { inner: CPlaceInner::Var(local, var2), layout },
                    _ => unreachable!("field should be 0 or 1"),
                }
            }
            _ => {}
        }

        let (base, extra) = self.to_ptr_maybe_unsized();

        let (field_ptr, field_layout) = codegen_field(fx, base, extra, layout, field);
        if field_layout.is_unsized() {
            CPlace::for_ptr_with_extra(field_ptr, extra.unwrap(), field_layout)
        } else {
            CPlace::for_ptr(field_ptr, field_layout)
        }
    }

    pub(crate) fn place_index(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        index: Value,
    ) -> CPlace<'tcx> {
        let (elem_layout, ptr) = match self.layout().ty.kind() {
            ty::Array(elem_ty, _) => (fx.layout_of(elem_ty), self.to_ptr()),
            ty::Slice(elem_ty) => (fx.layout_of(elem_ty), self.to_ptr_maybe_unsized().0),
            _ => bug!("place_index({:?})", self.layout().ty),
        };

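        // Byte offset = index * element size, e.g. `index * 4` for a `[u32]`
        // slice (illustrative).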
        let offset = fx.bcx.ins().imul_imm(index, elem_layout.size.bytes() as i64);

        CPlace::for_ptr(ptr.offset_value(fx, offset), elem_layout)
    }

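    // Dereferencing a pointer to an unsized type (`has_ptr_meta`) splits it
    // into address and metadata; thin pointers load a single scalar address.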
    pub(crate) fn place_deref(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> CPlace<'tcx> {
        let inner_layout = fx.layout_of(self.layout().ty.builtin_deref(true).unwrap().ty);
        if has_ptr_meta(fx.tcx, inner_layout.ty) {
            let (addr, extra) = self.to_cvalue(fx).load_scalar_pair(fx);
            CPlace::for_ptr_with_extra(Pointer::new(addr), extra, inner_layout)
        } else {
            CPlace::for_ptr(Pointer::new(self.to_cvalue(fx).load_scalar(fx)), inner_layout)
        }
    }

    pub(crate) fn place_ref(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        layout: TyAndLayout<'tcx>,
    ) -> CValue<'tcx> {
        if has_ptr_meta(fx.tcx, self.layout().ty) {
            let (ptr, extra) = self.to_ptr_maybe_unsized();
            CValue::by_val_pair(
                ptr.get_addr(fx),
                extra.expect("unsized type without metadata"),
                layout,
            )
        } else {
            CValue::by_val(self.to_ptr().get_addr(fx), layout)
        }
    }

    pub(crate) fn downcast_variant(
        self,
        fx: &FunctionCx<'_, '_, 'tcx>,
        variant: VariantIdx,
    ) -> Self {
        assert!(!self.layout().is_unsized());
        let layout = self.layout().for_variant(fx, variant);
        CPlace { inner: self.inner, layout }
    }
}

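// Best-effort sanity check that a value of `from_ty` may be written to a
// place of `to_ty`. It deliberately ignores lifetimes and mutability, e.g.
// `&'a mut T` -> `*const T` passes, as the `Ref`/`RawPtr` arms below show.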
#[track_caller]
pub(crate) fn assert_assignable<'tcx>(
    fx: &FunctionCx<'_, '_, 'tcx>,
    from_ty: Ty<'tcx>,
    to_ty: Ty<'tcx>,
) {
    match (from_ty.kind(), to_ty.kind()) {
        (ty::Ref(_, a, _), ty::Ref(_, b, _))
        | (
            ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }),
            ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }),
        ) => {
            assert_assignable(fx, a, b);
        }
        (ty::Ref(_, a, _), ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }))
        | (ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }), ty::Ref(_, b, _)) => {
            assert_assignable(fx, a, b);
        }
        (ty::FnPtr(_), ty::FnPtr(_)) => {
            let from_sig = fx.tcx.normalize_erasing_late_bound_regions(
                ParamEnv::reveal_all(),
                from_ty.fn_sig(fx.tcx),
            );
            let to_sig = fx
                .tcx
                .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to_ty.fn_sig(fx.tcx));
            assert_eq!(
                from_sig, to_sig,
                "Can't write fn ptr with incompatible sig {:?} to place with sig {:?}\n\n{:#?}",
                from_sig, to_sig, fx,
            );
            // fn(&T) -> for<'l> fn(&'l T) is allowed
        }
        (&ty::Dynamic(from_traits, _), &ty::Dynamic(to_traits, _)) => {
            for (from, to) in from_traits.iter().zip(to_traits) {
                let from =
                    fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), from);
                let to = fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to);
                assert_eq!(
                    from, to,
                    "Can't write trait object of incompatible traits {:?} to place with traits {:?}\n\n{:#?}",
                    from_traits, to_traits, fx,
                );
            }
            // dyn for<'r> Trait<'r> -> dyn Trait<'_> is allowed
        }
        (&ty::Adt(adt_def_a, substs_a), &ty::Adt(adt_def_b, substs_b))
            if adt_def_a.did == adt_def_b.did =>
        {
            let mut types_a = substs_a.types();
            let mut types_b = substs_b.types();
            loop {
                match (types_a.next(), types_b.next()) {
                    (Some(a), Some(b)) => assert_assignable(fx, a, b),
                    (None, None) => return,
                    (Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
                }
            }
        }
        _ => {
            assert_eq!(
                from_ty, to_ty,
                "Can't write value with incompatible type {:?} to place with type {:?}\n\n{:#?}",
                from_ty, to_ty, fx,
            );
        }
    }
}
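
// Illustrative sketch (not part of the upstream file): the scalar case of
// `transmute_value` in `write_cvalue_maybe_transmute` relies on `bitcast`
// being a pure reinterpretation of bits, the same relation as:
#[cfg(test)]
mod bitcast_semantics_example {
    #[test]
    fn f32_bits_roundtrip() {
        let bits: u32 = 0x4048_F5C3; // 3.14_f32
        assert_eq!(f32::from_bits(bits).to_bits(), bits);
    }
}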