]>
Commit | Line | Data |
---|---|---|
60c5eb7d XL |
1 | use super::{InterpCx, Machine, MemoryKind, FnVal}; |
2 | ||
e1599b0c XL |
3 | use rustc::ty::{self, Ty, Instance, TypeFoldable}; |
4 | use rustc::ty::layout::{Size, Align, LayoutOf, HasDataLayout}; | |
416331ca | 5 | use rustc::mir::interpret::{Scalar, Pointer, InterpResult, PointerArithmetic,}; |
8faf50e0 | 6 | |
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    /// Creates a dynamic vtable for the given type and vtable origin. This is used only for
    /// objects.
    ///
    /// The `trait_ref` encodes the erased self type. Hence, if we are
    /// making an object `Foo<Trait>` from a value of type `Foo<T>`, then
    /// `trait_ref` would map `T: Trait`.
    ///
    /// The produced vtable layout is: `[drop_in_place fn ptr, size, align, method ptrs...]`,
    /// one pointer-sized slot each (see the writes at offsets 0, 1*ptr_size, 2*ptr_size,
    /// and (3+i)*ptr_size below). Vtables are cached in `self.vtables` and deduplicated
    /// per (type, trait) pair.
    pub fn get_vtable(
        &mut self,
        ty: Ty<'tcx>,
        poly_trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
        trace!("get_vtable(trait_ref={:?})", poly_trait_ref);

        // Regions are erased so that the cache key below does not distinguish
        // otherwise-identical vtables by lifetime.
        let (ty, poly_trait_ref) = self.tcx.erase_regions(&(ty, poly_trait_ref));

        // All vtables must be monomorphic, bail out otherwise.
        if ty.needs_subst() || poly_trait_ref.needs_subst() {
            throw_inval!(TooGeneric);
        }

        if let Some(&vtable) = self.vtables.get(&(ty, poly_trait_ref)) {
            // This means we guarantee that there are no duplicate vtables, we will
            // always use the same vtable for the same (Type, Trait) combination.
            // That's not what happens in rustc, but emulating per-crate deduplication
            // does not sound like it actually makes anything any better.
            return Ok(vtable);
        }

        // `poly_trait_ref` being `None` means this is a vtable for a trait object with
        // no principal trait (e.g. `dyn Send`), which carries no methods.
        let methods = if let Some(poly_trait_ref) = poly_trait_ref {
            let trait_ref = poly_trait_ref.with_self_ty(*self.tcx, ty);
            let trait_ref = self.tcx.erase_regions(&trait_ref);

            self.tcx.vtable_methods(trait_ref)
        } else {
            &[]
        };

        let layout = self.layout_of(ty)?;
        assert!(!layout.is_unsized(), "can't create a vtable for an unsized type");
        let size = layout.size.bytes();
        let align = layout.align.abi.bytes();

        let ptr_size = self.pointer_size();
        let ptr_align = self.tcx.data_layout.pointer_align.abi;
        // /////////////////////////////////////////////////////////////////////////////////////////
        // If you touch this code, be sure to also make the corresponding changes to
        // `get_vtable` in `rust_codegen_llvm/meth.rs`.
        // /////////////////////////////////////////////////////////////////////////////////////////
        // 3 header slots (drop, size, align) plus one slot per method.
        let vtable = self.memory.allocate(
            ptr_size * (3 + methods.len() as u64),
            ptr_align,
            MemoryKind::Vtable,
        );
        let tcx = &*self.tcx;

        let drop = Instance::resolve_drop_in_place(*tcx, ty);
        let drop = self.memory.create_fn_alloc(FnVal::Instance(drop));

        // No need to do any alignment checks on the memory accesses below, because we know the
        // allocation is correctly aligned as we created it above. Also we're only offsetting by
        // multiples of `ptr_align`, which means that it will stay aligned to `ptr_align`.
        let vtable_alloc = self.memory.get_raw_mut(vtable.alloc_id)?;
        // Slot 0: the `drop_in_place` fn pointer.
        vtable_alloc.write_ptr_sized(tcx, vtable, drop.into())?;

        // Slot 1: size; slot 2: align (both stored as pointer-sized integers).
        let size_ptr = vtable.offset(ptr_size, tcx)?;
        vtable_alloc.write_ptr_sized(tcx, size_ptr, Scalar::from_uint(size, ptr_size).into())?;
        let align_ptr = vtable.offset(ptr_size * 2, tcx)?;
        vtable_alloc.write_ptr_sized(tcx, align_ptr, Scalar::from_uint(align, ptr_size).into())?;

        for (i, method) in methods.iter().enumerate() {
            // A `None` entry leaves its slot untouched (the method is not callable
            // through this vtable).
            if let Some((def_id, substs)) = *method {
                // resolve for vtable: insert shims where needed
                let instance = ty::Instance::resolve_for_vtable(
                    *tcx,
                    self.param_env,
                    def_id,
                    substs,
                ).ok_or_else(|| err_inval!(TooGeneric))?;
                let fn_ptr = self.memory.create_fn_alloc(FnVal::Instance(instance));
                // We cannot reuse the `vtable_alloc` borrow from above, as
                // `create_fn_alloc` re-borrows `self.memory` mutably inside this loop;
                // re-fetch the allocation for each write instead.
                let method_ptr = vtable.offset(ptr_size * (3 + i as u64), tcx)?;
                self.memory.get_raw_mut(vtable.alloc_id)?
                    .write_ptr_sized(tcx, method_ptr, fn_ptr.into())?;
            }
        }

        // Vtables are never written to after construction; freeze the allocation and
        // record it in the cache (which must not already contain this key — we checked
        // the cache above and nothing in between could have inserted it).
        self.memory.mark_immutable(vtable.alloc_id)?;
        assert!(self.vtables.insert((ty, poly_trait_ref), vtable).is_none());

        Ok(vtable)
    }

    /// Resolves the function at the specified slot in the provided
    /// vtable. An index of '0' corresponds to the first method
    /// declared in the trait of the provided vtable.
    pub fn get_vtable_slot(
        &self,
        vtable: Scalar<M::PointerTag>,
        idx: usize
    ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
        let ptr_size = self.pointer_size();
        // Skip over the 'drop_ptr', 'size', and 'align' fields.
        let vtable_slot = vtable.ptr_offset(ptr_size * (idx as u64 + 3), self)?;
        // Validate that the slot is a properly aligned, dereferencable pointer
        // before reading through it.
        let vtable_slot = self.memory.check_ptr_access(
            vtable_slot,
            ptr_size,
            self.tcx.data_layout.pointer_align.abi,
        )?.expect("cannot be a ZST");
        let fn_ptr = self.memory.get_raw(vtable_slot.alloc_id)?
            .read_ptr_sized(self, vtable_slot)?.not_undef()?;
        Ok(self.memory.get_fn(fn_ptr)?)
    }

    /// Returns the drop fn instance as well as the actual dynamic type.
    pub fn read_drop_type_from_vtable(
        &self,
        vtable: Scalar<M::PointerTag>,
    ) -> InterpResult<'tcx, (ty::Instance<'tcx>, Ty<'tcx>)> {
        // We don't care about the pointee type; we just want a pointer.
        let vtable = self.memory.check_ptr_access(
            vtable,
            self.tcx.data_layout.pointer_size,
            self.tcx.data_layout.pointer_align.abi,
        )?.expect("cannot be a ZST");
        // Slot 0 of the vtable holds the `drop_in_place` fn pointer.
        let drop_fn = self.memory
            .get_raw(vtable.alloc_id)?
            .read_ptr_sized(self, vtable)?
            .not_undef()?;
        // We *need* an instance here, no other kind of function value, to be able
        // to determine the type.
        let drop_instance = self.memory.get_fn(drop_fn)?.as_instance()?;
        trace!("Found drop fn: {:?}", drop_instance);
        let fn_sig = drop_instance.ty(*self.tcx).fn_sig(*self.tcx);
        let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, &fn_sig);
        // The drop function takes `*mut T` where `T` is the type being dropped, so get that.
        let args = fn_sig.inputs();
        if args.len() != 1 {
            throw_ub_format!(
                "drop fn should have 1 argument, but signature is {:?}", fn_sig
            );
        }
        let ty = args[0].builtin_deref(true)
            .ok_or_else(|| err_ub_format!(
                "drop fn argument type {} is not a pointer type",
                args[0]
            ))?
            .ty;
        Ok((drop_instance, ty))
    }

    /// Reads the size and alignment stored in the given vtable (header slots 1 and 2).
    /// Raises UB if the stored size exceeds the target's maximum object size, or if
    /// the stored align is not a power of two (the `Align::from_bytes(..).unwrap()`
    /// below panics in that case — note this is a panic, not a UB error).
    pub fn read_size_and_align_from_vtable(
        &self,
        vtable: Scalar<M::PointerTag>,
    ) -> InterpResult<'tcx, (Size, Align)> {
        let pointer_size = self.pointer_size();
        // We check for `size = 3 * ptr_size`, which covers the drop fn (unused here),
        // the size, and the align (which we read below).
        let vtable = self.memory.check_ptr_access(
            vtable,
            3*pointer_size,
            self.tcx.data_layout.pointer_align.abi,
        )?.expect("cannot be a ZST");
        let alloc = self.memory.get_raw(vtable.alloc_id)?;
        // Size lives at offset 1 * ptr_size.
        let size = alloc.read_ptr_sized(
            self,
            vtable.offset(pointer_size, self)?
        )?.not_undef()?;
        let size = self.force_bits(size, pointer_size)? as u64;
        // Align lives at offset 2 * ptr_size.
        let align = alloc.read_ptr_sized(
            self,
            vtable.offset(pointer_size * 2, self)?,
        )?.not_undef()?;
        let align = self.force_bits(align, pointer_size)? as u64;

        if size >= self.tcx.data_layout().obj_size_bound() {
            throw_ub_format!("invalid vtable: \
                size is bigger than largest supported object");
        }
        Ok((Size::from_bytes(size), Align::from_bytes(align).unwrap()))
    }
}