]> git.proxmox.com Git - rustc.git/blob - src/librustc_trans/trans/attributes.rs
Imported Upstream version 1.1.0+dfsg1
[rustc.git] / src / librustc_trans / trans / attributes.rs
1 // Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
4 //
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10 //! Set and unset common attributes on LLVM values.
11
12 use libc::{c_uint, c_ulonglong};
13 use llvm::{self, ValueRef, AttrHelper};
14 use middle::ty::{self, ClosureTyper};
15 use syntax::abi;
16 use syntax::ast;
17 pub use syntax::attr::InlineAttr;
18 use trans::base;
19 use trans::common;
20 use trans::context::CrateContext;
21 use trans::machine;
22 use trans::type_of;
23
24 /// Mark LLVM function to use split stack.
25 #[inline]
26 pub fn split_stack(val: ValueRef, set: bool) {
27 unsafe {
28 let attr = "split-stack\0".as_ptr() as *const _;
29 if set {
30 llvm::LLVMAddFunctionAttrString(val, llvm::FunctionIndex as c_uint, attr);
31 } else {
32 llvm::LLVMRemoveFunctionAttrString(val, llvm::FunctionIndex as c_uint, attr);
33 }
34 }
35 }
36
37 /// Mark LLVM function to use provided inline heuristic.
38 #[inline]
39 pub fn inline(val: ValueRef, inline: InlineAttr) {
40 use self::InlineAttr::*;
41 match inline {
42 Hint => llvm::SetFunctionAttribute(val, llvm::Attribute::InlineHint),
43 Always => llvm::SetFunctionAttribute(val, llvm::Attribute::AlwaysInline),
44 Never => llvm::SetFunctionAttribute(val, llvm::Attribute::NoInline),
45 None => {
46 let attr = llvm::Attribute::InlineHint |
47 llvm::Attribute::AlwaysInline |
48 llvm::Attribute::NoInline;
49 unsafe {
50 llvm::LLVMRemoveFunctionAttr(val, attr.bits() as c_ulonglong)
51 }
52 },
53 };
54 }
55
56 /// Tell LLVM to emit or not emit the information necessary to unwind the stack for the function.
57 #[inline]
58 pub fn emit_uwtable(val: ValueRef, emit: bool) {
59 if emit {
60 llvm::SetFunctionAttribute(val, llvm::Attribute::UWTable);
61 } else {
62 unsafe {
63 llvm::LLVMRemoveFunctionAttr(
64 val,
65 llvm::Attribute::UWTable.bits() as c_ulonglong,
66 );
67 }
68 }
69 }
70
71 /// Tell LLVM whether the function can or cannot unwind.
72 #[inline]
73 #[allow(dead_code)] // possibly useful function
74 pub fn unwind(val: ValueRef, can_unwind: bool) {
75 if can_unwind {
76 unsafe {
77 llvm::LLVMRemoveFunctionAttr(
78 val,
79 llvm::Attribute::NoUnwind.bits() as c_ulonglong,
80 );
81 }
82 } else {
83 llvm::SetFunctionAttribute(val, llvm::Attribute::NoUnwind);
84 }
85 }
86
87 /// Tell LLVM whether it should optimise function for size.
88 #[inline]
89 #[allow(dead_code)] // possibly useful function
90 pub fn set_optimize_for_size(val: ValueRef, optimize: bool) {
91 if optimize {
92 llvm::SetFunctionAttribute(val, llvm::Attribute::OptimizeForSize);
93 } else {
94 unsafe {
95 llvm::LLVMRemoveFunctionAttr(
96 val,
97 llvm::Attribute::OptimizeForSize.bits() as c_ulonglong,
98 );
99 }
100 }
101 }
102
103 /// Composite function which sets LLVM attributes for function depending on its AST (#[attribute])
104 /// attributes.
105 pub fn from_fn_attrs(ccx: &CrateContext, attrs: &[ast::Attribute], llfn: ValueRef) {
106 use syntax::attr::*;
107 inline(llfn, find_inline_attr(Some(ccx.sess().diagnostic()), attrs));
108
109 for attr in attrs {
110 if attr.check_name("no_stack_check") {
111 split_stack(llfn, false);
112 } else if attr.check_name("cold") {
113 unsafe {
114 llvm::LLVMAddFunctionAttribute(llfn,
115 llvm::FunctionIndex as c_uint,
116 llvm::ColdAttribute as u64)
117 }
118 } else if attr.check_name("allocator") {
119 llvm::Attribute::NoAlias.apply_llfn(llvm::ReturnIndex as c_uint, llfn);
120 }
121 }
122 }
123
/// Composite function which converts function type into LLVM attributes for the function.
///
/// Walks the signature of `fn_type` (a bare fn or a closure) and derives
/// per-argument and return-value attributes (`sret`, `noalias`, `nocapture`,
/// `readonly`, `zext`, `dereferenceable`) from the Rust types involved.
pub fn from_fn_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fn_type: ty::Ty<'tcx>)
                              -> llvm::AttrBuilder {
    use middle::ty::{BrAnon, ReLateBound};

    // Declared up front so the closure branch can store its normalized type
    // here and `fn_sig` can borrow from it past the end of the match.
    let function_type;
    let (fn_sig, abi, env_ty) = match fn_type.sty {
        ty::ty_bare_fn(_, ref f) => (&f.sig, f.abi, None),
        ty::ty_closure(closure_did, substs) => {
            let typer = common::NormalizingClosureTyper::new(ccx.tcx());
            function_type = typer.closure_type(closure_did, substs);
            let self_type = base::self_type_for_closure(ccx, closure_did, fn_type);
            // Closures are called through the rust-call ABI with an explicit
            // environment argument.
            (&function_type.sig, abi::RustCall, Some(self_type))
        }
        _ => ccx.sess().bug("expected closure or function.")
    };

    let fn_sig = ty::erase_late_bound_regions(ccx.tcx(), fn_sig);

    let mut attrs = llvm::AttrBuilder::new();
    let ret_ty = fn_sig.output;

    // These have an odd calling convention, so we need to manually
    // unpack the input ty's
    let input_tys = match fn_type.sty {
        ty::ty_closure(..) => {
            assert!(abi == abi::RustCall);

            // inputs[0] is the tupled argument list; prepend the closure
            // environment to obtain the real parameter list.
            match fn_sig.inputs[0].sty {
                ty::ty_tup(ref inputs) => {
                    let mut full_inputs = vec![env_ty.expect("Missing closure environment")];
                    full_inputs.push_all(inputs);
                    full_inputs
                }
                _ => ccx.sess().bug("expected tuple'd inputs")
            }
        },
        ty::ty_bare_fn(..) if abi == abi::RustCall => {
            // rust-call bare fns take (self, (args...)): keep inputs[0]
            // as-is and flatten the tupled inputs[1].
            let mut inputs = vec![fn_sig.inputs[0]];

            match fn_sig.inputs[1].sty {
                ty::ty_tup(ref t_in) => {
                    inputs.push_all(&t_in[..]);
                    inputs
                }
                _ => ccx.sess().bug("expected tuple'd inputs")
            }
        }
        _ => fn_sig.inputs.clone()
    };

    // Index 0 is the return value of the llvm func, so we start at 1
    let mut first_arg_offset = 1;
    if let ty::FnConverging(ret_ty) = ret_ty {
        // A function pointer is called without the declaration
        // available, so we have to apply any attributes with ABI
        // implications directly to the call instruction. Right now,
        // the only attribute we need to worry about is `sret`.
        if type_of::return_uses_outptr(ccx, ret_ty) {
            let llret_sz = machine::llsize_of_real(ccx, type_of::type_of(ccx, ret_ty));

            // The outptr can be noalias and nocapture because it's entirely
            // invisible to the program. We also know it's nonnull as well
            // as how many bytes we can dereference
            attrs.arg(1, llvm::Attribute::StructRet)
                 .arg(1, llvm::Attribute::NoAlias)
                 .arg(1, llvm::Attribute::NoCapture)
                 .arg(1, llvm::DereferenceableAttribute(llret_sz));

            // Add one more since there's an outptr
            first_arg_offset += 1;
        } else {
            // The `noalias` attribute on the return value is useful to a
            // function ptr caller.
            match ret_ty.sty {
                // `Box` pointer return values never alias because ownership
                // is transferred
                ty::ty_uniq(it) if common::type_is_sized(ccx.tcx(), it) => {
                    attrs.ret(llvm::Attribute::NoAlias);
                }
                _ => {}
            }

            // We can also mark the return value as `dereferenceable` in certain cases
            match ret_ty.sty {
                // These are not really pointers but pairs, (pointer, len)
                ty::ty_rptr(_, ty::mt { ty: inner, .. })
                | ty::ty_uniq(inner) if common::type_is_sized(ccx.tcx(), inner) => {
                    let llret_sz = machine::llsize_of_real(ccx, type_of::type_of(ccx, inner));
                    attrs.ret(llvm::DereferenceableAttribute(llret_sz));
                }
                _ => {}
            }

            // bool is i1 at the LLVM level; mark the return zero-extended.
            if let ty::ty_bool = ret_ty.sty {
                attrs.ret(llvm::Attribute::ZExt);
            }
        }
    }

    // Shift every argument index by first_arg_offset (1, or 2 with an outptr).
    for (idx, &t) in input_tys.iter().enumerate().map(|(i, v)| (i + first_arg_offset, v)) {
        match t.sty {
            // this needs to be first to prevent fat pointers from falling through
            _ if !common::type_is_immediate(ccx, t) => {
                let llarg_sz = machine::llsize_of_real(ccx, type_of::type_of(ccx, t));

                // For non-immediate arguments the callee gets its own copy of
                // the value on the stack, so there are no aliases. It's also
                // program-invisible so can't possibly capture
                attrs.arg(idx, llvm::Attribute::NoAlias)
                     .arg(idx, llvm::Attribute::NoCapture)
                     .arg(idx, llvm::DereferenceableAttribute(llarg_sz));
            }

            // See the ZExt note on the return value above.
            ty::ty_bool => {
                attrs.arg(idx, llvm::Attribute::ZExt);
            }

            // `Box` pointer parameters never alias because ownership is transferred
            ty::ty_uniq(inner) => {
                let llsz = machine::llsize_of_real(ccx, type_of::type_of(ccx, inner));

                attrs.arg(idx, llvm::Attribute::NoAlias)
                     .arg(idx, llvm::DereferenceableAttribute(llsz));
            }

            // `&mut` pointer parameters never alias other parameters, or mutable global data
            //
            // `&T` where `T` contains no `UnsafeCell<U>` is immutable, and can be marked as both
            // `readonly` and `noalias`, as LLVM's definition of `noalias` is based solely on
            // memory dependencies rather than pointer equality
            ty::ty_rptr(b, mt) if mt.mutbl == ast::MutMutable ||
                                  !ty::type_contents(ccx.tcx(), mt.ty).interior_unsafe() => {

                let llsz = machine::llsize_of_real(ccx, type_of::type_of(ccx, mt.ty));
                attrs.arg(idx, llvm::Attribute::NoAlias)
                     .arg(idx, llvm::DereferenceableAttribute(llsz));

                if mt.mutbl == ast::MutImmutable {
                    attrs.arg(idx, llvm::Attribute::ReadOnly);
                }

                // An anonymous (elided) region means the reference cannot
                // outlive the call, so the callee cannot capture it.
                if let ReLateBound(_, BrAnon(_)) = *b {
                    attrs.arg(idx, llvm::Attribute::NoCapture);
                }
            }

            // When a reference in an argument has no named lifetime, it's impossible for that
            // reference to escape this function (returned or stored beyond the call by a closure).
            // NOTE: only reached for `&T` whose pointee has interior mutability,
            // since the previous arm matches every other reference first.
            ty::ty_rptr(&ReLateBound(_, BrAnon(_)), mt) => {
                let llsz = machine::llsize_of_real(ccx, type_of::type_of(ccx, mt.ty));
                attrs.arg(idx, llvm::Attribute::NoCapture)
                     .arg(idx, llvm::DereferenceableAttribute(llsz));
            }

            // & pointer parameters are also never null and we know exactly how
            // many bytes we can dereference
            ty::ty_rptr(_, mt) => {
                let llsz = machine::llsize_of_real(ccx, type_of::type_of(ccx, mt.ty));
                attrs.arg(idx, llvm::DereferenceableAttribute(llsz));
            }
            _ => ()
        }
    }

    attrs
}