1 use crate::{LateContext, LateLintPass, LintContext}
;
3 use rustc_attr
as attr
;
4 use rustc_data_structures
::fx
::FxHashSet
;
5 use rustc_errors
::Applicability
;
7 use rustc_hir
::def_id
::DefId
;
8 use rustc_hir
::{is_range_literal, Expr, ExprKind, Node}
;
9 use rustc_middle
::ty
::layout
::{IntegerExt, LayoutOf, SizeSkeleton}
;
10 use rustc_middle
::ty
::subst
::SubstsRef
;
11 use rustc_middle
::ty
::{self, AdtKind, DefIdTree, Ty, TyCtxt, TypeFoldable}
;
12 use rustc_span
::source_map
;
13 use rustc_span
::symbol
::sym
;
14 use rustc_span
::{Span, Symbol, DUMMY_SP}
;
15 use rustc_target
::abi
::{Abi, WrappingRange}
;
16 use rustc_target
::abi
::{Integer, TagEncoding, Variants}
;
17 use rustc_target
::spec
::abi
::Abi
as SpecAbi
;
21 use std
::ops
::ControlFlow
;
25 /// The `unused_comparisons` lint detects comparisons made useless by
26 /// limits of the types involved.
40 /// A useless comparison may indicate a mistake, and should be fixed or
44 "comparisons made useless by limits of the types involved"
48 /// The `overflowing_literals` lint detects literal out of range for its
53 /// ```rust,compile_fail
61 /// It is usually a mistake to use a literal that overflows the type where
62 /// it is used. Either use a literal that is within range, or change the
63 /// type to be within the range of the literal.
66 "literal out of range for its type"
70 /// The `variant_size_differences` lint detects enums with widely varying
75 /// ```rust,compile_fail
76 /// #![deny(variant_size_differences)]
87 /// It can be a mistake to add a variant to an enum that is much larger
88 /// than the other variants, bloating the overall size required for all
89 /// variants. This can impact performance and memory usage. This is
90 /// triggered if one variant is more than 3 times larger than the
91 /// second-largest variant.
93 /// Consider placing the large variant's contents on the heap (for example
94 /// via [`Box`]) to keep the overall size of the enum itself down.
96 /// This lint is "allow" by default because it can be noisy, and may not be
97 /// an actual problem. Decisions about this should be guided with
98 /// profiling and benchmarking.
100 /// [`Box`]: https://doc.rust-lang.org/std/boxed/index.html
101 VARIANT_SIZE_DIFFERENCES
,
103 "detects enums with widely varying variant sizes"
106 #[derive(Copy, Clone)]
107 pub struct TypeLimits
{
108 /// Id of the last visited negated expression
109 negated_expr_id
: Option
<hir
::HirId
>,
112 impl_lint_pass
!(TypeLimits
=> [UNUSED_COMPARISONS
, OVERFLOWING_LITERALS
]);
115 pub fn new() -> TypeLimits
{
116 TypeLimits { negated_expr_id: None }
120 /// Attempts to special-case the overflowing literal lint when it occurs as a range endpoint.
121 /// Returns `true` iff the lint was overridden.
122 fn lint_overflowing_range_endpoint
<'tcx
>(
123 cx
: &LateContext
<'tcx
>,
127 expr
: &'tcx hir
::Expr
<'tcx
>,
128 parent_expr
: &'tcx hir
::Expr
<'tcx
>,
131 // We only want to handle exclusive (`..`) ranges,
132 // which are represented as `ExprKind::Struct`.
133 let mut overwritten
= false;
134 if let ExprKind
::Struct(_
, eps
, _
) = &parent_expr
.kind
{
138 // We can suggest using an inclusive range
139 // (`..=`) instead only if it is the `end` that is
140 // overflowing and only by 1.
141 if eps
[1].expr
.hir_id
== expr
.hir_id
&& lit_val
- 1 == max
{
142 cx
.struct_span_lint(OVERFLOWING_LITERALS
, parent_expr
.span
, |lint
| {
143 let mut err
= lint
.build(&format
!("range endpoint is out of range for `{}`", ty
));
144 if let Ok(start
) = cx
.sess().source_map().span_to_snippet(eps
[0].span
) {
145 use ast
::{LitIntType, LitKind}
;
146 // We need to preserve the literal's suffix,
147 // as it may determine typing information.
148 let suffix
= match lit
.node
{
149 LitKind
::Int(_
, LitIntType
::Signed(s
)) => s
.name_str(),
150 LitKind
::Int(_
, LitIntType
::Unsigned(s
)) => s
.name_str(),
151 LitKind
::Int(_
, LitIntType
::Unsuffixed
) => "",
154 let suggestion
= format
!("{}..={}{}", start
, lit_val
- 1, suffix
);
157 "use an inclusive range instead",
159 Applicability
::MachineApplicable
,
170 // For `isize` & `usize`, be conservative with the warnings, so that the
171 // warnings are consistent between 32- and 64-bit platforms.
172 fn int_ty_range(int_ty
: ty
::IntTy
) -> (i128
, i128
) {
174 ty
::IntTy
::Isize
=> (i64::MIN
.into(), i64::MAX
.into()),
175 ty
::IntTy
::I8
=> (i8::MIN
.into(), i8::MAX
.into()),
176 ty
::IntTy
::I16
=> (i16::MIN
.into(), i16::MAX
.into()),
177 ty
::IntTy
::I32
=> (i32::MIN
.into(), i32::MAX
.into()),
178 ty
::IntTy
::I64
=> (i64::MIN
.into(), i64::MAX
.into()),
179 ty
::IntTy
::I128
=> (i128
::MIN
, i128
::MAX
),
183 fn uint_ty_range(uint_ty
: ty
::UintTy
) -> (u128
, u128
) {
184 let max
= match uint_ty
{
185 ty
::UintTy
::Usize
=> u64::MAX
.into(),
186 ty
::UintTy
::U8
=> u8::MAX
.into(),
187 ty
::UintTy
::U16
=> u16::MAX
.into(),
188 ty
::UintTy
::U32
=> u32::MAX
.into(),
189 ty
::UintTy
::U64
=> u64::MAX
.into(),
190 ty
::UintTy
::U128
=> u128
::MAX
,
195 fn get_bin_hex_repr(cx
: &LateContext
<'_
>, lit
: &hir
::Lit
) -> Option
<String
> {
196 let src
= cx
.sess().source_map().span_to_snippet(lit
.span
).ok()?
;
197 let firstch
= src
.chars().next()?
;
200 match src
.chars().nth(1) {
201 Some('x'
| 'b'
) => return Some(src
),
209 fn report_bin_hex_error(
210 cx
: &LateContext
<'_
>,
211 expr
: &hir
::Expr
<'_
>,
217 let size
= Integer
::from_attr(&cx
.tcx
, ty
).size();
218 cx
.struct_span_lint(OVERFLOWING_LITERALS
, expr
.span
, |lint
| {
219 let (t
, actually
) = match ty
{
220 attr
::IntType
::SignedInt(t
) => {
221 let actually
= if negative
{
222 -(size
.sign_extend(val
) as i128
)
224 size
.sign_extend(val
) as i128
226 (t
.name_str(), actually
.to_string())
228 attr
::IntType
::UnsignedInt(t
) => {
229 let actually
= size
.truncate(val
);
230 (t
.name_str(), actually
.to_string())
233 let mut err
= lint
.build(&format
!("literal out of range for `{}`", t
));
235 // If the value is negative,
236 // emits a note about the value itself, apart from the literal.
238 "the literal `{}` (decimal `{}`) does not fit into \
242 err
.note(&format
!("and the value `-{}` will become `{}{}`", repr_str
, actually
, t
));
245 "the literal `{}` (decimal `{}`) does not fit into \
246 the type `{}` and will become `{}{}`",
247 repr_str
, val
, t
, actually
, t
250 if let Some(sugg_ty
) =
251 get_type_suggestion(cx
.typeck_results().node_type(expr
.hir_id
), val
, negative
)
253 if let Some(pos
) = repr_str
.chars().position(|c
| c
== 'i'
|| c
== 'u'
) {
254 let (sans_suffix
, _
) = repr_str
.split_at(pos
);
257 &format
!("consider using the type `{}` instead", sugg_ty
),
258 format
!("{}{}", sans_suffix
, sugg_ty
),
259 Applicability
::MachineApplicable
,
262 err
.help(&format
!("consider using the type `{}` instead", sugg_ty
));
269 // This function finds the next fitting type and generates a suggestion string.
270 // It searches for fitting types in the following way (`X < Y`):
271 // - `iX`: if literal fits in `uX` => `uX`, else => `iY`
275 // No suggestion for: `isize`, `usize`.
276 fn get_type_suggestion(t
: Ty
<'_
>, val
: u128
, negative
: bool
) -> Option
<&'
static str> {
279 macro_rules
! find_fit
{
280 ($ty
:expr
, $val
:expr
, $negative
:expr
,
281 $
($
type:ident
=> [$
($utypes
:expr
),*] => [$
($itypes
:expr
),*]),+) => {
283 let _neg
= if negative { 1 }
else { 0 }
;
286 $
(if !negative
&& val
<= uint_ty_range($utypes
).1 {
287 return Some($utypes
.name_str())
289 $
(if val
<= int_ty_range($itypes
).1 as u128
+ _neg
{
290 return Some($itypes
.name_str())
300 ty
::Int(i
) => find_fit
!(i
, val
, negative
,
301 I8
=> [U8
] => [I16
, I32
, I64
, I128
],
302 I16
=> [U16
] => [I32
, I64
, I128
],
303 I32
=> [U32
] => [I64
, I128
],
304 I64
=> [U64
] => [I128
],
305 I128
=> [U128
] => []),
306 ty
::Uint(u
) => find_fit
!(u
, val
, negative
,
307 U8
=> [U8
, U16
, U32
, U64
, U128
] => [],
308 U16
=> [U16
, U32
, U64
, U128
] => [],
309 U32
=> [U32
, U64
, U128
] => [],
310 U64
=> [U64
, U128
] => [],
311 U128
=> [U128
] => []),
316 fn lint_int_literal
<'tcx
>(
317 cx
: &LateContext
<'tcx
>,
318 type_limits
: &TypeLimits
,
319 e
: &'tcx hir
::Expr
<'tcx
>,
324 let int_type
= t
.normalize(cx
.sess().target
.pointer_width
);
325 let (min
, max
) = int_ty_range(int_type
);
326 let max
= max
as u128
;
327 let negative
= type_limits
.negated_expr_id
== Some(e
.hir_id
);
329 // Detect literal value out of range [min, max] inclusive
330 // avoiding use of -min to prevent overflow/panic
331 if (negative
&& v
> max
+ 1) || (!negative
&& v
> max
) {
332 if let Some(repr_str
) = get_bin_hex_repr(cx
, lit
) {
333 report_bin_hex_error(
336 attr
::IntType
::SignedInt(ty
::ast_int_ty(t
)),
344 let par_id
= cx
.tcx
.hir().get_parent_node(e
.hir_id
);
345 if let Node
::Expr(par_e
) = cx
.tcx
.hir().get(par_id
) {
346 if let hir
::ExprKind
::Struct(..) = par_e
.kind
{
347 if is_range_literal(par_e
)
348 && lint_overflowing_range_endpoint(cx
, lit
, v
, max
, e
, par_e
, t
.name_str())
350 // The overflowing literal lint was overridden.
356 cx
.struct_span_lint(OVERFLOWING_LITERALS
, e
.span
, |lint
| {
357 let mut err
= lint
.build(&format
!("literal out of range for `{}`", t
.name_str()));
359 "the literal `{}` does not fit into the type `{}` whose range is `{}..={}`",
362 .span_to_snippet(lit
.span
)
363 .expect("must get snippet from literal"),
368 if let Some(sugg_ty
) =
369 get_type_suggestion(cx
.typeck_results().node_type(e
.hir_id
), v
, negative
)
371 err
.help(&format
!("consider using the type `{}` instead", sugg_ty
));
378 fn lint_uint_literal
<'tcx
>(
379 cx
: &LateContext
<'tcx
>,
380 e
: &'tcx hir
::Expr
<'tcx
>,
384 let uint_type
= t
.normalize(cx
.sess().target
.pointer_width
);
385 let (min
, max
) = uint_ty_range(uint_type
);
386 let lit_val
: u128
= match lit
.node
{
387 // _v is u8, within range by definition
388 ast
::LitKind
::Byte(_v
) => return,
389 ast
::LitKind
::Int(v
, _
) => v
,
392 if lit_val
< min
|| lit_val
> max
{
393 let parent_id
= cx
.tcx
.hir().get_parent_node(e
.hir_id
);
394 if let Node
::Expr(par_e
) = cx
.tcx
.hir().get(parent_id
) {
396 hir
::ExprKind
::Cast(..) => {
397 if let ty
::Char
= cx
.typeck_results().expr_ty(par_e
).kind() {
398 cx
.struct_span_lint(OVERFLOWING_LITERALS
, par_e
.span
, |lint
| {
399 lint
.build("only `u8` can be cast into `char`")
402 "use a `char` literal instead",
403 format
!("'\\u{{{:X}}}'", lit_val
),
404 Applicability
::MachineApplicable
,
411 hir
::ExprKind
::Struct(..) if is_range_literal(par_e
) => {
412 let t
= t
.name_str();
413 if lint_overflowing_range_endpoint(cx
, lit
, lit_val
, max
, e
, par_e
, t
) {
414 // The overflowing literal lint was overridden.
421 if let Some(repr_str
) = get_bin_hex_repr(cx
, lit
) {
422 report_bin_hex_error(
425 attr
::IntType
::UnsignedInt(ty
::ast_uint_ty(t
)),
432 cx
.struct_span_lint(OVERFLOWING_LITERALS
, e
.span
, |lint
| {
433 lint
.build(&format
!("literal out of range for `{}`", t
.name_str()))
435 "the literal `{}` does not fit into the type `{}` whose range is `{}..={}`",
438 .span_to_snippet(lit
.span
)
439 .expect("must get snippet from literal"),
449 fn lint_literal
<'tcx
>(
450 cx
: &LateContext
<'tcx
>,
451 type_limits
: &TypeLimits
,
452 e
: &'tcx hir
::Expr
<'tcx
>,
455 match *cx
.typeck_results().node_type(e
.hir_id
).kind() {
458 ast
::LitKind
::Int(v
, ast
::LitIntType
::Signed(_
) | ast
::LitIntType
::Unsuffixed
) => {
459 lint_int_literal(cx
, type_limits
, e
, lit
, t
, v
)
464 ty
::Uint(t
) => lint_uint_literal(cx
, e
, lit
, t
),
466 let is_infinite
= match lit
.node
{
467 ast
::LitKind
::Float(v
, _
) => match t
{
468 ty
::FloatTy
::F32
=> v
.as_str().parse().map(f32::is_infinite
),
469 ty
::FloatTy
::F64
=> v
.as_str().parse().map(f64::is_infinite
),
473 if is_infinite
== Ok(true) {
474 cx
.struct_span_lint(OVERFLOWING_LITERALS
, e
.span
, |lint
| {
475 lint
.build(&format
!("literal out of range for `{}`", t
.name_str()))
477 "the literal `{}` does not fit into the type `{}` and will be converted to `{}::INFINITY`",
480 .span_to_snippet(lit
.span
)
481 .expect("must get snippet from literal"),
493 impl<'tcx
> LateLintPass
<'tcx
> for TypeLimits
{
494 fn check_expr(&mut self, cx
: &LateContext
<'tcx
>, e
: &'tcx hir
::Expr
<'tcx
>) {
496 hir
::ExprKind
::Unary(hir
::UnOp
::Neg
, ref expr
) => {
497 // propagate negation, if the negation itself isn't negated
498 if self.negated_expr_id
!= Some(e
.hir_id
) {
499 self.negated_expr_id
= Some(expr
.hir_id
);
502 hir
::ExprKind
::Binary(binop
, ref l
, ref r
) => {
503 if is_comparison(binop
) && !check_limits(cx
, binop
, &l
, &r
) {
504 cx
.struct_span_lint(UNUSED_COMPARISONS
, e
.span
, |lint
| {
505 lint
.build("comparison is useless due to type limits").emit();
509 hir
::ExprKind
::Lit(ref lit
) => lint_literal(cx
, self, e
, lit
),
513 fn is_valid
<T
: cmp
::PartialOrd
>(binop
: hir
::BinOp
, v
: T
, min
: T
, max
: T
) -> bool
{
515 hir
::BinOpKind
::Lt
=> v
> min
&& v
<= max
,
516 hir
::BinOpKind
::Le
=> v
>= min
&& v
< max
,
517 hir
::BinOpKind
::Gt
=> v
>= min
&& v
< max
,
518 hir
::BinOpKind
::Ge
=> v
> min
&& v
<= max
,
519 hir
::BinOpKind
::Eq
| hir
::BinOpKind
::Ne
=> v
>= min
&& v
<= max
,
524 fn rev_binop(binop
: hir
::BinOp
) -> hir
::BinOp
{
528 hir
::BinOpKind
::Lt
=> hir
::BinOpKind
::Gt
,
529 hir
::BinOpKind
::Le
=> hir
::BinOpKind
::Ge
,
530 hir
::BinOpKind
::Gt
=> hir
::BinOpKind
::Lt
,
531 hir
::BinOpKind
::Ge
=> hir
::BinOpKind
::Le
,
538 cx
: &LateContext
<'_
>,
543 let (lit
, expr
, swap
) = match (&l
.kind
, &r
.kind
) {
544 (&hir
::ExprKind
::Lit(_
), _
) => (l
, r
, true),
545 (_
, &hir
::ExprKind
::Lit(_
)) => (r
, l
, false),
548 // Normalize the binop so that the literal is always on the RHS in
550 let norm_binop
= if swap { rev_binop(binop) }
else { binop }
;
551 match *cx
.typeck_results().node_type(expr
.hir_id
).kind() {
553 let (min
, max
) = int_ty_range(int_ty
);
554 let lit_val
: i128
= match lit
.kind
{
555 hir
::ExprKind
::Lit(ref li
) => match li
.node
{
558 ast
::LitIntType
::Signed(_
) | ast
::LitIntType
::Unsuffixed
,
564 is_valid(norm_binop
, lit_val
, min
, max
)
566 ty
::Uint(uint_ty
) => {
567 let (min
, max
): (u128
, u128
) = uint_ty_range(uint_ty
);
568 let lit_val
: u128
= match lit
.kind
{
569 hir
::ExprKind
::Lit(ref li
) => match li
.node
{
570 ast
::LitKind
::Int(v
, _
) => v
,
575 is_valid(norm_binop
, lit_val
, min
, max
)
581 fn is_comparison(binop
: hir
::BinOp
) -> bool
{
596 /// The `improper_ctypes` lint detects incorrect use of types in foreign
603 /// static STATIC: String;
611 /// The compiler has several checks to verify that types used in `extern`
612 /// blocks are safe and follow certain rules to ensure proper
613 /// compatibility with the foreign interfaces. This lint is issued when it
614 /// detects a probable mistake in a definition. The lint usually should
615 /// provide a description of the issue, along with possibly a hint on how
619 "proper use of libc types in foreign modules"
622 declare_lint_pass
!(ImproperCTypesDeclarations
=> [IMPROPER_CTYPES
]);
625 /// The `improper_ctypes_definitions` lint detects incorrect use of
626 /// [`extern` function] definitions.
628 /// [`extern` function]: https://doc.rust-lang.org/reference/items/functions.html#extern-function-qualifier
633 /// # #![allow(unused)]
634 /// pub extern "C" fn str_type(p: &str) { }
641 /// There are many parameter and return types that may be specified in an
642 /// `extern` function that are not compatible with the given ABI. This
643 /// lint is an alert that these types should not be used. The lint usually
644 /// should provide a description of the issue, along with possibly a hint
645 /// on how to resolve it.
646 IMPROPER_CTYPES_DEFINITIONS
,
648 "proper use of libc types in foreign item definitions"
651 declare_lint_pass
!(ImproperCTypesDefinitions
=> [IMPROPER_CTYPES_DEFINITIONS
]);
653 #[derive(Clone, Copy)]
654 crate enum CItemKind
{
659 struct ImproperCTypesVisitor
<'a
, 'tcx
> {
660 cx
: &'a LateContext
<'tcx
>,
664 enum FfiResult
<'tcx
> {
666 FfiPhantom(Ty
<'tcx
>),
667 FfiUnsafe { ty: Ty<'tcx>, reason: String, help: Option<String> }
,
670 crate fn nonnull_optimization_guaranteed
<'tcx
>(tcx
: TyCtxt
<'tcx
>, def
: ty
::AdtDef
<'tcx
>) -> bool
{
671 tcx
.has_attr(def
.did(), sym
::rustc_nonnull_optimization_guaranteed
)
674 /// `repr(transparent)` structs can have a single non-ZST field, this function returns that
676 pub fn transparent_newtype_field
<'a
, 'tcx
>(
678 variant
: &'a ty
::VariantDef
,
679 ) -> Option
<&'a ty
::FieldDef
> {
680 let param_env
= tcx
.param_env(variant
.def_id
);
681 variant
.fields
.iter().find(|field
| {
682 let field_ty
= tcx
.type_of(field
.did
);
683 let is_zst
= tcx
.layout_of(param_env
.and(field_ty
)).map_or(false, |layout
| layout
.is_zst());
688 /// Is type known to be non-null?
689 fn ty_is_known_nonnull
<'tcx
>(cx
: &LateContext
<'tcx
>, ty
: Ty
<'tcx
>, mode
: CItemKind
) -> bool
{
692 ty
::FnPtr(_
) => true,
694 ty
::Adt(def
, _
) if def
.is_box() && matches
!(mode
, CItemKind
::Definition
) => true,
695 ty
::Adt(def
, substs
) if def
.repr().transparent() && !def
.is_union() => {
696 let marked_non_null
= nonnull_optimization_guaranteed(tcx
, *def
);
702 // Types with a `#[repr(no_niche)]` attribute have their niche hidden.
703 // The attribute is used by the UnsafeCell for example (the only use so far).
704 if def
.repr().hide_niche() {
710 .filter_map(|variant
| transparent_newtype_field(cx
.tcx
, variant
))
711 .any(|field
| ty_is_known_nonnull(cx
, field
.ty(tcx
, substs
), mode
))
717 /// Given a non-null scalar (or transparent) type `ty`, return the nullable version of that type.
718 /// If the type passed in was not scalar, returns None.
719 fn get_nullable_type
<'tcx
>(cx
: &LateContext
<'tcx
>, ty
: Ty
<'tcx
>) -> Option
<Ty
<'tcx
>> {
721 Some(match *ty
.kind() {
722 ty
::Adt(field_def
, field_substs
) => {
723 let inner_field_ty
= {
724 let first_non_zst_ty
= field_def
727 .filter_map(|v
| transparent_newtype_field(cx
.tcx
, v
));
729 first_non_zst_ty
.clone().count(),
731 "Wrong number of fields for transparent type"
735 .expect("No non-zst fields in transparent type.")
736 .ty(tcx
, field_substs
)
738 return get_nullable_type(cx
, inner_field_ty
);
740 ty
::Int(ty
) => tcx
.mk_mach_int(ty
),
741 ty
::Uint(ty
) => tcx
.mk_mach_uint(ty
),
742 ty
::RawPtr(ty_mut
) => tcx
.mk_ptr(ty_mut
),
743 // As these types are always non-null, the nullable equivalent of
744 // Option<T> of these types are their raw pointer counterparts.
745 ty
::Ref(_region
, ty
, mutbl
) => tcx
.mk_ptr(ty
::TypeAndMut { ty, mutbl }
),
747 // There is no nullable equivalent for Rust's function pointers -- you
748 // must use an Option<fn(..) -> _> to represent it.
752 // We should only ever reach this case if ty_is_known_nonnull is extended
756 "get_nullable_type: Unhandled scalar kind: {:?} while checking {:?}",
764 /// Check if this enum can be safely exported based on the "nullable pointer optimization". If it
765 /// can, return the type that `ty` can be safely converted to, otherwise return `None`.
766 /// Currently restricted to function pointers, boxes, references, `core::num::NonZero*`,
767 /// `core::ptr::NonNull`, and `#[repr(transparent)]` newtypes.
768 /// FIXME: This duplicates code in codegen.
769 crate fn repr_nullable_ptr
<'tcx
>(
770 cx
: &LateContext
<'tcx
>,
773 ) -> Option
<Ty
<'tcx
>> {
774 debug
!("is_repr_nullable_ptr(cx, ty = {:?})", ty
);
775 if let ty
::Adt(ty_def
, substs
) = ty
.kind() {
776 let field_ty
= match &ty_def
.variants().raw
[..] {
777 [var_one
, var_two
] => match (&var_one
.fields
[..], &var_two
.fields
[..]) {
778 ([], [field
]) | ([field
], []) => field
.ty(cx
.tcx
, substs
),
784 if !ty_is_known_nonnull(cx
, field_ty
, ckind
) {
788 // At this point, the field's type is known to be nonnull and the parent enum is Option-like.
789 // If the computed size for the field and the enum are different, the nonnull optimization isn't
790 // being applied (and we've got a problem somewhere).
791 let compute_size_skeleton
= |t
| SizeSkeleton
::compute(t
, cx
.tcx
, cx
.param_env
).unwrap();
792 if !compute_size_skeleton(ty
).same_size(compute_size_skeleton(field_ty
)) {
793 bug
!("improper_ctypes: Option nonnull optimization not applied?");
796 // Return the nullable type this Option-like enum can be safely represented with.
797 let field_ty_abi
= &cx
.layout_of(field_ty
).unwrap().abi
;
798 if let Abi
::Scalar(field_ty_scalar
) = field_ty_abi
{
799 match field_ty_scalar
.valid_range(cx
) {
800 WrappingRange { start: 0, end }
801 if end
== field_ty_scalar
.size(&cx
.tcx
).unsigned_int_max() - 1 =>
803 return Some(get_nullable_type(cx
, field_ty
).unwrap());
805 WrappingRange { start: 1, .. }
=> {
806 return Some(get_nullable_type(cx
, field_ty
).unwrap());
808 WrappingRange { start, end }
=> {
809 unreachable
!("Unhandled start and end range: ({}, {})", start
, end
)
817 impl<'a
, 'tcx
> ImproperCTypesVisitor
<'a
, 'tcx
> {
818 /// Check if the type is array and emit an unsafe type lint.
819 fn check_for_array_ty(&mut self, sp
: Span
, ty
: Ty
<'tcx
>) -> bool
{
820 if let ty
::Array(..) = ty
.kind() {
821 self.emit_ffi_unsafe_type_lint(
824 "passing raw arrays by value is not FFI-safe",
825 Some("consider passing a pointer to the array"),
833 /// Checks if the given field's type is "ffi-safe".
834 fn check_field_type_for_ffi(
836 cache
: &mut FxHashSet
<Ty
<'tcx
>>,
837 field
: &ty
::FieldDef
,
838 substs
: SubstsRef
<'tcx
>,
839 ) -> FfiResult
<'tcx
> {
840 let field_ty
= field
.ty(self.cx
.tcx
, substs
);
841 if field_ty
.has_opaque_types() {
842 self.check_type_for_ffi(cache
, field_ty
)
844 let field_ty
= self.cx
.tcx
.normalize_erasing_regions(self.cx
.param_env
, field_ty
);
845 self.check_type_for_ffi(cache
, field_ty
)
849 /// Checks if the given `VariantDef`'s field types are "ffi-safe".
850 fn check_variant_for_ffi(
852 cache
: &mut FxHashSet
<Ty
<'tcx
>>,
854 def
: ty
::AdtDef
<'tcx
>,
855 variant
: &ty
::VariantDef
,
856 substs
: SubstsRef
<'tcx
>,
857 ) -> FfiResult
<'tcx
> {
860 if def
.repr().transparent() {
861 // Can assume that at most one field is not a ZST, so only check
862 // that field's type for FFI-safety.
863 if let Some(field
) = transparent_newtype_field(self.cx
.tcx
, variant
) {
864 self.check_field_type_for_ffi(cache
, field
, substs
)
866 // All fields are ZSTs; this means that the type should behave
867 // like (), which is FFI-unsafe
870 reason
: "this struct contains only zero-sized fields".into(),
875 // We can't completely trust repr(C) markings; make sure the fields are
877 let mut all_phantom
= !variant
.fields
.is_empty();
878 for field
in &variant
.fields
{
879 match self.check_field_type_for_ffi(cache
, &field
, substs
) {
883 FfiPhantom(..) if def
.is_enum() => {
886 reason
: "this enum contains a PhantomData field".into(),
895 if all_phantom { FfiPhantom(ty) }
else { FfiSafe }
899 /// Checks if the given type is "ffi-safe" (has a stable, well-defined
900 /// representation which can be exported to C code).
901 fn check_type_for_ffi(&self, cache
: &mut FxHashSet
<Ty
<'tcx
>>, ty
: Ty
<'tcx
>) -> FfiResult
<'tcx
> {
904 let tcx
= self.cx
.tcx
;
906 // Protect against infinite recursion, for example
907 // `struct S(*mut S);`.
908 // FIXME: A recursion limit is necessary as well, for irregular
910 if !cache
.insert(ty
) {
915 ty
::Adt(def
, substs
) => {
916 if def
.is_box() && matches
!(self.mode
, CItemKind
::Definition
) {
917 if ty
.boxed_ty().is_sized(tcx
.at(DUMMY_SP
), self.cx
.param_env
) {
922 reason
: "box cannot be represented as a single pointer".to_string(),
927 if def
.is_phantom_data() {
928 return FfiPhantom(ty
);
930 match def
.adt_kind() {
931 AdtKind
::Struct
| AdtKind
::Union
=> {
932 let kind
= if def
.is_struct() { "struct" }
else { "union" }
;
934 if !def
.repr().c() && !def
.repr().transparent() {
937 reason
: format
!("this {} has unspecified layout", kind
),
939 "consider adding a `#[repr(C)]` or \
940 `#[repr(transparent)]` attribute to this {}",
946 let is_non_exhaustive
=
947 def
.non_enum_variant().is_field_list_non_exhaustive();
948 if is_non_exhaustive
&& !def
.did().is_local() {
951 reason
: format
!("this {} is non-exhaustive", kind
),
956 if def
.non_enum_variant().fields
.is_empty() {
959 reason
: format
!("this {} has no fields", kind
),
960 help
: Some(format
!("consider adding a member to this {}", kind
)),
964 self.check_variant_for_ffi(cache
, ty
, def
, def
.non_enum_variant(), substs
)
967 if def
.variants().is_empty() {
968 // Empty enums are okay... although sort of useless.
972 // Check for a repr() attribute to specify the size of the
974 if !def
.repr().c() && !def
.repr().transparent() && def
.repr().int
.is_none()
976 // Special-case types like `Option<extern fn()>`.
977 if repr_nullable_ptr(self.cx
, ty
, self.mode
).is_none() {
980 reason
: "enum has no representation hint".into(),
982 "consider adding a `#[repr(C)]`, \
983 `#[repr(transparent)]`, or integer `#[repr(...)]` \
984 attribute to this enum"
991 if def
.is_variant_list_non_exhaustive() && !def
.did().is_local() {
994 reason
: "this enum is non-exhaustive".into(),
999 // Check the contained variants.
1000 for variant
in def
.variants() {
1001 let is_non_exhaustive
= variant
.is_field_list_non_exhaustive();
1002 if is_non_exhaustive
&& !variant
.def_id
.is_local() {
1005 reason
: "this enum has non-exhaustive variants".into(),
1010 match self.check_variant_for_ffi(cache
, ty
, def
, variant
, substs
) {
1021 ty
::Char
=> FfiUnsafe
{
1023 reason
: "the `char` type has no C equivalent".into(),
1024 help
: Some("consider using `u32` or `libc::wchar_t` instead".into()),
1027 ty
::Int(ty
::IntTy
::I128
) | ty
::Uint(ty
::UintTy
::U128
) => FfiUnsafe
{
1029 reason
: "128-bit integers don't currently have a known stable ABI".into(),
1033 // Primitive types with a stable representation.
1034 ty
::Bool
| ty
::Int(..) | ty
::Uint(..) | ty
::Float(..) | ty
::Never
=> FfiSafe
,
1036 ty
::Slice(_
) => FfiUnsafe
{
1038 reason
: "slices have no C equivalent".into(),
1039 help
: Some("consider using a raw pointer instead".into()),
1042 ty
::Dynamic(..) => {
1043 FfiUnsafe { ty, reason: "trait objects have no C equivalent".into(), help: None }
1046 ty
::Str
=> FfiUnsafe
{
1048 reason
: "string slices have no C equivalent".into(),
1049 help
: Some("consider using `*const u8` and a length instead".into()),
1052 ty
::Tuple(..) => FfiUnsafe
{
1054 reason
: "tuples have unspecified layout".into(),
1055 help
: Some("consider using a struct instead".into()),
1058 ty
::RawPtr(ty
::TypeAndMut { ty, .. }
) | ty
::Ref(_
, ty
, _
)
1060 matches
!(self.mode
, CItemKind
::Definition
)
1061 && ty
.is_sized(self.cx
.tcx
.at(DUMMY_SP
), self.cx
.param_env
)
1067 ty
::RawPtr(ty
::TypeAndMut { ty, .. }
)
1068 if match ty
.kind() {
1069 ty
::Tuple(tuple
) => tuple
.is_empty(),
1076 ty
::RawPtr(ty
::TypeAndMut { ty, .. }
) | ty
::Ref(_
, ty
, _
) => {
1077 self.check_type_for_ffi(cache
, ty
)
1080 ty
::Array(inner_ty
, _
) => self.check_type_for_ffi(cache
, inner_ty
),
1083 if self.is_internal_abi(sig
.abi()) {
1086 reason
: "this function pointer has Rust-specific calling convention".into(),
1088 "consider using an `extern fn(...) -> ...` \
1089 function pointer instead"
1095 let sig
= tcx
.erase_late_bound_regions(sig
);
1096 if !sig
.output().is_unit() {
1097 let r
= self.check_type_for_ffi(cache
, sig
.output());
1105 for arg
in sig
.inputs() {
1106 let r
= self.check_type_for_ffi(cache
, *arg
);
1117 ty
::Foreign(..) => FfiSafe
,
1119 // While opaque types are checked for earlier, if a projection in a struct field
1120 // normalizes to an opaque type, then it will reach this branch.
1122 FfiUnsafe { ty, reason: "opaque types have no C equivalent".into(), help: None }
1125 // `extern "C" fn` functions can have type parameters, which may or may not be FFI-safe,
1126 // so they are currently ignored for the purposes of this lint.
1127 ty
::Param(..) | ty
::Projection(..) if matches
!(self.mode
, CItemKind
::Definition
) => {
1132 | ty
::Projection(..)
1138 | ty
::GeneratorWitness(..)
1139 | ty
::Placeholder(..)
1140 | ty
::FnDef(..) => bug
!("unexpected type in foreign function: {:?}", ty
),
1144 fn emit_ffi_unsafe_type_lint(
1151 let lint
= match self.mode
{
1152 CItemKind
::Declaration
=> IMPROPER_CTYPES
,
1153 CItemKind
::Definition
=> IMPROPER_CTYPES_DEFINITIONS
,
1156 self.cx
.struct_span_lint(lint
, sp
, |lint
| {
1157 let item_description
= match self.mode
{
1158 CItemKind
::Declaration
=> "block",
1159 CItemKind
::Definition
=> "fn",
1161 let mut diag
= lint
.build(&format
!(
1162 "`extern` {} uses type `{}`, which is not FFI-safe",
1163 item_description
, ty
1165 diag
.span_label(sp
, "not FFI-safe");
1166 if let Some(help
) = help
{
1170 if let ty
::Adt(def
, _
) = ty
.kind() {
1171 if let Some(sp
) = self.cx
.tcx
.hir().span_if_local(def
.did()) {
1172 diag
.span_note(sp
, "the type is defined here");
1179 fn check_for_opaque_ty(&mut self, sp
: Span
, ty
: Ty
<'tcx
>) -> bool
{
1180 struct ProhibitOpaqueTypes
<'a
, 'tcx
> {
1181 cx
: &'a LateContext
<'tcx
>,
1184 impl<'a
, 'tcx
> ty
::fold
::TypeVisitor
<'tcx
> for ProhibitOpaqueTypes
<'a
, 'tcx
> {
1185 type BreakTy
= Ty
<'tcx
>;
1187 fn visit_ty(&mut self, ty
: Ty
<'tcx
>) -> ControlFlow
<Self::BreakTy
> {
1189 ty
::Opaque(..) => ControlFlow
::Break(ty
),
1190 // Consider opaque types within projections FFI-safe if they do not normalize
1191 // to more opaque types.
1192 ty
::Projection(..) => {
1193 let ty
= self.cx
.tcx
.normalize_erasing_regions(self.cx
.param_env
, ty
);
1195 // If `ty` is an opaque type directly then `super_visit_with` won't invoke
1196 // this function again.
1197 if ty
.has_opaque_types() {
1200 ControlFlow
::CONTINUE
1203 _
=> ty
.super_visit_with(self),
1208 if let Some(ty
) = ty
.visit_with(&mut ProhibitOpaqueTypes { cx: self.cx }
).break_value() {
1209 self.emit_ffi_unsafe_type_lint(ty
, sp
, "opaque types have no C equivalent", None
);
1216 fn check_type_for_ffi_and_report_errors(
1221 is_return_type
: bool
,
1223 // We have to check for opaque types before `normalize_erasing_regions`,
1224 // which will replace opaque types with their underlying concrete type.
1225 if self.check_for_opaque_ty(sp
, ty
) {
1226 // We've already emitted an error due to an opaque type.
1230 // it is only OK to use this function because extern fns cannot have
1231 // any generic types right now:
1232 let ty
= self.cx
.tcx
.normalize_erasing_regions(self.cx
.param_env
, ty
);
1234 // C doesn't really support passing arrays by value - the only way to pass an array by value
1235 // is through a struct. So, first test that the top level isn't an array, and then
1236 // recursively check the types inside.
1237 if !is_static
&& self.check_for_array_ty(sp
, ty
) {
1241 // Don't report FFI errors for unit return types. This check exists here, and not in
1242 // `check_foreign_fn` (where it would make more sense) so that normalization has definitely
1244 if is_return_type
&& ty
.is_unit() {
1248 match self.check_type_for_ffi(&mut FxHashSet
::default(), ty
) {
1249 FfiResult
::FfiSafe
=> {}
1250 FfiResult
::FfiPhantom(ty
) => {
1251 self.emit_ffi_unsafe_type_lint(ty
, sp
, "composed only of `PhantomData`", None
);
1253 // If `ty` is a `repr(transparent)` newtype, and the non-zero-sized type is a generic
1254 // argument, which after substitution, is `()`, then this branch can be hit.
1255 FfiResult
::FfiUnsafe { ty, .. }
if is_return_type
&& ty
.is_unit() => {}
1256 FfiResult
::FfiUnsafe { ty, reason, help }
=> {
1257 self.emit_ffi_unsafe_type_lint(ty
, sp
, &reason
, help
.as_deref());
1262 fn check_foreign_fn(&mut self, id
: hir
::HirId
, decl
: &hir
::FnDecl
<'_
>) {
1263 let def_id
= self.cx
.tcx
.hir().local_def_id(id
);
1264 let sig
= self.cx
.tcx
.fn_sig(def_id
);
1265 let sig
= self.cx
.tcx
.erase_late_bound_regions(sig
);
1267 for (input_ty
, input_hir
) in iter
::zip(sig
.inputs(), decl
.inputs
) {
1268 self.check_type_for_ffi_and_report_errors(input_hir
.span
, *input_ty
, false, false);
1271 if let hir
::FnRetTy
::Return(ref ret_hir
) = decl
.output
{
1272 let ret_ty
= sig
.output();
1273 self.check_type_for_ffi_and_report_errors(ret_hir
.span
, ret_ty
, false, true);
1277 fn check_foreign_static(&mut self, id
: hir
::HirId
, span
: Span
) {
1278 let def_id
= self.cx
.tcx
.hir().local_def_id(id
);
1279 let ty
= self.cx
.tcx
.type_of(def_id
);
1280 self.check_type_for_ffi_and_report_errors(span
, ty
, true, false);
1283 fn is_internal_abi(&self, abi
: SpecAbi
) -> bool
{
1286 SpecAbi
::Rust
| SpecAbi
::RustCall
| SpecAbi
::RustIntrinsic
| SpecAbi
::PlatformIntrinsic
1291 impl<'tcx
> LateLintPass
<'tcx
> for ImproperCTypesDeclarations
{
1292 fn check_foreign_item(&mut self, cx
: &LateContext
<'_
>, it
: &hir
::ForeignItem
<'_
>) {
1293 let mut vis
= ImproperCTypesVisitor { cx, mode: CItemKind::Declaration }
;
1294 let abi
= cx
.tcx
.hir().get_foreign_abi(it
.hir_id());
1296 if !vis
.is_internal_abi(abi
) {
1298 hir
::ForeignItemKind
::Fn(ref decl
, _
, _
) => {
1299 vis
.check_foreign_fn(it
.hir_id(), decl
);
1301 hir
::ForeignItemKind
::Static(ref ty
, _
) => {
1302 vis
.check_foreign_static(it
.hir_id(), ty
.span
);
1304 hir
::ForeignItemKind
::Type
=> (),
impl<'tcx> LateLintPass<'tcx> for ImproperCTypesDefinitions {
    /// Lints the signatures of extern-ABI functions *defined* in Rust
    /// (`improper_ctypes_definitions`), as opposed to declarations inside
    /// `extern` blocks, which are handled by `ImproperCTypesDeclarations`.
    fn check_fn(
        &mut self,
        cx: &LateContext<'tcx>,
        kind: hir::intravisit::FnKind<'tcx>,
        decl: &'tcx hir::FnDecl<'_>,
        _: &'tcx hir::Body<'_>,
        _: Span,
        hir_id: hir::HirId,
    ) {
        use hir::intravisit::FnKind;

        // Only free functions and methods declare an ABI; anything else
        // (e.g. closures) is skipped.
        let abi = match kind {
            FnKind::ItemFn(_, _, header, ..) => header.abi,
            FnKind::Method(_, sig, ..) => sig.header.abi,
            _ => return,
        };

        let mut vis = ImproperCTypesVisitor { cx, mode: CItemKind::Definition };
        if !vis.is_internal_abi(abi) {
            // Extern-ABI definitions are checked with the same machinery as
            // foreign declarations.
            vis.check_foreign_fn(hir_id, decl);
        }
    }
}
// Registers the lint pass that implements `variant_size_differences`.
declare_lint_pass!(VariantSizeDifferences => [VARIANT_SIZE_DIFFERENCES]);
impl<'tcx> LateLintPass<'tcx> for VariantSizeDifferences {
    /// For every `enum` item, computes each variant's payload size (layout
    /// size minus the shared discriminant tag) and warns when the largest
    /// variant is more than three times the size of the second-largest.
    fn check_item(&mut self, cx: &LateContext<'_>, it: &hir::Item<'_>) {
        if let hir::ItemKind::Enum(ref enum_definition, _) = it.kind {
            let t = cx.tcx.type_of(it.def_id);
            let ty = cx.tcx.erase_regions(t);
            // Enums whose layout cannot be computed are silently skipped.
            let Ok(layout) = cx.layout_of(ty) else { return };
            // Only multi-variant enums with a directly-encoded tag are of
            // interest; single-variant and niche-encoded layouts are skipped.
            let Variants::Multiple {
                tag_encoding: TagEncoding::Direct, tag, ref variants, ..
            } = &layout.variants else {
                return
            };

            let tag_size = tag.size(&cx.tcx).bytes();

            debug!(
                "enum `{}` is {} bytes large with layout:\n{:#?}",
                t,
                layout.size.bytes(),
                layout
            );

            // Fold over the variants tracking
            // (largest size, second-largest size, index of the largest).
            let (largest, slargest, largest_index) = iter::zip(enum_definition.variants, variants)
                .map(|(variant, variant_layout)| {
                    // Subtract the size of the enum tag.
                    let bytes = variant_layout.size().bytes().saturating_sub(tag_size);

                    debug!("- variant `{}` is {} bytes large", variant.ident, bytes);
                    bytes
                })
                .enumerate()
                .fold((0, 0, 0), |(l, s, li), (idx, size)| {
                    if size > l {
                        // New largest; the previous largest becomes second-largest.
                        (size, l, idx)
                    } else if size > s {
                        (l, size, li)
                    } else {
                        (l, s, li)
                    }
                });

            // We only warn if the largest variant is at least thrice as large as
            // the second-largest.
            if largest > slargest * 3 && slargest > 0 {
                cx.struct_span_lint(
                    VARIANT_SIZE_DIFFERENCES,
                    enum_definition.variants[largest_index].span,
                    |lint| {
                        lint.build(&format!(
                            "enum variant is more than three times \
                             larger ({} bytes) than the next largest",
                            largest
                        ))
                        .emit();
                    },
                )
            }
        }
    }
}
declare_lint! {
    /// The `invalid_atomic_ordering` lint detects passing an `Ordering`
    /// to an atomic operation that does not support that ordering.
    ///
    /// ### Example
    ///
    /// ```rust,compile_fail
    /// # use core::sync::atomic::{AtomicU8, Ordering};
    /// let atom = AtomicU8::new(0);
    /// let value = atom.load(Ordering::Release);
    /// # let _ = value;
    /// ```
    ///
    /// {{produces}}
    ///
    /// ### Explanation
    ///
    /// Some atomic operations are only supported for a subset of the
    /// `atomic::Ordering` variants. Passing an unsupported variant will cause
    /// an unconditional panic at runtime, which is detected by this lint.
    ///
    /// This lint will trigger in the following cases: (where `AtomicType` is an
    /// atomic type from `core::sync::atomic`, such as `AtomicBool`,
    /// `AtomicPtr`, `AtomicUsize`, or any of the other integer atomics).
    ///
    /// - Passing `Ordering::Acquire` or `Ordering::AcqRel` to
    ///   `AtomicType::store`.
    ///
    /// - Passing `Ordering::Release` or `Ordering::AcqRel` to
    ///   `AtomicType::load`.
    ///
    /// - Passing `Ordering::Relaxed` to `core::sync::atomic::fence` or
    ///   `core::sync::atomic::compiler_fence`.
    ///
    /// - Passing `Ordering::Release` or `Ordering::AcqRel` as the failure
    ///   ordering for any of `AtomicType::compare_exchange`,
    ///   `AtomicType::compare_exchange_weak`, or `AtomicType::fetch_update`.
    ///
    /// - Passing in a pair of orderings to `AtomicType::compare_exchange`,
    ///   `AtomicType::compare_exchange_weak`, or `AtomicType::fetch_update`
    ///   where the failure ordering is stronger than the success ordering.
    INVALID_ATOMIC_ORDERING,
    // NOTE(review): the lint level line was lost in extraction; upstream this
    // lint is deny-by-default — confirm against the original file.
    Deny,
    "usage of invalid atomic ordering in atomic operations and memory fences"
}
// Registers the lint pass that implements `invalid_atomic_ordering`.
declare_lint_pass!(InvalidAtomicOrdering => [INVALID_ATOMIC_ORDERING]);
impl InvalidAtomicOrdering {
    /// If `expr` is a method call on one of the standard-library atomic types
    /// whose name appears in `recognized_names`, returns the method name and
    /// the call's arguments (receiver included); otherwise `None`.
    fn inherent_atomic_method_call<'hir>(
        cx: &LateContext<'_>,
        expr: &Expr<'hir>,
        recognized_names: &[Symbol], // used for fast path calculation
    ) -> Option<(Symbol, &'hir [Expr<'hir>])> {
        // NOTE(review): the body of this list was lost in extraction; it
        // enumerates the `core::sync::atomic` types — confirm the exact set
        // against the original file.
        const ATOMIC_TYPES: &[Symbol] = &[
            sym::AtomicBool,
            sym::AtomicPtr,
            sym::AtomicUsize,
            sym::AtomicU8,
            sym::AtomicU16,
            sym::AtomicU32,
            sym::AtomicU64,
            sym::AtomicU128,
            sym::AtomicIsize,
            sym::AtomicI8,
            sym::AtomicI16,
            sym::AtomicI32,
            sym::AtomicI64,
            sym::AtomicI128,
        ];
        // Cheap name check first, then resolve the callee and require that it
        // is an inherent method on an atomic type from `core::sync::atomic`.
        if let ExprKind::MethodCall(ref method_path, args, _) = &expr.kind
            && recognized_names.contains(&method_path.ident.name)
            && let Some(m_def_id) = cx.typeck_results().type_dependent_def_id(expr.hir_id)
            && let Some(impl_did) = cx.tcx.impl_of_method(m_def_id)
            && let Some(adt) = cx.tcx.type_of(impl_did).ty_adt_def()
            // skip extension traits, only lint functions from the standard library
            && cx.tcx.trait_id_of_impl(impl_did).is_none()
            && let parent = cx.tcx.parent(adt.did())
            && cx.tcx.is_diagnostic_item(sym::atomic_mod, parent)
            && ATOMIC_TYPES.contains(&cx.tcx.item_name(adt.did()))
        {
            return Some((method_path.ident.name, args));
        }
        None
    }

    /// Returns `true` if `did` names one of the given `atomic::Ordering`
    /// variants (or one of their constructors).
    fn matches_ordering(cx: &LateContext<'_>, did: DefId, orderings: &[Symbol]) -> bool {
        let tcx = cx.tcx;
        let atomic_ordering = tcx.get_diagnostic_item(sym::Ordering);
        orderings.iter().any(|ordering| {
            tcx.item_name(did) == *ordering && {
                let parent = tcx.parent(did);
                Some(parent) == atomic_ordering
                    // needed in case this is a ctor, not a variant
                    || tcx.opt_parent(parent) == atomic_ordering
            }
        })
    }

    /// Resolves an ordering argument expression (a path like
    /// `Ordering::SeqCst`) to its `DefId`, if it is a path at all.
    fn opt_ordering_defid(cx: &LateContext<'_>, ord_arg: &Expr<'_>) -> Option<DefId> {
        if let ExprKind::Path(ref ord_qpath) = ord_arg.kind {
            cx.qpath_res(ord_qpath, ord_arg.hir_id).opt_def_id()
        } else {
            None
        }
    }

    /// Lints `load(Release | AcqRel)` and `store(Acquire | AcqRel)`, the
    /// orderings those operations panic on at runtime.
    fn check_atomic_load_store(cx: &LateContext<'_>, expr: &Expr<'_>) {
        use rustc_hir::def::{DefKind, Res};
        use rustc_hir::QPath;
        // `args[0]` is the receiver, so the ordering is `args[1]` for `load`
        // and `args[2]` for `store` (which also takes the value).
        if let Some((method, args)) = Self::inherent_atomic_method_call(cx, expr, &[sym::load, sym::store])
            && let Some((ordering_arg, invalid_ordering)) = match method {
                sym::load => Some((&args[1], sym::Release)),
                sym::store => Some((&args[2], sym::Acquire)),
                _ => None,
            }
            && let ExprKind::Path(QPath::Resolved(_, path)) = ordering_arg.kind
            && let Res::Def(DefKind::Ctor(..), ctor_id) = path.res
            && Self::matches_ordering(cx, ctor_id, &[invalid_ordering, sym::AcqRel])
        {
            cx.struct_span_lint(INVALID_ATOMIC_ORDERING, ordering_arg.span, |diag| {
                if method == sym::load {
                    diag.build("atomic loads cannot have `Release` or `AcqRel` ordering")
                        .help("consider using ordering modes `Acquire`, `SeqCst` or `Relaxed`")
                        .emit()
                } else {
                    debug_assert_eq!(method, sym::store);
                    diag.build("atomic stores cannot have `Acquire` or `AcqRel` ordering")
                        .help("consider using ordering modes `Release`, `SeqCst` or `Relaxed`")
                        .emit();
                }
            });
        }
    }

    /// Lints calls to `atomic::fence`/`atomic::compiler_fence` with
    /// `Ordering::Relaxed`, which panics at runtime.
    fn check_memory_fence(cx: &LateContext<'_>, expr: &Expr<'_>) {
        if let ExprKind::Call(ref func, ref args) = expr.kind
            && let ExprKind::Path(ref func_qpath) = func.kind
            && let Some(def_id) = cx.qpath_res(func_qpath, func.hir_id).opt_def_id()
            && matches!(cx.tcx.get_diagnostic_name(def_id), Some(sym::fence | sym::compiler_fence))
            && let ExprKind::Path(ref ordering_qpath) = &args[0].kind
            && let Some(ordering_def_id) = cx.qpath_res(ordering_qpath, args[0].hir_id).opt_def_id()
            && Self::matches_ordering(cx, ordering_def_id, &[sym::Relaxed])
        {
            cx.struct_span_lint(INVALID_ATOMIC_ORDERING, args[0].span, |diag| {
                diag.build("memory fences cannot have `Relaxed` ordering")
                    .help("consider using ordering modes `Acquire`, `Release`, `AcqRel` or `SeqCst`")
                    .emit();
            });
        }
    }

    /// Lints the failure ordering of `compare_exchange`,
    /// `compare_exchange_weak`, and `fetch_update`: it may never be `Release`
    /// or `AcqRel`, and it may not be stronger than the success ordering.
    fn check_atomic_compare_exchange(cx: &LateContext<'_>, expr: &Expr<'_>) {
        // Argument positions: receiver is args[0]; `fetch_update` takes
        // (set_order, fetch_order, f), the exchanges take
        // (current, new, success, failure).
        if let Some((method, args)) = Self::inherent_atomic_method_call(cx, expr, &[sym::fetch_update, sym::compare_exchange, sym::compare_exchange_weak])
            && let Some((success_order_arg, failure_order_arg)) = match method {
                sym::fetch_update => Some((&args[1], &args[2])),
                sym::compare_exchange | sym::compare_exchange_weak => Some((&args[3], &args[4])),
                _ => None,
            }
            && let Some(fail_ordering_def_id) = Self::opt_ordering_defid(cx, failure_order_arg)
        {
            // Helper type holding on to some checking and error reporting data. Has
            // - (success ordering,
            // - list of failure orderings forbidden by the success order,
            // - suggestion message)
            type OrdLintInfo = (Symbol, &'static [Symbol], &'static str);
            const RELAXED: OrdLintInfo = (sym::Relaxed, &[sym::SeqCst, sym::Acquire], "ordering mode `Relaxed`");
            const ACQUIRE: OrdLintInfo = (sym::Acquire, &[sym::SeqCst], "ordering modes `Acquire` or `Relaxed`");
            const SEQ_CST: OrdLintInfo = (sym::SeqCst, &[], "ordering modes `Acquire`, `SeqCst` or `Relaxed`");
            const RELEASE: OrdLintInfo = (sym::Release, RELAXED.1, RELAXED.2);
            const ACQREL: OrdLintInfo = (sym::AcqRel, ACQUIRE.1, ACQUIRE.2);
            const SEARCH: [OrdLintInfo; 5] = [RELAXED, ACQUIRE, SEQ_CST, RELEASE, ACQREL];

            // Identify which success ordering was written, if resolvable.
            let success_lint_info = Self::opt_ordering_defid(cx, success_order_arg)
                .and_then(|success_ord_def_id| -> Option<OrdLintInfo> {
                    SEARCH
                        .iter()
                        .copied()
                        .find(|(ordering, ..)| {
                            Self::matches_ordering(cx, success_ord_def_id, &[*ordering])
                        })
                });
            if Self::matches_ordering(cx, fail_ordering_def_id, &[sym::Release, sym::AcqRel]) {
                // If we don't know the success order is, use what we'd suggest
                // if it were maximally permissive.
                let suggested = success_lint_info.unwrap_or(SEQ_CST).2;
                cx.struct_span_lint(INVALID_ATOMIC_ORDERING, failure_order_arg.span, |diag| {
                    let msg = format!(
                        "{}'s failure ordering may not be `Release` or `AcqRel`",
                        method,
                    );
                    diag.build(&msg)
                        .help(&format!("consider using {} instead", suggested))
                        .emit();
                });
            } else if let Some((success_ord, bad_ords_given_success, suggested)) = success_lint_info {
                if Self::matches_ordering(cx, fail_ordering_def_id, bad_ords_given_success) {
                    cx.struct_span_lint(INVALID_ATOMIC_ORDERING, failure_order_arg.span, |diag| {
                        let msg = format!(
                            "{}'s failure ordering may not be stronger than the success ordering of `{}`",
                            method, success_ord,
                        );
                        diag.build(&msg)
                            .help(&format!("consider using {} instead", suggested))
                            .emit();
                    });
                }
            }
        }
    }
}
1610 impl<'tcx
> LateLintPass
<'tcx
> for InvalidAtomicOrdering
{
1611 fn check_expr(&mut self, cx
: &LateContext
<'tcx
>, expr
: &'tcx Expr
<'_
>) {
1612 Self::check_atomic_load_store(cx
, expr
);
1613 Self::check_memory_fence(cx
, expr
);
1614 Self::check_atomic_compare_exchange(cx
, expr
);