use rustc_target::abi::{Integer, TagEncoding, Variants};
use rustc_target::spec::abi::Abi as SpecAbi;
-use if_chain::if_chain;
use std::cmp;
use std::iter;
use std::ops::ControlFlow;
min,
max,
))
- .emit()
+ .emit();
});
}
}
hir::ExprKind::Binary(binop, ref l, ref r) => {
if is_comparison(binop) && !check_limits(cx, binop, &l, &r) {
cx.struct_span_lint(UNUSED_COMPARISONS, e.span, |lint| {
- lint.build("comparison is useless due to type limits").emit()
+ lint.build("comparison is useless due to type limits").emit();
});
}
}
FfiUnsafe { ty: Ty<'tcx>, reason: String, help: Option<String> },
}
-crate fn nonnull_optimization_guaranteed<'tcx>(tcx: TyCtxt<'tcx>, def: &ty::AdtDef) -> bool {
- tcx.get_attrs(def.did).iter().any(|a| a.has_name(sym::rustc_nonnull_optimization_guaranteed))
+crate fn nonnull_optimization_guaranteed<'tcx>(tcx: TyCtxt<'tcx>, def: ty::AdtDef<'tcx>) -> bool {
+ tcx.get_attrs(def.did()).iter().any(|a| a.has_name(sym::rustc_nonnull_optimization_guaranteed))
}
/// `repr(transparent)` structs can have a single non-ZST field, this function returns that
ty::FnPtr(_) => true,
ty::Ref(..) => true,
ty::Adt(def, _) if def.is_box() && matches!(mode, CItemKind::Definition) => true,
- ty::Adt(def, substs) if def.repr.transparent() && !def.is_union() => {
- let marked_non_null = nonnull_optimization_guaranteed(tcx, &def);
+ ty::Adt(def, substs) if def.repr().transparent() && !def.is_union() => {
+ let marked_non_null = nonnull_optimization_guaranteed(tcx, *def);
if marked_non_null {
return true;
// Types with a `#[repr(no_niche)]` attribute have their niche hidden.
// The attribute is used by the UnsafeCell for example (the only use so far).
- if def.repr.hide_niche() {
+ if def.repr().hide_niche() {
return false;
}
- def.variants
+ def.variants()
.iter()
.filter_map(|variant| transparent_newtype_field(cx.tcx, variant))
.any(|field| ty_is_known_nonnull(cx, field.ty(tcx, substs), mode))
Some(match *ty.kind() {
ty::Adt(field_def, field_substs) => {
let inner_field_ty = {
- let first_non_zst_ty =
- field_def.variants.iter().filter_map(|v| transparent_newtype_field(cx.tcx, v));
+ let first_non_zst_ty = field_def
+ .variants()
+ .iter()
+ .filter_map(|v| transparent_newtype_field(cx.tcx, v));
debug_assert_eq!(
first_non_zst_ty.clone().count(),
1,
) -> Option<Ty<'tcx>> {
debug!("is_repr_nullable_ptr(cx, ty = {:?})", ty);
if let ty::Adt(ty_def, substs) = ty.kind() {
- let field_ty = match &ty_def.variants.raw[..] {
+ let field_ty = match &ty_def.variants().raw[..] {
[var_one, var_two] => match (&var_one.fields[..], &var_two.fields[..]) {
([], [field]) | ([field], []) => field.ty(cx.tcx, substs),
_ => return None,
let field_ty_abi = &cx.layout_of(field_ty).unwrap().abi;
if let Abi::Scalar(field_ty_scalar) = field_ty_abi {
match (field_ty_scalar.valid_range.start, field_ty_scalar.valid_range.end) {
- (0, _) => unreachable!("Non-null optimisation extended to a non-zero value."),
+ (0, x) if x == field_ty_scalar.value.size(&cx.tcx).unsigned_int_max() - 1 => {
+ return Some(get_nullable_type(cx, field_ty).unwrap());
+ }
(1, _) => {
return Some(get_nullable_type(cx, field_ty).unwrap());
}
&self,
cache: &mut FxHashSet<Ty<'tcx>>,
ty: Ty<'tcx>,
- def: &ty::AdtDef,
+ def: ty::AdtDef<'tcx>,
variant: &ty::VariantDef,
substs: SubstsRef<'tcx>,
) -> FfiResult<'tcx> {
use FfiResult::*;
- if def.repr.transparent() {
+ if def.repr().transparent() {
// Can assume that at most one field is not a ZST, so only check
// that field's type for FFI-safety.
if let Some(field) = transparent_newtype_field(self.cx.tcx, variant) {
AdtKind::Struct | AdtKind::Union => {
let kind = if def.is_struct() { "struct" } else { "union" };
- if !def.repr.c() && !def.repr.transparent() {
+ if !def.repr().c() && !def.repr().transparent() {
return FfiUnsafe {
ty,
reason: format!("this {} has unspecified layout", kind),
let is_non_exhaustive =
def.non_enum_variant().is_field_list_non_exhaustive();
- if is_non_exhaustive && !def.did.is_local() {
+ if is_non_exhaustive && !def.did().is_local() {
return FfiUnsafe {
ty,
reason: format!("this {} is non-exhaustive", kind),
self.check_variant_for_ffi(cache, ty, def, def.non_enum_variant(), substs)
}
AdtKind::Enum => {
- if def.variants.is_empty() {
+ if def.variants().is_empty() {
// Empty enums are okay... although sort of useless.
return FfiSafe;
}
// Check for a repr() attribute to specify the size of the
// discriminant.
- if !def.repr.c() && !def.repr.transparent() && def.repr.int.is_none() {
+ if !def.repr().c() && !def.repr().transparent() && def.repr().int.is_none()
+ {
// Special-case types like `Option<extern fn()>`.
if repr_nullable_ptr(self.cx, ty, self.mode).is_none() {
return FfiUnsafe {
}
}
- if def.is_variant_list_non_exhaustive() && !def.did.is_local() {
+ if def.is_variant_list_non_exhaustive() && !def.did().is_local() {
return FfiUnsafe {
ty,
reason: "this enum is non-exhaustive".into(),
}
// Check the contained variants.
- for variant in &def.variants {
+ for variant in def.variants() {
let is_non_exhaustive = variant.is_field_list_non_exhaustive();
if is_non_exhaustive && !variant.def_id.is_local() {
return FfiUnsafe {
}
diag.note(note);
if let ty::Adt(def, _) = ty.kind() {
- if let Some(sp) = self.cx.tcx.hir().span_if_local(def.did) {
+ if let Some(sp) = self.cx.tcx.hir().span_if_local(def.did()) {
diag.span_note(sp, "the type is defined here");
}
}
if let hir::ItemKind::Enum(ref enum_definition, _) = it.kind {
let t = cx.tcx.type_of(it.def_id);
let ty = cx.tcx.erase_regions(t);
- let layout = match cx.layout_of(ty) {
- Ok(layout) => layout,
- Err(
- ty::layout::LayoutError::Unknown(_)
- | ty::layout::LayoutError::SizeOverflow(_)
- | ty::layout::LayoutError::NormalizationFailure(_, _),
- ) => return,
- };
+ let Ok(layout) = cx.layout_of(ty) else { return };
let Variants::Multiple {
tag_encoding: TagEncoding::Direct, tag, ref variants, ..
} = &layout.variants else {
let (largest, slargest, largest_index) = iter::zip(enum_definition.variants, variants)
.map(|(variant, variant_layout)| {
// Subtract the size of the enum tag.
- let bytes = variant_layout.size.bytes().saturating_sub(tag_size);
+ let bytes = variant_layout.size().bytes().saturating_sub(tag_size);
debug!("- variant `{}` is {} bytes large", variant.ident, bytes);
bytes
larger ({} bytes) than the next largest",
largest
))
- .emit()
+ .emit();
},
);
}
sym::AtomicI64,
sym::AtomicI128,
];
- if_chain! {
- if let ExprKind::MethodCall(ref method_path, args, _) = &expr.kind;
- if recognized_names.contains(&method_path.ident.name);
- if let Some(m_def_id) = cx.typeck_results().type_dependent_def_id(expr.hir_id);
- if let Some(impl_did) = cx.tcx.impl_of_method(m_def_id);
- if let Some(adt) = cx.tcx.type_of(impl_did).ty_adt_def();
+ if let ExprKind::MethodCall(ref method_path, args, _) = &expr.kind
+ && recognized_names.contains(&method_path.ident.name)
+ && let Some(m_def_id) = cx.typeck_results().type_dependent_def_id(expr.hir_id)
+ && let Some(impl_did) = cx.tcx.impl_of_method(m_def_id)
+ && let Some(adt) = cx.tcx.type_of(impl_did).ty_adt_def()
// skip extension traits, only lint functions from the standard library
- if cx.tcx.trait_id_of_impl(impl_did).is_none();
-
- if let Some(parent) = cx.tcx.parent(adt.did);
- if cx.tcx.is_diagnostic_item(sym::atomic_mod, parent);
- if ATOMIC_TYPES.contains(&cx.tcx.item_name(adt.did));
- then {
- return Some((method_path.ident.name, args));
- }
+ && cx.tcx.trait_id_of_impl(impl_did).is_none()
+ && let Some(parent) = cx.tcx.parent(adt.did())
+ && cx.tcx.is_diagnostic_item(sym::atomic_mod, parent)
+ && ATOMIC_TYPES.contains(&cx.tcx.item_name(adt.did()))
+ {
+ return Some((method_path.ident.name, args));
}
None
}
fn check_atomic_load_store(cx: &LateContext<'_>, expr: &Expr<'_>) {
use rustc_hir::def::{DefKind, Res};
use rustc_hir::QPath;
- if_chain! {
- if let Some((method, args)) = Self::inherent_atomic_method_call(cx, expr, &[sym::load, sym::store]);
- if let Some((ordering_arg, invalid_ordering)) = match method {
+ if let Some((method, args)) = Self::inherent_atomic_method_call(cx, expr, &[sym::load, sym::store])
+ && let Some((ordering_arg, invalid_ordering)) = match method {
sym::load => Some((&args[1], sym::Release)),
sym::store => Some((&args[2], sym::Acquire)),
_ => None,
- };
-
- if let ExprKind::Path(QPath::Resolved(_, path)) = ordering_arg.kind;
- if let Res::Def(DefKind::Ctor(..), ctor_id) = path.res;
- if Self::matches_ordering(cx, ctor_id, &[invalid_ordering, sym::AcqRel]);
- then {
- cx.struct_span_lint(INVALID_ATOMIC_ORDERING, ordering_arg.span, |diag| {
- if method == sym::load {
- diag.build("atomic loads cannot have `Release` or `AcqRel` ordering")
- .help("consider using ordering modes `Acquire`, `SeqCst` or `Relaxed`")
- .emit()
- } else {
- debug_assert_eq!(method, sym::store);
- diag.build("atomic stores cannot have `Acquire` or `AcqRel` ordering")
- .help("consider using ordering modes `Release`, `SeqCst` or `Relaxed`")
- .emit();
- }
- });
}
+ && let ExprKind::Path(QPath::Resolved(_, path)) = ordering_arg.kind
+ && let Res::Def(DefKind::Ctor(..), ctor_id) = path.res
+ && Self::matches_ordering(cx, ctor_id, &[invalid_ordering, sym::AcqRel])
+ {
+ cx.struct_span_lint(INVALID_ATOMIC_ORDERING, ordering_arg.span, |diag| {
+ if method == sym::load {
+ diag.build("atomic loads cannot have `Release` or `AcqRel` ordering")
+ .help("consider using ordering modes `Acquire`, `SeqCst` or `Relaxed`")
+ .emit()
+ } else {
+ debug_assert_eq!(method, sym::store);
+ diag.build("atomic stores cannot have `Acquire` or `AcqRel` ordering")
+ .help("consider using ordering modes `Release`, `SeqCst` or `Relaxed`")
+ .emit();
+ }
+ });
}
}
fn check_memory_fence(cx: &LateContext<'_>, expr: &Expr<'_>) {
- if_chain! {
- if let ExprKind::Call(ref func, ref args) = expr.kind;
- if let ExprKind::Path(ref func_qpath) = func.kind;
- if let Some(def_id) = cx.qpath_res(func_qpath, func.hir_id).opt_def_id();
- if matches!(cx.tcx.get_diagnostic_name(def_id), Some(sym::fence | sym::compiler_fence));
- if let ExprKind::Path(ref ordering_qpath) = &args[0].kind;
- if let Some(ordering_def_id) = cx.qpath_res(ordering_qpath, args[0].hir_id).opt_def_id();
- if Self::matches_ordering(cx, ordering_def_id, &[sym::Relaxed]);
- then {
- cx.struct_span_lint(INVALID_ATOMIC_ORDERING, args[0].span, |diag| {
- diag.build("memory fences cannot have `Relaxed` ordering")
- .help("consider using ordering modes `Acquire`, `Release`, `AcqRel` or `SeqCst`")
- .emit();
- });
- }
+ if let ExprKind::Call(ref func, ref args) = expr.kind
+ && let ExprKind::Path(ref func_qpath) = func.kind
+ && let Some(def_id) = cx.qpath_res(func_qpath, func.hir_id).opt_def_id()
+ && matches!(cx.tcx.get_diagnostic_name(def_id), Some(sym::fence | sym::compiler_fence))
+ && let ExprKind::Path(ref ordering_qpath) = &args[0].kind
+ && let Some(ordering_def_id) = cx.qpath_res(ordering_qpath, args[0].hir_id).opt_def_id()
+ && Self::matches_ordering(cx, ordering_def_id, &[sym::Relaxed])
+ {
+ cx.struct_span_lint(INVALID_ATOMIC_ORDERING, args[0].span, |diag| {
+ diag.build("memory fences cannot have `Relaxed` ordering")
+ .help("consider using ordering modes `Acquire`, `Release`, `AcqRel` or `SeqCst`")
+ .emit();
+ });
}
}
fn check_atomic_compare_exchange(cx: &LateContext<'_>, expr: &Expr<'_>) {
- if_chain! {
- if let Some((method, args)) = Self::inherent_atomic_method_call(cx, expr, &[sym::fetch_update, sym::compare_exchange, sym::compare_exchange_weak]);
- if let Some((success_order_arg, failure_order_arg)) = match method {
+ if let Some((method, args)) = Self::inherent_atomic_method_call(cx, expr, &[sym::fetch_update, sym::compare_exchange, sym::compare_exchange_weak])
+ && let Some((success_order_arg, failure_order_arg)) = match method {
sym::fetch_update => Some((&args[1], &args[2])),
sym::compare_exchange | sym::compare_exchange_weak => Some((&args[3], &args[4])),
_ => None,
- };
-
- if let Some(fail_ordering_def_id) = Self::opt_ordering_defid(cx, failure_order_arg);
- then {
- // Helper type holding on to some checking and error reporting data. Has
- // - (success ordering,
- // - list of failure orderings forbidden by the success order,
- // - suggestion message)
- type OrdLintInfo = (Symbol, &'static [Symbol], &'static str);
- const RELAXED: OrdLintInfo = (sym::Relaxed, &[sym::SeqCst, sym::Acquire], "ordering mode `Relaxed`");
- const ACQUIRE: OrdLintInfo = (sym::Acquire, &[sym::SeqCst], "ordering modes `Acquire` or `Relaxed`");
- const SEQ_CST: OrdLintInfo = (sym::SeqCst, &[], "ordering modes `Acquire`, `SeqCst` or `Relaxed`");
- const RELEASE: OrdLintInfo = (sym::Release, RELAXED.1, RELAXED.2);
- const ACQREL: OrdLintInfo = (sym::AcqRel, ACQUIRE.1, ACQUIRE.2);
- const SEARCH: [OrdLintInfo; 5] = [RELAXED, ACQUIRE, SEQ_CST, RELEASE, ACQREL];
-
- let success_lint_info = Self::opt_ordering_defid(cx, success_order_arg)
- .and_then(|success_ord_def_id| -> Option<OrdLintInfo> {
- SEARCH
- .iter()
- .copied()
- .find(|(ordering, ..)| {
- Self::matches_ordering(cx, success_ord_def_id, &[*ordering])
- })
- });
- if Self::matches_ordering(cx, fail_ordering_def_id, &[sym::Release, sym::AcqRel]) {
- // If we don't know the success order is, use what we'd suggest
- // if it were maximally permissive.
- let suggested = success_lint_info.unwrap_or(SEQ_CST).2;
+ }
+ && let Some(fail_ordering_def_id) = Self::opt_ordering_defid(cx, failure_order_arg)
+ {
+ // Helper type holding on to some checking and error reporting data. Has
+ // - (success ordering,
+ // - list of failure orderings forbidden by the success order,
+ // - suggestion message)
+ type OrdLintInfo = (Symbol, &'static [Symbol], &'static str);
+ const RELAXED: OrdLintInfo = (sym::Relaxed, &[sym::SeqCst, sym::Acquire], "ordering mode `Relaxed`");
+ const ACQUIRE: OrdLintInfo = (sym::Acquire, &[sym::SeqCst], "ordering modes `Acquire` or `Relaxed`");
+ const SEQ_CST: OrdLintInfo = (sym::SeqCst, &[], "ordering modes `Acquire`, `SeqCst` or `Relaxed`");
+ const RELEASE: OrdLintInfo = (sym::Release, RELAXED.1, RELAXED.2);
+ const ACQREL: OrdLintInfo = (sym::AcqRel, ACQUIRE.1, ACQUIRE.2);
+ const SEARCH: [OrdLintInfo; 5] = [RELAXED, ACQUIRE, SEQ_CST, RELEASE, ACQREL];
+
+ let success_lint_info = Self::opt_ordering_defid(cx, success_order_arg)
+ .and_then(|success_ord_def_id| -> Option<OrdLintInfo> {
+ SEARCH
+ .iter()
+ .copied()
+ .find(|(ordering, ..)| {
+ Self::matches_ordering(cx, success_ord_def_id, &[*ordering])
+ })
+ });
+ if Self::matches_ordering(cx, fail_ordering_def_id, &[sym::Release, sym::AcqRel]) {
+                // If we don't know what the success order is, use what we'd suggest
+ // if it were maximally permissive.
+ let suggested = success_lint_info.unwrap_or(SEQ_CST).2;
+ cx.struct_span_lint(INVALID_ATOMIC_ORDERING, failure_order_arg.span, |diag| {
+ let msg = format!(
+ "{}'s failure ordering may not be `Release` or `AcqRel`",
+ method,
+ );
+ diag.build(&msg)
+ .help(&format!("consider using {} instead", suggested))
+ .emit();
+ });
+ } else if let Some((success_ord, bad_ords_given_success, suggested)) = success_lint_info {
+ if Self::matches_ordering(cx, fail_ordering_def_id, bad_ords_given_success) {
cx.struct_span_lint(INVALID_ATOMIC_ORDERING, failure_order_arg.span, |diag| {
let msg = format!(
- "{}'s failure ordering may not be `Release` or `AcqRel`",
+ "{}'s failure ordering may not be stronger than the success ordering of `{}`",
method,
+ success_ord,
);
diag.build(&msg)
.help(&format!("consider using {} instead", suggested))
.emit();
});
- } else if let Some((success_ord, bad_ords_given_success, suggested)) = success_lint_info {
- if Self::matches_ordering(cx, fail_ordering_def_id, bad_ords_given_success) {
- cx.struct_span_lint(INVALID_ATOMIC_ORDERING, failure_order_arg.span, |diag| {
- let msg = format!(
- "{}'s failure ordering may not be stronger than the success ordering of `{}`",
- method,
- success_ord,
- );
- diag.build(&msg)
- .help(&format!("consider using {} instead", suggested))
- .emit();
- });
- }
}
}
}