1 use crate::{LateContext, LateLintPass, LintContext};
2 use rustc_ast as ast;
3 use rustc_attr as attr;
4 use rustc_data_structures::fx::FxHashSet;
5 use rustc_errors::Applicability;
6 use rustc_hir as hir;
7 use rustc_hir::def_id::DefId;
8 use rustc_hir::{is_range_literal, Expr, ExprKind, Node};
9 use rustc_middle::ty::layout::{IntegerExt, LayoutOf, SizeSkeleton};
10 use rustc_middle::ty::subst::SubstsRef;
11 use rustc_middle::ty::{self, AdtKind, DefIdTree, Ty, TyCtxt, TypeFoldable};
12 use rustc_span::source_map;
13 use rustc_span::symbol::sym;
14 use rustc_span::{Span, Symbol, DUMMY_SP};
15 use rustc_target::abi::Abi;
16 use rustc_target::abi::{Integer, TagEncoding, Variants};
17 use rustc_target::spec::abi::Abi as SpecAbi;
18
19 use if_chain::if_chain;
20 use std::cmp;
21 use std::iter;
22 use std::ops::ControlFlow;
23 use tracing::debug;
24
declare_lint! {
    /// The `unused_comparisons` lint detects comparisons made useless by
    /// limits of the types involved.
    ///
    /// ### Example
    ///
    /// ```rust
    /// fn foo(x: u8) {
    ///     x >= 0;
    /// }
    /// ```
    ///
    /// {{produces}}
    ///
    /// ### Explanation
    ///
    /// A useless comparison may indicate a mistake, and should be fixed or
    /// removed.
    UNUSED_COMPARISONS,
    Warn,
    "comparisons made useless by limits of the types involved"
}
47
declare_lint! {
    /// The `overflowing_literals` lint detects literal out of range for its
    /// type.
    ///
    /// ### Example
    ///
    /// ```rust,compile_fail
    /// let x: u8 = 1000;
    /// ```
    ///
    /// {{produces}}
    ///
    /// ### Explanation
    ///
    /// It is usually a mistake to use a literal that overflows the type where
    /// it is used. Either use a literal that is within range, or change the
    /// type to be within the range of the literal.
    OVERFLOWING_LITERALS,
    Deny,
    "literal out of range for its type"
}
69
declare_lint! {
    /// The `variant_size_differences` lint detects enums with widely varying
    /// variant sizes.
    ///
    /// ### Example
    ///
    /// ```rust,compile_fail
    /// #![deny(variant_size_differences)]
    /// enum En {
    ///     V0(u8),
    ///     VBig([u8; 1024]),
    /// }
    /// ```
    ///
    /// {{produces}}
    ///
    /// ### Explanation
    ///
    /// It can be a mistake to add a variant to an enum that is much larger
    /// than the other variants, bloating the overall size required for all
    /// variants. This can impact performance and memory usage. This is
    /// triggered if one variant is more than 3 times larger than the
    /// second-largest variant.
    ///
    /// Consider placing the large variant's contents on the heap (for example
    /// via [`Box`]) to keep the overall size of the enum itself down.
    ///
    /// This lint is "allow" by default because it can be noisy, and may not be
    /// an actual problem. Decisions about this should be guided with
    /// profiling and benchmarking.
    ///
    /// [`Box`]: https://doc.rust-lang.org/std/boxed/index.html
    VARIANT_SIZE_DIFFERENCES,
    Allow,
    "detects enums with widely varying variant sizes"
}
106
/// State for the pass that emits `UNUSED_COMPARISONS` and
/// `OVERFLOWING_LITERALS`.
#[derive(Copy, Clone)]
pub struct TypeLimits {
    /// Id of the last visited negated expression
    negated_expr_id: Option<hir::HirId>,
}

impl_lint_pass!(TypeLimits => [UNUSED_COMPARISONS, OVERFLOWING_LITERALS]);
114
115 impl TypeLimits {
new() -> TypeLimits116 pub fn new() -> TypeLimits {
117 TypeLimits { negated_expr_id: None }
118 }
119 }
120
/// Attempts to special-case the overflowing literal lint when it occurs as a range endpoint.
/// Returns `true` iff the lint was overridden.
fn lint_overflowing_range_endpoint<'tcx>(
    cx: &LateContext<'tcx>,
    lit: &hir::Lit,
    lit_val: u128,
    max: u128,
    expr: &'tcx hir::Expr<'tcx>,
    parent_expr: &'tcx hir::Expr<'tcx>,
    ty: &str,
) -> bool {
    // We only want to handle exclusive (`..`) ranges,
    // which are represented as `ExprKind::Struct`.
    let mut overwritten = false;
    if let ExprKind::Struct(_, eps, _) = &parent_expr.kind {
        if eps.len() != 2 {
            return false;
        }
        // We can suggest using an inclusive range
        // (`..=`) instead only if it is the `end` that is
        // overflowing and only by 1.
        if eps[1].expr.hir_id == expr.hir_id && lit_val - 1 == max {
            cx.struct_span_lint(OVERFLOWING_LITERALS, parent_expr.span, |lint| {
                let mut err = lint.build(&format!("range endpoint is out of range for `{}`", ty));
                // The suggestion (and the lint itself) is only emitted when the
                // range start can be rendered back as a snippet.
                if let Ok(start) = cx.sess().source_map().span_to_snippet(eps[0].span) {
                    use ast::{LitIntType, LitKind};
                    // We need to preserve the literal's suffix,
                    // as it may determine typing information.
                    let suffix = match lit.node {
                        LitKind::Int(_, LitIntType::Signed(s)) => s.name_str(),
                        LitKind::Int(_, LitIntType::Unsigned(s)) => s.name_str(),
                        LitKind::Int(_, LitIntType::Unsuffixed) => "",
                        _ => bug!(),
                    };
                    // `lit_val - 1 == max` was checked above, so the suggested
                    // inclusive endpoint fits the type.
                    let suggestion = format!("{}..={}{}", start, lit_val - 1, suffix);
                    err.span_suggestion(
                        parent_expr.span,
                        &"use an inclusive range instead",
                        suggestion,
                        Applicability::MachineApplicable,
                    );
                    err.emit();
                    overwritten = true;
                }
            });
        }
    }
    overwritten
}
170
171 // For `isize` & `usize`, be conservative with the warnings, so that the
172 // warnings are consistent between 32- and 64-bit platforms.
int_ty_range(int_ty: ty::IntTy) -> (i128, i128)173 fn int_ty_range(int_ty: ty::IntTy) -> (i128, i128) {
174 match int_ty {
175 ty::IntTy::Isize => (i64::MIN.into(), i64::MAX.into()),
176 ty::IntTy::I8 => (i8::MIN.into(), i8::MAX.into()),
177 ty::IntTy::I16 => (i16::MIN.into(), i16::MAX.into()),
178 ty::IntTy::I32 => (i32::MIN.into(), i32::MAX.into()),
179 ty::IntTy::I64 => (i64::MIN.into(), i64::MAX.into()),
180 ty::IntTy::I128 => (i128::MIN, i128::MAX),
181 }
182 }
183
uint_ty_range(uint_ty: ty::UintTy) -> (u128, u128)184 fn uint_ty_range(uint_ty: ty::UintTy) -> (u128, u128) {
185 let max = match uint_ty {
186 ty::UintTy::Usize => u64::MAX.into(),
187 ty::UintTy::U8 => u8::MAX.into(),
188 ty::UintTy::U16 => u16::MAX.into(),
189 ty::UintTy::U32 => u32::MAX.into(),
190 ty::UintTy::U64 => u64::MAX.into(),
191 ty::UintTy::U128 => u128::MAX,
192 };
193 (0, max)
194 }
195
get_bin_hex_repr(cx: &LateContext<'_>, lit: &hir::Lit) -> Option<String>196 fn get_bin_hex_repr(cx: &LateContext<'_>, lit: &hir::Lit) -> Option<String> {
197 let src = cx.sess().source_map().span_to_snippet(lit.span).ok()?;
198 let firstch = src.chars().next()?;
199
200 if firstch == '0' {
201 match src.chars().nth(1) {
202 Some('x' | 'b') => return Some(src),
203 _ => return None,
204 }
205 }
206
207 None
208 }
209
/// Reports `OVERFLOWING_LITERALS` for a literal written in binary or hex
/// notation, noting both the decimal value and the value it wraps to in the
/// target type, plus a fitting-type suggestion when one exists.
fn report_bin_hex_error(
    cx: &LateContext<'_>,
    expr: &hir::Expr<'_>,
    ty: attr::IntType,
    repr_str: String,
    val: u128,
    negative: bool,
) {
    let size = Integer::from_attr(&cx.tcx, ty).size();
    cx.struct_span_lint(OVERFLOWING_LITERALS, expr.span, |lint| {
        // Compute the value the literal actually becomes once wrapped into `ty`.
        let (t, actually) = match ty {
            attr::IntType::SignedInt(t) => {
                let actually = if negative {
                    -(size.sign_extend(val) as i128)
                } else {
                    size.sign_extend(val) as i128
                };
                (t.name_str(), actually.to_string())
            }
            attr::IntType::UnsignedInt(t) => {
                let actually = size.truncate(val);
                (t.name_str(), actually.to_string())
            }
        };
        let mut err = lint.build(&format!("literal out of range for `{}`", t));
        if negative {
            // If the value is negative,
            // emits a note about the value itself, apart from the literal.
            err.note(&format!(
                "the literal `{}` (decimal `{}`) does not fit into \
                 the type `{}`",
                repr_str, val, t
            ));
            err.note(&format!("and the value `-{}` will become `{}{}`", repr_str, actually, t));
        } else {
            err.note(&format!(
                "the literal `{}` (decimal `{}`) does not fit into \
                 the type `{}` and will become `{}{}`",
                repr_str, val, t, actually, t
            ));
        }
        if let Some(sugg_ty) =
            get_type_suggestion(&cx.typeck_results().node_type(expr.hir_id), val, negative)
        {
            // If the literal carries a suffix (first `i`/`u` in the snippet),
            // suggest replacing just the suffix; otherwise emit a plain help note.
            if let Some(pos) = repr_str.chars().position(|c| c == 'i' || c == 'u') {
                let (sans_suffix, _) = repr_str.split_at(pos);
                err.span_suggestion(
                    expr.span,
                    &format!("consider using the type `{}` instead", sugg_ty),
                    format!("{}{}", sans_suffix, sugg_ty),
                    Applicability::MachineApplicable,
                );
            } else {
                err.help(&format!("consider using the type `{}` instead", sugg_ty));
            }
        }
        err.emit();
    });
}
269
// This function finds the next fitting type and generates a suggestion string.
// It searches for fitting types in the following way (`X < Y`):
// - `iX`: if literal fits in `uX` => `uX`, else => `iY`
// - `-iX` => `iY`
// - `uX` => `uY`
//
// No suggestion for: `isize`, `usize`.
fn get_type_suggestion(t: Ty<'_>, val: u128, negative: bool) -> Option<&'static str> {
    use ty::IntTy::*;
    use ty::UintTy::*;
    // NOTE: the macro body uses the outer `val` and `negative` bindings directly;
    // the `$val`/`$negative` parameters are effectively unused.
    macro_rules! find_fit {
        ($ty:expr, $val:expr, $negative:expr,
         $($type:ident => [$($utypes:expr),*] => [$($itypes:expr),*]),+) => {
            {
                // A negative literal gets one extra unit of magnitude, since
                // `iN::MIN` has magnitude `iN::MAX + 1`.
                let _neg = if negative { 1 } else { 0 };
                match $ty {
                    $($type => {
                        // Prefer the same-width unsigned type for non-negative values…
                        $(if !negative && val <= uint_ty_range($utypes).1 {
                            return Some($utypes.name_str())
                        })*
                        // …otherwise fall back to the next wider signed type that fits.
                        $(if val <= int_ty_range($itypes).1 as u128 + _neg {
                            return Some($itypes.name_str())
                        })*
                        None
                    },)+
                    _ => None
                }
            }
        }
    }
    match t.kind() {
        ty::Int(i) => find_fit!(i, val, negative,
                      I8 => [U8] => [I16, I32, I64, I128],
                      I16 => [U16] => [I32, I64, I128],
                      I32 => [U32] => [I64, I128],
                      I64 => [U64] => [I128],
                      I128 => [U128] => []),
        ty::Uint(u) => find_fit!(u, val, negative,
                      U8 => [U8, U16, U32, U64, U128] => [],
                      U16 => [U16, U32, U64, U128] => [],
                      U32 => [U32, U64, U128] => [],
                      U64 => [U64, U128] => [],
                      U128 => [U128] => []),
        _ => None,
    }
}
316
/// Lints a signed-integer literal of magnitude `v` against the range of type
/// `t`, accounting for a possible enclosing unary negation (tracked in
/// `type_limits.negated_expr_id`).
fn lint_int_literal<'tcx>(
    cx: &LateContext<'tcx>,
    type_limits: &TypeLimits,
    e: &'tcx hir::Expr<'tcx>,
    lit: &hir::Lit,
    t: ty::IntTy,
    v: u128,
) {
    // Normalize `isize` to a fixed-width type so limits are target-independent.
    let int_type = t.normalize(cx.sess().target.pointer_width);
    let (min, max) = int_ty_range(int_type);
    let max = max as u128;
    let negative = type_limits.negated_expr_id == Some(e.hir_id);

    // Detect literal value out of range [min, max] inclusive
    // avoiding use of -min to prevent overflow/panic
    if (negative && v > max + 1) || (!negative && v > max) {
        // Bin/hex literals get a dedicated report showing the wrapped value.
        if let Some(repr_str) = get_bin_hex_repr(cx, lit) {
            report_bin_hex_error(
                cx,
                e,
                attr::IntType::SignedInt(ty::ast_int_ty(t)),
                repr_str,
                v,
                negative,
            );
            return;
        }

        // If the literal is the endpoint of a range, the range-endpoint special
        // case may emit a better diagnostic and suppress the generic one.
        let par_id = cx.tcx.hir().get_parent_node(e.hir_id);
        if let Node::Expr(par_e) = cx.tcx.hir().get(par_id) {
            if let hir::ExprKind::Struct(..) = par_e.kind {
                if is_range_literal(par_e)
                    && lint_overflowing_range_endpoint(cx, lit, v, max, e, par_e, t.name_str())
                {
                    // The overflowing literal lint was overridden.
                    return;
                }
            }
        }

        cx.struct_span_lint(OVERFLOWING_LITERALS, e.span, |lint| {
            let mut err = lint.build(&format!("literal out of range for `{}`", t.name_str()));
            err.note(&format!(
                "the literal `{}` does not fit into the type `{}` whose range is `{}..={}`",
                cx.sess()
                    .source_map()
                    .span_to_snippet(lit.span)
                    .expect("must get snippet from literal"),
                t.name_str(),
                min,
                max,
            ));
            if let Some(sugg_ty) =
                get_type_suggestion(&cx.typeck_results().node_type(e.hir_id), v, negative)
            {
                err.help(&format!("consider using the type `{}` instead", sugg_ty));
            }
            err.emit();
        });
    }
}
378
/// Lints an unsigned-integer literal against the range of type `t`.
/// Special-cases `as char` casts and range endpoints before falling back to
/// the generic out-of-range report.
fn lint_uint_literal<'tcx>(
    cx: &LateContext<'tcx>,
    e: &'tcx hir::Expr<'tcx>,
    lit: &hir::Lit,
    t: ty::UintTy,
) {
    // Normalize `usize` to a fixed-width type so limits are target-independent.
    let uint_type = t.normalize(cx.sess().target.pointer_width);
    let (min, max) = uint_ty_range(uint_type);
    let lit_val: u128 = match lit.node {
        // _v is u8, within range by definition
        ast::LitKind::Byte(_v) => return,
        ast::LitKind::Int(v, _) => v,
        _ => bug!(),
    };
    if lit_val < min || lit_val > max {
        let parent_id = cx.tcx.hir().get_parent_node(e.hir_id);
        if let Node::Expr(par_e) = cx.tcx.hir().get(parent_id) {
            match par_e.kind {
                hir::ExprKind::Cast(..) => {
                    // `300 as char` and friends: suggest a `char` literal instead.
                    if let ty::Char = cx.typeck_results().expr_ty(par_e).kind() {
                        cx.struct_span_lint(OVERFLOWING_LITERALS, par_e.span, |lint| {
                            lint.build("only `u8` can be cast into `char`")
                                .span_suggestion(
                                    par_e.span,
                                    &"use a `char` literal instead",
                                    format!("'\\u{{{:X}}}'", lit_val),
                                    Applicability::MachineApplicable,
                                )
                                .emit();
                        });
                        return;
                    }
                }
                hir::ExprKind::Struct(..) if is_range_literal(par_e) => {
                    let t = t.name_str();
                    if lint_overflowing_range_endpoint(cx, lit, lit_val, max, e, par_e, t) {
                        // The overflowing literal lint was overridden.
                        return;
                    }
                }
                _ => {}
            }
        }
        // Bin/hex literals get a dedicated report showing the wrapped value.
        if let Some(repr_str) = get_bin_hex_repr(cx, lit) {
            report_bin_hex_error(
                cx,
                e,
                attr::IntType::UnsignedInt(ty::ast_uint_ty(t)),
                repr_str,
                lit_val,
                false,
            );
            return;
        }
        cx.struct_span_lint(OVERFLOWING_LITERALS, e.span, |lint| {
            lint.build(&format!("literal out of range for `{}`", t.name_str()))
                .note(&format!(
                    "the literal `{}` does not fit into the type `{}` whose range is `{}..={}`",
                    cx.sess()
                        .source_map()
                        .span_to_snippet(lit.span)
                        .expect("must get snippet from literal"),
                    t.name_str(),
                    min,
                    max,
                ))
                .emit()
        });
    }
}
449
/// Entry point for literal range checking: dispatches on the literal's
/// inferred type to the int-, uint-, or float-specific check.
fn lint_literal<'tcx>(
    cx: &LateContext<'tcx>,
    type_limits: &TypeLimits,
    e: &'tcx hir::Expr<'tcx>,
    lit: &hir::Lit,
) {
    match *cx.typeck_results().node_type(e.hir_id).kind() {
        ty::Int(t) => {
            match lit.node {
                ast::LitKind::Int(v, ast::LitIntType::Signed(_) | ast::LitIntType::Unsuffixed) => {
                    lint_int_literal(cx, type_limits, e, lit, t, v)
                }
                _ => bug!(),
            };
        }
        ty::Uint(t) => lint_uint_literal(cx, e, lit, t),
        ty::Float(t) => {
            // A float literal is out of range for its type iff it parses to infinity.
            let is_infinite = match lit.node {
                ast::LitKind::Float(v, _) => match t {
                    ty::FloatTy::F32 => v.as_str().parse().map(f32::is_infinite),
                    ty::FloatTy::F64 => v.as_str().parse().map(f64::is_infinite),
                },
                _ => bug!(),
            };
            if is_infinite == Ok(true) {
                cx.struct_span_lint(OVERFLOWING_LITERALS, e.span, |lint| {
                    lint.build(&format!("literal out of range for `{}`", t.name_str()))
                        .note(&format!(
                            "the literal `{}` does not fit into the type `{}` and will be converted to `{}::INFINITY`",
                            cx.sess()
                                .source_map()
                                .span_to_snippet(lit.span)
                                .expect("must get snippet from literal"),
                            t.name_str(),
                            t.name_str(),
                        ))
                        .emit();
                });
            }
        }
        _ => {}
    }
}
493
impl<'tcx> LateLintPass<'tcx> for TypeLimits {
    fn check_expr(&mut self, cx: &LateContext<'tcx>, e: &'tcx hir::Expr<'tcx>) {
        match e.kind {
            hir::ExprKind::Unary(hir::UnOp::Neg, ref expr) => {
                // propagate negation, if the negation itself isn't negated
                if self.negated_expr_id != Some(e.hir_id) {
                    self.negated_expr_id = Some(expr.hir_id);
                }
            }
            hir::ExprKind::Binary(binop, ref l, ref r) => {
                // A comparison that `check_limits` proves always-true/always-false
                // is useless given the operand's type limits.
                if is_comparison(binop) && !check_limits(cx, binop, &l, &r) {
                    cx.struct_span_lint(UNUSED_COMPARISONS, e.span, |lint| {
                        lint.build("comparison is useless due to type limits").emit()
                    });
                }
            }
            hir::ExprKind::Lit(ref lit) => lint_literal(cx, self, e, lit),
            _ => {}
        };

        // Whether the comparison `expr <op> v` can vary, given that `expr`'s
        // type spans `[min, max]`; `false` means its value is fixed by the limits.
        fn is_valid<T: cmp::PartialOrd>(binop: hir::BinOp, v: T, min: T, max: T) -> bool {
            match binop.node {
                hir::BinOpKind::Lt => v > min && v <= max,
                hir::BinOpKind::Le => v >= min && v < max,
                hir::BinOpKind::Gt => v >= min && v < max,
                hir::BinOpKind::Ge => v > min && v <= max,
                hir::BinOpKind::Eq | hir::BinOpKind::Ne => v >= min && v <= max,
                _ => bug!(),
            }
        }

        // Mirrors the comparison operator, used when the literal was on the LHS.
        fn rev_binop(binop: hir::BinOp) -> hir::BinOp {
            source_map::respan(
                binop.span,
                match binop.node {
                    hir::BinOpKind::Lt => hir::BinOpKind::Gt,
                    hir::BinOpKind::Le => hir::BinOpKind::Ge,
                    hir::BinOpKind::Gt => hir::BinOpKind::Lt,
                    hir::BinOpKind::Ge => hir::BinOpKind::Le,
                    // symmetric operators (`Eq`/`Ne`) are returned unchanged
                    _ => return binop,
                },
            )
        }

        // Returns `true` when the comparison is useful (or we can't tell);
        // `false` when it is decided by the type limits alone.
        fn check_limits(
            cx: &LateContext<'_>,
            binop: hir::BinOp,
            l: &hir::Expr<'_>,
            r: &hir::Expr<'_>,
        ) -> bool {
            let (lit, expr, swap) = match (&l.kind, &r.kind) {
                (&hir::ExprKind::Lit(_), _) => (l, r, true),
                (_, &hir::ExprKind::Lit(_)) => (r, l, false),
                _ => return true,
            };
            // Normalize the binop so that the literal is always on the RHS in
            // the comparison
            let norm_binop = if swap { rev_binop(binop) } else { binop };
            match *cx.typeck_results().node_type(expr.hir_id).kind() {
                ty::Int(int_ty) => {
                    let (min, max) = int_ty_range(int_ty);
                    let lit_val: i128 = match lit.kind {
                        hir::ExprKind::Lit(ref li) => match li.node {
                            ast::LitKind::Int(
                                v,
                                ast::LitIntType::Signed(_) | ast::LitIntType::Unsuffixed,
                            ) => v as i128,
                            _ => return true,
                        },
                        _ => bug!(),
                    };
                    is_valid(norm_binop, lit_val, min, max)
                }
                ty::Uint(uint_ty) => {
                    let (min, max): (u128, u128) = uint_ty_range(uint_ty);
                    let lit_val: u128 = match lit.kind {
                        hir::ExprKind::Lit(ref li) => match li.node {
                            ast::LitKind::Int(v, _) => v,
                            _ => return true,
                        },
                        _ => bug!(),
                    };
                    is_valid(norm_binop, lit_val, min, max)
                }
                _ => true,
            }
        }

        fn is_comparison(binop: hir::BinOp) -> bool {
            matches!(
                binop.node,
                hir::BinOpKind::Eq
                    | hir::BinOpKind::Lt
                    | hir::BinOpKind::Le
                    | hir::BinOpKind::Ne
                    | hir::BinOpKind::Ge
                    | hir::BinOpKind::Gt
            )
        }
    }
}
595
declare_lint! {
    /// The `improper_ctypes` lint detects incorrect use of types in foreign
    /// modules.
    ///
    /// ### Example
    ///
    /// ```rust
    /// extern "C" {
    ///     static STATIC: String;
    /// }
    /// ```
    ///
    /// {{produces}}
    ///
    /// ### Explanation
    ///
    /// The compiler has several checks to verify that types used in `extern`
    /// blocks are safe and follow certain rules to ensure proper
    /// compatibility with the foreign interfaces. This lint is issued when it
    /// detects a probable mistake in a definition. The lint usually should
    /// provide a description of the issue, along with possibly a hint on how
    /// to resolve it.
    IMPROPER_CTYPES,
    Warn,
    "proper use of libc types in foreign modules"
}

declare_lint_pass!(ImproperCTypesDeclarations => [IMPROPER_CTYPES]);
624
declare_lint! {
    /// The `improper_ctypes_definitions` lint detects incorrect use of
    /// [`extern` function] definitions.
    ///
    /// [`extern` function]: https://doc.rust-lang.org/reference/items/functions.html#extern-function-qualifier
    ///
    /// ### Example
    ///
    /// ```rust
    /// # #![allow(unused)]
    /// pub extern "C" fn str_type(p: &str) { }
    /// ```
    ///
    /// {{produces}}
    ///
    /// ### Explanation
    ///
    /// There are many parameter and return types that may be specified in an
    /// `extern` function that are not compatible with the given ABI. This
    /// lint is an alert that these types should not be used. The lint usually
    /// should provide a description of the issue, along with possibly a hint
    /// on how to resolve it.
    IMPROPER_CTYPES_DEFINITIONS,
    Warn,
    "proper use of libc types in foreign item definitions"
}

declare_lint_pass!(ImproperCTypesDefinitions => [IMPROPER_CTYPES_DEFINITIONS]);
653
/// Whether the FFI-safety check is being run on a foreign declaration
/// (inside an `extern` block) or on an `extern "…"` fn defined in Rust.
#[derive(Clone, Copy)]
crate enum CItemKind {
    Declaration,
    Definition,
}
659
/// Visitor that checks the types reachable from FFI signatures for FFI-safety.
struct ImproperCTypesVisitor<'a, 'tcx> {
    cx: &'a LateContext<'tcx>,
    // Declaration vs. definition; some checks (e.g. `Box` handling) differ.
    mode: CItemKind,
}
664
/// Outcome of checking a single type for FFI-safety.
enum FfiResult<'tcx> {
    /// The type is considered safe to pass across the FFI boundary.
    FfiSafe,
    /// The type is (or only contains) `PhantomData`; whether that is an error
    /// depends on the enclosing context (see `check_variant_for_ffi`).
    FfiPhantom(Ty<'tcx>),
    /// The type is not FFI-safe; `reason` and optional `help` feed the lint.
    FfiUnsafe { ty: Ty<'tcx>, reason: String, help: Option<String> },
}
670
nonnull_optimization_guaranteed<'tcx>(tcx: TyCtxt<'tcx>, def: &ty::AdtDef) -> bool671 crate fn nonnull_optimization_guaranteed<'tcx>(tcx: TyCtxt<'tcx>, def: &ty::AdtDef) -> bool {
672 tcx.get_attrs(def.did).iter().any(|a| a.has_name(sym::rustc_nonnull_optimization_guaranteed))
673 }
674
675 /// `repr(transparent)` structs can have a single non-ZST field, this function returns that
676 /// field.
transparent_newtype_field<'a, 'tcx>( tcx: TyCtxt<'tcx>, variant: &'a ty::VariantDef, ) -> Option<&'a ty::FieldDef>677 pub fn transparent_newtype_field<'a, 'tcx>(
678 tcx: TyCtxt<'tcx>,
679 variant: &'a ty::VariantDef,
680 ) -> Option<&'a ty::FieldDef> {
681 let param_env = tcx.param_env(variant.def_id);
682 variant.fields.iter().find(|field| {
683 let field_ty = tcx.type_of(field.did);
684 let is_zst = tcx.layout_of(param_env.and(field_ty)).map_or(false, |layout| layout.is_zst());
685 !is_zst
686 })
687 }
688
/// Is type known to be non-null?
fn ty_is_known_nonnull<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>, mode: CItemKind) -> bool {
    let tcx = cx.tcx;
    match ty.kind() {
        ty::FnPtr(_) => true,
        ty::Ref(..) => true,
        // `Box` is only treated as non-null in definition mode (matching the
        // `Box` special case in `check_type_for_ffi`).
        ty::Adt(def, _) if def.is_box() && matches!(mode, CItemKind::Definition) => true,
        ty::Adt(def, substs) if def.repr.transparent() && !def.is_union() => {
            let marked_non_null = nonnull_optimization_guaranteed(tcx, &def);

            if marked_non_null {
                return true;
            }

            // Types with a `#[repr(no_niche)]` attribute have their niche hidden.
            // The attribute is used by the UnsafeCell for example (the only use so far).
            if def.repr.hide_niche() {
                return false;
            }

            // A transparent newtype is non-null iff its single non-ZST field is.
            def.variants
                .iter()
                .filter_map(|variant| transparent_newtype_field(cx.tcx, variant))
                .any(|field| ty_is_known_nonnull(cx, field.ty(tcx, substs), mode))
        }
        _ => false,
    }
}
717
/// Given a non-null scalar (or transparent) type `ty`, return the nullable version of that type.
/// If the type passed in was not scalar, returns None.
fn get_nullable_type<'tcx>(cx: &LateContext<'tcx>, ty: Ty<'tcx>) -> Option<Ty<'tcx>> {
    let tcx = cx.tcx;
    Some(match *ty.kind() {
        // A (transparent) ADT: recurse into its single non-ZST field.
        ty::Adt(field_def, field_substs) => {
            let inner_field_ty = {
                let first_non_zst_ty =
                    field_def.variants.iter().filter_map(|v| transparent_newtype_field(cx.tcx, v));
                debug_assert_eq!(
                    first_non_zst_ty.clone().count(),
                    1,
                    "Wrong number of fields for transparent type"
                );
                first_non_zst_ty
                    .last()
                    .expect("No non-zst fields in transparent type.")
                    .ty(tcx, field_substs)
            };
            return get_nullable_type(cx, inner_field_ty);
        }
        // Integer types are already nullable (zero is a valid value).
        ty::Int(ty) => tcx.mk_mach_int(ty),
        ty::Uint(ty) => tcx.mk_mach_uint(ty),
        ty::RawPtr(ty_mut) => tcx.mk_ptr(ty_mut),
        // As these types are always non-null, the nullable equivalent of
        // Option<T> of these types are their raw pointer counterparts.
        ty::Ref(_region, ty, mutbl) => tcx.mk_ptr(ty::TypeAndMut { ty, mutbl }),
        ty::FnPtr(..) => {
            // There is no nullable equivalent for Rust's function pointers -- you
            // must use an Option<fn(..) -> _> to represent it.
            ty
        }

        // We should only ever reach this case if ty_is_known_nonnull is extended
        // to other types.
        ref unhandled => {
            debug!(
                "get_nullable_type: Unhandled scalar kind: {:?} while checking {:?}",
                unhandled, ty
            );
            return None;
        }
    })
}
762
/// Check if this enum can be safely exported based on the "nullable pointer optimization". If it
/// can, return the type that `ty` can be safely converted to, otherwise return `None`.
/// Currently restricted to function pointers, boxes, references, `core::num::NonZero*`,
/// `core::ptr::NonNull`, and `#[repr(transparent)]` newtypes.
/// FIXME: This duplicates code in codegen.
crate fn repr_nullable_ptr<'tcx>(
    cx: &LateContext<'tcx>,
    ty: Ty<'tcx>,
    ckind: CItemKind,
) -> Option<Ty<'tcx>> {
    debug!("is_repr_nullable_ptr(cx, ty = {:?})", ty);
    if let ty::Adt(ty_def, substs) = ty.kind() {
        // The enum must be Option-like: exactly two variants, one fieldless
        // and one with a single field.
        let field_ty = match &ty_def.variants.raw[..] {
            [var_one, var_two] => match (&var_one.fields[..], &var_two.fields[..]) {
                ([], [field]) | ([field], []) => field.ty(cx.tcx, substs),
                _ => return None,
            },
            _ => return None,
        };

        if !ty_is_known_nonnull(cx, field_ty, ckind) {
            return None;
        }

        // At this point, the field's type is known to be nonnull and the parent enum is Option-like.
        // If the computed size for the field and the enum are different, the nonnull optimization isn't
        // being applied (and we've got a problem somewhere).
        let compute_size_skeleton = |t| SizeSkeleton::compute(t, cx.tcx, cx.param_env).unwrap();
        if !compute_size_skeleton(ty).same_size(compute_size_skeleton(field_ty)) {
            bug!("improper_ctypes: Option nonnull optimization not applied?");
        }

        // Return the nullable type this Option-like enum can be safely represented with.
        let field_ty_abi = &cx.layout_of(field_ty).unwrap().abi;
        if let Abi::Scalar(field_ty_scalar) = field_ty_abi {
            match (field_ty_scalar.valid_range.start, field_ty_scalar.valid_range.end) {
                (0, _) => unreachable!("Non-null optimisation extended to a non-zero value."),
                (1, _) => {
                    return Some(get_nullable_type(cx, field_ty).unwrap());
                }
                (start, end) => unreachable!("Unhandled start and end range: ({}, {})", start, end),
            };
        }
    }
    None
}
809
810 impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> {
811 /// Check if the type is array and emit an unsafe type lint.
check_for_array_ty(&mut self, sp: Span, ty: Ty<'tcx>) -> bool812 fn check_for_array_ty(&mut self, sp: Span, ty: Ty<'tcx>) -> bool {
813 if let ty::Array(..) = ty.kind() {
814 self.emit_ffi_unsafe_type_lint(
815 ty,
816 sp,
817 "passing raw arrays by value is not FFI-safe",
818 Some("consider passing a pointer to the array"),
819 );
820 true
821 } else {
822 false
823 }
824 }
825
826 /// Checks if the given field's type is "ffi-safe".
check_field_type_for_ffi( &self, cache: &mut FxHashSet<Ty<'tcx>>, field: &ty::FieldDef, substs: SubstsRef<'tcx>, ) -> FfiResult<'tcx>827 fn check_field_type_for_ffi(
828 &self,
829 cache: &mut FxHashSet<Ty<'tcx>>,
830 field: &ty::FieldDef,
831 substs: SubstsRef<'tcx>,
832 ) -> FfiResult<'tcx> {
833 let field_ty = field.ty(self.cx.tcx, substs);
834 if field_ty.has_opaque_types() {
835 self.check_type_for_ffi(cache, field_ty)
836 } else {
837 let field_ty = self.cx.tcx.normalize_erasing_regions(self.cx.param_env, field_ty);
838 self.check_type_for_ffi(cache, field_ty)
839 }
840 }
841
/// Checks if the given `VariantDef`'s field types are "ffi-safe".
fn check_variant_for_ffi(
    &self,
    cache: &mut FxHashSet<Ty<'tcx>>,
    ty: Ty<'tcx>,
    def: &ty::AdtDef,
    variant: &ty::VariantDef,
    substs: SubstsRef<'tcx>,
) -> FfiResult<'tcx> {
    use FfiResult::*;

    if def.repr.transparent() {
        // Can assume that at most one field is not a ZST, so only check
        // that field's type for FFI-safety.
        if let Some(field) = transparent_newtype_field(self.cx.tcx, variant) {
            self.check_field_type_for_ffi(cache, field, substs)
        } else {
            // All fields are ZSTs; this means that the type should behave
            // like (), which is FFI-unsafe
            FfiUnsafe {
                ty,
                reason: "this struct contains only zero-sized fields".into(),
                help: None,
            }
        }
    } else {
        // We can't completely trust repr(C) markings; make sure the fields are
        // actually safe.
        let mut all_phantom = !variant.fields.is_empty();
        for field in &variant.fields {
            match self.check_field_type_for_ffi(cache, &field, substs) {
                FfiSafe => {
                    all_phantom = false;
                }
                // A PhantomData field is tolerated in structs but makes an
                // enum variant FFI-unsafe.
                FfiPhantom(..) if def.is_enum() => {
                    return FfiUnsafe {
                        ty,
                        reason: "this enum contains a PhantomData field".into(),
                        help: None,
                    };
                }
                FfiPhantom(..) => {}
                // Propagate the first unsafe result found.
                r => return r,
            }
        }

        if all_phantom { FfiPhantom(ty) } else { FfiSafe }
    }
}
891
892 /// Checks if the given type is "ffi-safe" (has a stable, well-defined
893 /// representation which can be exported to C code).
check_type_for_ffi(&self, cache: &mut FxHashSet<Ty<'tcx>>, ty: Ty<'tcx>) -> FfiResult<'tcx>894 fn check_type_for_ffi(&self, cache: &mut FxHashSet<Ty<'tcx>>, ty: Ty<'tcx>) -> FfiResult<'tcx> {
895 use FfiResult::*;
896
897 let tcx = self.cx.tcx;
898
899 // Protect against infinite recursion, for example
900 // `struct S(*mut S);`.
901 // FIXME: A recursion limit is necessary as well, for irregular
902 // recursive types.
903 if !cache.insert(ty) {
904 return FfiSafe;
905 }
906
907 match *ty.kind() {
908 ty::Adt(def, substs) => {
909 if def.is_box() && matches!(self.mode, CItemKind::Definition) {
910 if ty.boxed_ty().is_sized(tcx.at(DUMMY_SP), self.cx.param_env) {
911 return FfiSafe;
912 } else {
913 return FfiUnsafe {
914 ty,
915 reason: "box cannot be represented as a single pointer".to_string(),
916 help: None,
917 };
918 }
919 }
920 if def.is_phantom_data() {
921 return FfiPhantom(ty);
922 }
923 match def.adt_kind() {
924 AdtKind::Struct | AdtKind::Union => {
925 let kind = if def.is_struct() { "struct" } else { "union" };
926
927 if !def.repr.c() && !def.repr.transparent() {
928 return FfiUnsafe {
929 ty,
930 reason: format!("this {} has unspecified layout", kind),
931 help: Some(format!(
932 "consider adding a `#[repr(C)]` or \
933 `#[repr(transparent)]` attribute to this {}",
934 kind
935 )),
936 };
937 }
938
939 let is_non_exhaustive =
940 def.non_enum_variant().is_field_list_non_exhaustive();
941 if is_non_exhaustive && !def.did.is_local() {
942 return FfiUnsafe {
943 ty,
944 reason: format!("this {} is non-exhaustive", kind),
945 help: None,
946 };
947 }
948
949 if def.non_enum_variant().fields.is_empty() {
950 return FfiUnsafe {
951 ty,
952 reason: format!("this {} has no fields", kind),
953 help: Some(format!("consider adding a member to this {}", kind)),
954 };
955 }
956
957 self.check_variant_for_ffi(cache, ty, def, def.non_enum_variant(), substs)
958 }
959 AdtKind::Enum => {
960 if def.variants.is_empty() {
961 // Empty enums are okay... although sort of useless.
962 return FfiSafe;
963 }
964
965 // Check for a repr() attribute to specify the size of the
966 // discriminant.
967 if !def.repr.c() && !def.repr.transparent() && def.repr.int.is_none() {
968 // Special-case types like `Option<extern fn()>`.
969 if repr_nullable_ptr(self.cx, ty, self.mode).is_none() {
970 return FfiUnsafe {
971 ty,
972 reason: "enum has no representation hint".into(),
973 help: Some(
974 "consider adding a `#[repr(C)]`, \
975 `#[repr(transparent)]`, or integer `#[repr(...)]` \
976 attribute to this enum"
977 .into(),
978 ),
979 };
980 }
981 }
982
983 if def.is_variant_list_non_exhaustive() && !def.did.is_local() {
984 return FfiUnsafe {
985 ty,
986 reason: "this enum is non-exhaustive".into(),
987 help: None,
988 };
989 }
990
991 // Check the contained variants.
992 for variant in &def.variants {
993 let is_non_exhaustive = variant.is_field_list_non_exhaustive();
994 if is_non_exhaustive && !variant.def_id.is_local() {
995 return FfiUnsafe {
996 ty,
997 reason: "this enum has non-exhaustive variants".into(),
998 help: None,
999 };
1000 }
1001
1002 match self.check_variant_for_ffi(cache, ty, def, variant, substs) {
1003 FfiSafe => (),
1004 r => return r,
1005 }
1006 }
1007
1008 FfiSafe
1009 }
1010 }
1011 }
1012
1013 ty::Char => FfiUnsafe {
1014 ty,
1015 reason: "the `char` type has no C equivalent".into(),
1016 help: Some("consider using `u32` or `libc::wchar_t` instead".into()),
1017 },
1018
1019 ty::Int(ty::IntTy::I128) | ty::Uint(ty::UintTy::U128) => FfiUnsafe {
1020 ty,
1021 reason: "128-bit integers don't currently have a known stable ABI".into(),
1022 help: None,
1023 },
1024
1025 // Primitive types with a stable representation.
1026 ty::Bool | ty::Int(..) | ty::Uint(..) | ty::Float(..) | ty::Never => FfiSafe,
1027
1028 ty::Slice(_) => FfiUnsafe {
1029 ty,
1030 reason: "slices have no C equivalent".into(),
1031 help: Some("consider using a raw pointer instead".into()),
1032 },
1033
1034 ty::Dynamic(..) => {
1035 FfiUnsafe { ty, reason: "trait objects have no C equivalent".into(), help: None }
1036 }
1037
1038 ty::Str => FfiUnsafe {
1039 ty,
1040 reason: "string slices have no C equivalent".into(),
1041 help: Some("consider using `*const u8` and a length instead".into()),
1042 },
1043
1044 ty::Tuple(..) => FfiUnsafe {
1045 ty,
1046 reason: "tuples have unspecified layout".into(),
1047 help: Some("consider using a struct instead".into()),
1048 },
1049
1050 ty::RawPtr(ty::TypeAndMut { ty, .. }) | ty::Ref(_, ty, _)
1051 if {
1052 matches!(self.mode, CItemKind::Definition)
1053 && ty.is_sized(self.cx.tcx.at(DUMMY_SP), self.cx.param_env)
1054 } =>
1055 {
1056 FfiSafe
1057 }
1058
1059 ty::RawPtr(ty::TypeAndMut { ty, .. })
1060 if match ty.kind() {
1061 ty::Tuple(tuple) => tuple.is_empty(),
1062 _ => false,
1063 } =>
1064 {
1065 FfiSafe
1066 }
1067
1068 ty::RawPtr(ty::TypeAndMut { ty, .. }) | ty::Ref(_, ty, _) => {
1069 self.check_type_for_ffi(cache, ty)
1070 }
1071
1072 ty::Array(inner_ty, _) => self.check_type_for_ffi(cache, inner_ty),
1073
1074 ty::FnPtr(sig) => {
1075 if self.is_internal_abi(sig.abi()) {
1076 return FfiUnsafe {
1077 ty,
1078 reason: "this function pointer has Rust-specific calling convention".into(),
1079 help: Some(
1080 "consider using an `extern fn(...) -> ...` \
1081 function pointer instead"
1082 .into(),
1083 ),
1084 };
1085 }
1086
1087 let sig = tcx.erase_late_bound_regions(sig);
1088 if !sig.output().is_unit() {
1089 let r = self.check_type_for_ffi(cache, sig.output());
1090 match r {
1091 FfiSafe => {}
1092 _ => {
1093 return r;
1094 }
1095 }
1096 }
1097 for arg in sig.inputs() {
1098 let r = self.check_type_for_ffi(cache, arg);
1099 match r {
1100 FfiSafe => {}
1101 _ => {
1102 return r;
1103 }
1104 }
1105 }
1106 FfiSafe
1107 }
1108
1109 ty::Foreign(..) => FfiSafe,
1110
1111 // While opaque types are checked for earlier, if a projection in a struct field
1112 // normalizes to an opaque type, then it will reach this branch.
1113 ty::Opaque(..) => {
1114 FfiUnsafe { ty, reason: "opaque types have no C equivalent".into(), help: None }
1115 }
1116
1117 // `extern "C" fn` functions can have type parameters, which may or may not be FFI-safe,
1118 // so they are currently ignored for the purposes of this lint.
1119 ty::Param(..) | ty::Projection(..) if matches!(self.mode, CItemKind::Definition) => {
1120 FfiSafe
1121 }
1122
1123 ty::Param(..)
1124 | ty::Projection(..)
1125 | ty::Infer(..)
1126 | ty::Bound(..)
1127 | ty::Error(_)
1128 | ty::Closure(..)
1129 | ty::Generator(..)
1130 | ty::GeneratorWitness(..)
1131 | ty::Placeholder(..)
1132 | ty::FnDef(..) => bug!("unexpected type in foreign function: {:?}", ty),
1133 }
1134 }
1135
emit_ffi_unsafe_type_lint( &mut self, ty: Ty<'tcx>, sp: Span, note: &str, help: Option<&str>, )1136 fn emit_ffi_unsafe_type_lint(
1137 &mut self,
1138 ty: Ty<'tcx>,
1139 sp: Span,
1140 note: &str,
1141 help: Option<&str>,
1142 ) {
1143 let lint = match self.mode {
1144 CItemKind::Declaration => IMPROPER_CTYPES,
1145 CItemKind::Definition => IMPROPER_CTYPES_DEFINITIONS,
1146 };
1147
1148 self.cx.struct_span_lint(lint, sp, |lint| {
1149 let item_description = match self.mode {
1150 CItemKind::Declaration => "block",
1151 CItemKind::Definition => "fn",
1152 };
1153 let mut diag = lint.build(&format!(
1154 "`extern` {} uses type `{}`, which is not FFI-safe",
1155 item_description, ty
1156 ));
1157 diag.span_label(sp, "not FFI-safe");
1158 if let Some(help) = help {
1159 diag.help(help);
1160 }
1161 diag.note(note);
1162 if let ty::Adt(def, _) = ty.kind() {
1163 if let Some(sp) = self.cx.tcx.hir().span_if_local(def.did) {
1164 diag.span_note(sp, "the type is defined here");
1165 }
1166 }
1167 diag.emit();
1168 });
1169 }
1170
check_for_opaque_ty(&mut self, sp: Span, ty: Ty<'tcx>) -> bool1171 fn check_for_opaque_ty(&mut self, sp: Span, ty: Ty<'tcx>) -> bool {
1172 struct ProhibitOpaqueTypes<'a, 'tcx> {
1173 cx: &'a LateContext<'tcx>,
1174 }
1175
1176 impl<'a, 'tcx> ty::fold::TypeVisitor<'tcx> for ProhibitOpaqueTypes<'a, 'tcx> {
1177 type BreakTy = Ty<'tcx>;
1178 fn tcx_for_anon_const_substs(&self) -> Option<TyCtxt<'tcx>> {
1179 Some(self.cx.tcx)
1180 }
1181
1182 fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
1183 match ty.kind() {
1184 ty::Opaque(..) => ControlFlow::Break(ty),
1185 // Consider opaque types within projections FFI-safe if they do not normalize
1186 // to more opaque types.
1187 ty::Projection(..) => {
1188 let ty = self.cx.tcx.normalize_erasing_regions(self.cx.param_env, ty);
1189
1190 // If `ty` is an opaque type directly then `super_visit_with` won't invoke
1191 // this function again.
1192 if ty.has_opaque_types() {
1193 self.visit_ty(ty)
1194 } else {
1195 ControlFlow::CONTINUE
1196 }
1197 }
1198 _ => ty.super_visit_with(self),
1199 }
1200 }
1201 }
1202
1203 if let Some(ty) = ty.visit_with(&mut ProhibitOpaqueTypes { cx: self.cx }).break_value() {
1204 self.emit_ffi_unsafe_type_lint(ty, sp, "opaque types have no C equivalent", None);
1205 true
1206 } else {
1207 false
1208 }
1209 }
1210
check_type_for_ffi_and_report_errors( &mut self, sp: Span, ty: Ty<'tcx>, is_static: bool, is_return_type: bool, )1211 fn check_type_for_ffi_and_report_errors(
1212 &mut self,
1213 sp: Span,
1214 ty: Ty<'tcx>,
1215 is_static: bool,
1216 is_return_type: bool,
1217 ) {
1218 // We have to check for opaque types before `normalize_erasing_regions`,
1219 // which will replace opaque types with their underlying concrete type.
1220 if self.check_for_opaque_ty(sp, ty) {
1221 // We've already emitted an error due to an opaque type.
1222 return;
1223 }
1224
1225 // it is only OK to use this function because extern fns cannot have
1226 // any generic types right now:
1227 let ty = self.cx.tcx.normalize_erasing_regions(self.cx.param_env, ty);
1228
1229 // C doesn't really support passing arrays by value - the only way to pass an array by value
1230 // is through a struct. So, first test that the top level isn't an array, and then
1231 // recursively check the types inside.
1232 if !is_static && self.check_for_array_ty(sp, ty) {
1233 return;
1234 }
1235
1236 // Don't report FFI errors for unit return types. This check exists here, and not in
1237 // `check_foreign_fn` (where it would make more sense) so that normalization has definitely
1238 // happened.
1239 if is_return_type && ty.is_unit() {
1240 return;
1241 }
1242
1243 match self.check_type_for_ffi(&mut FxHashSet::default(), ty) {
1244 FfiResult::FfiSafe => {}
1245 FfiResult::FfiPhantom(ty) => {
1246 self.emit_ffi_unsafe_type_lint(ty, sp, "composed only of `PhantomData`", None);
1247 }
1248 // If `ty` is a `repr(transparent)` newtype, and the non-zero-sized type is a generic
1249 // argument, which after substitution, is `()`, then this branch can be hit.
1250 FfiResult::FfiUnsafe { ty, .. } if is_return_type && ty.is_unit() => {}
1251 FfiResult::FfiUnsafe { ty, reason, help } => {
1252 self.emit_ffi_unsafe_type_lint(ty, sp, &reason, help.as_deref());
1253 }
1254 }
1255 }
1256
check_foreign_fn(&mut self, id: hir::HirId, decl: &hir::FnDecl<'_>)1257 fn check_foreign_fn(&mut self, id: hir::HirId, decl: &hir::FnDecl<'_>) {
1258 let def_id = self.cx.tcx.hir().local_def_id(id);
1259 let sig = self.cx.tcx.fn_sig(def_id);
1260 let sig = self.cx.tcx.erase_late_bound_regions(sig);
1261
1262 for (input_ty, input_hir) in iter::zip(sig.inputs(), decl.inputs) {
1263 self.check_type_for_ffi_and_report_errors(input_hir.span, input_ty, false, false);
1264 }
1265
1266 if let hir::FnRetTy::Return(ref ret_hir) = decl.output {
1267 let ret_ty = sig.output();
1268 self.check_type_for_ffi_and_report_errors(ret_hir.span, ret_ty, false, true);
1269 }
1270 }
1271
check_foreign_static(&mut self, id: hir::HirId, span: Span)1272 fn check_foreign_static(&mut self, id: hir::HirId, span: Span) {
1273 let def_id = self.cx.tcx.hir().local_def_id(id);
1274 let ty = self.cx.tcx.type_of(def_id);
1275 self.check_type_for_ffi_and_report_errors(span, ty, true, false);
1276 }
1277
is_internal_abi(&self, abi: SpecAbi) -> bool1278 fn is_internal_abi(&self, abi: SpecAbi) -> bool {
1279 matches!(
1280 abi,
1281 SpecAbi::Rust | SpecAbi::RustCall | SpecAbi::RustIntrinsic | SpecAbi::PlatformIntrinsic
1282 )
1283 }
1284 }
1285
1286 impl<'tcx> LateLintPass<'tcx> for ImproperCTypesDeclarations {
check_foreign_item(&mut self, cx: &LateContext<'_>, it: &hir::ForeignItem<'_>)1287 fn check_foreign_item(&mut self, cx: &LateContext<'_>, it: &hir::ForeignItem<'_>) {
1288 let mut vis = ImproperCTypesVisitor { cx, mode: CItemKind::Declaration };
1289 let abi = cx.tcx.hir().get_foreign_abi(it.hir_id());
1290
1291 if !vis.is_internal_abi(abi) {
1292 match it.kind {
1293 hir::ForeignItemKind::Fn(ref decl, _, _) => {
1294 vis.check_foreign_fn(it.hir_id(), decl);
1295 }
1296 hir::ForeignItemKind::Static(ref ty, _) => {
1297 vis.check_foreign_static(it.hir_id(), ty.span);
1298 }
1299 hir::ForeignItemKind::Type => (),
1300 }
1301 }
1302 }
1303 }
1304
1305 impl<'tcx> LateLintPass<'tcx> for ImproperCTypesDefinitions {
check_fn( &mut self, cx: &LateContext<'tcx>, kind: hir::intravisit::FnKind<'tcx>, decl: &'tcx hir::FnDecl<'_>, _: &'tcx hir::Body<'_>, _: Span, hir_id: hir::HirId, )1306 fn check_fn(
1307 &mut self,
1308 cx: &LateContext<'tcx>,
1309 kind: hir::intravisit::FnKind<'tcx>,
1310 decl: &'tcx hir::FnDecl<'_>,
1311 _: &'tcx hir::Body<'_>,
1312 _: Span,
1313 hir_id: hir::HirId,
1314 ) {
1315 use hir::intravisit::FnKind;
1316
1317 let abi = match kind {
1318 FnKind::ItemFn(_, _, header, ..) => header.abi,
1319 FnKind::Method(_, sig, ..) => sig.header.abi,
1320 _ => return,
1321 };
1322
1323 let mut vis = ImproperCTypesVisitor { cx, mode: CItemKind::Definition };
1324 if !vis.is_internal_abi(abi) {
1325 vis.check_foreign_fn(hir_id, decl);
1326 }
1327 }
1328 }
1329
// Lint pass struct for `variant_size_differences`; the logic lives in the
// `LateLintPass` impl below.
declare_lint_pass!(VariantSizeDifferences => [VARIANT_SIZE_DIFFERENCES]);
1331
1332 impl<'tcx> LateLintPass<'tcx> for VariantSizeDifferences {
check_item(&mut self, cx: &LateContext<'_>, it: &hir::Item<'_>)1333 fn check_item(&mut self, cx: &LateContext<'_>, it: &hir::Item<'_>) {
1334 if let hir::ItemKind::Enum(ref enum_definition, _) = it.kind {
1335 let t = cx.tcx.type_of(it.def_id);
1336 let ty = cx.tcx.erase_regions(t);
1337 let layout = match cx.layout_of(ty) {
1338 Ok(layout) => layout,
1339 Err(
1340 ty::layout::LayoutError::Unknown(_) | ty::layout::LayoutError::SizeOverflow(_),
1341 ) => return,
1342 };
1343 let (variants, tag) = match layout.variants {
1344 Variants::Multiple {
1345 tag_encoding: TagEncoding::Direct, tag, ref variants, ..
1346 } => (variants, tag),
1347 _ => return,
1348 };
1349
1350 let tag_size = tag.value.size(&cx.tcx).bytes();
1351
1352 debug!(
1353 "enum `{}` is {} bytes large with layout:\n{:#?}",
1354 t,
1355 layout.size.bytes(),
1356 layout
1357 );
1358
1359 let (largest, slargest, largest_index) = iter::zip(enum_definition.variants, variants)
1360 .map(|(variant, variant_layout)| {
1361 // Subtract the size of the enum tag.
1362 let bytes = variant_layout.size.bytes().saturating_sub(tag_size);
1363
1364 debug!("- variant `{}` is {} bytes large", variant.ident, bytes);
1365 bytes
1366 })
1367 .enumerate()
1368 .fold((0, 0, 0), |(l, s, li), (idx, size)| {
1369 if size > l {
1370 (size, l, idx)
1371 } else if size > s {
1372 (l, size, li)
1373 } else {
1374 (l, s, li)
1375 }
1376 });
1377
1378 // We only warn if the largest variant is at least thrice as large as
1379 // the second-largest.
1380 if largest > slargest * 3 && slargest > 0 {
1381 cx.struct_span_lint(
1382 VARIANT_SIZE_DIFFERENCES,
1383 enum_definition.variants[largest_index].span,
1384 |lint| {
1385 lint.build(&format!(
1386 "enum variant is more than three times \
1387 larger ({} bytes) than the next largest",
1388 largest
1389 ))
1390 .emit()
1391 },
1392 );
1393 }
1394 }
1395 }
1396 }
1397
declare_lint! {
    /// The `invalid_atomic_ordering` lint detects passing an `Ordering`
    /// to an atomic operation that does not support that ordering.
    ///
    /// ### Example
    ///
    /// ```rust,compile_fail
    /// # use core::sync::atomic::{AtomicU8, Ordering};
    /// let atom = AtomicU8::new(0);
    /// let value = atom.load(Ordering::Release);
    /// # let _ = value;
    /// ```
    ///
    /// {{produces}}
    ///
    /// ### Explanation
    ///
    /// Some atomic operations are only supported for a subset of the
    /// `atomic::Ordering` variants. Passing an unsupported variant will cause
    /// an unconditional panic at runtime, which is detected by this lint.
    ///
    /// This lint will trigger in the following cases: (where `AtomicType` is an
    /// atomic type from `core::sync::atomic`, such as `AtomicBool`,
    /// `AtomicPtr`, `AtomicUsize`, or any of the other integer atomics).
    ///
    /// - Passing `Ordering::Acquire` or `Ordering::AcqRel` to
    ///   `AtomicType::store`.
    ///
    /// - Passing `Ordering::Release` or `Ordering::AcqRel` to
    ///   `AtomicType::load`.
    ///
    /// - Passing `Ordering::Relaxed` to `core::sync::atomic::fence` or
    ///   `core::sync::atomic::compiler_fence`.
    ///
    /// - Passing `Ordering::Release` or `Ordering::AcqRel` as the failure
    ///   ordering for any of `AtomicType::compare_exchange`,
    ///   `AtomicType::compare_exchange_weak`, or `AtomicType::fetch_update`.
    ///
    /// - Passing in a pair of orderings to `AtomicType::compare_exchange`,
    ///   `AtomicType::compare_exchange_weak`, or `AtomicType::fetch_update`
    ///   where the failure ordering is stronger than the success ordering.
    INVALID_ATOMIC_ORDERING,
    Deny,
    "usage of invalid atomic ordering in atomic operations and memory fences"
}

// Lint pass struct; the checking helpers live in `impl InvalidAtomicOrdering`.
declare_lint_pass!(InvalidAtomicOrdering => [INVALID_ATOMIC_ORDERING]);
1445
impl InvalidAtomicOrdering {
    /// If `expr` is an inherent method call named in `recognized_names` on one
    /// of the standard library's atomic types, returns the method name and the
    /// call's arguments (the receiver is `args[0]`).
    fn inherent_atomic_method_call<'hir>(
        cx: &LateContext<'_>,
        expr: &Expr<'hir>,
        recognized_names: &[Symbol], // used for fast path calculation
    ) -> Option<(Symbol, &'hir [Expr<'hir>])> {
        // The atomic types from `core::sync::atomic` whose methods are linted.
        const ATOMIC_TYPES: &[Symbol] = &[
            sym::AtomicBool,
            sym::AtomicPtr,
            sym::AtomicUsize,
            sym::AtomicU8,
            sym::AtomicU16,
            sym::AtomicU32,
            sym::AtomicU64,
            sym::AtomicU128,
            sym::AtomicIsize,
            sym::AtomicI8,
            sym::AtomicI16,
            sym::AtomicI32,
            sym::AtomicI64,
            sym::AtomicI128,
        ];
        if_chain! {
            if let ExprKind::MethodCall(ref method_path, _, args, _) = &expr.kind;
            if recognized_names.contains(&method_path.ident.name);
            if let Some(m_def_id) = cx.typeck_results().type_dependent_def_id(expr.hir_id);
            if let Some(impl_did) = cx.tcx.impl_of_method(m_def_id);
            if let Some(adt) = cx.tcx.type_of(impl_did).ty_adt_def();
            // skip extension traits, only lint functions from the standard library
            if cx.tcx.trait_id_of_impl(impl_did).is_none();

            // The receiver type must be defined directly inside the
            // `core::sync::atomic` module and be one of the known atomics.
            if let Some(parent) = cx.tcx.parent(adt.did);
            if cx.tcx.is_diagnostic_item(sym::atomic_mod, parent);
            if ATOMIC_TYPES.contains(&cx.tcx.item_name(adt.did));
            then {
                return Some((method_path.ident.name, args));
            }
        }
        None
    }

    /// Returns `true` if `did` refers to one of the given `Ordering` variants
    /// (or to the constructor of such a variant).
    fn matches_ordering(cx: &LateContext<'_>, did: DefId, orderings: &[Symbol]) -> bool {
        let tcx = cx.tcx;
        let atomic_ordering = tcx.get_diagnostic_item(sym::Ordering);
        orderings.iter().any(|ordering| {
            tcx.item_name(did) == *ordering && {
                let parent = tcx.parent(did);
                parent == atomic_ordering
                // needed in case this is a ctor, not a variant
                || parent.map_or(false, |parent| tcx.parent(parent) == atomic_ordering)
            }
        })
    }

    /// Resolves an ordering argument to its `DefId` if the argument is a plain
    /// path (e.g. `Ordering::SeqCst`); returns `None` for any other expression.
    fn opt_ordering_defid(cx: &LateContext<'_>, ord_arg: &Expr<'_>) -> Option<DefId> {
        if let ExprKind::Path(ref ord_qpath) = ord_arg.kind {
            cx.qpath_res(ord_qpath, ord_arg.hir_id).opt_def_id()
        } else {
            None
        }
    }

    /// Lints `load` calls using `Release`/`AcqRel` and `store` calls using
    /// `Acquire`/`AcqRel`, both of which panic at runtime.
    fn check_atomic_load_store(cx: &LateContext<'_>, expr: &Expr<'_>) {
        use rustc_hir::def::{DefKind, Res};
        use rustc_hir::QPath;
        if_chain! {
            if let Some((method, args)) = Self::inherent_atomic_method_call(cx, expr, &[sym::load, sym::store]);
            // `args[0]` is the receiver; the ordering is the next argument for
            // `load`, and the one after the stored value for `store`.
            if let Some((ordering_arg, invalid_ordering)) = match method {
                sym::load => Some((&args[1], sym::Release)),
                sym::store => Some((&args[2], sym::Acquire)),
                _ => None,
            };

            if let ExprKind::Path(QPath::Resolved(_, path)) = ordering_arg.kind;
            if let Res::Def(DefKind::Ctor(..), ctor_id) = path.res;
            if Self::matches_ordering(cx, ctor_id, &[invalid_ordering, sym::AcqRel]);
            then {
                cx.struct_span_lint(INVALID_ATOMIC_ORDERING, ordering_arg.span, |diag| {
                    if method == sym::load {
                        diag.build("atomic loads cannot have `Release` or `AcqRel` ordering")
                            .help("consider using ordering modes `Acquire`, `SeqCst` or `Relaxed`")
                            .emit()
                    } else {
                        debug_assert_eq!(method, sym::store);
                        diag.build("atomic stores cannot have `Acquire` or `AcqRel` ordering")
                            .help("consider using ordering modes `Release`, `SeqCst` or `Relaxed`")
                            .emit();
                    }
                });
            }
        }
    }

    /// Lints `fence`/`compiler_fence` calls with `Relaxed` ordering, which
    /// panic at runtime.
    fn check_memory_fence(cx: &LateContext<'_>, expr: &Expr<'_>) {
        if_chain! {
            if let ExprKind::Call(ref func, ref args) = expr.kind;
            if let ExprKind::Path(ref func_qpath) = func.kind;
            if let Some(def_id) = cx.qpath_res(func_qpath, func.hir_id).opt_def_id();
            if matches!(cx.tcx.get_diagnostic_name(def_id), Some(sym::fence | sym::compiler_fence));
            if let ExprKind::Path(ref ordering_qpath) = &args[0].kind;
            if let Some(ordering_def_id) = cx.qpath_res(ordering_qpath, args[0].hir_id).opt_def_id();
            if Self::matches_ordering(cx, ordering_def_id, &[sym::Relaxed]);
            then {
                cx.struct_span_lint(INVALID_ATOMIC_ORDERING, args[0].span, |diag| {
                    diag.build("memory fences cannot have `Relaxed` ordering")
                        .help("consider using ordering modes `Acquire`, `Release`, `AcqRel` or `SeqCst`")
                        .emit();
                });
            }
        }
    }

    /// Lints invalid failure orderings for `compare_exchange`,
    /// `compare_exchange_weak`, and `fetch_update`: the failure ordering may
    /// never be `Release`/`AcqRel`, nor stronger than the success ordering.
    fn check_atomic_compare_exchange(cx: &LateContext<'_>, expr: &Expr<'_>) {
        if_chain! {
            if let Some((method, args)) = Self::inherent_atomic_method_call(cx, expr, &[sym::fetch_update, sym::compare_exchange, sym::compare_exchange_weak]);
            // `args[0]` is the receiver; the success/failure orderings are the
            // final two arguments of each of these methods.
            if let Some((success_order_arg, failure_order_arg)) = match method {
                sym::fetch_update => Some((&args[1], &args[2])),
                sym::compare_exchange | sym::compare_exchange_weak => Some((&args[3], &args[4])),
                _ => None,
            };

            if let Some(fail_ordering_def_id) = Self::opt_ordering_defid(cx, failure_order_arg);
            then {
                // Helper type holding on to some checking and error reporting data. Has
                // - (success ordering,
                // - list of failure orderings forbidden by the success order,
                // - suggestion message)
                type OrdLintInfo = (Symbol, &'static [Symbol], &'static str);
                const RELAXED: OrdLintInfo = (sym::Relaxed, &[sym::SeqCst, sym::Acquire], "ordering mode `Relaxed`");
                const ACQUIRE: OrdLintInfo = (sym::Acquire, &[sym::SeqCst], "ordering modes `Acquire` or `Relaxed`");
                const SEQ_CST: OrdLintInfo = (sym::SeqCst, &[], "ordering modes `Acquire`, `SeqCst` or `Relaxed`");
                // `Release`/`AcqRel` success orderings permit the same failure
                // orderings as `Relaxed`/`Acquire` respectively.
                const RELEASE: OrdLintInfo = (sym::Release, RELAXED.1, RELAXED.2);
                const ACQREL: OrdLintInfo = (sym::AcqRel, ACQUIRE.1, ACQUIRE.2);
                const SEARCH: [OrdLintInfo; 5] = [RELAXED, ACQUIRE, SEQ_CST, RELEASE, ACQREL];

                // Look up the lint info matching the (resolvable) success ordering.
                let success_lint_info = Self::opt_ordering_defid(cx, success_order_arg)
                    .and_then(|success_ord_def_id| -> Option<OrdLintInfo> {
                        SEARCH
                            .iter()
                            .copied()
                            .find(|(ordering, ..)| {
                                Self::matches_ordering(cx, success_ord_def_id, &[*ordering])
                            })
                    });
                if Self::matches_ordering(cx, fail_ordering_def_id, &[sym::Release, sym::AcqRel]) {
                    // If we don't know the success order is, use what we'd suggest
                    // if it were maximally permissive.
                    let suggested = success_lint_info.unwrap_or(SEQ_CST).2;
                    cx.struct_span_lint(INVALID_ATOMIC_ORDERING, failure_order_arg.span, |diag| {
                        let msg = format!(
                            "{}'s failure ordering may not be `Release` or `AcqRel`",
                            method,
                        );
                        diag.build(&msg)
                            .help(&format!("consider using {} instead", suggested))
                            .emit();
                    });
                } else if let Some((success_ord, bad_ords_given_success, suggested)) = success_lint_info {
                    if Self::matches_ordering(cx, fail_ordering_def_id, bad_ords_given_success) {
                        cx.struct_span_lint(INVALID_ATOMIC_ORDERING, failure_order_arg.span, |diag| {
                            let msg = format!(
                                "{}'s failure ordering may not be stronger than the success ordering of `{}`",
                                method,
                                success_ord,
                            );
                            diag.build(&msg)
                                .help(&format!("consider using {} instead", suggested))
                                .emit();
                        });
                    }
                }
            }
        }
    }
}
1621
impl<'tcx> LateLintPass<'tcx> for InvalidAtomicOrdering {
    // Run all three checks on every expression; each bails out early when the
    // expression is not the kind of call it inspects.
    fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'_>) {
        Self::check_atomic_load_store(cx, expr);
        Self::check_memory_fence(cx, expr);
        Self::check_atomic_compare_exchange(cx, expr);
    }
}
1629