//! Miscellaneous type-system utilities that are too small to deserve their own modules.

use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use crate::ty::fold::TypeFolder;
use crate::ty::layout::IntegerExt;
use crate::ty::query::TyCtxtAt;
use crate::ty::subst::{GenericArgKind, Subst, SubstsRef};
use crate::ty::TyKind::*;
use crate::ty::{self, DebruijnIndex, DefIdTree, List, Ty, TyCtxt, TypeFoldable};
use rustc_apfloat::Float as _;
use rustc_ast as ast;
use rustc_attr::{self as attr, SignedInt, UnsignedInt};
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_errors::ErrorReported;
use rustc_hir as hir;
use rustc_hir::def::DefKind;
use rustc_hir::def_id::DefId;
use rustc_macros::HashStable;
use rustc_query_system::ich::NodeIdHashingMode;
use rustc_span::DUMMY_SP;
use rustc_target::abi::{Integer, Size, TargetDataLayout};
use smallvec::SmallVec;
use std::{fmt, iter};

#[derive(Copy, Clone, Debug)]
pub struct Discr<'tcx> {
    /// Bit representation of the discriminant (e.g., `-1i8` is `0xFF_u128`).
    pub val: u128,
    pub ty: Ty<'tcx>,
}

impl<'tcx> fmt::Display for Discr<'tcx> {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self.ty.kind() {
            ty::Int(ity) => {
                let size = ty::tls::with(|tcx| Integer::from_int_ty(&tcx, ity).size());
                let x = self.val;
                // sign extend the raw representation to be an i128
                let x = size.sign_extend(x) as i128;
                write!(fmt, "{}", x)
            }
            _ => write!(fmt, "{}", self.val),
        }
    }
}

fn int_size_and_signed<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> (Size, bool) {
    let (int, signed) = match *ty.kind() {
        Int(ity) => (Integer::from_int_ty(&tcx, ity), true),
        Uint(uty) => (Integer::from_uint_ty(&tcx, uty), false),
        _ => bug!("non integer discriminant"),
    };
    (int.size(), signed)
}

impl<'tcx> Discr<'tcx> {
    /// Adds `1` to the value and wraps around if the maximum for the type is reached.
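    ///
    /// For example (an illustrative sketch, not a runnable doctest, since `Discr`
    /// is a compiler-internal type and a `tcx: TyCtxt<'_>` is assumed to be in scope):
    ///
    /// ```ignore (requires a TyCtxt)
    /// // A `u8` discriminant at its maximum value wraps back to zero.
    /// let d = Discr { val: 255, ty: tcx.types.u8 };
    /// assert_eq!(d.wrap_incr(tcx).val, 0);
    /// ```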
    pub fn wrap_incr(self, tcx: TyCtxt<'tcx>) -> Self {
        self.checked_add(tcx, 1).0
    }
    pub fn checked_add(self, tcx: TyCtxt<'tcx>, n: u128) -> (Self, bool) {
        let (size, signed) = int_size_and_signed(tcx, self.ty);
        let (val, oflo) = if signed {
            let min = size.signed_int_min();
            let max = size.signed_int_max();
            let val = size.sign_extend(self.val) as i128;
            assert!(n < (i128::MAX as u128));
            let n = n as i128;
            let oflo = val > max - n;
            let val = if oflo { min + (n - (max - val) - 1) } else { val + n };
            // zero the upper bits
            let val = val as u128;
            let val = size.truncate(val);
            (val, oflo)
        } else {
            let max = size.unsigned_int_max();
            let val = self.val;
            let oflo = val > max - n;
            let val = if oflo { n - (max - val) - 1 } else { val + n };
            (val, oflo)
        };
        (Self { val, ty: self.ty }, oflo)
    }
}

pub trait IntTypeExt {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
    fn disr_incr<'tcx>(&self, tcx: TyCtxt<'tcx>, val: Option<Discr<'tcx>>) -> Option<Discr<'tcx>>;
    fn initial_discriminant<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Discr<'tcx>;
}

impl IntTypeExt for attr::IntType {
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
        match *self {
            SignedInt(ast::IntTy::I8) => tcx.types.i8,
            SignedInt(ast::IntTy::I16) => tcx.types.i16,
            SignedInt(ast::IntTy::I32) => tcx.types.i32,
            SignedInt(ast::IntTy::I64) => tcx.types.i64,
            SignedInt(ast::IntTy::I128) => tcx.types.i128,
            SignedInt(ast::IntTy::Isize) => tcx.types.isize,
            UnsignedInt(ast::UintTy::U8) => tcx.types.u8,
            UnsignedInt(ast::UintTy::U16) => tcx.types.u16,
            UnsignedInt(ast::UintTy::U32) => tcx.types.u32,
            UnsignedInt(ast::UintTy::U64) => tcx.types.u64,
            UnsignedInt(ast::UintTy::U128) => tcx.types.u128,
            UnsignedInt(ast::UintTy::Usize) => tcx.types.usize,
        }
    }

    fn initial_discriminant<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Discr<'tcx> {
        Discr { val: 0, ty: self.to_ty(tcx) }
    }

    fn disr_incr<'tcx>(&self, tcx: TyCtxt<'tcx>, val: Option<Discr<'tcx>>) -> Option<Discr<'tcx>> {
        if let Some(val) = val {
            assert_eq!(self.to_ty(tcx), val.ty);
            let (new, oflo) = val.checked_add(tcx, 1);
            if oflo { None } else { Some(new) }
        } else {
            Some(self.initial_discriminant(tcx))
        }
    }
}

impl<'tcx> TyCtxt<'tcx> {
    /// Creates a hash of the type `Ty` which will be the same no matter what crate
    /// context it's calculated within. This is used by the `type_id` intrinsic.
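    ///
    /// For example (illustrative), `&'a u32` and `&'static u32` hash to the same
    /// value, because free regions are erased before hashing.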
    pub fn type_id_hash(self, ty: Ty<'tcx>) -> u64 {
        let mut hasher = StableHasher::new();
        let mut hcx = self.create_stable_hashing_context();

        // We want the type_id to be independent of the type's free regions, so we
        // erase them. The erase_regions() call will also anonymize bound
        // regions, which is desirable too.
        let ty = self.erase_regions(ty);

        hcx.while_hashing_spans(false, |hcx| {
            hcx.with_node_id_hashing_mode(NodeIdHashingMode::HashDefPath, |hcx| {
                ty.hash_stable(hcx, &mut hasher);
            });
        });
        hasher.finish()
    }

    pub fn has_error_field(self, ty: Ty<'tcx>) -> bool {
        if let ty::Adt(def, substs) = *ty.kind() {
            for field in def.all_fields() {
                let field_ty = field.ty(self, substs);
                if let Error(_) = field_ty.kind() {
                    return true;
                }
            }
        }
        false
    }

    /// Attempts to return the deeply last field of nested structures, but
    /// does not apply any normalization in its search. Returns the same type
    /// if input `ty` is not a structure at all.
    pub fn struct_tail_without_normalization(self, ty: Ty<'tcx>) -> Ty<'tcx> {
        let tcx = self;
        tcx.struct_tail_with_normalize(ty, |ty| ty)
    }

    /// Returns the deeply last field of nested structures, or the same type if
    /// not a structure at all. Corresponds to the only possible unsized field,
    /// and its type can be used to determine unsizing strategy.
    ///
    /// Should only be called if `ty` has no inference variables and does not
    /// need its lifetimes preserved (e.g. as part of codegen); otherwise
    /// normalization attempts may cause compiler bugs.
    pub fn struct_tail_erasing_lifetimes(
        self,
        ty: Ty<'tcx>,
        param_env: ty::ParamEnv<'tcx>,
    ) -> Ty<'tcx> {
        let tcx = self;
        tcx.struct_tail_with_normalize(ty, |ty| tcx.normalize_erasing_regions(param_env, ty))
    }

    /// Returns the deeply last field of nested structures, or the same type if
    /// not a structure at all. Corresponds to the only possible unsized field,
    /// and its type can be used to determine unsizing strategy.
    ///
    /// This is parameterized over the normalization strategy (i.e. how to
    /// handle `<T as Trait>::Assoc` and `impl Trait`); pass the identity
    /// function to indicate no normalization should take place.
    ///
    /// See also `struct_tail_erasing_lifetimes`, which is suitable for use
    /// during codegen.
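    ///
    /// For example (illustrative), given a hypothetical definition
    ///
    /// ```ignore (illustrative)
    /// struct Wrapper<T>(u32, T);
    /// ```
    ///
    /// the struct tail of `Wrapper<Wrapper<[u8]>>` is `[u8]`, found by repeatedly
    /// descending into the last field.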
    pub fn struct_tail_with_normalize(
        self,
        mut ty: Ty<'tcx>,
        normalize: impl Fn(Ty<'tcx>) -> Ty<'tcx>,
    ) -> Ty<'tcx> {
        let recursion_limit = self.recursion_limit();
        for iteration in 0.. {
            if !recursion_limit.value_within_limit(iteration) {
                return self.ty_error_with_message(
                    DUMMY_SP,
                    &format!("reached the recursion limit finding the struct tail for {}", ty),
                );
            }
            match *ty.kind() {
                ty::Adt(def, substs) => {
                    if !def.is_struct() {
                        break;
                    }
                    match def.non_enum_variant().fields.last() {
                        Some(f) => ty = f.ty(self, substs),
                        None => break,
                    }
                }

                ty::Tuple(tys) if let Some((&last_ty, _)) = tys.split_last() => {
                    ty = last_ty.expect_ty();
                }

                ty::Tuple(_) => break,

                ty::Projection(_) | ty::Opaque(..) => {
                    let normalized = normalize(ty);
                    if ty == normalized {
                        return ty;
                    } else {
                        ty = normalized;
                    }
                }

                _ => {
                    break;
                }
            }
        }
        ty
    }

    /// Same as applying `struct_tail` on `source` and `target`, but only
    /// keeps going as long as the two types are instances of the same
    /// structure definitions.
    /// For `(Foo<Foo<T>>, Foo<dyn Trait>)`, the result will be `(Foo<T>, Trait)`,
    /// whereas `struct_tail` produces `T` and `Trait`, respectively.
    ///
    /// Should only be called if the types have no inference variables and do
    /// not need their lifetimes preserved (e.g., as part of codegen); otherwise,
    /// normalization attempts may cause compiler bugs.
    pub fn struct_lockstep_tails_erasing_lifetimes(
        self,
        source: Ty<'tcx>,
        target: Ty<'tcx>,
        param_env: ty::ParamEnv<'tcx>,
    ) -> (Ty<'tcx>, Ty<'tcx>) {
        let tcx = self;
        tcx.struct_lockstep_tails_with_normalize(source, target, |ty| {
            tcx.normalize_erasing_regions(param_env, ty)
        })
    }

    /// Same as applying `struct_tail` on `source` and `target`, but only
    /// keeps going as long as the two types are instances of the same
    /// structure definitions.
    /// For `(Foo<Foo<T>>, Foo<dyn Trait>)`, the result will be `(Foo<T>, Trait)`,
    /// whereas `struct_tail` produces `T` and `Trait`, respectively.
    ///
    /// See also `struct_lockstep_tails_erasing_lifetimes`, which is suitable for use
    /// during codegen.
    pub fn struct_lockstep_tails_with_normalize(
        self,
        source: Ty<'tcx>,
        target: Ty<'tcx>,
        normalize: impl Fn(Ty<'tcx>) -> Ty<'tcx>,
    ) -> (Ty<'tcx>, Ty<'tcx>) {
        let (mut a, mut b) = (source, target);
        loop {
            match (&a.kind(), &b.kind()) {
                (&Adt(a_def, a_substs), &Adt(b_def, b_substs))
                    if a_def == b_def && a_def.is_struct() =>
                {
                    if let Some(f) = a_def.non_enum_variant().fields.last() {
                        a = f.ty(self, a_substs);
                        b = f.ty(self, b_substs);
                    } else {
                        break;
                    }
                }
                (&Tuple(a_tys), &Tuple(b_tys)) if a_tys.len() == b_tys.len() => {
                    if let Some(a_last) = a_tys.last() {
                        a = a_last.expect_ty();
                        b = b_tys.last().unwrap().expect_ty();
                    } else {
                        break;
                    }
                }
                (ty::Projection(_) | ty::Opaque(..), _)
                | (_, ty::Projection(_) | ty::Opaque(..)) => {
                    // If either side is a projection, attempt to
                    // progress via normalization. (Should be safe to
                    // apply to both sides as normalization is
                    // idempotent.)
                    let a_norm = normalize(a);
                    let b_norm = normalize(b);
                    if a == a_norm && b == b_norm {
                        break;
                    } else {
                        a = a_norm;
                        b = b_norm;
                    }
                }

                _ => break,
            }
        }
        (a, b)
    }

    /// Calculate the destructor of a given type.
    pub fn calculate_dtor(
        self,
        adt_did: DefId,
        validate: impl Fn(Self, DefId) -> Result<(), ErrorReported>,
    ) -> Option<ty::Destructor> {
        let drop_trait = self.lang_items().drop_trait()?;
        self.ensure().coherent_trait(drop_trait);

        let ty = self.type_of(adt_did);
        let (did, constness) = self.find_map_relevant_impl(drop_trait, ty, |impl_did| {
            if let Some(item_id) = self.associated_item_def_ids(impl_did).first() {
                if validate(self, impl_did).is_ok() {
                    return Some((*item_id, self.impl_constness(impl_did)));
                }
            }
            None
        })?;

        Some(ty::Destructor { did, constness })
    }

    /// Returns the set of types that are required to be alive in
    /// order to run the destructor of `def` (see RFCs 769 and
    /// 1238).
    ///
    /// Note that this returns only the constraints for the
    /// destructor of `def` itself. For the destructors of the
    /// contents, you need `adt_dtorck_constraint`.
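    ///
    /// For example (illustrative): for `struct S<T>(T)` with a plain
    /// `impl<T> Drop for S<T>`, the constraint set is `[T]`; if the impl is
    /// `impl<#[may_dangle] T> Drop for S<T>`, the constraint set is empty.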
    pub fn destructor_constraints(self, def: &'tcx ty::AdtDef) -> Vec<ty::subst::GenericArg<'tcx>> {
        let dtor = match def.destructor(self) {
            None => {
                debug!("destructor_constraints({:?}) - no dtor", def.did);
                return vec![];
            }
            Some(dtor) => dtor.did,
        };

        let impl_def_id = self.associated_item(dtor).container.id();
        let impl_generics = self.generics_of(impl_def_id);

        // We have a destructor - all the parameters that are not
        // pure_wrt_drop (i.e., don't have a #[may_dangle] attribute)
        // must be live.

        // We need to return the list of parameters from the ADT's
        // generics/substs that correspond to impure parameters on the
        // impl's generics. This is a bit ugly, but conceptually simple:
        //
        // Suppose our ADT looks like the following
        //
        //     struct S<X, Y, Z>(X, Y, Z);
        //
        // and the impl is
        //
        //     impl<#[may_dangle] P0, P1, P2> Drop for S<P1, P2, P0>
        //
        // We want to return the parameters (X, Y). For that, we match
        // up the item-substs <X, Y, Z> with the substs on the impl ADT,
        // <P1, P2, P0>, and then look up which of the impl substs refer to
        // parameters marked as pure.

        let impl_substs = match *self.type_of(impl_def_id).kind() {
            ty::Adt(def_, substs) if def_ == def => substs,
            _ => bug!(),
        };

        let item_substs = match *self.type_of(def.did).kind() {
            ty::Adt(def_, substs) if def_ == def => substs,
            _ => bug!(),
        };

        let result = iter::zip(item_substs, impl_substs)
            .filter(|&(_, k)| {
                match k.unpack() {
                    GenericArgKind::Lifetime(&ty::RegionKind::ReEarlyBound(ref ebr)) => {
                        !impl_generics.region_param(ebr, self).pure_wrt_drop
                    }
                    GenericArgKind::Type(&ty::TyS { kind: ty::Param(ref pt), .. }) => {
                        !impl_generics.type_param(pt, self).pure_wrt_drop
                    }
                    GenericArgKind::Const(&ty::Const {
                        val: ty::ConstKind::Param(ref pc), ..
                    }) => !impl_generics.const_param(pc, self).pure_wrt_drop,
                    GenericArgKind::Lifetime(_)
                    | GenericArgKind::Type(_)
                    | GenericArgKind::Const(_) => {
                        // Not a type, const or region param: this should be reported
                        // as an error.
                        false
                    }
                }
            })
            .map(|(item_param, _)| item_param)
            .collect();
        debug!("destructor_constraint({:?}) = {:?}", def.did, result);
        result
    }


    /// Returns `true` if `def_id` refers to a closure (e.g., `|x| x * 2`). Note
    /// that closures have a `DefId`, but the closure *expression* also
    /// has a `HirId` that is located within the context where the
    /// closure appears (and, sadly, a corresponding `NodeId`, since
    /// those are not yet phased out). The parent of the closure's
    /// `DefId` will also be the context where it appears.
    pub fn is_closure(self, def_id: DefId) -> bool {
        matches!(self.def_kind(def_id), DefKind::Closure | DefKind::Generator)
    }

    /// Returns `true` if `def_id` refers to a definition that does not have its own
    /// type-checking context, i.e. closure, generator or inline const.
    pub fn is_typeck_child(self, def_id: DefId) -> bool {
        matches!(
            self.def_kind(def_id),
            DefKind::Closure | DefKind::Generator | DefKind::InlineConst
        )
    }

    /// Returns `true` if `def_id` refers to a trait (i.e., `trait Foo { ... }`).
    pub fn is_trait(self, def_id: DefId) -> bool {
        self.def_kind(def_id) == DefKind::Trait
    }

    /// Returns `true` if `def_id` refers to a trait alias (i.e., `trait Foo = ...;`),
    /// and `false` otherwise.
    pub fn is_trait_alias(self, def_id: DefId) -> bool {
        self.def_kind(def_id) == DefKind::TraitAlias
    }

    /// Returns `true` if this `DefId` refers to the implicit constructor for
    /// a tuple struct like `struct Foo(u32)`, and `false` otherwise.
    pub fn is_constructor(self, def_id: DefId) -> bool {
        matches!(self.def_kind(def_id), DefKind::Ctor(..))
    }

    /// Given the `DefId`, returns the `DefId` of the innermost item that
    /// has its own type-checking context or "inference environment".
    ///
    /// For example, a closure has its own `DefId`, but it is type-checked
    /// with the containing item. Similarly, an inline const block has its
    /// own `DefId` but it is type-checked together with the containing item.
    ///
    /// Therefore, when we fetch the `typeck` results for the closure, for
    /// example, we really wind up fetching the `typeck` results for the
    /// enclosing fn item.
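    ///
    /// For example (illustrative), for a closure `|x| x + 1` written in the body
    /// of `fn foo()`, calling `typeck_root_def_id` on the closure's `DefId`
    /// returns the `DefId` of `foo` itself.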
    pub fn typeck_root_def_id(self, def_id: DefId) -> DefId {
        let mut def_id = def_id;
        while self.is_typeck_child(def_id) {
            def_id = self.parent(def_id).unwrap_or_else(|| {
                bug!("closure {:?} has no parent", def_id);
            });
        }
        def_id
    }

    /// Given the `DefId` and substs of a closure, creates the type of
    /// `self` argument that the closure expects. For example, for a
    /// `Fn` closure, this would return a reference type `&T` where
    /// `T = closure_ty`.
    ///
    /// Returns `None` if this closure's kind has not yet been inferred.
    /// This should only be possible during type checking.
    ///
    /// Note that the return value is a late-bound region and hence
    /// wrapped in a binder.
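    ///
    /// A minimal sketch of a call site (illustrative only; it assumes a `tcx`,
    /// a closure `DefId`, and its substs are already in scope):
    ///
    /// ```ignore (requires a TyCtxt)
    /// let env_ty = tcx.closure_env_ty(closure_def_id, closure_substs, ty::ReErased);
    /// // `Fn`     => Some(&C)
    /// // `FnMut`  => Some(&mut C)
    /// // `FnOnce` => Some(C), where `C` is the closure's own type.
    /// ```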
    pub fn closure_env_ty(
        self,
        closure_def_id: DefId,
        closure_substs: SubstsRef<'tcx>,
        env_region: ty::RegionKind,
    ) -> Option<Ty<'tcx>> {
        let closure_ty = self.mk_closure(closure_def_id, closure_substs);
        let closure_kind_ty = closure_substs.as_closure().kind_ty();
        let closure_kind = closure_kind_ty.to_opt_closure_kind()?;
        let env_ty = match closure_kind {
            ty::ClosureKind::Fn => self.mk_imm_ref(self.mk_region(env_region), closure_ty),
            ty::ClosureKind::FnMut => self.mk_mut_ref(self.mk_region(env_region), closure_ty),
            ty::ClosureKind::FnOnce => closure_ty,
        };
        Some(env_ty)
    }

    /// Returns `true` if the node pointed to by `def_id` is a `static` item.
    pub fn is_static(self, def_id: DefId) -> bool {
        self.static_mutability(def_id).is_some()
    }

    /// Returns `true` if this is a `static` item with the `#[thread_local]` attribute.
    pub fn is_thread_local_static(self, def_id: DefId) -> bool {
        self.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::THREAD_LOCAL)
    }

    /// Returns `true` if the node pointed to by `def_id` is a mutable `static` item.
    pub fn is_mutable_static(self, def_id: DefId) -> bool {
        self.static_mutability(def_id) == Some(hir::Mutability::Mut)
    }

    /// Get the type of the pointer to the static that we use in MIR.
    pub fn static_ptr_ty(self, def_id: DefId) -> Ty<'tcx> {
        // Make sure that any constants in the static's type are evaluated.
        let static_ty = self.normalize_erasing_regions(ty::ParamEnv::empty(), self.type_of(def_id));

        // Make sure that accesses to unsafe statics end up using raw pointers.
        // For thread-locals, this needs to be kept in sync with `Rvalue::ty`.
        if self.is_mutable_static(def_id) {
            self.mk_mut_ptr(static_ty)
        } else if self.is_foreign_item(def_id) {
            self.mk_imm_ptr(static_ty)
        } else {
            self.mk_imm_ref(self.lifetimes.re_erased, static_ty)
        }
    }

    /// Expands the given impl trait type, stopping if the type is recursive.
    #[instrument(skip(self), level = "debug")]
    pub fn try_expand_impl_trait_type(
        self,
        def_id: DefId,
        substs: SubstsRef<'tcx>,
    ) -> Result<Ty<'tcx>, Ty<'tcx>> {
        let mut visitor = OpaqueTypeExpander {
            seen_opaque_tys: FxHashSet::default(),
            expanded_cache: FxHashMap::default(),
            primary_def_id: Some(def_id),
            found_recursion: false,
            found_any_recursion: false,
            check_recursion: true,
            tcx: self,
        };

        let expanded_type = visitor.expand_opaque_ty(def_id, substs).unwrap();
        trace!(?expanded_type);
        if visitor.found_recursion { Err(expanded_type) } else { Ok(expanded_type) }
    }
}

struct OpaqueTypeExpander<'tcx> {
    // Contains the DefIds of the opaque types that are currently being
    // expanded. When we expand an opaque type we insert the DefId of
    // that type, and when we finish expanding that type we remove its
    // DefId.
    seen_opaque_tys: FxHashSet<DefId>,
    // Cache of all expansions we've seen so far. This is a critical
    // optimization for some large types produced by async fn trees.
    expanded_cache: FxHashMap<(DefId, SubstsRef<'tcx>), Ty<'tcx>>,
    primary_def_id: Option<DefId>,
    found_recursion: bool,
    found_any_recursion: bool,
    /// Whether or not to check for recursive opaque types.
    /// This is `true` when we're explicitly checking for opaque type
    /// recursion, and `false` otherwise to avoid unnecessary work.
    check_recursion: bool,
    tcx: TyCtxt<'tcx>,
}

impl<'tcx> OpaqueTypeExpander<'tcx> {
    fn expand_opaque_ty(&mut self, def_id: DefId, substs: SubstsRef<'tcx>) -> Option<Ty<'tcx>> {
        if self.found_any_recursion {
            return None;
        }
        let substs = substs.fold_with(self);
        if !self.check_recursion || self.seen_opaque_tys.insert(def_id) {
            let expanded_ty = match self.expanded_cache.get(&(def_id, substs)) {
                Some(expanded_ty) => expanded_ty,
                None => {
                    let generic_ty = self.tcx.type_of(def_id);
                    let concrete_ty = generic_ty.subst(self.tcx, substs);
                    let expanded_ty = self.fold_ty(concrete_ty);
                    self.expanded_cache.insert((def_id, substs), expanded_ty);
                    expanded_ty
                }
            };
            if self.check_recursion {
                self.seen_opaque_tys.remove(&def_id);
            }
            Some(expanded_ty)
        } else {
            // If another opaque type that we contain is recursive, then it
            // will report the error, so we don't have to.
            self.found_any_recursion = true;
            self.found_recursion = def_id == *self.primary_def_id.as_ref().unwrap();
            None
        }
    }
}

impl<'tcx> TypeFolder<'tcx> for OpaqueTypeExpander<'tcx> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.tcx
    }

    fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
        if let ty::Opaque(def_id, substs) = t.kind {
            self.expand_opaque_ty(def_id, substs).unwrap_or(t)
        } else if t.has_opaque_types() {
            t.super_fold_with(self)
        } else {
            t
        }
    }
}

impl<'tcx> ty::TyS<'tcx> {
    /// Returns the maximum value for the given numeric type (including `char`s)
    /// or returns `None` if the type is not numeric.
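    ///
    /// For example (illustrative): for `u8` this is the constant `255`, for `i8`
    /// it is `127`, for `char` it is `char::MAX`, and for `f32` it is `+inf`
    /// (encoded via its bit pattern).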
    pub fn numeric_max_val(&'tcx self, tcx: TyCtxt<'tcx>) -> Option<&'tcx ty::Const<'tcx>> {
        let val = match self.kind() {
            ty::Int(_) | ty::Uint(_) => {
                let (size, signed) = int_size_and_signed(tcx, self);
                let val =
                    if signed { size.signed_int_max() as u128 } else { size.unsigned_int_max() };
                Some(val)
            }
            ty::Char => Some(std::char::MAX as u128),
            ty::Float(fty) => Some(match fty {
                ty::FloatTy::F32 => rustc_apfloat::ieee::Single::INFINITY.to_bits(),
                ty::FloatTy::F64 => rustc_apfloat::ieee::Double::INFINITY.to_bits(),
            }),
            _ => None,
        };
        val.map(|v| ty::Const::from_bits(tcx, v, ty::ParamEnv::empty().and(self)))
    }

    /// Returns the minimum value for the given numeric type (including `char`s)
    /// or returns `None` if the type is not numeric.
    pub fn numeric_min_val(&'tcx self, tcx: TyCtxt<'tcx>) -> Option<&'tcx ty::Const<'tcx>> {
        let val = match self.kind() {
            ty::Int(_) | ty::Uint(_) => {
                let (size, signed) = int_size_and_signed(tcx, self);
                let val = if signed { size.truncate(size.signed_int_min() as u128) } else { 0 };
                Some(val)
            }
            ty::Char => Some(0),
            ty::Float(fty) => Some(match fty {
                ty::FloatTy::F32 => (-::rustc_apfloat::ieee::Single::INFINITY).to_bits(),
                ty::FloatTy::F64 => (-::rustc_apfloat::ieee::Double::INFINITY).to_bits(),
            }),
            _ => None,
        };
        val.map(|v| ty::Const::from_bits(tcx, v, ty::ParamEnv::empty().and(self)))
    }

    /// Checks whether values of this type `T` are *moved* or *copied*
    /// when referenced -- this amounts to a check for whether `T:
    /// Copy`, but note that we **don't** consider lifetimes when
    /// doing this check. This means that we may generate MIR which
    /// does copies even when the type actually doesn't satisfy the
    /// full requirements for the `Copy` trait (cc #29149) -- this
    /// winds up being reported as an error during NLL borrow check.
    pub fn is_copy_modulo_regions(
        &'tcx self,
        tcx_at: TyCtxtAt<'tcx>,
        param_env: ty::ParamEnv<'tcx>,
    ) -> bool {
        tcx_at.is_copy_raw(param_env.and(self))
    }

    /// Checks whether values of this type `T` have a size known at
    /// compile time (i.e., whether `T: Sized`). Lifetimes are ignored
    /// for the purposes of this check, so it can be an
    /// over-approximation in generic contexts, where one can have
    /// strange rules like `<T as Foo<'static>>::Bar: Sized` that
    /// actually carry lifetime requirements.
    pub fn is_sized(&'tcx self, tcx_at: TyCtxtAt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
        self.is_trivially_sized(tcx_at.tcx) || tcx_at.is_sized_raw(param_env.and(self))
    }

    /// Checks whether values of this type `T` implement the `Freeze`
    /// trait -- frozen types are those that do not contain an
    /// `UnsafeCell` anywhere. This is a language concept used to
    /// distinguish "true immutability", which is relevant to
    /// optimization as well as the rules around static values. Note
    /// that the `Freeze` trait is not exposed to end users and is
    /// effectively an implementation detail.
    pub fn is_freeze(&'tcx self, tcx_at: TyCtxtAt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
        self.is_trivially_freeze() || tcx_at.is_freeze_raw(param_env.and(self))
    }

    /// Fast path helper for testing if a type is `Freeze`.
    ///
    /// Returning `true` means the type is known to be `Freeze`. Returning
    /// `false` means nothing -- could be `Freeze`, might not be.
    fn is_trivially_freeze(&self) -> bool {
        match self.kind() {
            ty::Int(_)
            | ty::Uint(_)
            | ty::Float(_)
            | ty::Bool
            | ty::Char
            | ty::Str
            | ty::Never
            | ty::Ref(..)
            | ty::RawPtr(_)
            | ty::FnDef(..)
            | ty::Error(_)
            | ty::FnPtr(_) => true,
            ty::Tuple(_) => self.tuple_fields().all(Self::is_trivially_freeze),
            ty::Slice(elem_ty) | ty::Array(elem_ty, _) => elem_ty.is_trivially_freeze(),
            ty::Adt(..)
            | ty::Bound(..)
            | ty::Closure(..)
            | ty::Dynamic(..)
            | ty::Foreign(_)
            | ty::Generator(..)
            | ty::GeneratorWitness(_)
            | ty::Infer(_)
            | ty::Opaque(..)
            | ty::Param(_)
            | ty::Placeholder(_)
            | ty::Projection(_) => false,
        }
    }

    /// Checks whether values of this type `T` implement the `Unpin` trait.
    pub fn is_unpin(&'tcx self, tcx_at: TyCtxtAt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
        self.is_trivially_unpin() || tcx_at.is_unpin_raw(param_env.and(self))
    }

    /// Fast path helper for testing if a type is `Unpin`.
    ///
    /// Returning `true` means the type is known to be `Unpin`. Returning
    /// `false` means nothing -- could be `Unpin`, might not be.
    fn is_trivially_unpin(&self) -> bool {
        match self.kind() {
            ty::Int(_)
            | ty::Uint(_)
            | ty::Float(_)
            | ty::Bool
            | ty::Char
            | ty::Str
            | ty::Never
            | ty::Ref(..)
            | ty::RawPtr(_)
            | ty::FnDef(..)
            | ty::Error(_)
            | ty::FnPtr(_) => true,
            ty::Tuple(_) => self.tuple_fields().all(Self::is_trivially_unpin),
            ty::Slice(elem_ty) | ty::Array(elem_ty, _) => elem_ty.is_trivially_unpin(),
            ty::Adt(..)
            | ty::Bound(..)
            | ty::Closure(..)
            | ty::Dynamic(..)
            | ty::Foreign(_)
            | ty::Generator(..)
            | ty::GeneratorWitness(_)
            | ty::Infer(_)
            | ty::Opaque(..)
            | ty::Param(_)
            | ty::Placeholder(_)
            | ty::Projection(_) => false,
        }
    }

    /// If `ty.needs_drop(...)` returns `true`, then `ty` is definitely
    /// non-copy and *might* have a destructor attached; if it returns
    /// `false`, then `ty` definitely has no destructor (i.e., no drop glue).
    ///
    /// (Note that this implies that if `ty` has a destructor attached,
    /// then `needs_drop` will definitely return `true` for `ty`.)
    ///
    /// Note that this method is used to check eligible types in unions.
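    ///
    /// For example (illustrative): `needs_drop` is `true` for `String`, and
    /// `false` for `u32` and for `&mut String` (references never own their
    /// referent, so they never need drop themselves).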
    #[inline]
    pub fn needs_drop(&'tcx self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
        // Avoid querying in simple cases.
        match needs_drop_components(self, &tcx.data_layout) {
            Err(AlwaysRequiresDrop) => true,
            Ok(components) => {
                let query_ty = match *components {
                    [] => return false,
                    // If we've got a single component, call the query with that
                    // to increase the chance that we hit the query cache.
                    [component_ty] => component_ty,
                    _ => self,
                };
                // This doesn't depend on regions, so try to minimize distinct
                // query keys used.
                let erased = tcx.normalize_erasing_regions(param_env, query_ty);
                tcx.needs_drop_raw(param_env.and(erased))
            }
        }
    }

    /// Checks if `ty` has a significant drop.
    ///
    /// Note that this method can return `false` even if `ty` has a destructor
    /// attached; if that is the case, then the ADT has been marked with the
    /// `rustc_insignificant_dtor` attribute.
    ///
    /// Note that this method is used to check for changes in drop order for
    /// the RFC 2229 drop-reorder migration analysis.
    #[inline]
    pub fn has_significant_drop(
        &'tcx self,
        tcx: TyCtxt<'tcx>,
        param_env: ty::ParamEnv<'tcx>,
    ) -> bool {
        // Avoid querying in simple cases.
        match needs_drop_components(self, &tcx.data_layout) {
            Err(AlwaysRequiresDrop) => true,
            Ok(components) => {
                let query_ty = match *components {
                    [] => return false,
                    // If we've got a single component, call the query with that
                    // to increase the chance that we hit the query cache.
                    [component_ty] => component_ty,
                    _ => self,
                };

                // FIXME(#86868): We should be canonicalizing, or else moving this to a method of inference
                // context, or *something* like that, but for now just avoid passing inference
                // variables to queries that can't cope with them. Instead, conservatively
                // return "true" (may change drop order).
                if query_ty.needs_infer() {
                    return true;
                }

                // This doesn't depend on regions, so try to minimize distinct
                // query keys used.
                let erased = tcx.normalize_erasing_regions(param_env, query_ty);
                tcx.has_significant_drop_raw(param_env.and(erased))
            }
        }
    }

    /// Returns `true` if equality for this type is both reflexive and structural.
    ///
    /// Reflexive equality for a type is indicated by an `Eq` impl for that type.
    ///
    /// Primitive types (`u32`, `str`) have structural equality by definition. For composite data
    /// types, equality for the type as a whole is structural when it is the same as equality
    /// between all components (fields, array elements, etc.) of that type. For ADTs, structural
    /// equality is indicated by an implementation of `StructuralPartialEq` and `StructuralEq` for
    /// that type.
    ///
    /// This function is "shallow" because it may return `true` for a composite type whose fields
    /// are not `StructuralEq`. For example, `[T; 4]` has structural equality regardless of `T`
    /// because equality for arrays is determined by the equality of each array element. If you
    /// want to know whether a given call to `PartialEq::eq` will proceed structurally all the way
    /// down, you will need to use a type visitor.
    #[inline]
    pub fn is_structural_eq_shallow(&'tcx self, tcx: TyCtxt<'tcx>) -> bool {
        match self.kind() {
            // Look for an impl of both `StructuralPartialEq` and `StructuralEq`.
            Adt(..) => tcx.has_structural_eq_impls(self),

            // Primitive types that satisfy `Eq`.
            Bool | Char | Int(_) | Uint(_) | Str | Never => true,

            // Composite types that satisfy `Eq` when all of their fields do.
            //
            // Because this function is "shallow", we return `true` for these composites regardless
            // of the type(s) contained within.
            Ref(..) | Array(..) | Slice(_) | Tuple(..) => true,

            // Raw pointers use bitwise comparison.
            RawPtr(_) | FnPtr(_) => true,

            // Floating point numbers are not `Eq`.
            Float(_) => false,

            // Conservatively return `false` for all others...

            // Anonymous function types
            FnDef(..) | Closure(..) | Dynamic(..) | Generator(..) => false,

            // Generic or inferred types
            //
            // FIXME(ecstaticmorse): Maybe we should `bug` here? This should probably only be
            // called for known, fully-monomorphized types.
            Projection(_) | Opaque(..) | Param(_) | Bound(..) | Placeholder(_) | Infer(_) => false,

            Foreign(_) | GeneratorWitness(..) | Error(_) => false,
        }
    }

    pub fn same_type(a: Ty<'tcx>, b: Ty<'tcx>) -> bool {
        match (&a.kind(), &b.kind()) {
            (&Adt(did_a, substs_a), &Adt(did_b, substs_b)) => {
                if did_a != did_b {
                    return false;
                }

                substs_a.types().zip(substs_b.types()).all(|(a, b)| Self::same_type(a, b))
            }
            _ => a == b,
        }
    }

    /// Peel off all reference types in this type until there are none left.
    ///
    /// This method is idempotent, i.e. `ty.peel_refs().peel_refs() == ty.peel_refs()`.
    ///
    /// # Examples
    ///
    /// - `u8` -> `u8`
    /// - `&'a mut u8` -> `u8`
    /// - `&'a &'b u8` -> `u8`
    /// - `&'a *const &'b u8` -> `*const &'b u8`
    pub fn peel_refs(&'tcx self) -> Ty<'tcx> {
        let mut ty = self;
        while let Ref(_, inner_ty, _) = ty.kind() {
            ty = inner_ty;
        }
        ty
    }

    pub fn outer_exclusive_binder(&'tcx self) -> DebruijnIndex {
        self.outer_exclusive_binder
    }
}

pub enum ExplicitSelf<'tcx> {
    ByValue,
    ByReference(ty::Region<'tcx>, hir::Mutability),
    ByRawPointer(hir::Mutability),
    ByBox,
    Other,
}

impl<'tcx> ExplicitSelf<'tcx> {
    /// Categorizes an explicit self declaration like `self: SomeType`
    /// into either `self`, `&self`, `&mut self`, `Box<self>`, or
    /// `Other`.
    /// This is mainly used to require the `arbitrary_self_types` feature
    /// in the case of `Other`, to improve error messages in the common cases,
    /// and to make `Other` non-object-safe.
    ///
    /// Examples:
    ///
    /// ```
    /// impl<'a> Foo for &'a T {
    ///     // Legal declarations:
    ///     fn method1(self: &&'a T); // ExplicitSelf::ByReference
    ///     fn method2(self: &'a T); // ExplicitSelf::ByValue
    ///     fn method3(self: Box<&'a T>); // ExplicitSelf::ByBox
    ///     fn method4(self: Rc<&'a T>); // ExplicitSelf::Other
    ///
    ///     // Invalid cases will be caught by `check_method_receiver`:
    ///     fn method_err1(self: &'a mut T); // ExplicitSelf::Other
    ///     fn method_err2(self: &'static T) // ExplicitSelf::ByValue
    ///     fn method_err3(self: &&T) // ExplicitSelf::ByReference
    /// }
    /// ```
    ///
    pub fn determine<P>(self_arg_ty: Ty<'tcx>, is_self_ty: P) -> ExplicitSelf<'tcx>
    where
        P: Fn(Ty<'tcx>) -> bool,
    {
        use self::ExplicitSelf::*;

        match *self_arg_ty.kind() {
            _ if is_self_ty(self_arg_ty) => ByValue,
            ty::Ref(region, ty, mutbl) if is_self_ty(ty) => ByReference(region, mutbl),
            ty::RawPtr(ty::TypeAndMut { ty, mutbl }) if is_self_ty(ty) => ByRawPointer(mutbl),
            ty::Adt(def, _) if def.is_box() && is_self_ty(self_arg_ty.boxed_ty()) => ByBox,
            _ => Other,
        }
    }
}

/// Returns a list of types such that the given type needs drop if and only if
/// *any* of the returned types need drop. Returns `Err(AlwaysRequiresDrop)` if
/// this type always needs drop.
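///
/// For example (illustrative): `(u8, String)` yields `[String]`, since only the
/// `String` component can need drop; `[String; 0]` yields an empty list, because
/// zero-length arrays never need drop; and `dyn Trait` is `Err(AlwaysRequiresDrop)`.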
pub fn needs_drop_components(
    ty: Ty<'tcx>,
    target_layout: &TargetDataLayout,
) -> Result<SmallVec<[Ty<'tcx>; 2]>, AlwaysRequiresDrop> {
    match ty.kind() {
        ty::Infer(ty::FreshIntTy(_))
        | ty::Infer(ty::FreshFloatTy(_))
        | ty::Bool
        | ty::Int(_)
        | ty::Uint(_)
        | ty::Float(_)
        | ty::Never
        | ty::FnDef(..)
        | ty::FnPtr(_)
        | ty::Char
        | ty::GeneratorWitness(..)
        | ty::RawPtr(_)
        | ty::Ref(..)
        | ty::Str => Ok(SmallVec::new()),

        // Foreign types can never have destructors.
        ty::Foreign(..) => Ok(SmallVec::new()),

        ty::Dynamic(..) | ty::Error(_) => Err(AlwaysRequiresDrop),

        ty::Slice(ty) => needs_drop_components(ty, target_layout),
        ty::Array(elem_ty, size) => {
            match needs_drop_components(elem_ty, target_layout) {
                Ok(v) if v.is_empty() => Ok(v),
                res => match size.val.try_to_bits(target_layout.pointer_size) {
                    // Arrays of size zero don't need drop, even if their element
                    // type does.
                    Some(0) => Ok(SmallVec::new()),
                    Some(_) => res,
                    // We don't know which of the cases above we are in, so
                    // return the whole type and let the caller decide what to
                    // do.
                    None => Ok(smallvec![ty]),
                },
            }
        }
        // If any field needs drop, then the whole tuple does.
        ty::Tuple(..) => ty.tuple_fields().try_fold(SmallVec::new(), move |mut acc, elem| {
            acc.extend(needs_drop_components(elem, target_layout)?);
            Ok(acc)
        }),

        // These require checking for `Copy` bounds or `Adt` destructors.
        ty::Adt(..)
        | ty::Projection(..)
        | ty::Param(_)
        | ty::Bound(..)
        | ty::Placeholder(..)
        | ty::Opaque(..)
        | ty::Infer(_)
        | ty::Closure(..)
        | ty::Generator(..) => Ok(smallvec![ty]),
    }
}

// Does the equivalent of
// ```
// let v = self.iter().map(|p| p.fold_with(folder)).collect::<SmallVec<[_; 8]>>();
// folder.tcx().intern_*(&v)
// ```
pub fn fold_list<'tcx, F, T>(
    list: &'tcx ty::List<T>,
    folder: &mut F,
    intern: impl FnOnce(TyCtxt<'tcx>, &[T]) -> &'tcx ty::List<T>,
) -> &'tcx ty::List<T>
where
    F: TypeFolder<'tcx>,
    T: TypeFoldable<'tcx> + PartialEq + Copy,
{
    let mut iter = list.iter();
    // Look for the first element that changed
    if let Some((i, new_t)) = iter.by_ref().enumerate().find_map(|(i, t)| {
        let new_t = t.fold_with(folder);
        if new_t == t { None } else { Some((i, new_t)) }
    }) {
        // An element changed, prepare to intern the resulting list
        let mut new_list = SmallVec::<[_; 8]>::with_capacity(list.len());
        new_list.extend_from_slice(&list[..i]);
        new_list.push(new_t);
        new_list.extend(iter.map(|t| t.fold_with(folder)));
        intern(folder.tcx(), &new_list)
    } else {
        list
    }
}

#[derive(Copy, Clone, Debug, HashStable, TyEncodable, TyDecodable)]
pub struct AlwaysRequiresDrop;

/// Normalizes all opaque types in the given value, replacing them
/// with their underlying types.
pub fn normalize_opaque_types(
    tcx: TyCtxt<'tcx>,
    val: &'tcx List<ty::Predicate<'tcx>>,
) -> &'tcx List<ty::Predicate<'tcx>> {
    let mut visitor = OpaqueTypeExpander {
        seen_opaque_tys: FxHashSet::default(),
        expanded_cache: FxHashMap::default(),
        primary_def_id: None,
        found_recursion: false,
        found_any_recursion: false,
        check_recursion: false,
        tcx,
    };
    val.fold_with(&mut visitor)
}

pub fn provide(providers: &mut ty::query::Providers) {
    *providers = ty::query::Providers { normalize_opaque_types, ..*providers }
}