1 use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
2 use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
3 use crate::ty::subst::Subst;
4 use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};
5 use rustc_ast as ast;
6 use rustc_attr as attr;
7 use rustc_hir as hir;
8 use rustc_hir::lang_items::LangItem;
9 use rustc_index::bit_set::BitSet;
10 use rustc_index::vec::{Idx, IndexVec};
11 use rustc_session::{config::OptLevel, DataTypeKind, FieldInfo, SizeKind, VariantInfo};
12 use rustc_span::symbol::{Ident, Symbol};
13 use rustc_span::{Span, DUMMY_SP};
14 use rustc_target::abi::call::{
15 ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
16 };
17 use rustc_target::abi::*;
18 use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy, Target};
19
20 use std::cmp;
21 use std::fmt;
22 use std::iter;
23 use std::num::NonZeroUsize;
24 use std::ops::Bound;
25
26 use rand::{seq::SliceRandom, SeedableRng};
27 use rand_xoshiro::Xoshiro128StarStar;
28
provide(providers: &mut ty::query::Providers)29 pub fn provide(providers: &mut ty::query::Providers) {
30 *providers =
31 ty::query::Providers { layout_of, fn_abi_of_fn_ptr, fn_abi_of_instance, ..*providers };
32 }
33
/// Extension methods connecting `rustc_target::abi::Integer` to rustc's
/// type system and attribute representations.
pub trait IntegerExt {
    /// Returns the primitive integer `Ty` with this width and the given signedness.
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
    /// Gets the `Integer` type from an `attr::IntType`.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    /// Gets the `Integer` type from a `ty::IntTy`.
    fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
    /// Gets the `Integer` type from a `ty::UintTy`.
    fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
    /// Finds the appropriate `Integer` type and signedness for the given
    /// discriminant range and `#[repr]` attribute.
    fn repr_discr<'tcx>(
        tcx: TyCtxt<'tcx>,
        ty: Ty<'tcx>,
        repr: &ReprOptions,
        min: i128,
        max: i128,
    ) -> (Integer, bool);
}
47
48 impl IntegerExt for Integer {
49 #[inline]
to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>50 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
51 match (*self, signed) {
52 (I8, false) => tcx.types.u8,
53 (I16, false) => tcx.types.u16,
54 (I32, false) => tcx.types.u32,
55 (I64, false) => tcx.types.u64,
56 (I128, false) => tcx.types.u128,
57 (I8, true) => tcx.types.i8,
58 (I16, true) => tcx.types.i16,
59 (I32, true) => tcx.types.i32,
60 (I64, true) => tcx.types.i64,
61 (I128, true) => tcx.types.i128,
62 }
63 }
64
65 /// Gets the Integer type from an attr::IntType.
from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer66 fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
67 let dl = cx.data_layout();
68
69 match ity {
70 attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
71 attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
72 attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
73 attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
74 attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
75 attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
76 dl.ptr_sized_integer()
77 }
78 }
79 }
80
from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer81 fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
82 match ity {
83 ty::IntTy::I8 => I8,
84 ty::IntTy::I16 => I16,
85 ty::IntTy::I32 => I32,
86 ty::IntTy::I64 => I64,
87 ty::IntTy::I128 => I128,
88 ty::IntTy::Isize => cx.data_layout().ptr_sized_integer(),
89 }
90 }
from_uint_ty<C: HasDataLayout>(cx: &C, ity: ty::UintTy) -> Integer91 fn from_uint_ty<C: HasDataLayout>(cx: &C, ity: ty::UintTy) -> Integer {
92 match ity {
93 ty::UintTy::U8 => I8,
94 ty::UintTy::U16 => I16,
95 ty::UintTy::U32 => I32,
96 ty::UintTy::U64 => I64,
97 ty::UintTy::U128 => I128,
98 ty::UintTy::Usize => cx.data_layout().ptr_sized_integer(),
99 }
100 }
101
102 /// Finds the appropriate Integer type and signedness for the given
103 /// signed discriminant range and `#[repr]` attribute.
104 /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
105 /// that shouldn't affect anything, other than maybe debuginfo.
repr_discr<'tcx>( tcx: TyCtxt<'tcx>, ty: Ty<'tcx>, repr: &ReprOptions, min: i128, max: i128, ) -> (Integer, bool)106 fn repr_discr<'tcx>(
107 tcx: TyCtxt<'tcx>,
108 ty: Ty<'tcx>,
109 repr: &ReprOptions,
110 min: i128,
111 max: i128,
112 ) -> (Integer, bool) {
113 // Theoretically, negative values could be larger in unsigned representation
114 // than the unsigned representation of the signed minimum. However, if there
115 // are any negative values, the only valid unsigned representation is u128
116 // which can fit all i128 values, so the result remains unaffected.
117 let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
118 let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
119
120 if let Some(ity) = repr.int {
121 let discr = Integer::from_attr(&tcx, ity);
122 let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
123 if discr < fit {
124 bug!(
125 "Integer::repr_discr: `#[repr]` hint too small for \
126 discriminant range of enum `{}",
127 ty
128 )
129 }
130 return (discr, ity.is_signed());
131 }
132
133 let at_least = if repr.c() {
134 // This is usually I32, however it can be different on some platforms,
135 // notably hexagon and arm-none/thumb-none
136 tcx.data_layout().c_enum_min_size
137 } else {
138 // repr(Rust) enums try to be as small as possible
139 I8
140 };
141
142 // If there are no negative values, we can use the unsigned fit.
143 if min >= 0 {
144 (cmp::max(unsigned_fit, at_least), false)
145 } else {
146 (cmp::max(signed_fit, at_least), true)
147 }
148 }
149 }
150
/// Extension methods connecting `rustc_target::abi::Primitive` to rustc's
/// type system.
pub trait PrimitiveExt {
    /// Returns a `Ty` with this primitive's representation.
    fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
    /// Return an *integer* type matching this primitive.
    /// Useful in particular when dealing with enum discriminants.
    fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
}
155
156 impl PrimitiveExt for Primitive {
157 #[inline]
to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>158 fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
159 match *self {
160 Int(i, signed) => i.to_ty(tcx, signed),
161 F32 => tcx.types.f32,
162 F64 => tcx.types.f64,
163 Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
164 }
165 }
166
167 /// Return an *integer* type matching this primitive.
168 /// Useful in particular when dealing with enum discriminants.
169 #[inline]
to_int_ty(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>170 fn to_int_ty(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
171 match *self {
172 Int(i, signed) => i.to_ty(tcx, signed),
173 Pointer => tcx.types.usize,
174 F32 | F64 => bug!("floats do not have an int type"),
175 }
176 }
177 }
178
/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;

/// The maximum supported number of lanes in a SIMD vector.
///
/// This value is selected based on backend support:
/// * LLVM does not appear to have a vector width limit.
/// * Cranelift stores the base-2 log of the lane count in a 4 bit integer,
///   so the largest representable count is `1 << 0xF` (2^15 = 32768 lanes).
pub const MAX_SIMD_LANES: u64 = 1 << 0xF;
197
/// Reasons why computing the layout of a type can fail.
#[derive(Copy, Clone, Debug, HashStable, TyEncodable, TyDecodable)]
pub enum LayoutError<'tcx> {
    /// A layout could not be determined for this type.
    Unknown(Ty<'tcx>),
    /// The type's computed size is too big for the current architecture
    /// (a size or offset computation overflowed).
    SizeOverflow(Ty<'tcx>),
}
203
204 impl<'tcx> fmt::Display for LayoutError<'tcx> {
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result205 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
206 match *self {
207 LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
208 LayoutError::SizeOverflow(ty) => {
209 write!(f, "values of the type `{}` are too big for the current architecture", ty)
210 }
211 }
212 }
213 }
214
/// Query provider for `layout_of`: computes the layout of `ty` in the given
/// `ParamEnv`, tracking recursion depth through the thread-local
/// `ImplicitCtxt` so that infinitely-recursive types abort with a fatal
/// error instead of overflowing the stack.
fn layout_of<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
    ty::tls::with_related_context(tcx, move |icx| {
        let (param_env, ty) = query.into_parts();

        // Abort (fatally) once layout computation nests deeper than the
        // configured recursion limit.
        if !tcx.recursion_limit().value_within_limit(icx.layout_depth) {
            tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };

        ty::tls::enter_context(&icx, |_| {
            let param_env = param_env.with_reveal_all_normalized(tcx);
            let unnormalized_ty = ty;
            let ty = tcx.normalize_erasing_regions(param_env, ty);
            if ty != unnormalized_ty {
                // Ensure this layout is also cached for the normalized type.
                return tcx.layout_of(param_env.and(ty));
            }

            let cx = LayoutCx { tcx, param_env };

            let layout = cx.layout_of_uncached(ty)?;
            let layout = TyAndLayout { ty, layout };

            // Record layout statistics (`-Z print-type-sizes` support).
            cx.record_layout_for_printing(layout);

            // Type-level uninhabitedness should always imply ABI uninhabitedness.
            if tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
                assert!(layout.abi.is_uninhabited());
            }

            Ok(layout)
        })
    })
}
254
/// Context for layout computations: an interner-like context `C`
/// (typically `TyCtxt<'tcx>`) paired with the `ParamEnv` to normalize
/// and evaluate in.
pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}
259
/// The flavor of single-variant layout being computed; this determines
/// whether the trailing field may be unsized and whether a prefix (such as
/// an enum tag) must precede the fields.
#[derive(Copy, Clone, Debug)]
enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}
269
/// Inverts a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
/// This is used to go between `memory_index` (source field order to memory order)
/// and `inverse_memory_index` (memory order to source field order).
/// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    // Iterate with `enumerate` rather than indexing `map[i]` — the idiomatic
    // form also avoids a redundant bounds check on the read side.
    for (i, &m) in map.iter().enumerate() {
        inverse[m as usize] = i as u32;
    }
    inverse
}
282
283 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    /// Builds the `ScalarPair` layout for two scalars `a` and `b`, placing
    /// `a` at offset zero and `b` at the first suitably-aligned offset after
    /// it, with the pair's alignment being the maximum of both scalars' (and
    /// the aggregate) alignment.
    fn scalar_pair(&self, a: Scalar, b: Scalar) -> Layout {
        let dl = self.data_layout();
        let b_align = b.value.align(dl);
        let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
        // `b` starts at `a`'s size rounded up to `b`'s ABI alignment.
        let b_offset = a.value.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.value.size(dl)).align_to(align.abi);

        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b)
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a))
            .max_by_key(|niche| niche.available(dl));

        Layout {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary {
                offsets: vec![Size::ZERO, b_offset],
                memory_index: vec![0, 1],
            },
            abi: Abi::ScalarPair(a, b),
            largest_niche,
            align,
            size,
        }
    }
310
    /// Computes the (uninterned) layout of a single-variant aggregate —
    /// a struct, tuple, closure environment, or the contents of one enum
    /// variant — from the already-computed layouts of its fields.
    ///
    /// `kind` controls whether the trailing field may be unsized and whether
    /// a prefix (e.g. an enum tag) precedes the fields. Unless `repr` forbids
    /// it, fields are reordered to minimize padding (or shuffled under
    /// `-Z randomize-layout`).
    fn univariant_uninterned(
        &self,
        ty: Ty<'tcx>,
        fields: &[TyAndLayout<'_>],
        repr: &ReprOptions,
        kind: StructKind,
    ) -> Result<Layout, LayoutError<'tcx>> {
        let dl = self.data_layout();
        let pack = repr.pack;
        // `packed` and `align` are mutually exclusive; this should have been
        // rejected earlier, so only delay a bug here.
        if pack.is_some() && repr.align.is_some() {
            self.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
            return Err(LayoutError::Unknown(ty));
        }

        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };

        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

        // `ReprOptions.layout_seed` is a deterministic seed that we can use to
        // randomize field ordering with
        let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);

        let optimize = !repr.inhibit_struct_field_reordering_opt();
        if optimize {
            // A possibly-unsized trailing field must stay last, so it is
            // excluded from the reordering below.
            let end =
                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
            let optimizing = &mut inverse_memory_index[..end];
            let field_align = |f: &TyAndLayout<'_>| {
                if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
            };

            // If `-Z randomize-layout` was enabled for the type definition we can shuffle
            // the field ordering to try and catch some code making assumptions about layouts
            // we don't guarantee
            if repr.can_randomize_type_layout() {
                // Shuffle the ordering of the fields
                optimizing.shuffle(&mut rng);

                // Otherwise we just leave things alone and actually optimize the type's fields
            } else {
                match kind {
                    StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                        optimizing.sort_by_key(|&x| {
                            // Place ZSTs first to avoid "interesting offsets",
                            // especially with only one or two non-ZST fields.
                            let f = &fields[x as usize];
                            (!f.is_zst(), cmp::Reverse(field_align(f)))
                        });
                    }

                    StructKind::Prefixed(..) => {
                        // Sort in ascending alignment so that the layout stays optimal
                        // regardless of the prefix
                        optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                    }
                }

                // FIXME(Kixiron): We can always shuffle fields within a given alignment class
                // regardless of the status of `-Z randomize-layout`
            }
        }

        // inverse_memory_index holds field indices by increasing memory offset.
        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
        // We now write field offsets to the corresponding offset slot;
        // field 5 with offset 0 puts 0 in offsets[5].
        // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).

        let mut sized = true;
        let mut offsets = vec![Size::ZERO; fields.len()];
        let mut offset = Size::ZERO;
        let mut largest_niche = None;
        let mut largest_niche_available = 0;

        // Reserve room for the prefix (e.g. an enum tag) before any field.
        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
            let prefix_align =
                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
            align = align.max(AbiAndPrefAlign::new(prefix_align));
            offset = prefix_size.align_to(prefix_align);
        }

        for &i in &inverse_memory_index {
            let field = fields[i as usize];
            // Only the last field (in memory order) may be unsized.
            if !sized {
                self.tcx.sess.delay_span_bug(
                    DUMMY_SP,
                    &format!(
                        "univariant: field #{} of `{}` comes after unsized field",
                        offsets.len(),
                        ty
                    ),
                );
            }

            if field.is_unsized() {
                sized = false;
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
            let field_align = if let Some(pack) = pack {
                field.align.min(AbiAndPrefAlign::new(pack))
            } else {
                field.align
            };
            offset = offset.align_to(field_align.abi);
            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;

            // Track the largest niche across all fields, translating its
            // offset to be relative to the start of this aggregate.
            if !repr.hide_niche() {
                if let Some(mut niche) = field.largest_niche {
                    let available = niche.available(dl);
                    if available > largest_niche_available {
                        largest_niche_available = available;
                        niche.offset += offset;
                        largest_niche = Some(niche);
                    }
                }
            }

            offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
        }

        // `#[repr(align)]` can only raise the computed alignment.
        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }

        debug!("univariant min_size: {:?}", offset);
        let min_size = offset;

        // As stated above, inverse_memory_index holds field indices by increasing offset.
        // This makes it an already-sorted view of the offsets vec.
        // To invert it, consider:
        // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
        // Field 5 would be the first element, so memory_index is i:
        // Note: if we didn't optimize, it's already right.

        let memory_index =
            if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };

        let size = min_size.align_to(align.abi);
        let mut abi = Abi::Aggregate { sized };

        // Unpack newtype ABIs and find scalar pairs.
        if sized && size.bytes() > 0 {
            // All other fields must be ZSTs.
            let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                // We have exactly one non-ZST field.
                (Some((i, field)), None, None) => {
                    // Field fills the struct and it has a scalar or scalar pair ABI.
                    if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
                    {
                        match field.abi {
                            // For plain scalars, or vectors of them, we can't unpack
                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
                            Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                abi = field.abi;
                            }
                            // But scalar pairs are Rust-specific and get
                            // treated as aggregates by C ABIs anyway.
                            Abi::ScalarPair(..) => {
                                abi = field.abi;
                            }
                            _ => {}
                        }
                    }
                }

                // Two non-ZST fields, and they're both scalars.
                (
                    Some((i, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(a), .. }, .. })),
                    Some((j, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(b), .. }, .. })),
                    None,
                ) => {
                    // Order by the memory placement, not source order.
                    let ((i, a), (j, b)) =
                        if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { ((j, b), (i, a)) };
                    let pair = self.scalar_pair(a, b);
                    let pair_offsets = match pair.fields {
                        FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                            assert_eq!(memory_index, &[0, 1]);
                            offsets
                        }
                        _ => bug!(),
                    };
                    if offsets[i] == pair_offsets[0]
                        && offsets[j] == pair_offsets[1]
                        && align == pair.align
                        && size == pair.size
                    {
                        // We can use `ScalarPair` only when it matches our
                        // already computed layout (including `#[repr(C)]`).
                        abi = pair.abi;
                    }
                }

                _ => {}
            }
        }

        // An aggregate containing an uninhabited field is itself uninhabited.
        if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        }

        Ok(Layout {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Arbitrary { offsets, memory_index },
            abi,
            largest_niche,
            align,
            size,
        })
    }
528
layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'tcx>>529 fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'tcx>> {
530 let tcx = self.tcx;
531 let param_env = self.param_env;
532 let dl = self.data_layout();
533 let scalar_unit = |value: Primitive| {
534 let size = value.size(dl);
535 assert!(size.bits() <= 128);
536 Scalar { value, valid_range: WrappingRange { start: 0, end: size.unsigned_int_max() } }
537 };
538 let scalar = |value: Primitive| tcx.intern_layout(Layout::scalar(self, scalar_unit(value)));
539
540 let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
541 Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
542 };
543 debug_assert!(!ty.has_infer_types_or_consts());
544
545 Ok(match *ty.kind() {
546 // Basic scalars.
547 ty::Bool => tcx.intern_layout(Layout::scalar(
548 self,
549 Scalar { value: Int(I8, false), valid_range: WrappingRange { start: 0, end: 1 } },
550 )),
551 ty::Char => tcx.intern_layout(Layout::scalar(
552 self,
553 Scalar {
554 value: Int(I32, false),
555 valid_range: WrappingRange { start: 0, end: 0x10FFFF },
556 },
557 )),
558 ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
559 ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
560 ty::Float(fty) => scalar(match fty {
561 ty::FloatTy::F32 => F32,
562 ty::FloatTy::F64 => F64,
563 }),
564 ty::FnPtr(_) => {
565 let mut ptr = scalar_unit(Pointer);
566 ptr.valid_range = ptr.valid_range.with_start(1);
567 tcx.intern_layout(Layout::scalar(self, ptr))
568 }
569
570 // The never type.
571 ty::Never => tcx.intern_layout(Layout {
572 variants: Variants::Single { index: VariantIdx::new(0) },
573 fields: FieldsShape::Primitive,
574 abi: Abi::Uninhabited,
575 largest_niche: None,
576 align: dl.i8_align,
577 size: Size::ZERO,
578 }),
579
580 // Potentially-wide pointers.
581 ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
582 let mut data_ptr = scalar_unit(Pointer);
583 if !ty.is_unsafe_ptr() {
584 data_ptr.valid_range = data_ptr.valid_range.with_start(1);
585 }
586
587 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
588 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
589 return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
590 }
591
592 let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
593 let metadata = match unsized_part.kind() {
594 ty::Foreign(..) => {
595 return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
596 }
597 ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
598 ty::Dynamic(..) => {
599 let mut vtable = scalar_unit(Pointer);
600 vtable.valid_range = vtable.valid_range.with_start(1);
601 vtable
602 }
603 _ => return Err(LayoutError::Unknown(unsized_part)),
604 };
605
606 // Effectively a (ptr, meta) tuple.
607 tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
608 }
609
610 // Arrays and slices.
611 ty::Array(element, mut count) => {
612 if count.has_projections() {
613 count = tcx.normalize_erasing_regions(param_env, count);
614 if count.has_projections() {
615 return Err(LayoutError::Unknown(ty));
616 }
617 }
618
619 let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
620 let element = self.layout_of(element)?;
621 let size =
622 element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
623
624 let abi =
625 if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
626 Abi::Uninhabited
627 } else {
628 Abi::Aggregate { sized: true }
629 };
630
631 let largest_niche = if count != 0 { element.largest_niche } else { None };
632
633 tcx.intern_layout(Layout {
634 variants: Variants::Single { index: VariantIdx::new(0) },
635 fields: FieldsShape::Array { stride: element.size, count },
636 abi,
637 largest_niche,
638 align: element.align,
639 size,
640 })
641 }
642 ty::Slice(element) => {
643 let element = self.layout_of(element)?;
644 tcx.intern_layout(Layout {
645 variants: Variants::Single { index: VariantIdx::new(0) },
646 fields: FieldsShape::Array { stride: element.size, count: 0 },
647 abi: Abi::Aggregate { sized: false },
648 largest_niche: None,
649 align: element.align,
650 size: Size::ZERO,
651 })
652 }
653 ty::Str => tcx.intern_layout(Layout {
654 variants: Variants::Single { index: VariantIdx::new(0) },
655 fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
656 abi: Abi::Aggregate { sized: false },
657 largest_niche: None,
658 align: dl.i8_align,
659 size: Size::ZERO,
660 }),
661
662 // Odd unit types.
663 ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
664 ty::Dynamic(..) | ty::Foreign(..) => {
665 let mut unit = self.univariant_uninterned(
666 ty,
667 &[],
668 &ReprOptions::default(),
669 StructKind::AlwaysSized,
670 )?;
671 match unit.abi {
672 Abi::Aggregate { ref mut sized } => *sized = false,
673 _ => bug!(),
674 }
675 tcx.intern_layout(unit)
676 }
677
678 ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,
679
680 ty::Closure(_, ref substs) => {
681 let tys = substs.as_closure().upvar_tys();
682 univariant(
683 &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
684 &ReprOptions::default(),
685 StructKind::AlwaysSized,
686 )?
687 }
688
689 ty::Tuple(tys) => {
690 let kind =
691 if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
692
693 univariant(
694 &tys.iter()
695 .map(|k| self.layout_of(k.expect_ty()))
696 .collect::<Result<Vec<_>, _>>()?,
697 &ReprOptions::default(),
698 kind,
699 )?
700 }
701
702 // SIMD vector types.
703 ty::Adt(def, substs) if def.repr.simd() => {
704 if !def.is_struct() {
705 // Should have yielded E0517 by now.
706 tcx.sess.delay_span_bug(
707 DUMMY_SP,
708 "#[repr(simd)] was applied to an ADT that is not a struct",
709 );
710 return Err(LayoutError::Unknown(ty));
711 }
712
713 // Supported SIMD vectors are homogeneous ADTs with at least one field:
714 //
715 // * #[repr(simd)] struct S(T, T, T, T);
716 // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
717 // * #[repr(simd)] struct S([T; 4])
718 //
719 // where T is a primitive scalar (integer/float/pointer).
720
721 // SIMD vectors with zero fields are not supported.
722 // (should be caught by typeck)
723 if def.non_enum_variant().fields.is_empty() {
724 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
725 }
726
727 // Type of the first ADT field:
728 let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);
729
730 // Heterogeneous SIMD vectors are not supported:
731 // (should be caught by typeck)
732 for fi in &def.non_enum_variant().fields {
733 if fi.ty(tcx, substs) != f0_ty {
734 tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
735 }
736 }
737
738 // The element type and number of elements of the SIMD vector
739 // are obtained from:
740 //
741 // * the element type and length of the single array field, if
742 // the first field is of array type, or
743 //
744 // * the homogenous field type and the number of fields.
745 let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
746 // First ADT field is an array:
747
748 // SIMD vectors with multiple array fields are not supported:
749 // (should be caught by typeck)
750 if def.non_enum_variant().fields.len() != 1 {
751 tcx.sess.fatal(&format!(
752 "monomorphising SIMD type `{}` with more than one array field",
753 ty
754 ));
755 }
756
757 // Extract the number of elements from the layout of the array field:
758 let Ok(TyAndLayout {
759 layout: Layout { fields: FieldsShape::Array { count, .. }, .. },
760 ..
761 }) = self.layout_of(f0_ty) else {
762 return Err(LayoutError::Unknown(ty));
763 };
764
765 (*e_ty, *count, true)
766 } else {
767 // First ADT field is not an array:
768 (f0_ty, def.non_enum_variant().fields.len() as _, false)
769 };
770
771 // SIMD vectors of zero length are not supported.
772 // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
773 // support.
774 //
775 // Can't be caught in typeck if the array length is generic.
776 if e_len == 0 {
777 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
778 } else if e_len > MAX_SIMD_LANES {
779 tcx.sess.fatal(&format!(
780 "monomorphising SIMD type `{}` of length greater than {}",
781 ty, MAX_SIMD_LANES,
782 ));
783 }
784
785 // Compute the ABI of the element type:
786 let e_ly = self.layout_of(e_ty)?;
787 let Abi::Scalar(e_abi) = e_ly.abi else {
788 // This error isn't caught in typeck, e.g., if
789 // the element type of the vector is generic.
790 tcx.sess.fatal(&format!(
791 "monomorphising SIMD type `{}` with a non-primitive-scalar \
792 (integer/float/pointer) element type `{}`",
793 ty, e_ty
794 ))
795 };
796
797 // Compute the size and alignment of the vector:
798 let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
799 let align = dl.vector_align(size);
800 let size = size.align_to(align.abi);
801
802 // Compute the placement of the vector fields:
803 let fields = if is_array {
804 FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
805 } else {
806 FieldsShape::Array { stride: e_ly.size, count: e_len }
807 };
808
809 tcx.intern_layout(Layout {
810 variants: Variants::Single { index: VariantIdx::new(0) },
811 fields,
812 abi: Abi::Vector { element: e_abi, count: e_len },
813 largest_niche: e_ly.largest_niche,
814 size,
815 align,
816 })
817 }
818
819 // ADTs.
820 ty::Adt(def, substs) => {
821 // Cache the field layouts.
822 let variants = def
823 .variants
824 .iter()
825 .map(|v| {
826 v.fields
827 .iter()
828 .map(|field| self.layout_of(field.ty(tcx, substs)))
829 .collect::<Result<Vec<_>, _>>()
830 })
831 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
832
833 if def.is_union() {
834 if def.repr.pack.is_some() && def.repr.align.is_some() {
835 self.tcx.sess.delay_span_bug(
836 tcx.def_span(def.did),
837 "union cannot be packed and aligned",
838 );
839 return Err(LayoutError::Unknown(ty));
840 }
841
842 let mut align =
843 if def.repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };
844
845 if let Some(repr_align) = def.repr.align {
846 align = align.max(AbiAndPrefAlign::new(repr_align));
847 }
848
849 let optimize = !def.repr.inhibit_union_abi_opt();
850 let mut size = Size::ZERO;
851 let mut abi = Abi::Aggregate { sized: true };
852 let index = VariantIdx::new(0);
853 for field in &variants[index] {
854 assert!(!field.is_unsized());
855 align = align.max(field.align);
856
857 // If all non-ZST fields have the same ABI, forward this ABI
858 if optimize && !field.is_zst() {
859 // Normalize scalar_unit to the maximal valid range
860 let field_abi = match field.abi {
861 Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
862 Abi::ScalarPair(x, y) => {
863 Abi::ScalarPair(scalar_unit(x.value), scalar_unit(y.value))
864 }
865 Abi::Vector { element: x, count } => {
866 Abi::Vector { element: scalar_unit(x.value), count }
867 }
868 Abi::Uninhabited | Abi::Aggregate { .. } => {
869 Abi::Aggregate { sized: true }
870 }
871 };
872
873 if size == Size::ZERO {
874 // first non ZST: initialize 'abi'
875 abi = field_abi;
876 } else if abi != field_abi {
877 // different fields have different ABI: reset to Aggregate
878 abi = Abi::Aggregate { sized: true };
879 }
880 }
881
882 size = cmp::max(size, field.size);
883 }
884
885 if let Some(pack) = def.repr.pack {
886 align = align.min(AbiAndPrefAlign::new(pack));
887 }
888
889 return Ok(tcx.intern_layout(Layout {
890 variants: Variants::Single { index },
891 fields: FieldsShape::Union(
892 NonZeroUsize::new(variants[index].len())
893 .ok_or(LayoutError::Unknown(ty))?,
894 ),
895 abi,
896 largest_niche: None,
897 align,
898 size: size.align_to(align.abi),
899 }));
900 }
901
902 // A variant is absent if it's uninhabited and only has ZST fields.
903 // Present uninhabited variants only require space for their fields,
904 // but *not* an encoding of the discriminant (e.g., a tag value).
905 // See issue #49298 for more details on the need to leave space
906 // for non-ZST uninhabited data (mostly partial initialization).
907 let absent = |fields: &[TyAndLayout<'_>]| {
908 let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
909 let is_zst = fields.iter().all(|f| f.is_zst());
910 uninhabited && is_zst
911 };
912 let (present_first, present_second) = {
913 let mut present_variants = variants
914 .iter_enumerated()
915 .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
916 (present_variants.next(), present_variants.next())
917 };
918 let present_first = match present_first {
919 Some(present_first) => present_first,
920 // Uninhabited because it has no variants, or only absent ones.
921 None if def.is_enum() => {
922 return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
923 }
924 // If it's a struct, still compute a layout so that we can still compute the
925 // field offsets.
926 None => VariantIdx::new(0),
927 };
928
929 let is_struct = !def.is_enum() ||
930 // Only one variant is present.
931 (present_second.is_none() &&
932 // Representation optimizations are allowed.
933 !def.repr.inhibit_enum_layout_opt());
934 if is_struct {
935 // Struct, or univariant enum equivalent to a struct.
936 // (Typechecking will reject discriminant-sizing attrs.)
937
938 let v = present_first;
939 let kind = if def.is_enum() || variants[v].is_empty() {
940 StructKind::AlwaysSized
941 } else {
942 let param_env = tcx.param_env(def.did);
943 let last_field = def.variants[v].fields.last().unwrap();
944 let always_sized =
945 tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
946 if !always_sized {
947 StructKind::MaybeUnsized
948 } else {
949 StructKind::AlwaysSized
950 }
951 };
952
953 let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr, kind)?;
954 st.variants = Variants::Single { index: v };
955 let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
956 match st.abi {
957 Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
958 // the asserts ensure that we are not using the
959 // `#[rustc_layout_scalar_valid_range(n)]`
960 // attribute to widen the range of anything as that would probably
961 // result in UB somewhere
962 // FIXME(eddyb) the asserts are probably not needed,
963 // as larger validity ranges would result in missed
964 // optimizations, *not* wrongly assuming the inner
965 // value is valid. e.g. unions enlarge validity ranges,
966 // because the values may be uninitialized.
967 if let Bound::Included(start) = start {
968 // FIXME(eddyb) this might be incorrect - it doesn't
969 // account for wrap-around (end < start) ranges.
970 assert!(scalar.valid_range.start <= start);
971 scalar.valid_range.start = start;
972 }
973 if let Bound::Included(end) = end {
974 // FIXME(eddyb) this might be incorrect - it doesn't
975 // account for wrap-around (end < start) ranges.
976 assert!(scalar.valid_range.end >= end);
977 scalar.valid_range.end = end;
978 }
979
980 // Update `largest_niche` if we have introduced a larger niche.
981 let niche = if def.repr.hide_niche() {
982 None
983 } else {
984 Niche::from_scalar(dl, Size::ZERO, *scalar)
985 };
986 if let Some(niche) = niche {
987 match st.largest_niche {
988 Some(largest_niche) => {
989 // Replace the existing niche even if they're equal,
990 // because this one is at a lower offset.
991 if largest_niche.available(dl) <= niche.available(dl) {
992 st.largest_niche = Some(niche);
993 }
994 }
995 None => st.largest_niche = Some(niche),
996 }
997 }
998 }
999 _ => assert!(
1000 start == Bound::Unbounded && end == Bound::Unbounded,
1001 "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
1002 def,
1003 st,
1004 ),
1005 }
1006
1007 return Ok(tcx.intern_layout(st));
1008 }
1009
1010 // At this point, we have handled all unions and
1011 // structs. (We have also handled univariant enums
1012 // that allow representation optimization.)
1013 assert!(def.is_enum());
1014
1015 // The current code for niche-filling relies on variant indices
1016 // instead of actual discriminants, so dataful enums with
1017 // explicit discriminants (RFC #2363) would misbehave.
1018 let no_explicit_discriminants = def
1019 .variants
1020 .iter_enumerated()
1021 .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));
1022
1023 let mut niche_filling_layout = None;
1024
1025 // Niche-filling enum optimization.
1026 if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
1027 let mut dataful_variant = None;
1028 let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);
1029
1030 // Find one non-ZST variant.
1031 'variants: for (v, fields) in variants.iter_enumerated() {
1032 if absent(fields) {
1033 continue 'variants;
1034 }
1035 for f in fields {
1036 if !f.is_zst() {
1037 if dataful_variant.is_none() {
1038 dataful_variant = Some(v);
1039 continue 'variants;
1040 } else {
1041 dataful_variant = None;
1042 break 'variants;
1043 }
1044 }
1045 }
1046 niche_variants = *niche_variants.start().min(&v)..=v;
1047 }
1048
1049 if niche_variants.start() > niche_variants.end() {
1050 dataful_variant = None;
1051 }
1052
1053 if let Some(i) = dataful_variant {
1054 let count = (niche_variants.end().as_u32()
1055 - niche_variants.start().as_u32()
1056 + 1) as u128;
1057
1058 // Find the field with the largest niche
1059 let niche_candidate = variants[i]
1060 .iter()
1061 .enumerate()
1062 .filter_map(|(j, field)| Some((j, field.largest_niche?)))
1063 .max_by_key(|(_, niche)| niche.available(dl));
1064
1065 if let Some((field_index, niche, (niche_start, niche_scalar))) =
1066 niche_candidate.and_then(|(field_index, niche)| {
1067 Some((field_index, niche, niche.reserve(self, count)?))
1068 })
1069 {
1070 let mut align = dl.aggregate_align;
1071 let st = variants
1072 .iter_enumerated()
1073 .map(|(j, v)| {
1074 let mut st = self.univariant_uninterned(
1075 ty,
1076 v,
1077 &def.repr,
1078 StructKind::AlwaysSized,
1079 )?;
1080 st.variants = Variants::Single { index: j };
1081
1082 align = align.max(st.align);
1083
1084 Ok(st)
1085 })
1086 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1087
1088 let offset = st[i].fields.offset(field_index) + niche.offset;
1089 let size = st[i].size;
1090
1091 let abi = if st.iter().all(|v| v.abi.is_uninhabited()) {
1092 Abi::Uninhabited
1093 } else {
1094 match st[i].abi {
1095 Abi::Scalar(_) => Abi::Scalar(niche_scalar),
1096 Abi::ScalarPair(first, second) => {
1097 // We need to use scalar_unit to reset the
1098 // valid range to the maximal one for that
1099 // primitive, because only the niche is
1100 // guaranteed to be initialised, not the
1101 // other primitive.
1102 if offset.bytes() == 0 {
1103 Abi::ScalarPair(niche_scalar, scalar_unit(second.value))
1104 } else {
1105 Abi::ScalarPair(scalar_unit(first.value), niche_scalar)
1106 }
1107 }
1108 _ => Abi::Aggregate { sized: true },
1109 }
1110 };
1111
1112 let largest_niche = Niche::from_scalar(dl, offset, niche_scalar);
1113
1114 niche_filling_layout = Some(Layout {
1115 variants: Variants::Multiple {
1116 tag: niche_scalar,
1117 tag_encoding: TagEncoding::Niche {
1118 dataful_variant: i,
1119 niche_variants,
1120 niche_start,
1121 },
1122 tag_field: 0,
1123 variants: st,
1124 },
1125 fields: FieldsShape::Arbitrary {
1126 offsets: vec![offset],
1127 memory_index: vec![0],
1128 },
1129 abi,
1130 largest_niche,
1131 size,
1132 align,
1133 });
1134 }
1135 }
1136 }
1137
1138 let (mut min, mut max) = (i128::MAX, i128::MIN);
1139 let discr_type = def.repr.discr_type();
1140 let bits = Integer::from_attr(self, discr_type).size().bits();
1141 for (i, discr) in def.discriminants(tcx) {
1142 if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
1143 continue;
1144 }
1145 let mut x = discr.val as i128;
1146 if discr_type.is_signed() {
1147 // sign extend the raw representation to be an i128
1148 x = (x << (128 - bits)) >> (128 - bits);
1149 }
1150 if x < min {
1151 min = x;
1152 }
1153 if x > max {
1154 max = x;
1155 }
1156 }
1157 // We might have no inhabited variants, so pretend there's at least one.
1158 if (min, max) == (i128::MAX, i128::MIN) {
1159 min = 0;
1160 max = 0;
1161 }
1162 assert!(min <= max, "discriminant range is {}...{}", min, max);
1163 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
1164
1165 let mut align = dl.aggregate_align;
1166 let mut size = Size::ZERO;
1167
1168 // We're interested in the smallest alignment, so start large.
1169 let mut start_align = Align::from_bytes(256).unwrap();
1170 assert_eq!(Integer::for_align(dl, start_align), None);
1171
1172 // repr(C) on an enum tells us to make a (tag, union) layout,
1173 // so we need to grow the prefix alignment to be at least
1174 // the alignment of the union. (This value is used both for
1175 // determining the alignment of the overall enum, and the
1176 // determining the alignment of the payload after the tag.)
1177 let mut prefix_align = min_ity.align(dl).abi;
1178 if def.repr.c() {
1179 for fields in &variants {
1180 for field in fields {
1181 prefix_align = prefix_align.max(field.align.abi);
1182 }
1183 }
1184 }
1185
1186 // Create the set of structs that represent each variant.
1187 let mut layout_variants = variants
1188 .iter_enumerated()
1189 .map(|(i, field_layouts)| {
1190 let mut st = self.univariant_uninterned(
1191 ty,
1192 &field_layouts,
1193 &def.repr,
1194 StructKind::Prefixed(min_ity.size(), prefix_align),
1195 )?;
1196 st.variants = Variants::Single { index: i };
1197 // Find the first field we can't move later
1198 // to make room for a larger discriminant.
1199 for field in
1200 st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
1201 {
1202 if !field.is_zst() || field.align.abi.bytes() != 1 {
1203 start_align = start_align.min(field.align.abi);
1204 break;
1205 }
1206 }
1207 size = cmp::max(size, st.size);
1208 align = align.max(st.align);
1209 Ok(st)
1210 })
1211 .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1212
1213 // Align the maximum variant size to the largest alignment.
1214 size = size.align_to(align.abi);
1215
1216 if size.bytes() >= dl.obj_size_bound() {
1217 return Err(LayoutError::SizeOverflow(ty));
1218 }
1219
1220 let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
1221 if typeck_ity < min_ity {
1222 // It is a bug if Layout decided on a greater discriminant size than typeck for
1223 // some reason at this point (based on values discriminant can take on). Mostly
1224 // because this discriminant will be loaded, and then stored into variable of
1225 // type calculated by typeck. Consider such case (a bug): typeck decided on
1226 // byte-sized discriminant, but layout thinks we need a 16-bit to store all
1227 // discriminant values. That would be a bug, because then, in codegen, in order
1228 // to store this 16-bit discriminant into 8-bit sized temporary some of the
1229 // space necessary to represent would have to be discarded (or layout is wrong
1230 // on thinking it needs 16 bits)
1231 bug!(
1232 "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1233 min_ity,
1234 typeck_ity
1235 );
1236 // However, it is fine to make discr type however large (as an optimisation)
1237 // after this point – we’ll just truncate the value we load in codegen.
1238 }
1239
1240 // Check to see if we should use a different type for the
1241 // discriminant. We can safely use a type with the same size
1242 // as the alignment of the first field of each variant.
1243 // We increase the size of the discriminant to avoid LLVM copying
1244 // padding when it doesn't need to. This normally causes unaligned
1245 // load/stores and excessive memcpy/memset operations. By using a
1246 // bigger integer size, LLVM can be sure about its contents and
1247 // won't be so conservative.
1248
1249 // Use the initial field alignment
1250 let mut ity = if def.repr.c() || def.repr.int.is_some() {
1251 min_ity
1252 } else {
1253 Integer::for_align(dl, start_align).unwrap_or(min_ity)
1254 };
1255
1256 // If the alignment is not larger than the chosen discriminant size,
1257 // don't use the alignment as the final size.
1258 if ity <= min_ity {
1259 ity = min_ity;
1260 } else {
1261 // Patch up the variants' first few fields.
1262 let old_ity_size = min_ity.size();
1263 let new_ity_size = ity.size();
1264 for variant in &mut layout_variants {
1265 match variant.fields {
1266 FieldsShape::Arbitrary { ref mut offsets, .. } => {
1267 for i in offsets {
1268 if *i <= old_ity_size {
1269 assert_eq!(*i, old_ity_size);
1270 *i = new_ity_size;
1271 }
1272 }
1273 // We might be making the struct larger.
1274 if variant.size <= old_ity_size {
1275 variant.size = new_ity_size;
1276 }
1277 }
1278 _ => bug!(),
1279 }
1280 }
1281 }
1282
1283 let tag_mask = ity.size().unsigned_int_max();
1284 let tag = Scalar {
1285 value: Int(ity, signed),
1286 valid_range: WrappingRange {
1287 start: (min as u128 & tag_mask),
1288 end: (max as u128 & tag_mask),
1289 },
1290 };
1291 let mut abi = Abi::Aggregate { sized: true };
1292 if tag.value.size(dl) == size {
1293 abi = Abi::Scalar(tag);
1294 } else {
1295 // Try to use a ScalarPair for all tagged enums.
1296 let mut common_prim = None;
1297 for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
1298 let offsets = match layout_variant.fields {
1299 FieldsShape::Arbitrary { ref offsets, .. } => offsets,
1300 _ => bug!(),
1301 };
1302 let mut fields =
1303 iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
1304 let (field, offset) = match (fields.next(), fields.next()) {
1305 (None, None) => continue,
1306 (Some(pair), None) => pair,
1307 _ => {
1308 common_prim = None;
1309 break;
1310 }
1311 };
1312 let prim = match field.abi {
1313 Abi::Scalar(scalar) => scalar.value,
1314 _ => {
1315 common_prim = None;
1316 break;
1317 }
1318 };
1319 if let Some(pair) = common_prim {
1320 // This is pretty conservative. We could go fancier
1321 // by conflating things like i32 and u32, or even
1322 // realising that (u8, u8) could just cohabit with
1323 // u16 or even u32.
1324 if pair != (prim, offset) {
1325 common_prim = None;
1326 break;
1327 }
1328 } else {
1329 common_prim = Some((prim, offset));
1330 }
1331 }
1332 if let Some((prim, offset)) = common_prim {
1333 let pair = self.scalar_pair(tag, scalar_unit(prim));
1334 let pair_offsets = match pair.fields {
1335 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1336 assert_eq!(memory_index, &[0, 1]);
1337 offsets
1338 }
1339 _ => bug!(),
1340 };
1341 if pair_offsets[0] == Size::ZERO
1342 && pair_offsets[1] == *offset
1343 && align == pair.align
1344 && size == pair.size
1345 {
1346 // We can use `ScalarPair` only when it matches our
1347 // already computed layout (including `#[repr(C)]`).
1348 abi = pair.abi;
1349 }
1350 }
1351 }
1352
1353 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1354 abi = Abi::Uninhabited;
1355 }
1356
1357 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
1358
1359 let tagged_layout = Layout {
1360 variants: Variants::Multiple {
1361 tag,
1362 tag_encoding: TagEncoding::Direct,
1363 tag_field: 0,
1364 variants: layout_variants,
1365 },
1366 fields: FieldsShape::Arbitrary {
1367 offsets: vec![Size::ZERO],
1368 memory_index: vec![0],
1369 },
1370 largest_niche,
1371 abi,
1372 align,
1373 size,
1374 };
1375
1376 let best_layout = match (tagged_layout, niche_filling_layout) {
1377 (tagged_layout, Some(niche_filling_layout)) => {
1378 // Pick the smaller layout; otherwise,
1379 // pick the layout with the larger niche; otherwise,
1380 // pick tagged as it has simpler codegen.
1381 cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
1382 let niche_size = layout.largest_niche.map_or(0, |n| n.available(dl));
1383 (layout.size, cmp::Reverse(niche_size))
1384 })
1385 }
1386 (tagged_layout, None) => tagged_layout,
1387 };
1388
1389 tcx.intern_layout(best_layout)
1390 }
1391
1392 // Types with no meaningful known layout.
1393 ty::Projection(_) | ty::Opaque(..) => {
1394 // NOTE(eddyb) `layout_of` query should've normalized these away,
1395 // if that was possible, so there's no reason to try again here.
1396 return Err(LayoutError::Unknown(ty));
1397 }
1398
1399 ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
1400 bug!("Layout::compute: unexpected type `{}`", ty)
1401 }
1402
1403 ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
1404 return Err(LayoutError::Unknown(ty));
1405 }
1406 })
1407 }
1408 }
1409
/// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
#[derive(Clone, Debug, PartialEq)]
enum SavedLocalEligibility {
    /// Not yet seen in any variant; the initial state for every local.
    Unassigned,
    /// Overlap-eligible so far: seen in exactly one variant.
    Assigned(VariantIdx),
    // FIXME: Use newtype_index so we aren't wasting bytes
    /// Overlap-ineligible: stored in the generator's shared prefix.
    /// The `Option<u32>` is the local's slot within the promoted prefix
    /// fields, filled in once the prefix order has been decided.
    Ineligible(Option<u32>),
}
1418
1419 // When laying out generators, we divide our saved local fields into two
1420 // categories: overlap-eligible and overlap-ineligible.
1421 //
1422 // Those fields which are ineligible for overlap go in a "prefix" at the
1423 // beginning of the layout, and always have space reserved for them.
1424 //
1425 // Overlap-eligible fields are only assigned to one variant, so we lay
1426 // those fields out for each variant and put them right after the
1427 // prefix.
1428 //
1429 // Finally, in the layout details, we point to the fields from the
1430 // variants they are assigned to. It is possible for some fields to be
1431 // included in multiple variants. No field ever "moves around" in the
1432 // layout; its offset is always the same.
1433 //
1434 // Also included in the layout are the upvars and the discriminant.
1435 // These are included as fields on the "outer" layout; they are not part
1436 // of any variant.
1437 impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    /// Compute the eligibility and assignment of each local.
    ///
    /// Returns the set of saved locals that are ineligible for overlap
    /// (these are "promoted" into the generator's shared prefix), plus
    /// the final per-local `SavedLocalEligibility` assignment.
    fn generator_saved_local_eligibility(
        &self,
        info: &GeneratorLayout<'tcx>,
    ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
        use SavedLocalEligibility::*;

        // Every saved local starts out unassigned.
        let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
            IndexVec::from_elem_n(Unassigned, info.field_tys.len());

        // The saved locals not eligible for overlap. These will get
        // "promoted" to the prefix of our generator.
        let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());

        // Figure out which of our saved locals are fields in only
        // one variant. The rest are deemed ineligible for overlap.
        for (variant_index, fields) in info.variant_fields.iter_enumerated() {
            for local in fields {
                match assignments[*local] {
                    Unassigned => {
                        assignments[*local] = Assigned(variant_index);
                    }
                    Assigned(idx) => {
                        // We've already seen this local at another suspension
                        // point, so it is no longer a candidate.
                        trace!(
                            "removing local {:?} in >1 variant ({:?}, {:?})",
                            local,
                            variant_index,
                            idx
                        );
                        ineligible_locals.insert(*local);
                        assignments[*local] = Ineligible(None);
                    }
                    Ineligible(_) => {}
                }
            }
        }

        // Next, check every pair of eligible locals to see if they
        // conflict.
        for local_a in info.storage_conflicts.rows() {
            let conflicts_a = info.storage_conflicts.count(local_a);
            if ineligible_locals.contains(local_a) {
                continue;
            }

            for local_b in info.storage_conflicts.iter(local_a) {
                // local_a and local_b are storage live at the same time, therefore they
                // cannot overlap in the generator layout. The only way to guarantee
                // this is if they are in the same variant, or one is ineligible
                // (which means it is stored in every variant).
                if ineligible_locals.contains(local_b)
                    || assignments[local_a] == assignments[local_b]
                {
                    continue;
                }

                // If they conflict, we will choose one to make ineligible.
                // This is not always optimal; it's just a greedy heuristic that
                // seems to produce good results most of the time.
                // The local with the greater number of conflicts is removed,
                // since it is the harder one to pack together with others.
                let conflicts_b = info.storage_conflicts.count(local_b);
                let (remove, other) =
                    if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
                ineligible_locals.insert(remove);
                assignments[remove] = Ineligible(None);
                trace!("removing local {:?} due to conflict with {:?}", remove, other);
            }
        }

        // Count the number of variants in use. If only one of them, then it is
        // impossible to overlap any locals in our layout. In this case it's
        // always better to make the remaining locals ineligible, so we can
        // lay them out with the other locals in the prefix and eliminate
        // unnecessary padding bytes.
        {
            let mut used_variants = BitSet::new_empty(info.variant_fields.len());
            for assignment in &assignments {
                if let Assigned(idx) = assignment {
                    used_variants.insert(*idx);
                }
            }
            if used_variants.count() < 2 {
                for assignment in assignments.iter_mut() {
                    *assignment = Ineligible(None);
                }
                ineligible_locals.insert_all();
            }
        }

        // Write down the order of our locals that will be promoted to the prefix.
        // Each ineligible local receives its slot index within the prefix here.
        {
            for (idx, local) in ineligible_locals.iter().enumerate() {
                assignments[local] = Ineligible(Some(idx as u32));
            }
        }
        debug!("generator saved local assignments: {:?}", assignments);

        (ineligible_locals, assignments)
    }
1538
    /// Compute the full generator layout.
    ///
    /// The layout consists of a shared prefix (upvars, the discriminant
    /// tag at `tag_field`, and all overlap-ineligible "promoted" saved
    /// locals), followed by per-variant layouts of the remaining,
    /// overlap-eligible fields.
    fn generator_layout(
        &self,
        ty: Ty<'tcx>,
        def_id: hir::def_id::DefId,
        substs: SubstsRef<'tcx>,
    ) -> Result<&'tcx Layout, LayoutError<'tcx>> {
        use SavedLocalEligibility::*;
        let tcx = self.tcx;
        // Instantiate each saved field type with this generator's substs.
        let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);

        let info = match tcx.generator_layout(def_id) {
            None => return Err(LayoutError::Unknown(ty)),
            Some(info) => info,
        };
        let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);

        // Build a prefix layout, including "promoting" all ineligible
        // locals as part of the prefix. We compute the layout of all of
        // these fields at once to get optimal packing.
        let tag_index = substs.as_generator().prefix_tys().count();

        // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
        let max_discr = (info.variant_fields.len() - 1) as u128;
        let discr_int = Integer::fit_unsigned(max_discr);
        let discr_int_ty = discr_int.to_ty(tcx, false);
        let tag = Scalar {
            value: Primitive::Int(discr_int, false),
            valid_range: WrappingRange { start: 0, end: max_discr },
        };
        let tag_layout = self.tcx.intern_layout(Layout::scalar(self, tag));
        let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };

        // Promoted locals are wrapped in `MaybeUninit` — presumably because
        // they need not be initialized in every variant they appear in.
        let promoted_layouts = ineligible_locals
            .iter()
            .map(|local| subst_field(info.field_tys[local]))
            .map(|ty| tcx.mk_maybe_uninit(ty))
            .map(|ty| self.layout_of(ty));
        // Prefix field order: upvars, then the tag, then the promoted locals.
        let prefix_layouts = substs
            .as_generator()
            .prefix_tys()
            .map(|ty| self.layout_of(ty))
            .chain(iter::once(Ok(tag_layout)))
            .chain(promoted_layouts)
            .collect::<Result<Vec<_>, _>>()?;
        let prefix = self.univariant_uninterned(
            ty,
            &prefix_layouts,
            &ReprOptions::default(),
            StructKind::AlwaysSized,
        )?;

        let (prefix_size, prefix_align) = (prefix.size, prefix.align);

        // Split the prefix layout into the "outer" fields (upvars and
        // discriminant) and the "promoted" fields. Promoted fields will
        // get included in each variant that requested them in
        // GeneratorLayout.
        debug!("prefix = {:#?}", prefix);
        let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
            FieldsShape::Arbitrary { mut offsets, memory_index } => {
                let mut inverse_memory_index = invert_mapping(&memory_index);

                // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
                // "outer" and "promoted" fields respectively.
                let b_start = (tag_index + 1) as u32;
                let offsets_b = offsets.split_off(b_start as usize);
                let offsets_a = offsets;

                // Disentangle the "a" and "b" components of `inverse_memory_index`
                // by preserving the order but keeping only one disjoint "half" each.
                // FIXME(eddyb) build a better abstraction for permutations, if possible.
                let inverse_memory_index_b: Vec<_> =
                    inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
                inverse_memory_index.retain(|&i| i < b_start);
                let inverse_memory_index_a = inverse_memory_index;

                // Since `inverse_memory_index_{a,b}` each only refer to their
                // respective fields, they can be safely inverted
                let memory_index_a = invert_mapping(&inverse_memory_index_a);
                let memory_index_b = invert_mapping(&inverse_memory_index_b);

                let outer_fields =
                    FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
                (outer_fields, offsets_b, memory_index_b)
            }
            _ => bug!(),
        };

        let mut size = prefix.size;
        let mut align = prefix.align;
        let variants = info
            .variant_fields
            .iter_enumerated()
            .map(|(index, variant_fields)| {
                // Only include overlap-eligible fields when we compute our variant layout.
                let variant_only_tys = variant_fields
                    .iter()
                    .filter(|local| match assignments[**local] {
                        Unassigned => bug!(),
                        Assigned(v) if v == index => true,
                        Assigned(_) => bug!("assignment does not match variant"),
                        Ineligible(_) => false,
                    })
                    .map(|local| subst_field(info.field_tys[*local]));

                // Lay out the eligible fields after the shared prefix.
                let mut variant = self.univariant_uninterned(
                    ty,
                    &variant_only_tys
                        .map(|ty| self.layout_of(ty))
                        .collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::Prefixed(prefix_size, prefix_align.abi),
                )?;
                variant.variants = Variants::Single { index };

                let (offsets, memory_index) = match variant.fields {
                    FieldsShape::Arbitrary { offsets, memory_index } => (offsets, memory_index),
                    _ => bug!(),
                };

                // Now, stitch the promoted and variant-only fields back together in
                // the order they are mentioned by our GeneratorLayout.
                // Because we only use some subset (that can differ between variants)
                // of the promoted fields, we can't just pick those elements of the
                // `promoted_memory_index` (as we'd end up with gaps).
                // So instead, we build an "inverse memory_index", as if all of the
                // promoted fields were being used, but leave the elements not in the
                // subset as `INVALID_FIELD_IDX`, which we can filter out later to
                // obtain a valid (bijective) mapping.
                const INVALID_FIELD_IDX: u32 = !0;
                let mut combined_inverse_memory_index =
                    vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
                let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
                let combined_offsets = variant_fields
                    .iter()
                    .enumerate()
                    .map(|(i, local)| {
                        let (offset, memory_index) = match assignments[*local] {
                            Unassigned => bug!(),
                            Assigned(_) => {
                                // Eligible fields follow the promoted ones in
                                // the combined memory index.
                                let (offset, memory_index) =
                                    offsets_and_memory_index.next().unwrap();
                                (offset, promoted_memory_index.len() as u32 + memory_index)
                            }
                            Ineligible(field_idx) => {
                                let field_idx = field_idx.unwrap() as usize;
                                (promoted_offsets[field_idx], promoted_memory_index[field_idx])
                            }
                        };
                        combined_inverse_memory_index[memory_index as usize] = i as u32;
                        offset
                    })
                    .collect();

                // Remove the unused slots and invert the mapping to obtain the
                // combined `memory_index` (also see previous comment).
                combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
                let combined_memory_index = invert_mapping(&combined_inverse_memory_index);

                variant.fields = FieldsShape::Arbitrary {
                    offsets: combined_offsets,
                    memory_index: combined_memory_index,
                };

                size = size.max(variant.size);
                align = align.max(variant.align);
                Ok(variant)
            })
            .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

        size = size.align_to(align.abi);

        let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited())
        {
            Abi::Uninhabited
        } else {
            Abi::Aggregate { sized: true }
        };

        let layout = tcx.intern_layout(Layout {
            variants: Variants::Multiple {
                tag,
                tag_encoding: TagEncoding::Direct,
                tag_field: tag_index,
                variants,
            },
            fields: outer_fields,
            abi,
            largest_niche: prefix.largest_niche,
            size,
            align,
        });
        debug!("generator layout ({:?}): {:#?}", ty, layout);
        Ok(layout)
    }
1735
1736 /// This is invoked by the `layout_of` query to record the final
1737 /// layout of each type.
1738 #[inline(always)]
record_layout_for_printing(&self, layout: TyAndLayout<'tcx>)1739 fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
1740 // If we are running with `-Zprint-type-sizes`, maybe record layouts
1741 // for dumping later.
1742 if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1743 self.record_layout_for_printing_outlined(layout)
1744 }
1745 }
1746
record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>)1747 fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
1748 // Ignore layouts that are done with non-empty environments or
1749 // non-monomorphic layouts, as the user only wants to see the stuff
1750 // resulting from the final codegen session.
1751 if layout.ty.definitely_has_param_types_or_consts(self.tcx)
1752 || !self.param_env.caller_bounds().is_empty()
1753 {
1754 return;
1755 }
1756
1757 // (delay format until we actually need it)
1758 let record = |kind, packed, opt_discr_size, variants| {
1759 let type_desc = format!("{:?}", layout.ty);
1760 self.tcx.sess.code_stats.record_type_size(
1761 kind,
1762 type_desc,
1763 layout.align.abi,
1764 layout.size,
1765 packed,
1766 opt_discr_size,
1767 variants,
1768 );
1769 };
1770
1771 let adt_def = match *layout.ty.kind() {
1772 ty::Adt(ref adt_def, _) => {
1773 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1774 adt_def
1775 }
1776
1777 ty::Closure(..) => {
1778 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1779 record(DataTypeKind::Closure, false, None, vec![]);
1780 return;
1781 }
1782
1783 _ => {
1784 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1785 return;
1786 }
1787 };
1788
1789 let adt_kind = adt_def.adt_kind();
1790 let adt_packed = adt_def.repr.pack.is_some();
1791
1792 let build_variant_info = |n: Option<Ident>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
1793 let mut min_size = Size::ZERO;
1794 let field_info: Vec<_> = flds
1795 .iter()
1796 .enumerate()
1797 .map(|(i, &name)| {
1798 let field_layout = layout.field(self, i);
1799 let offset = layout.fields.offset(i);
1800 let field_end = offset + field_layout.size;
1801 if min_size < field_end {
1802 min_size = field_end;
1803 }
1804 FieldInfo {
1805 name: name.to_string(),
1806 offset: offset.bytes(),
1807 size: field_layout.size.bytes(),
1808 align: field_layout.align.abi.bytes(),
1809 }
1810 })
1811 .collect();
1812
1813 VariantInfo {
1814 name: n.map(|n| n.to_string()),
1815 kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
1816 align: layout.align.abi.bytes(),
1817 size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
1818 fields: field_info,
1819 }
1820 };
1821
1822 match layout.variants {
1823 Variants::Single { index } => {
1824 if !adt_def.variants.is_empty() && layout.fields != FieldsShape::Primitive {
1825 debug!(
1826 "print-type-size `{:#?}` variant {}",
1827 layout, adt_def.variants[index].ident
1828 );
1829 let variant_def = &adt_def.variants[index];
1830 let fields: Vec<_> = variant_def.fields.iter().map(|f| f.ident.name).collect();
1831 record(
1832 adt_kind.into(),
1833 adt_packed,
1834 None,
1835 vec![build_variant_info(Some(variant_def.ident), &fields, layout)],
1836 );
1837 } else {
1838 // (This case arises for *empty* enums; so give it
1839 // zero variants.)
1840 record(adt_kind.into(), adt_packed, None, vec![]);
1841 }
1842 }
1843
1844 Variants::Multiple { tag, ref tag_encoding, .. } => {
1845 debug!(
1846 "print-type-size `{:#?}` adt general variants def {}",
1847 layout.ty,
1848 adt_def.variants.len()
1849 );
1850 let variant_infos: Vec<_> = adt_def
1851 .variants
1852 .iter_enumerated()
1853 .map(|(i, variant_def)| {
1854 let fields: Vec<_> =
1855 variant_def.fields.iter().map(|f| f.ident.name).collect();
1856 build_variant_info(
1857 Some(variant_def.ident),
1858 &fields,
1859 layout.for_variant(self, i),
1860 )
1861 })
1862 .collect();
1863 record(
1864 adt_kind.into(),
1865 adt_packed,
1866 match tag_encoding {
1867 TagEncoding::Direct => Some(tag.value.size(self)),
1868 _ => None,
1869 },
1870 variant_infos,
1871 );
1872 }
1873 }
1874 }
1875 }
1876
/// Type size "skeleton", i.e., the only information determining a type's size.
/// While this is conservative, (aside from constant sizes, only pointers,
/// newtypes thereof and null pointer optimized enums are allowed), it is
/// enough to statically check common use cases of transmute.
#[derive(Copy, Clone, Debug)]
pub enum SizeSkeleton<'tcx> {
    /// Any statically computable Layout.
    Known(Size),

    /// A potentially-fat pointer.
    Pointer {
        /// If true, this pointer is never null.
        non_zero: bool,
        /// The type which determines the unsized metadata, if any,
        /// of this pointer. Either a type parameter or a projection
        /// depending on one, with regions erased.
        tail: Ty<'tcx>,
    },
}
1896
impl<'tcx> SizeSkeleton<'tcx> {
    /// Computes the size skeleton of `ty`.
    ///
    /// If `tcx.layout_of` succeeds, the size is statically known and
    /// `SizeSkeleton::Known` is returned. Otherwise, the layout error is
    /// recoverable only for the pointer-shaped cases handled below
    /// (pointers/references to a generic tail, newtypes thereof, and
    /// two-variant enums eligible for the null-pointer optimization);
    /// any other shape re-reports the original `LayoutError`.
    pub fn compute(
        ty: Ty<'tcx>,
        tcx: TyCtxt<'tcx>,
        param_env: ty::ParamEnv<'tcx>,
    ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
        debug_assert!(!ty.has_infer_types_or_consts());

        // First try computing a static layout.
        let err = match tcx.layout_of(param_env.and(ty)) {
            Ok(layout) => {
                return Ok(SizeSkeleton::Known(layout.size));
            }
            // Keep the error around: it is returned whenever the fallback
            // cases below cannot produce a skeleton either.
            Err(err) => err,
        };

        match *ty.kind() {
            ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                // References are always non-null; raw pointers may be null.
                let non_zero = !ty.is_unsafe_ptr();
                let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
                match tail.kind() {
                    ty::Param(_) | ty::Projection(_) => {
                        // Layout can only have failed because the pointee's
                        // metadata depends on a generic tail.
                        debug_assert!(tail.definitely_has_param_types_or_consts(tcx));
                        Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
                    }
                    _ => bug!(
                        "SizeSkeleton::compute({}): layout errored ({}), yet \
                              tail `{}` is not a type parameter or a projection",
                        ty,
                        err,
                        tail
                    ),
                }
            }

            ty::Adt(def, substs) => {
                // Only newtypes and enums w/ nullable pointer optimization.
                if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
                    return Err(err);
                }

                // Get a zero-sized variant or a pointer newtype.
                // Returns `Ok(None)` if all fields are zero-sized,
                // `Ok(Some(ptr))` if exactly one field is a pointer skeleton
                // (and the rest are zero-sized), and `Err` otherwise.
                let zero_or_ptr_variant = |i| {
                    let i = VariantIdx::new(i);
                    let fields = def.variants[i]
                        .fields
                        .iter()
                        .map(|field| SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env));
                    let mut ptr = None;
                    for field in fields {
                        let field = field?;
                        match field {
                            SizeSkeleton::Known(size) => {
                                if size.bytes() > 0 {
                                    return Err(err);
                                }
                            }
                            SizeSkeleton::Pointer { .. } => {
                                if ptr.is_some() {
                                    return Err(err);
                                }
                                ptr = Some(field);
                            }
                        }
                    }
                    Ok(ptr)
                };

                let v0 = zero_or_ptr_variant(0)?;
                // Newtype.
                if def.variants.len() == 1 {
                    if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
                        return Ok(SizeSkeleton::Pointer {
                            // A `rustc_layout_scalar_valid_range` attribute
                            // excluding zero also makes the pointer non-null
                            // (e.g. `NonNull<T>`).
                            non_zero: non_zero
                                || match tcx.layout_scalar_valid_range(def.did) {
                                    (Bound::Included(start), Bound::Unbounded) => start > 0,
                                    (Bound::Included(start), Bound::Included(end)) => {
                                        0 < start && start < end
                                    }
                                    _ => false,
                                },
                            tail,
                        });
                    } else {
                        return Err(err);
                    }
                }

                let v1 = zero_or_ptr_variant(1)?;
                // Nullable pointer enum optimization.
                // One variant must be all-zero-sized and the other a single
                // non-null pointer; the result is nullable, hence `non_zero: false`.
                match (v0, v1) {
                    (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
                    | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
                        Ok(SizeSkeleton::Pointer { non_zero: false, tail })
                    }
                    _ => Err(err),
                }
            }

            ty::Projection(_) | ty::Opaque(..) => {
                // Retry after normalization, but only if it made progress;
                // otherwise we would recurse forever.
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    Err(err)
                } else {
                    SizeSkeleton::compute(normalized, tcx, param_env)
                }
            }

            _ => Err(err),
        }
    }

    /// Whether two skeletons are provably the same size: equal known sizes,
    /// or pointers with the same unsized tail (so the same metadata, if any).
    pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
        match (self, other) {
            (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
            (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
                a == b
            }
            _ => false,
        }
    }
}
2019
/// Context that can provide a `TyCtxt` (in addition to a data layout).
pub trait HasTyCtxt<'tcx>: HasDataLayout {
    fn tcx(&self) -> TyCtxt<'tcx>;
}
2023
/// Context that carries a `ParamEnv`, used to parameterize layout queries.
pub trait HasParamEnv<'tcx> {
    fn param_env(&self) -> ty::ParamEnv<'tcx>;
}
2027
// `TyCtxt` derefs to its target data layout.
impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        &self.data_layout
    }
}
2034
// The target spec is read off the session.
impl<'tcx> HasTargetSpec for TyCtxt<'tcx> {
    fn target_spec(&self) -> &Target {
        &self.sess.target
    }
}
2040
// `TyCtxt` is trivially its own `TyCtxt` (it is `Copy`).
impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
    #[inline]
    fn tcx(&self) -> TyCtxt<'tcx> {
        *self
    }
}
2047
// `TyCtxtAt` (a `TyCtxt` paired with a `Span`) forwards to the same data layout.
impl<'tcx> HasDataLayout for ty::query::TyCtxtAt<'tcx> {
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        &self.data_layout
    }
}
2054
// Same as for `TyCtxt`: the target spec lives on the session.
impl<'tcx> HasTargetSpec for ty::query::TyCtxtAt<'tcx> {
    fn target_spec(&self) -> &Target {
        &self.sess.target
    }
}
2060
// `TyCtxtAt` derefs to `TyCtxt`, dropping the span.
impl<'tcx> HasTyCtxt<'tcx> for ty::query::TyCtxtAt<'tcx> {
    #[inline]
    fn tcx(&self) -> TyCtxt<'tcx> {
        **self
    }
}
2067
// `LayoutCx` carries its own `ParamEnv` field.
impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
    fn param_env(&self) -> ty::ParamEnv<'tcx> {
        self.param_env
    }
}
2073
// `LayoutCx` forwards the data layout to its inner context.
impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
    fn data_layout(&self) -> &TargetDataLayout {
        self.tcx.data_layout()
    }
}
2079
// `LayoutCx` forwards the target spec to its inner context.
impl<'tcx, T: HasTargetSpec> HasTargetSpec for LayoutCx<'tcx, T> {
    fn target_spec(&self) -> &Target {
        self.tcx.target_spec()
    }
}
2085
// `LayoutCx` forwards the `TyCtxt` to its inner context.
impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.tcx.tcx()
    }
}
2091
/// Abstracts over `T` itself and `Result<T, E>`, so that layout/ABI queries
/// can either propagate errors to the caller or (e.g. in codegen contexts,
/// which turn errors into fatal errors/ICEs) hand back a bare `T`.
pub trait MaybeResult<T> {
    type Error;

    fn from(x: Result<T, Self::Error>) -> Self;
    fn to_result(self) -> Result<T, Self::Error>;
}
2098
// Infallible case: the error type is the never type `!`, so the `Ok(x)`
// pattern in the argument position is irrefutable.
impl<T> MaybeResult<T> for T {
    type Error = !;

    fn from(Ok(x): Result<T, Self::Error>) -> Self {
        x
    }
    fn to_result(self) -> Result<T, Self::Error> {
        Ok(self)
    }
}
2109
// Fallible case: `Result` is already in the right shape, both conversions
// are identities.
impl<T, E> MaybeResult<T> for Result<T, E> {
    type Error = E;

    fn from(x: Result<T, Self::Error>) -> Self {
        x
    }
    fn to_result(self) -> Result<T, Self::Error> {
        self
    }
}
2120
/// `rustc_target`'s `TyAndLayout` instantiated with this crate's `Ty`.
pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
2122
2123 /// Trait for contexts that want to be able to compute layouts of types.
2124 /// This automatically gives access to `LayoutOf`, through a blanket `impl`.
/// Trait for contexts that want to be able to compute layouts of types.
/// This automatically gives access to `LayoutOf`, through a blanket `impl`.
pub trait LayoutOfHelpers<'tcx>: HasDataLayout + HasTyCtxt<'tcx> + HasParamEnv<'tcx> {
    /// The `TyAndLayout`-wrapping type (or `TyAndLayout` itself), which will be
    /// returned from `layout_of` (see also `handle_layout_err`).
    type LayoutOfResult: MaybeResult<TyAndLayout<'tcx>>;

    /// `Span` to use for `tcx.at(span)`, from `layout_of`.
    // FIXME(eddyb) perhaps make this mandatory to get contexts to track it better?
    #[inline]
    fn layout_tcx_at_span(&self) -> Span {
        DUMMY_SP
    }

    /// Helper used for `layout_of`, to adapt `tcx.layout_of(...)` into a
    /// `Self::LayoutOfResult` (which does not need to be a `Result<...>`).
    ///
    /// Most `impl`s, which propagate `LayoutError`s, should simply return `err`,
    /// but this hook allows e.g. codegen to return only `TyAndLayout` from its
    /// `cx.layout_of(...)`, without any `Result<...>` around it to deal with
    /// (and any `LayoutError`s are turned into fatal errors or ICEs).
    fn handle_layout_err(
        &self,
        err: LayoutError<'tcx>,
        span: Span,
        ty: Ty<'tcx>,
    ) -> <Self::LayoutOfResult as MaybeResult<TyAndLayout<'tcx>>>::Error;
}
2151
2152 /// Blanket extension trait for contexts that can compute layouts of types.
2153 pub trait LayoutOf<'tcx>: LayoutOfHelpers<'tcx> {
2154 /// Computes the layout of a type. Note that this implicitly
2155 /// executes in "reveal all" mode, and will normalize the input type.
2156 #[inline]
layout_of(&self, ty: Ty<'tcx>) -> Self::LayoutOfResult2157 fn layout_of(&self, ty: Ty<'tcx>) -> Self::LayoutOfResult {
2158 self.spanned_layout_of(ty, DUMMY_SP)
2159 }
2160
2161 /// Computes the layout of a type, at `span`. Note that this implicitly
2162 /// executes in "reveal all" mode, and will normalize the input type.
2163 // FIXME(eddyb) avoid passing information like this, and instead add more
2164 // `TyCtxt::at`-like APIs to be able to do e.g. `cx.at(span).layout_of(ty)`.
2165 #[inline]
spanned_layout_of(&self, ty: Ty<'tcx>, span: Span) -> Self::LayoutOfResult2166 fn spanned_layout_of(&self, ty: Ty<'tcx>, span: Span) -> Self::LayoutOfResult {
2167 let span = if !span.is_dummy() { span } else { self.layout_tcx_at_span() };
2168 let tcx = self.tcx().at(span);
2169
2170 MaybeResult::from(
2171 tcx.layout_of(self.param_env().and(ty))
2172 .map_err(|err| self.handle_layout_err(err, span, ty)),
2173 )
2174 }
2175 }
2176
// Blanket impl: implementing `LayoutOfHelpers` is all that's needed for `LayoutOf`.
impl<C: LayoutOfHelpers<'tcx>> LayoutOf<'tcx> for C {}
2178
// The in-crate layout context propagates `LayoutError`s unchanged.
impl LayoutOfHelpers<'tcx> for LayoutCx<'tcx, TyCtxt<'tcx>> {
    type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;

    #[inline]
    fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
        err
    }
}
2187
// Same as above, but additionally tracks the span carried by `TyCtxtAt`.
impl LayoutOfHelpers<'tcx> for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
    type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;

    #[inline]
    fn layout_tcx_at_span(&self) -> Span {
        self.tcx.span
    }

    #[inline]
    fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
        err
    }
}
2201
impl<'tcx, C> TyAbiInterface<'tcx, C> for Ty<'tcx>
where
    C: HasTyCtxt<'tcx> + HasParamEnv<'tcx>,
{
    /// Projects the layout of `this` to the layout of one of its variants.
    /// For `Variants::Single` enums whose single variant differs from the
    /// requested one, the requested variant must be uninhabited, and a
    /// synthetic uninhabited layout is produced for it.
    fn ty_and_layout_for_variant(
        this: TyAndLayout<'tcx>,
        cx: &C,
        variant_index: VariantIdx,
    ) -> TyAndLayout<'tcx> {
        let layout = match this.variants {
            Variants::Single { index }
                // If all variants but one are uninhabited, the variant layout is the enum layout.
                if index == variant_index &&
                // Don't confuse variants of uninhabited enums with the enum itself.
                // For more details see https://github.com/rust-lang/rust/issues/69763.
                this.fields != FieldsShape::Primitive =>
            {
                this.layout
            }

            Variants::Single { index } => {
                let tcx = cx.tcx();
                let param_env = cx.param_env();

                // Deny calling for_variant more than once for non-Single enums.
                if let Ok(original_layout) = tcx.layout_of(param_env.and(this.ty)) {
                    assert_eq!(original_layout.variants, Variants::Single { index });
                }

                let fields = match this.ty.kind() {
                    ty::Adt(def, _) if def.variants.is_empty() =>
                        bug!("for_variant called on zero-variant enum"),
                    ty::Adt(def, _) => def.variants[variant_index].fields.len(),
                    _ => bug!(),
                };
                // Synthesize an uninhabited layout for the requested variant.
                // `FieldsShape::Union` is used purely as an "all fields at
                // offset 0" placeholder; the `Abi::Uninhabited` makes the
                // exact field offsets irrelevant.
                tcx.intern_layout(Layout {
                    variants: Variants::Single { index: variant_index },
                    fields: match NonZeroUsize::new(fields) {
                        Some(fields) => FieldsShape::Union(fields),
                        None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
                    },
                    abi: Abi::Uninhabited,
                    largest_niche: None,
                    align: tcx.data_layout.i8_align,
                    size: Size::ZERO,
                })
            }

            Variants::Multiple { ref variants, .. } => &variants[variant_index],
        };

        assert_eq!(layout.variants, Variants::Single { index: variant_index });

        TyAndLayout { ty: this.ty, layout }
    }

    /// Returns the type-and-layout of field `i` of `this`, handling every
    /// kind of aggregate (fat pointers, arrays/slices, tuples, closures,
    /// generators, and ADTs, including the synthetic discriminant field).
    fn ty_and_layout_field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> TyAndLayout<'tcx> {
        // Either just a field type (layout computed below via `layout_of`),
        // or an already-complete `TyAndLayout` for cases where the layout
        // cannot be recomputed from the type alone (e.g. tags, thin pointers).
        enum TyMaybeWithLayout<'tcx> {
            Ty(Ty<'tcx>),
            TyAndLayout(TyAndLayout<'tcx>),
        }

        fn field_ty_or_layout(
            this: TyAndLayout<'tcx>,
            cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
            i: usize,
        ) -> TyMaybeWithLayout<'tcx> {
            let tcx = cx.tcx();
            // Builds the layout of an enum/generator tag from its scalar.
            let tag_layout = |tag: Scalar| -> TyAndLayout<'tcx> {
                let layout = Layout::scalar(cx, tag);
                TyAndLayout { layout: tcx.intern_layout(layout), ty: tag.value.to_ty(tcx) }
            };

            match *this.ty.kind() {
                ty::Bool
                | ty::Char
                | ty::Int(_)
                | ty::Uint(_)
                | ty::Float(_)
                | ty::FnPtr(_)
                | ty::Never
                | ty::FnDef(..)
                | ty::GeneratorWitness(..)
                | ty::Foreign(..)
                | ty::Dynamic(..) => bug!("TyAndLayout::field({:?}): not applicable", this),

                // Potentially-fat pointers.
                ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                    assert!(i < this.fields.count());

                    // Reuse the fat `*T` type as its own thin pointer data field.
                    // This provides information about, e.g., DST struct pointees
                    // (which may have no non-DST form), and will work as long
                    // as the `Abi` or `FieldsShape` is checked by users.
                    if i == 0 {
                        let nil = tcx.mk_unit();
                        let unit_ptr_ty = if this.ty.is_unsafe_ptr() {
                            tcx.mk_mut_ptr(nil)
                        } else {
                            tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
                        };

                        // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing
                        // the `Result` should always work because the type is
                        // always either `*mut ()` or `&'static mut ()`.
                        return TyMaybeWithLayout::TyAndLayout(TyAndLayout {
                            ty: this.ty,
                            ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
                        });
                    }

                    // Field 1 is the pointer metadata, determined by the
                    // pointee's unsized tail.
                    match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
                        ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
                        ty::Dynamic(_, _) => {
                            TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
                                tcx.lifetimes.re_static,
                                tcx.mk_array(tcx.types.usize, 3),
                            ))
                            /* FIXME: use actual fn pointers
                            Warning: naively computing the number of entries in the
                            vtable by counting the methods on the trait + methods on
                            all parent traits does not work, because some methods can
                            be not object safe and thus excluded from the vtable.
                            Increase this counter if you tried to implement this but
                            failed to do it without duplicating a lot of code from
                            other places in the compiler: 2
                            tcx.mk_tup(&[
                                tcx.mk_array(tcx.types.usize, 3),
                                tcx.mk_array(Option<fn()>),
                            ])
                            */
                        }
                        _ => bug!("TyAndLayout::field({:?}): not applicable", this),
                    }
                }

                // Arrays and slices.
                ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
                ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),

                // Tuples, generators and closures.
                // Closures delegate to their tupled upvars.
                ty::Closure(_, ref substs) => field_ty_or_layout(
                    TyAndLayout { ty: substs.as_closure().tupled_upvars_ty(), ..this },
                    cx,
                    i,
                ),

                ty::Generator(def_id, ref substs, _) => match this.variants {
                    // A specific generator state: index into that state's saved locals.
                    Variants::Single { index } => TyMaybeWithLayout::Ty(
                        substs
                            .as_generator()
                            .state_tys(def_id, tcx)
                            .nth(index.as_usize())
                            .unwrap()
                            .nth(i)
                            .unwrap(),
                    ),
                    // The full generator: either the discriminant tag or a prefix field.
                    Variants::Multiple { tag, tag_field, .. } => {
                        if i == tag_field {
                            return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
                        }
                        TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
                    }
                },

                ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i].expect_ty()),

                // ADTs.
                ty::Adt(def, substs) => {
                    match this.variants {
                        Variants::Single { index } => {
                            TyMaybeWithLayout::Ty(def.variants[index].fields[i].ty(tcx, substs))
                        }

                        // Discriminant field for enums (where applicable).
                        Variants::Multiple { tag, .. } => {
                            assert_eq!(i, 0);
                            return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
                        }
                    }
                }

                ty::Projection(_)
                | ty::Bound(..)
                | ty::Placeholder(..)
                | ty::Opaque(..)
                | ty::Param(_)
                | ty::Infer(_)
                | ty::Error(_) => bug!("TyAndLayout::field: unexpected type `{}`", this.ty),
            }
        }

        match field_ty_or_layout(this, cx, i) {
            // A bare type: its layout must be computable, since it is a field
            // of a layout we already have.
            TyMaybeWithLayout::Ty(field_ty) => {
                cx.tcx().layout_of(cx.param_env().and(field_ty)).unwrap_or_else(|e| {
                    bug!(
                        "failed to get layout for `{}`: {},\n\
                         despite it being a field (#{}) of an existing layout: {:#?}",
                        field_ty,
                        e,
                        i,
                        this
                    )
                })
            }
            TyMaybeWithLayout::TyAndLayout(field_layout) => field_layout,
        }
    }

    /// If `this` contains a pointer at byte `offset`, returns size/alignment/
    /// safety information about its pointee (used e.g. for LLVM attributes
    /// such as `noalias`/`dereferenceable`); otherwise returns `None`.
    fn ty_and_layout_pointee_info_at(
        this: TyAndLayout<'tcx>,
        cx: &C,
        offset: Size,
    ) -> Option<PointeeInfo> {
        let tcx = cx.tcx();
        let param_env = cx.param_env();

        // Function pointers live in the instruction address space on
        // targets that distinguish it (e.g. AVR).
        let addr_space_of_ty = |ty: Ty<'tcx>| {
            if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
        };

        let pointee_info = match *this.ty.kind() {
            ty::RawPtr(mt) if offset.bytes() == 0 => {
                tcx.layout_of(param_env.and(mt.ty)).ok().map(|layout| PointeeInfo {
                    size: layout.size,
                    align: layout.align.abi,
                    // `safe: None`: no aliasing guarantees for raw pointers.
                    safe: None,
                    address_space: addr_space_of_ty(mt.ty),
                })
            }
            ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
                tcx.layout_of(param_env.and(tcx.mk_fn_ptr(fn_sig))).ok().map(|layout| PointeeInfo {
                    size: layout.size,
                    align: layout.align.abi,
                    safe: None,
                    address_space: cx.data_layout().instruction_address_space,
                })
            }
            ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
                let address_space = addr_space_of_ty(ty);
                let kind = if tcx.sess.opts.optimize == OptLevel::No {
                    // Use conservative pointer kind if not optimizing. This saves us the
                    // Freeze/Unpin queries, and can save time in the codegen backend (noalias
                    // attributes in LLVM have compile-time cost even in unoptimized builds).
                    PointerKind::Shared
                } else {
                    match mt {
                        hir::Mutability::Not => {
                            if ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env()) {
                                PointerKind::Frozen
                            } else {
                                PointerKind::Shared
                            }
                        }
                        hir::Mutability::Mut => {
                            // References to self-referential structures should not be considered
                            // noalias, as another pointer to the structure can be obtained, that
                            // is not based-on the original reference. We consider all !Unpin
                            // types to be potentially self-referential here.
                            if ty.is_unpin(tcx.at(DUMMY_SP), cx.param_env()) {
                                PointerKind::UniqueBorrowed
                            } else {
                                PointerKind::Shared
                            }
                        }
                    }
                };

                tcx.layout_of(param_env.and(ty)).ok().map(|layout| PointeeInfo {
                    size: layout.size,
                    align: layout.align.abi,
                    safe: Some(kind),
                    address_space,
                })
            }

            _ => {
                // Aggregate case: recurse into the field that covers `offset`.
                let mut data_variant = match this.variants {
                    // Within the discriminant field, only the niche itself is
                    // always initialized, so we only check for a pointer at its
                    // offset.
                    //
                    // If the niche is a pointer, it's either valid (according
                    // to its type), or null (which the niche field's scalar
                    // validity range encodes).  This allows using
                    // `dereferenceable_or_null` for e.g., `Option<&T>`, and
                    // this will continue to work as long as we don't start
                    // using more niches than just null (e.g., the first page of
                    // the address space, or unaligned pointers).
                    Variants::Multiple {
                        tag_encoding: TagEncoding::Niche { dataful_variant, .. },
                        tag_field,
                        ..
                    } if this.fields.offset(tag_field) == offset => {
                        Some(this.for_variant(cx, dataful_variant))
                    }
                    _ => Some(this),
                };

                if let Some(variant) = data_variant {
                    // We're not interested in any unions.
                    if let FieldsShape::Union(_) = variant.fields {
                        data_variant = None;
                    }
                }

                let mut result = None;

                if let Some(variant) = data_variant {
                    // Find a field that fully contains the pointer-sized
                    // range starting at `offset`, and recurse into it.
                    let ptr_end = offset + Pointer.size(cx);
                    for i in 0..variant.fields.count() {
                        let field_start = variant.fields.offset(i);
                        if field_start <= offset {
                            let field = variant.field(cx, i);
                            result = field.to_result().ok().and_then(|field| {
                                if ptr_end <= field_start + field.size {
                                    // We found the right field, look inside it.
                                    let field_info =
                                        field.pointee_info_at(cx, offset - field_start);
                                    field_info
                                } else {
                                    None
                                }
                            });
                            if result.is_some() {
                                break;
                            }
                        }
                    }
                }

                // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
                if let Some(ref mut pointee) = result {
                    if let ty::Adt(def, _) = this.ty.kind() {
                        if def.is_box() && offset.bytes() == 0 {
                            pointee.safe = Some(PointerKind::UniqueOwned);
                        }
                    }
                }

                result
            }
        };

        debug!(
            "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
            offset,
            this.ty.kind(),
            pointee_info
        );

        pointee_info
    }
}
2556
impl<'tcx> ty::Instance<'tcx> {
    // NOTE(eddyb) this is private to avoid using it from outside of
    // `fn_abi_of_instance` - any other uses are either too high-level
    // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
    // or should go through `FnAbi` instead, to avoid losing any
    // adjustments `fn_abi_of_instance` might be performing.
    //
    // Computes the `PolyFnSig` used for ABI purposes: `FnDef` signatures
    // (with a vtable-shim `self` adjustment when applicable), closure
    // signatures with the env prepended, and the synthesized
    // `fn(Pin<&mut Self>, Resume) -> GeneratorState<Y, R>` for generators.
    fn fn_sig_for_fn_abi(&self, tcx: TyCtxt<'tcx>) -> ty::PolyFnSig<'tcx> {
        // FIXME(davidtwco,eddyb): A `ParamEnv` should be passed through to this function.
        let ty = self.ty(tcx, ty::ParamEnv::reveal_all());
        match *ty.kind() {
            ty::FnDef(..) => {
                // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
                // parameters unused if they show up in the signature, but not in the `mir::Body`
                // (i.e. due to being inside a projection that got normalized, see
                // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
                // track of a polymorphization `ParamEnv` to allow normalizing later.
                let mut sig = match *ty.kind() {
                    ty::FnDef(def_id, substs) => tcx
                        .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
                        .subst(tcx, substs),
                    _ => unreachable!(),
                };

                if let ty::InstanceDef::VtableShim(..) = self.def {
                    // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
                    sig = sig.map_bound(|mut sig| {
                        let mut inputs_and_output = sig.inputs_and_output.to_vec();
                        inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
                        sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
                        sig
                    });
                }
                sig
            }
            ty::Closure(def_id, substs) => {
                let sig = substs.as_closure().sig();

                // Append a fresh late-bound region for the closure environment
                // to the signature's existing bound vars.
                let bound_vars = tcx.mk_bound_variable_kinds(
                    sig.bound_vars()
                        .iter()
                        .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
                );
                let br = ty::BoundRegion {
                    var: ty::BoundVar::from_usize(bound_vars.len() - 1),
                    kind: ty::BoundRegionKind::BrEnv,
                };
                let env_region = ty::ReLateBound(ty::INNERMOST, br);
                let env_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap();

                // Prepend the environment (`self`) to the inputs.
                let sig = sig.skip_binder();
                ty::Binder::bind_with_vars(
                    tcx.mk_fn_sig(
                        iter::once(env_ty).chain(sig.inputs().iter().cloned()),
                        sig.output(),
                        sig.c_variadic,
                        sig.unsafety,
                        sig.abi,
                    ),
                    bound_vars,
                )
            }
            ty::Generator(_, substs, _) => {
                let sig = substs.as_generator().poly_sig();

                // As for closures: add a late-bound region for the environment.
                let bound_vars = tcx.mk_bound_variable_kinds(
                    sig.bound_vars()
                        .iter()
                        .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
                );
                let br = ty::BoundRegion {
                    var: ty::BoundVar::from_usize(bound_vars.len() - 1),
                    kind: ty::BoundRegionKind::BrEnv,
                };
                let env_region = ty::ReLateBound(ty::INNERMOST, br);
                let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);

                // The generator resumes via `Pin<&mut Self>`.
                let pin_did = tcx.require_lang_item(LangItem::Pin, None);
                let pin_adt_ref = tcx.adt_def(pin_did);
                let pin_substs = tcx.intern_substs(&[env_ty.into()]);
                let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);

                // And returns `GeneratorState<Yield, Return>`.
                let sig = sig.skip_binder();
                let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
                let state_adt_ref = tcx.adt_def(state_did);
                let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
                let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
                ty::Binder::bind_with_vars(
                    tcx.mk_fn_sig(
                        [env_ty, sig.resume_ty].iter(),
                        &ret_ty,
                        false,
                        hir::Unsafety::Normal,
                        rustc_target::spec::abi::Abi::Rust,
                    ),
                    bound_vars,
                )
            }
            _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
        }
    }
}
2658
/// Calculates whether a function's ABI can unwind or not.
///
/// This takes two primary parameters:
///
/// * `codegen_fn_attr_flags` - these are flags calculated as part of the
///   codegen attrs for a defined function. For function pointers this set of
///   flags is the empty set. This is only applicable for Rust-defined
///   functions, and generally isn't needed except for small optimizations where
///   we try to say a function which otherwise might look like it could unwind
///   doesn't actually unwind (such as for intrinsics and such).
///
/// * `abi` - this is the ABI that the function is defined with. This is the
///   primary factor for determining whether a function can unwind or not.
///
/// Note that in this case unwinding is not necessarily panicking in Rust. Rust
/// panics are implemented with unwinds on most platform (when
/// `-Cpanic=unwind`), but this also accounts for `-Cpanic=abort` build modes.
/// Notably unwinding is disallowed for most non-Rust ABIs unless it's
/// specifically in the name (e.g. `"C-unwind"`). Unwinding within each ABI is
/// defined for each ABI individually, but it always corresponds to some form of
/// stack-based unwinding (the exact mechanism of which varies
/// platform-by-platform).
///
/// Rust functions are classified whether or not they can unwind based on the
/// active "panic strategy". In other words Rust functions are considered to
/// unwind in `-Cpanic=unwind` mode and cannot unwind in `-Cpanic=abort` mode.
/// Note that Rust supports intermingling panic=abort and panic=unwind code, but
/// only if the final panic mode is panic=abort. In this scenario any code
/// previously compiled assuming that a function can unwind is still correct, it
/// just never happens to actually unwind at runtime.
///
/// This function's answer to whether or not a function can unwind is quite
/// impactful throughout the compiler. This affects things like:
///
/// * Calling a function which can't unwind means codegen simply ignores any
///   associated unwinding cleanup.
/// * Calling a function which can unwind from a function which can't unwind
///   causes the `abort_unwinding_calls` MIR pass to insert a landing pad that
///   aborts the process.
/// * This affects whether functions have the LLVM `nounwind` attribute, which
///   affects various optimizations and codegen.
///
/// FIXME: this is actually buggy with respect to Rust functions. Rust functions
/// compiled with `-Cpanic=unwind` and referenced from another crate compiled
/// with `-Cpanic=abort` will look like they can't unwind when in fact they
/// might (from a foreign exception or similar).
#[inline]
pub fn fn_can_unwind(
    tcx: TyCtxt<'tcx>,
    codegen_fn_attr_flags: CodegenFnAttrFlags,
    abi: SpecAbi,
) -> bool {
    // Special attribute for functions which can't unwind.
    if codegen_fn_attr_flags.contains(CodegenFnAttrFlags::NEVER_UNWIND) {
        return false;
    }

    // Otherwise if this isn't special then unwinding is generally determined by
    // the ABI of the function itself. ABIs like `C` have variants which also
    // specifically allow unwinding (`C-unwind`), but not all platform-specific
    // ABIs have such an option. Otherwise the only other thing here is Rust
    // itself, and those ABIs are determined by the panic strategy configured
    // for this compilation.
    //
    // Unfortunately at this time there's also another caveat. Rust [RFC
    // 2945][rfc] has been accepted and is in the process of being implemented
    // and stabilized. In this interim state we need to deal with historical
    // rustc behavior as well as plan for future rustc behavior.
    //
    // Historically functions declared with `extern "C"` were marked at the
    // codegen layer as `nounwind`. This happened regardless of `panic=unwind`
    // or not. This is UB for functions in `panic=unwind` mode that then
    // actually panic and unwind. Note that this behavior is true for both
    // externally declared functions as well as Rust-defined function.
    //
    // To fix this UB rustc would like to change in the future to catch unwinds
    // from function calls that may unwind within a Rust-defined `extern "C"`
    // function and forcibly abort the process, thereby respecting the
    // `nounwind` attribute emitted for `extern "C"`. This behavior change isn't
    // ready to roll out, so determining whether or not the `C` family of ABIs
    // unwinds is conditional not only on their definition but also whether the
    // `#![feature(c_unwind)]` feature gate is active.
    //
    // Note that this means that unlike historical compilers rustc now, by
    // default, unconditionally thinks that the `C` ABI may unwind. This will
    // prevent some optimization opportunities, however, so we try to scope this
    // change and only assume that `C` unwinds with `panic=unwind` (as opposed
    // to `panic=abort`).
    //
    // Eventually the check against `c_unwind` here will ideally get removed and
    // this'll be a little cleaner as it'll be a straightforward check of the
    // ABI.
    //
    // [rfc]: https://github.com/rust-lang/rfcs/blob/master/text/2945-c-unwind-abi.md
    use SpecAbi::*;
    match abi {
        C { unwind } | Stdcall { unwind } | System { unwind } | Thiscall { unwind } => {
            unwind
                || (!tcx.features().c_unwind && tcx.sess.panic_strategy() == PanicStrategy::Unwind)
        }
        Cdecl
        | Fastcall
        | Vectorcall
        | Aapcs
        | Win64
        | SysV64
        | PtxKernel
        | Msp430Interrupt
        | X86Interrupt
        | AmdGpuKernel
        | EfiApi
        | AvrInterrupt
        | AvrNonBlockingInterrupt
        | CCmseNonSecureCall
        | Wasm
        | RustIntrinsic
        | PlatformIntrinsic
        | Unadjusted => false,
        Rust | RustCall => tcx.sess.panic_strategy() == PanicStrategy::Unwind,
    }
}
2780
2781 #[inline]
conv_from_spec_abi(tcx: TyCtxt<'_>, abi: SpecAbi) -> Conv2782 pub fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: SpecAbi) -> Conv {
2783 use rustc_target::spec::abi::Abi::*;
2784 match tcx.sess.target.adjust_abi(abi) {
2785 RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
2786
2787 // It's the ABI's job to select this, not ours.
2788 System { .. } => bug!("system abi should be selected elsewhere"),
2789 EfiApi => bug!("eficall abi should be selected elsewhere"),
2790
2791 Stdcall { .. } => Conv::X86Stdcall,
2792 Fastcall => Conv::X86Fastcall,
2793 Vectorcall => Conv::X86VectorCall,
2794 Thiscall { .. } => Conv::X86ThisCall,
2795 C { .. } => Conv::C,
2796 Unadjusted => Conv::C,
2797 Win64 => Conv::X86_64Win64,
2798 SysV64 => Conv::X86_64SysV,
2799 Aapcs => Conv::ArmAapcs,
2800 CCmseNonSecureCall => Conv::CCmseNonSecureCall,
2801 PtxKernel => Conv::PtxKernel,
2802 Msp430Interrupt => Conv::Msp430Intr,
2803 X86Interrupt => Conv::X86Intr,
2804 AmdGpuKernel => Conv::AmdGpuKernel,
2805 AvrInterrupt => Conv::AvrInterrupt,
2806 AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
2807 Wasm => Conv::C,
2808
2809 // These API constants ought to be more specific...
2810 Cdecl => Conv::C,
2811 }
2812 }
2813
/// Error produced by attempting to compute or adjust a `FnAbi`.
///
/// Both variants wrap errors produced elsewhere; `From` impls below let `?`
/// convert them implicitly.
#[derive(Clone, Debug, HashStable)]
pub enum FnAbiError<'tcx> {
    /// Error produced by a `layout_of` call, while computing `FnAbi` initially.
    Layout(LayoutError<'tcx>),

    /// Error produced by attempting to adjust a `FnAbi`, for a "foreign" ABI.
    AdjustForForeignAbi(call::AdjustForForeignAbiError),
}
2823
impl From<LayoutError<'tcx>> for FnAbiError<'tcx> {
    /// Wraps a layout error so `?` can propagate it from `layout_of` calls.
    fn from(err: LayoutError<'tcx>) -> Self {
        Self::Layout(err)
    }
}
2829
impl From<call::AdjustForForeignAbiError> for FnAbiError<'_> {
    /// Wraps a foreign-ABI adjustment error so `?` can propagate it from
    /// `adjust_for_foreign_abi` calls.
    fn from(err: call::AdjustForForeignAbiError) -> Self {
        Self::AdjustForForeignAbi(err)
    }
}
2835
2836 impl<'tcx> fmt::Display for FnAbiError<'tcx> {
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result2837 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2838 match self {
2839 Self::Layout(err) => err.fmt(f),
2840 Self::AdjustForForeignAbi(err) => err.fmt(f),
2841 }
2842 }
2843 }
2844
// FIXME(eddyb) maybe use something like this for an unified `fn_abi_of`, not
// just for error handling.
/// Describes which `fn_abi_of_*` request failed, for error reporting via
/// `FnAbiOfHelpers::handle_fn_abi_err`.
#[derive(Debug)]
pub enum FnAbiRequest<'tcx> {
    /// A `fn_abi_of_fn_ptr(sig, extra_args)` request.
    OfFnPtr { sig: ty::PolyFnSig<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
    /// A `fn_abi_of_instance(instance, extra_args)` request.
    OfInstance { instance: ty::Instance<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
}
2852
/// Trait for contexts that want to be able to compute `FnAbi`s.
/// This automatically gives access to `FnAbiOf`, through a blanket `impl`.
pub trait FnAbiOfHelpers<'tcx>: LayoutOfHelpers<'tcx> {
    /// The `&FnAbi`-wrapping type (or `&FnAbi` itself), which will be
    /// returned from `fn_abi_of_*` (see also `handle_fn_abi_err`).
    type FnAbiOfResult: MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>;

    /// Helper used for `fn_abi_of_*`, to adapt `tcx.fn_abi_of_*(...)` into a
    /// `Self::FnAbiOfResult` (which does not need to be a `Result<...>`).
    ///
    /// Most `impl`s, which propagate `FnAbiError`s, should simply return `err`,
    /// but this hook allows e.g. codegen to return only `&FnAbi` from its
    /// `cx.fn_abi_of_*(...)`, without any `Result<...>` around it to deal with
    /// (and any `FnAbiError`s are turned into fatal errors or ICEs).
    fn handle_fn_abi_err(
        &self,
        err: FnAbiError<'tcx>,
        span: Span,
        fn_abi_request: FnAbiRequest<'tcx>,
    ) -> <Self::FnAbiOfResult as MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>>::Error;
}
2874
2875 /// Blanket extension trait for contexts that can compute `FnAbi`s.
2876 pub trait FnAbiOf<'tcx>: FnAbiOfHelpers<'tcx> {
2877 /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
2878 ///
2879 /// NB: this doesn't handle virtual calls - those should use `fn_abi_of_instance`
2880 /// instead, where the instance is an `InstanceDef::Virtual`.
2881 #[inline]
fn_abi_of_fn_ptr( &self, sig: ty::PolyFnSig<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>>, ) -> Self::FnAbiOfResult2882 fn fn_abi_of_fn_ptr(
2883 &self,
2884 sig: ty::PolyFnSig<'tcx>,
2885 extra_args: &'tcx ty::List<Ty<'tcx>>,
2886 ) -> Self::FnAbiOfResult {
2887 // FIXME(eddyb) get a better `span` here.
2888 let span = self.layout_tcx_at_span();
2889 let tcx = self.tcx().at(span);
2890
2891 MaybeResult::from(tcx.fn_abi_of_fn_ptr(self.param_env().and((sig, extra_args))).map_err(
2892 |err| self.handle_fn_abi_err(err, span, FnAbiRequest::OfFnPtr { sig, extra_args }),
2893 ))
2894 }
2895
2896 /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
2897 /// direct calls to an `fn`.
2898 ///
2899 /// NB: that includes virtual calls, which are represented by "direct calls"
2900 /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
2901 #[inline]
fn_abi_of_instance( &self, instance: ty::Instance<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>>, ) -> Self::FnAbiOfResult2902 fn fn_abi_of_instance(
2903 &self,
2904 instance: ty::Instance<'tcx>,
2905 extra_args: &'tcx ty::List<Ty<'tcx>>,
2906 ) -> Self::FnAbiOfResult {
2907 // FIXME(eddyb) get a better `span` here.
2908 let span = self.layout_tcx_at_span();
2909 let tcx = self.tcx().at(span);
2910
2911 MaybeResult::from(
2912 tcx.fn_abi_of_instance(self.param_env().and((instance, extra_args))).map_err(|err| {
2913 // HACK(eddyb) at least for definitions of/calls to `Instance`s,
2914 // we can get some kind of span even if one wasn't provided.
2915 // However, we don't do this early in order to avoid calling
2916 // `def_span` unconditionally (which may have a perf penalty).
2917 let span = if !span.is_dummy() { span } else { tcx.def_span(instance.def_id()) };
2918 self.handle_fn_abi_err(err, span, FnAbiRequest::OfInstance { instance, extra_args })
2919 }),
2920 )
2921 }
2922 }
2923
// Blanket `impl`: any context implementing `FnAbiOfHelpers` automatically
// gets the `fn_abi_of_*` convenience methods from `FnAbiOf`.
impl<C: FnAbiOfHelpers<'tcx>> FnAbiOf<'tcx> for C {}
2925
fn_abi_of_fn_ptr<'tcx>( tcx: TyCtxt<'tcx>, query: ty::ParamEnvAnd<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>, ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>>2926 fn fn_abi_of_fn_ptr<'tcx>(
2927 tcx: TyCtxt<'tcx>,
2928 query: ty::ParamEnvAnd<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
2929 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
2930 let (param_env, (sig, extra_args)) = query.into_parts();
2931
2932 LayoutCx { tcx, param_env }.fn_abi_new_uncached(
2933 sig,
2934 extra_args,
2935 None,
2936 CodegenFnAttrFlags::empty(),
2937 false,
2938 )
2939 }
2940
fn_abi_of_instance<'tcx>( tcx: TyCtxt<'tcx>, query: ty::ParamEnvAnd<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>, ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>>2941 fn fn_abi_of_instance<'tcx>(
2942 tcx: TyCtxt<'tcx>,
2943 query: ty::ParamEnvAnd<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
2944 ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
2945 let (param_env, (instance, extra_args)) = query.into_parts();
2946
2947 let sig = instance.fn_sig_for_fn_abi(tcx);
2948
2949 let caller_location = if instance.def.requires_caller_location(tcx) {
2950 Some(tcx.caller_location_ty())
2951 } else {
2952 None
2953 };
2954
2955 let attrs = tcx.codegen_fn_attrs(instance.def_id()).flags;
2956
2957 LayoutCx { tcx, param_env }.fn_abi_new_uncached(
2958 sig,
2959 extra_args,
2960 caller_location,
2961 attrs,
2962 matches!(instance.def, ty::InstanceDef::Virtual(..)),
2963 )
2964 }
2965
impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
    // FIXME(eddyb) perhaps group the signature/type-containing (or all of them?)
    // arguments of this method, into a separate `struct`.
    /// Computes a fresh (uncached) `FnAbi` for a function with signature `sig`.
    ///
    /// * `extra_args`: types of trailing arguments beyond the fixed ones
    ///   (asserted empty unless `sig.c_variadic`, see below).
    /// * `caller_location`: if `Some`, the type of the implicit caller-location
    ///   argument, appended after all other arguments.
    /// * `codegen_fn_attr_flags`: only used to compute `can_unwind`.
    /// * `force_thin_self_ptr`: if `true`, argument 0 is replaced by a
    ///   thin-pointer layout via `make_thin_self_ptr` (virtual calls).
    fn fn_abi_new_uncached(
        &self,
        sig: ty::PolyFnSig<'tcx>,
        extra_args: &[Ty<'tcx>],
        caller_location: Option<Ty<'tcx>>,
        codegen_fn_attr_flags: CodegenFnAttrFlags,
        // FIXME(eddyb) replace this with something typed, like an `enum`.
        force_thin_self_ptr: bool,
    ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
        debug!("fn_abi_new_uncached({:?}, {:?})", sig, extra_args);

        let sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, sig);

        let conv = conv_from_spec_abi(self.tcx(), sig.abi);

        let mut inputs = sig.inputs();
        let extra_args = if sig.abi == RustCall {
            assert!(!sig.c_variadic && extra_args.is_empty());

            if let Some(input) = sig.inputs().last() {
                if let ty::Tuple(tupled_arguments) = input.kind() {
                    // "rust-call" tuples its non-`self` arguments into the last
                    // input; untuple them into individual extra arguments.
                    inputs = &sig.inputs()[0..sig.inputs().len() - 1];
                    tupled_arguments.iter().map(|k| k.expect_ty()).collect()
                } else {
                    bug!(
                        "argument to function with \"rust-call\" ABI \
                         is not a tuple"
                    );
                }
            } else {
                bug!(
                    "argument to function with \"rust-call\" ABI \
                     is not a tuple"
                );
            }
        } else {
            assert!(sig.c_variadic || extra_args.is_empty());
            extra_args.to_vec()
        };

        // Targets on which zero-sized aggregate arguments are NOT ignored by
        // the C ABI (see the comment in `arg_of` below).
        let target = &self.tcx.sess.target;
        let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl" | "uclibc");
        let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
        let linux_s390x_gnu_like =
            target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
        let linux_sparc64_gnu_like =
            target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
        let linux_powerpc_gnu_like =
            target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
        use SpecAbi::*;
        let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);

        // Handle safe Rust thin and fat pointers.
        let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
                                      scalar: Scalar,
                                      layout: TyAndLayout<'tcx>,
                                      offset: Size,
                                      is_return: bool| {
            // Booleans are always an i1 that needs to be zero-extended.
            if scalar.is_bool() {
                attrs.ext(ArgExtension::Zext);
                return;
            }

            // Only pointer types handled below.
            if scalar.value != Pointer {
                return;
            }

            // A pointer whose valid range excludes 0 can be marked non-null.
            if !scalar.valid_range.contains(0) {
                attrs.set(ArgAttribute::NonNull);
            }

            if let Some(pointee) = layout.pointee_info_at(self, offset) {
                if let Some(kind) = pointee.safe {
                    attrs.pointee_align = Some(pointee.align);

                    // `Box` (`UniqueOwned`) are not necessarily dereferenceable
                    // for the entire duration of the function as they can be deallocated
                    // at any time. Set their valid size to 0.
                    attrs.pointee_size = match kind {
                        PointerKind::UniqueOwned => Size::ZERO,
                        _ => pointee.size,
                    };

                    // `Box` pointer parameters never alias because ownership is transferred
                    // `&mut` pointer parameters never alias other parameters,
                    // or mutable global data
                    //
                    // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
                    // and can be marked as both `readonly` and `noalias`, as
                    // LLVM's definition of `noalias` is based solely on memory
                    // dependencies rather than pointer equality
                    //
                    // Due to past miscompiles in LLVM, we apply a separate NoAliasMutRef attribute
                    // for UniqueBorrowed arguments, so that the codegen backend can decide whether
                    // or not to actually emit the attribute. It can also be controlled with the
                    // `-Zmutable-noalias` debugging option.
                    let no_alias = match kind {
                        PointerKind::Shared | PointerKind::UniqueBorrowed => false,
                        PointerKind::UniqueOwned => true,
                        PointerKind::Frozen => !is_return,
                    };
                    if no_alias {
                        attrs.set(ArgAttribute::NoAlias);
                    }

                    if kind == PointerKind::Frozen && !is_return {
                        attrs.set(ArgAttribute::ReadOnly);
                    }

                    if kind == PointerKind::UniqueBorrowed && !is_return {
                        attrs.set(ArgAttribute::NoAliasMutRef);
                    }
                }
            }
        };

        // Computes the `ArgAbi` for one argument (`arg_idx == Some(i)`) or for
        // the return place (`arg_idx == None`).
        let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| -> Result<_, FnAbiError<'tcx>> {
            let is_return = arg_idx.is_none();

            let layout = self.layout_of(ty)?;
            let layout = if force_thin_self_ptr && arg_idx == Some(0) {
                // Don't pass the vtable, it's not an argument of the virtual fn.
                // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
                // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
                make_thin_self_ptr(self, layout)
            } else {
                layout
            };

            let mut arg = ArgAbi::new(self, layout, |layout, scalar, offset| {
                let mut attrs = ArgAttributes::new();
                adjust_for_rust_scalar(&mut attrs, scalar, *layout, offset, is_return);
                attrs
            });

            if arg.layout.is_zst() {
                // For some forsaken reason, x86_64-pc-windows-gnu
                // doesn't ignore zero-sized struct arguments.
                // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl,uclibc}.
                if is_return
                    || rust_abi
                    || (!win_x64_gnu
                        && !linux_s390x_gnu_like
                        && !linux_sparc64_gnu_like
                        && !linux_powerpc_gnu_like)
                {
                    arg.mode = PassMode::Ignore;
                }
            }

            Ok(arg)
        };

        let mut fn_abi = FnAbi {
            ret: arg_of(sig.output(), None)?,
            // Fixed inputs, then untupled/variadic extras, then the implicit
            // caller-location argument (if any).
            args: inputs
                .iter()
                .cloned()
                .chain(extra_args)
                .chain(caller_location)
                .enumerate()
                .map(|(i, ty)| arg_of(ty, Some(i)))
                .collect::<Result<_, _>>()?,
            c_variadic: sig.c_variadic,
            fixed_count: inputs.len(),
            conv,
            can_unwind: fn_can_unwind(self.tcx(), codegen_fn_attr_flags, sig.abi),
        };
        self.fn_abi_adjust_for_abi(&mut fn_abi, sig.abi)?;
        debug!("fn_abi_new_uncached = {:?}", fn_abi);
        Ok(self.tcx.arena.alloc(fn_abi))
    }

    /// Applies per-ABI adjustments to a freshly computed `FnAbi`: Rust-side
    /// ABIs get the aggregate/SIMD fixups below, everything else is delegated
    /// to `adjust_for_foreign_abi`. `Unadjusted` is left untouched.
    fn fn_abi_adjust_for_abi(
        &self,
        fn_abi: &mut FnAbi<'tcx, Ty<'tcx>>,
        abi: SpecAbi,
    ) -> Result<(), FnAbiError<'tcx>> {
        if abi == SpecAbi::Unadjusted {
            return Ok(());
        }

        if abi == SpecAbi::Rust
            || abi == SpecAbi::RustCall
            || abi == SpecAbi::RustIntrinsic
            || abi == SpecAbi::PlatformIntrinsic
        {
            let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
                if arg.is_ignore() {
                    return;
                }

                match arg.layout.abi {
                    // Aggregates fall through to the size-based handling below.
                    Abi::Aggregate { .. } => {}

                    // This is a fun case! The gist of what this is doing is
                    // that we want callers and callees to always agree on the
                    // ABI of how they pass SIMD arguments. If we were to *not*
                    // make these arguments indirect then they'd be immediates
                    // in LLVM, which means that they'd used whatever the
                    // appropriate ABI is for the callee and the caller. That
                    // means, for example, if the caller doesn't have AVX
                    // enabled but the callee does, then passing an AVX argument
                    // across this boundary would cause corrupt data to show up.
                    //
                    // This problem is fixed by unconditionally passing SIMD
                    // arguments through memory between callers and callees
                    // which should get them all to agree on ABI regardless of
                    // target feature sets. Some more information about this
                    // issue can be found in #44367.
                    //
                    // Note that the platform intrinsic ABI is exempt here as
                    // that's how we connect up to LLVM and it's unstable
                    // anyway, we control all calls to it in libstd.
                    Abi::Vector { .. }
                        if abi != SpecAbi::PlatformIntrinsic
                            && self.tcx.sess.target.simd_types_indirect =>
                    {
                        arg.make_indirect();
                        return;
                    }

                    _ => return,
                }

                // Pass and return structures up to 2 pointers in size by value, matching `ScalarPair`.
                // LLVM will usually pass these in 2 registers, which is more efficient than by-ref.
                let max_by_val_size = Pointer.size(self) * 2;
                let size = arg.layout.size;

                if arg.layout.is_unsized() || size > max_by_val_size {
                    arg.make_indirect();
                } else {
                    // We want to pass small aggregates as immediates, but using
                    // a LLVM aggregate type for this leads to bad optimizations,
                    // so we pick an appropriately sized integer type instead.
                    arg.cast_to(Reg { kind: RegKind::Integer, size });
                }
            };
            fixup(&mut fn_abi.ret);
            for arg in &mut fn_abi.args {
                fixup(arg);
            }
        } else {
            fn_abi.adjust_for_foreign_abi(self, abi)?;
        }

        Ok(())
    }
}
3221
/// Computes the layout used to pass `self` in a virtual call: keeps the
/// receiver's *type*, but substitutes the layout of a thin pointer
/// (`*mut ()`) so the vtable half is not passed as an argument.
fn make_thin_self_ptr<'tcx>(
    cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
    layout: TyAndLayout<'tcx>,
) -> TyAndLayout<'tcx> {
    let tcx = cx.tcx();
    let fat_pointer_ty = if layout.is_unsized() {
        // unsized `self` is passed as a pointer to `self`
        // FIXME (mikeyhew) change this to use &own if it is ever added to the language
        tcx.mk_mut_ptr(layout.ty)
    } else {
        // A sized receiver must be (a newtype of) a fat pointer, i.e. have a
        // `ScalarPair` ABI (data pointer + vtable); anything else is a bug.
        match layout.abi {
            Abi::ScalarPair(..) => (),
            _ => bug!("receiver type has unsupported layout: {:?}", layout),
        }

        // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
        // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
        // elsewhere in the compiler as a method on a `dyn Trait`.
        // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
        // get a built-in pointer type
        let mut fat_pointer_layout = layout;
        'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
            && !fat_pointer_layout.ty.is_region_ptr()
        {
            for i in 0..fat_pointer_layout.fields.count() {
                let field_layout = fat_pointer_layout.field(cx, i);

                // Descend into the first non-ZST field and restart the scan.
                if !field_layout.is_zst() {
                    fat_pointer_layout = field_layout;
                    continue 'descend_newtypes;
                }
            }

            // Every field was a ZST: nothing left to descend into.
            bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
        }

        fat_pointer_layout.ty
    };

    // we now have a type like `*mut RcBox<dyn Trait>`
    // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
    // this is understood as a special case elsewhere in the compiler
    let unit_ptr_ty = tcx.mk_mut_ptr(tcx.mk_unit());

    TyAndLayout {
        ty: fat_pointer_ty,

        // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing the `Result`
        // should always work because the type is always `*mut ()`.
        ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
    }
}
3274