pub use Integer::*;
pub use Primitive::*;

use crate::spec::Target;

use std::convert::{TryFrom, TryInto};
use std::fmt;
use std::iter::Step;
use std::num::NonZeroUsize;
use std::ops::{Add, AddAssign, Deref, Mul, RangeInclusive, Sub};
use std::str::FromStr;

use rustc_index::vec::{Idx, IndexVec};
use rustc_macros::HashStable_Generic;
use rustc_serialize::json::{Json, ToJson};

pub mod call;

/// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
/// for a target, which contains everything needed to compute layouts.
pub struct TargetDataLayout {
    pub endian: Endian,
    pub i1_align: AbiAndPrefAlign,
    pub i8_align: AbiAndPrefAlign,
    pub i16_align: AbiAndPrefAlign,
    pub i32_align: AbiAndPrefAlign,
    pub i64_align: AbiAndPrefAlign,
    pub i128_align: AbiAndPrefAlign,
    pub f32_align: AbiAndPrefAlign,
    pub f64_align: AbiAndPrefAlign,
    pub pointer_size: Size,
    pub pointer_align: AbiAndPrefAlign,
    pub aggregate_align: AbiAndPrefAlign,

    /// Alignments for vector types.
    pub vector_align: Vec<(Size, AbiAndPrefAlign)>,

    pub instruction_address_space: AddressSpace,

    /// Minimum size of #[repr(C)] enums (default I32 bits)
    pub c_enum_min_size: Integer,
}

impl Default for TargetDataLayout {
    /// Creates an instance of `TargetDataLayout`.
    fn default() -> TargetDataLayout {
        let align = |bits| Align::from_bits(bits).unwrap();
        TargetDataLayout {
            endian: Endian::Big,
            i1_align: AbiAndPrefAlign::new(align(8)),
            i8_align: AbiAndPrefAlign::new(align(8)),
            i16_align: AbiAndPrefAlign::new(align(16)),
            i32_align: AbiAndPrefAlign::new(align(32)),
            i64_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
            i128_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
            f32_align: AbiAndPrefAlign::new(align(32)),
            f64_align: AbiAndPrefAlign::new(align(64)),
            pointer_size: Size::from_bits(64),
            pointer_align: AbiAndPrefAlign::new(align(64)),
            aggregate_align: AbiAndPrefAlign { abi: align(0), pref: align(64) },
            vector_align: vec![
                (Size::from_bits(64), AbiAndPrefAlign::new(align(64))),
                (Size::from_bits(128), AbiAndPrefAlign::new(align(128))),
            ],
            instruction_address_space: AddressSpace::DATA,
            c_enum_min_size: Integer::I32,
        }
    }
}

impl TargetDataLayout {
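    /// Parses the "data-layout" string for the given target.
    ///
    /// The spec is a `-`-separated list of entries; a sketch of an
    /// illustrative (hypothetical, not any real target's) layout string
    /// and how this parser reads it:
    ///
    /// ```text
    /// e-p:64:64-i64:64-v128:128-a:0:64
    /// | |       |      |        `- aggregates: ABI align 0, preferred 64
    /// | |       |      `- 128-bit vectors are 128-bit aligned
    /// | |       `- i64 is 64-bit aligned
    /// | `- pointers are 64 bits wide, 64-bit aligned
    /// `- little-endian
    /// ```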
    pub fn parse(target: &Target) -> Result<TargetDataLayout, String> {
        // Parse an address space index from a string.
        let parse_address_space = |s: &str, cause: &str| {
            s.parse::<u32>().map(AddressSpace).map_err(|err| {
                format!("invalid address space `{}` for `{}` in \"data-layout\": {}", s, cause, err)
            })
        };

        // Parse a bit count from a string.
        let parse_bits = |s: &str, kind: &str, cause: &str| {
            s.parse::<u64>().map_err(|err| {
                format!("invalid {} `{}` for `{}` in \"data-layout\": {}", kind, s, cause, err)
            })
        };

        // Parse a size string.
        let size = |s: &str, cause: &str| parse_bits(s, "size", cause).map(Size::from_bits);

        // Parse an alignment string.
        let align = |s: &[&str], cause: &str| {
            if s.is_empty() {
                return Err(format!("missing alignment for `{}` in \"data-layout\"", cause));
            }
            let align_from_bits = |bits| {
                Align::from_bits(bits).map_err(|err| {
                    format!("invalid alignment for `{}` in \"data-layout\": {}", cause, err)
                })
            };
            let abi = parse_bits(s[0], "alignment", cause)?;
            let pref = s.get(1).map_or(Ok(abi), |pref| parse_bits(pref, "alignment", cause))?;
            Ok(AbiAndPrefAlign { abi: align_from_bits(abi)?, pref: align_from_bits(pref)? })
        };

        let mut dl = TargetDataLayout::default();
        let mut i128_align_src = 64;
        for spec in target.data_layout.split('-') {
            let spec_parts = spec.split(':').collect::<Vec<_>>();

            match &*spec_parts {
                ["e"] => dl.endian = Endian::Little,
                ["E"] => dl.endian = Endian::Big,
                [p] if p.starts_with('P') => {
                    dl.instruction_address_space = parse_address_space(&p[1..], "P")?
                }
                ["a", ref a @ ..] => dl.aggregate_align = align(a, "a")?,
                ["f32", ref a @ ..] => dl.f32_align = align(a, "f32")?,
                ["f64", ref a @ ..] => dl.f64_align = align(a, "f64")?,
                [p @ "p", s, ref a @ ..] | [p @ "p0", s, ref a @ ..] => {
                    dl.pointer_size = size(s, p)?;
                    dl.pointer_align = align(a, p)?;
                }
                [s, ref a @ ..] if s.starts_with('i') => {
                    let bits = match s[1..].parse::<u64>() {
                        Ok(bits) => bits,
                        Err(_) => {
                            size(&s[1..], "i")?; // For the user error.
                            continue;
                        }
                    };
                    let a = align(a, s)?;
                    match bits {
                        1 => dl.i1_align = a,
                        8 => dl.i8_align = a,
                        16 => dl.i16_align = a,
                        32 => dl.i32_align = a,
                        64 => dl.i64_align = a,
                        _ => {}
                    }
                    if bits >= i128_align_src && bits <= 128 {
                        // Default alignment for i128 is decided by taking the alignment of
                        // the largest-sized i{64..=128}.
                        i128_align_src = bits;
                        dl.i128_align = a;
                    }
                }
                [s, ref a @ ..] if s.starts_with('v') => {
                    let v_size = size(&s[1..], "v")?;
                    let a = align(a, s)?;
                    if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
                        v.1 = a;
                        continue;
                    }
                    // No existing entry, add a new one.
                    dl.vector_align.push((v_size, a));
                }
                _ => {} // Ignore everything else.
            }
        }

        // Perform consistency checks against the Target information.
        if dl.endian != target.endian {
            return Err(format!(
                "inconsistent target specification: \"data-layout\" claims \
                 architecture is {}-endian, while \"target-endian\" is `{}`",
                dl.endian.as_str(),
                target.endian.as_str(),
            ));
        }

        if dl.pointer_size.bits() != target.pointer_width.into() {
            return Err(format!(
                "inconsistent target specification: \"data-layout\" claims \
                 pointers are {}-bit, while \"target-pointer-width\" is `{}`",
                dl.pointer_size.bits(),
                target.pointer_width
            ));
        }

        dl.c_enum_min_size = Integer::from_size(Size::from_bits(target.c_enum_min_bits))?;

        Ok(dl)
    }

    /// Returns the exclusive upper bound on object size.
    ///
    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
    /// index every address within an object along with one byte past the end, along with allowing
    /// `isize` to store the difference between any two pointers into an object.
    ///
    /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer
    /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is
    /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable
    /// address space on 64-bit ARMv8 and x86_64.
    #[inline]
    pub fn obj_size_bound(&self) -> u64 {
        match self.pointer_size.bits() {
            16 => 1 << 15,
            32 => 1 << 31,
            64 => 1 << 47,
            bits => panic!("obj_size_bound: unknown pointer bit size {}", bits),
        }
    }

    #[inline]
    pub fn ptr_sized_integer(&self) -> Integer {
        match self.pointer_size.bits() {
            16 => I16,
            32 => I32,
            64 => I64,
            bits => panic!("ptr_sized_integer: unknown pointer bit size {}", bits),
        }
    }

    #[inline]
    pub fn vector_align(&self, vec_size: Size) -> AbiAndPrefAlign {
        for &(size, align) in &self.vector_align {
            if size == vec_size {
                return align;
            }
        }
        // Default to natural alignment, which is what LLVM does.
        // That is, use the size, rounded up to a power of 2.
        AbiAndPrefAlign::new(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap())
    }
}

pub trait HasDataLayout {
    fn data_layout(&self) -> &TargetDataLayout;
}

impl HasDataLayout for TargetDataLayout {
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        self
    }
}

/// Endianness of the target, which must match cfg(target_endian).
#[derive(Copy, Clone, PartialEq)]
pub enum Endian {
    Little,
    Big,
}

impl Endian {
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::Little => "little",
            Self::Big => "big",
        }
    }
}

impl fmt::Debug for Endian {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(self.as_str())
    }
}

impl FromStr for Endian {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "little" => Ok(Self::Little),
            "big" => Ok(Self::Big),
            _ => Err(format!(r#"unknown endian: "{}""#, s)),
        }
    }
}

impl ToJson for Endian {
    fn to_json(&self) -> Json {
        self.as_str().to_json()
    }
}

/// Size of a type in bytes.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Encodable, Decodable)]
#[derive(HashStable_Generic)]
pub struct Size {
    // The top 3 bits are ALWAYS zero.
    raw: u64,
}

impl Size {
    pub const ZERO: Size = Size { raw: 0 };

    /// Rounds `bits` up to the next-higher byte boundary, if `bits` is
    /// not aligned.
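    ///
    /// An illustrative sketch (not a doctest): 1 through 8 bits all round
    /// up to a single byte.
    ///
    /// ```ignore (illustrative)
    /// assert_eq!(Size::from_bits(1).bytes(), 1);
    /// assert_eq!(Size::from_bits(8).bytes(), 1);
    /// assert_eq!(Size::from_bits(9).bytes(), 2);
    /// ```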
    pub fn from_bits(bits: impl TryInto<u64>) -> Size {
        let bits = bits.try_into().ok().unwrap();

        #[cold]
        fn overflow(bits: u64) -> ! {
            panic!("Size::from_bits({}) has overflowed", bits);
        }

        // This is the largest value of `bits` that does not cause overflow
        // during rounding, and guarantees that the resulting number of bytes
        // cannot cause overflow when multiplied by 8.
        if bits > 0xffff_ffff_ffff_fff8 {
            overflow(bits);
        }

        // Avoid potential overflow from `bits + 7`.
        Size { raw: bits / 8 + ((bits % 8) + 7) / 8 }
    }

    #[inline]
    pub fn from_bytes(bytes: impl TryInto<u64>) -> Size {
        let bytes: u64 = bytes.try_into().ok().unwrap();
        Size { raw: bytes }
    }

    #[inline]
    pub fn bytes(self) -> u64 {
        self.raw
    }

    #[inline]
    pub fn bytes_usize(self) -> usize {
        self.bytes().try_into().unwrap()
    }

    #[inline]
    pub fn bits(self) -> u64 {
        self.raw << 3
    }

    #[inline]
    pub fn bits_usize(self) -> usize {
        self.bits().try_into().unwrap()
    }

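    /// Rounds `self` up to the nearest multiple of `align`. An illustrative
    /// sketch (not a doctest): aligning 5 bytes to a 4-byte alignment yields
    /// 8 bytes.
    ///
    /// ```ignore (illustrative)
    /// let four = Align::from_bytes(4).unwrap();
    /// assert_eq!(Size::from_bytes(5).align_to(four).bytes(), 8);
    /// ```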
    #[inline]
    pub fn align_to(self, align: Align) -> Size {
        let mask = align.bytes() - 1;
        Size::from_bytes((self.bytes() + mask) & !mask)
    }

    #[inline]
    pub fn is_aligned(self, align: Align) -> bool {
        let mask = align.bytes() - 1;
        self.bytes() & mask == 0
    }

    #[inline]
    pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: &C) -> Option<Size> {
        let dl = cx.data_layout();

        let bytes = self.bytes().checked_add(offset.bytes())?;

        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
    }

    #[inline]
    pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: &C) -> Option<Size> {
        let dl = cx.data_layout();

        let bytes = self.bytes().checked_mul(count)?;
        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
    }

    /// Truncates `value` to `self` bits and then sign-extends it to 128 bits
    /// (i.e., if it is negative, fill with 1's on the left).
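    ///
    /// An illustrative sketch (not a doctest): truncating to 8 bits and
    /// sign-extending matches `as i8 as i128` semantics.
    ///
    /// ```ignore (illustrative)
    /// let i8_size = Size::from_bits(8);
    /// assert_eq!(i8_size.sign_extend(0xff), u128::MAX); // -1_i8, sign-extended
    /// assert_eq!(i8_size.sign_extend(0x7f), 0x7f);      // positive, unchanged
    /// ```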
    #[inline]
    pub fn sign_extend(self, value: u128) -> u128 {
        let size = self.bits();
        if size == 0 {
            // Truncated until nothing is left.
            return 0;
        }
        // Sign-extend it.
        let shift = 128 - size;
        // Shift the unsigned value to the left, then shift back to the right as signed
        // (essentially fills with sign bit on the left).
        (((value << shift) as i128) >> shift) as u128
    }

    /// Truncates `value` to `self` bits.
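    ///
    /// E.g. `Size::from_bits(8).truncate(0x1ff)` keeps only the low 8 bits,
    /// yielding `0xff` (an illustrative sketch of the shift-based masking below).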
    #[inline]
    pub fn truncate(self, value: u128) -> u128 {
        let size = self.bits();
        if size == 0 {
            // Truncated until nothing is left.
            return 0;
        }
        let shift = 128 - size;
        // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
        (value << shift) >> shift
    }

    #[inline]
    pub fn signed_int_min(&self) -> i128 {
        self.sign_extend(1_u128 << (self.bits() - 1)) as i128
    }

    #[inline]
    pub fn signed_int_max(&self) -> i128 {
        i128::MAX >> (128 - self.bits())
    }

    #[inline]
    pub fn unsigned_int_max(&self) -> u128 {
        u128::MAX >> (128 - self.bits())
    }
}

// Panicking addition, subtraction and multiplication for convenience.
// Avoid during layout computation, return `LayoutError` instead.

impl Add for Size {
    type Output = Size;
    #[inline]
    fn add(self, other: Size) -> Size {
        Size::from_bytes(self.bytes().checked_add(other.bytes()).unwrap_or_else(|| {
            panic!("Size::add: {} + {} doesn't fit in u64", self.bytes(), other.bytes())
        }))
    }
}

impl Sub for Size {
    type Output = Size;
    #[inline]
    fn sub(self, other: Size) -> Size {
        Size::from_bytes(self.bytes().checked_sub(other.bytes()).unwrap_or_else(|| {
            panic!("Size::sub: {} - {} would result in negative size", self.bytes(), other.bytes())
        }))
    }
}

impl Mul<Size> for u64 {
    type Output = Size;
    #[inline]
    fn mul(self, size: Size) -> Size {
        size * self
    }
}

impl Mul<u64> for Size {
    type Output = Size;
    #[inline]
    fn mul(self, count: u64) -> Size {
        match self.bytes().checked_mul(count) {
            Some(bytes) => Size::from_bytes(bytes),
            None => panic!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count),
        }
    }
}

impl AddAssign for Size {
    #[inline]
    fn add_assign(&mut self, other: Size) {
        *self = *self + other;
    }
}

impl Step for Size {
    #[inline]
    fn steps_between(start: &Self, end: &Self) -> Option<usize> {
        u64::steps_between(&start.bytes(), &end.bytes())
    }

    #[inline]
    fn forward_checked(start: Self, count: usize) -> Option<Self> {
        u64::forward_checked(start.bytes(), count).map(Self::from_bytes)
    }

    #[inline]
    fn forward(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::forward(start.bytes(), count))
    }

    #[inline]
    unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::forward_unchecked(start.bytes(), count))
    }

    #[inline]
    fn backward_checked(start: Self, count: usize) -> Option<Self> {
        u64::backward_checked(start.bytes(), count).map(Self::from_bytes)
    }

    #[inline]
    fn backward(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::backward(start.bytes(), count))
    }

    #[inline]
    unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::backward_unchecked(start.bytes(), count))
    }
}

/// Alignment of a type in bytes (always a power of two).
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Encodable, Decodable)]
#[derive(HashStable_Generic)]
pub struct Align {
    pow2: u8,
}

impl Align {
    pub const ONE: Align = Align { pow2: 0 };

    #[inline]
    pub fn from_bits(bits: u64) -> Result<Align, String> {
        Align::from_bytes(Size::from_bits(bits).bytes())
    }

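    /// Creates an `Align` from a byte count, which must be a power of two
    /// (an alignment of zero is treated as one byte). An illustrative
    /// sketch (not a doctest):
    ///
    /// ```ignore (illustrative)
    /// assert_eq!(Align::from_bytes(8).unwrap().bits(), 64);
    /// assert!(Align::from_bytes(3).is_err()); // not a power of 2
    /// ```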
    #[inline]
    pub fn from_bytes(align: u64) -> Result<Align, String> {
        // Treat an alignment of 0 bytes like 1-byte alignment.
        if align == 0 {
            return Ok(Align::ONE);
        }

        #[cold]
        fn not_power_of_2(align: u64) -> String {
            format!("`{}` is not a power of 2", align)
        }

        #[cold]
        fn too_large(align: u64) -> String {
            format!("`{}` is too large", align)
        }

        let mut bytes = align;
        let mut pow2: u8 = 0;
        while (bytes & 1) == 0 {
            pow2 += 1;
            bytes >>= 1;
        }
        if bytes != 1 {
            return Err(not_power_of_2(align));
        }
        if pow2 > 29 {
            return Err(too_large(align));
        }

        Ok(Align { pow2 })
    }

    #[inline]
    pub fn bytes(self) -> u64 {
        1 << self.pow2
    }

    #[inline]
    pub fn bits(self) -> u64 {
        self.bytes() * 8
    }

    /// Computes the best alignment possible for the given offset
    /// (the largest power of two that the offset is a multiple of).
    ///
    /// N.B., for an offset of `0`, this happens to return `2^64`.
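    ///
    /// For example, an offset of 24 bytes (`0b11000`) has three trailing
    /// zeros, so it is at best 8-byte aligned:
    ///
    /// ```ignore (illustrative)
    /// assert_eq!(Align::max_for_offset(Size::from_bytes(24)).bytes(), 8);
    /// ```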
    #[inline]
    pub fn max_for_offset(offset: Size) -> Align {
        Align { pow2: offset.bytes().trailing_zeros() as u8 }
    }

    /// Lowers the alignment, if necessary, such that the given offset
    /// is aligned to it (the offset is a multiple of the alignment).
    #[inline]
    pub fn restrict_for_offset(self, offset: Size) -> Align {
        self.min(Align::max_for_offset(offset))
    }
}

/// A pair of alignments, ABI-mandated and preferred.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, Encodable, Decodable)]
#[derive(HashStable_Generic)]
pub struct AbiAndPrefAlign {
    pub abi: Align,
    pub pref: Align,
}

impl AbiAndPrefAlign {
    #[inline]
    pub fn new(align: Align) -> AbiAndPrefAlign {
        AbiAndPrefAlign { abi: align, pref: align }
    }

    #[inline]
    pub fn min(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
        AbiAndPrefAlign { abi: self.abi.min(other.abi), pref: self.pref.min(other.pref) }
    }

    #[inline]
    pub fn max(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
        AbiAndPrefAlign { abi: self.abi.max(other.abi), pref: self.pref.max(other.pref) }
    }
}

/// Integers, also used for enum discriminants.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, HashStable_Generic)]
pub enum Integer {
    I8,
    I16,
    I32,
    I64,
    I128,
}

impl Integer {
    #[inline]
    pub fn size(self) -> Size {
        match self {
            I8 => Size::from_bytes(1),
            I16 => Size::from_bytes(2),
            I32 => Size::from_bytes(4),
            I64 => Size::from_bytes(8),
            I128 => Size::from_bytes(16),
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
        let dl = cx.data_layout();

        match self {
            I8 => dl.i8_align,
            I16 => dl.i16_align,
            I32 => dl.i32_align,
            I64 => dl.i64_align,
            I128 => dl.i128_align,
        }
    }

    /// Finds the smallest `Integer` type which can represent the signed value.
    #[inline]
    pub fn fit_signed(x: i128) -> Integer {
        match x {
            -0x0000_0000_0000_0080..=0x0000_0000_0000_007f => I8,
            -0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => I16,
            -0x0000_0000_8000_0000..=0x0000_0000_7fff_ffff => I32,
            -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => I64,
            _ => I128,
        }
    }

    /// Finds the smallest `Integer` type which can represent the unsigned value.
    #[inline]
    pub fn fit_unsigned(x: u128) -> Integer {
        match x {
            0..=0x0000_0000_0000_00ff => I8,
            0..=0x0000_0000_0000_ffff => I16,
            0..=0x0000_0000_ffff_ffff => I32,
            0..=0xffff_ffff_ffff_ffff => I64,
            _ => I128,
        }
    }

    /// Finds the smallest integer with the given alignment.
    pub fn for_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Option<Integer> {
        let dl = cx.data_layout();

        for candidate in [I8, I16, I32, I64, I128] {
            if wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes() {
                return Some(candidate);
            }
        }
        None
    }

    /// Finds the largest integer with the given alignment or less.
    pub fn approximate_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Integer {
        let dl = cx.data_layout();

        // FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
        for candidate in [I64, I32, I16] {
            if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() {
                return candidate;
            }
        }
        I8
    }

    // FIXME(eddyb) consolidate this and other methods that find the appropriate
    // `Integer` given some requirements.
    #[inline]
    fn from_size(size: Size) -> Result<Self, String> {
        match size.bits() {
            8 => Ok(Integer::I8),
            16 => Ok(Integer::I16),
            32 => Ok(Integer::I32),
            64 => Ok(Integer::I64),
            128 => Ok(Integer::I128),
            _ => Err(format!("rust does not support integers with {} bits", size.bits())),
        }
    }
}

/// Fundamental unit of memory access and layout.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum Primitive {
    /// The `bool` is the signedness of the `Integer` type.
    ///
    /// One would think we would not care about such details this low down,
    /// but some ABIs are described in terms of C types and ISAs where the
    /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
    /// a negative integer passed by zero-extension will appear positive in
    /// the callee, and most operations on it will produce the wrong values.
    Int(Integer, bool),
    F32,
    F64,
    Pointer,
}

impl Primitive {
    pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
        let dl = cx.data_layout();

        match self {
            Int(i, _) => i.size(),
            F32 => Size::from_bits(32),
            F64 => Size::from_bits(64),
            Pointer => dl.pointer_size,
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
        let dl = cx.data_layout();

        match self {
            Int(i, _) => i.align(dl),
            F32 => dl.f32_align,
            F64 => dl.f64_align,
            Pointer => dl.pointer_align,
        }
    }

    // FIXME(eddyb) remove, it's trivial thanks to `matches!`.
    #[inline]
    pub fn is_float(self) -> bool {
        matches!(self, F32 | F64)
    }

    // FIXME(eddyb) remove, it's completely unused.
    #[inline]
    pub fn is_int(self) -> bool {
        matches!(self, Int(..))
    }
}

/// Inclusive wrap-around range of valid values, that is, if
/// `start > end`, it represents `start..=MAX`, followed by `0..=end`.
///
/// That is, for an i8 primitive, a range of `254..=2` means the following
/// sequence:
///
///    254 (-2), 255 (-1), 0, 1, 2
///
/// This is intended specifically to mirror LLVM's `!range` metadata semantics.
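///
/// An illustrative sketch (not a doctest) of the wrap-around case above:
///
/// ```ignore (illustrative)
/// let r = WrappingRange { start: 254, end: 2 };
/// assert!(r.contains(255) && r.contains(0) && r.contains(2));
/// assert!(!r.contains(3) && !r.contains(253));
/// ```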
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
#[derive(HashStable_Generic)]
pub struct WrappingRange {
    pub start: u128,
    pub end: u128,
}

impl WrappingRange {
    /// Returns `true` if `v` is contained in the range.
    #[inline(always)]
    pub fn contains(&self, v: u128) -> bool {
        if self.start <= self.end {
            self.start <= v && v <= self.end
        } else {
            self.start <= v || v <= self.end
        }
    }

    /// Returns `self` with replaced `start`.
    #[inline(always)]
    pub fn with_start(mut self, start: u128) -> Self {
        self.start = start;
        self
    }

    /// Returns `self` with replaced `end`.
    #[inline(always)]
    pub fn with_end(mut self, end: u128) -> Self {
        self.end = end;
        self
    }

    /// Returns `true` if `size` completely fills the range.
    #[inline]
    pub fn is_full_for(&self, size: Size) -> bool {
        let max_value = size.unsigned_int_max();
        debug_assert!(self.start <= max_value && self.end <= max_value);
        self.start == (self.end.wrapping_add(1) & max_value)
    }
}

impl fmt::Debug for WrappingRange {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        if self.start > self.end {
            write!(fmt, "(..={}) | ({}..)", self.end, self.start)?;
        } else {
            write!(fmt, "{}..={}", self.start, self.end)?;
        }
        Ok(())
    }
}

/// Information about one scalar component of a Rust type.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[derive(HashStable_Generic)]
pub struct Scalar {
    pub value: Primitive,

    // FIXME(eddyb) always use the shortest range, e.g., by finding
    // the largest space between two consecutive valid values and
    // taking everything else as the (shortest) valid range.
    pub valid_range: WrappingRange,
}

impl Scalar {
    #[inline]
    pub fn is_bool(&self) -> bool {
        matches!(
            self,
            Scalar { value: Int(I8, false), valid_range: WrappingRange { start: 0, end: 1 } }
        )
    }

    /// Returns `true` if all possible numbers are valid, i.e., `valid_range` covers the
    /// whole layout.
    #[inline]
    pub fn is_always_valid<C: HasDataLayout>(&self, cx: &C) -> bool {
        self.valid_range.is_full_for(self.value.size(cx))
    }
}

/// Describes how the fields of a type are located in memory.
#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum FieldsShape {
    /// Scalar primitives and `!`, which never have fields.
    Primitive,

    /// All fields start at no offset. The `usize` is the field count.
    Union(NonZeroUsize),

    /// Array/vector-like placement, with all fields of identical types.
    Array { stride: Size, count: u64 },

    /// Struct-like placement, with precomputed offsets.
    ///
    /// Fields are guaranteed to not overlap, but note that gaps
    /// before, between and after all the fields are NOT always
    /// padding, and as such their contents may not be discarded.
    /// For example, enum variants leave a gap at the start,
    /// where the discriminant field in the enum layout goes.
    Arbitrary {
        /// Offsets for the first byte of each field,
        /// ordered to match the source definition order.
        /// This vector does not go in increasing order.
        // FIXME(eddyb) use small vector optimization for the common case.
        offsets: Vec<Size>,

        /// Maps source order field indices to memory order indices,
        /// depending on how the fields were reordered (if at all).
        /// This is a permutation, with both the source order and the
        /// memory order using the same (0..n) index ranges.
        ///
        /// Note that during computation of `memory_index`, sometimes
        /// it is easier to operate on the inverse mapping (that is,
        /// from memory order to source order), and that is usually
        /// named `inverse_memory_index`.
        ///
        // FIXME(eddyb) build a better abstraction for permutations, if possible.
        // FIXME(camlorn) also consider small vector optimization here.
        memory_index: Vec<u32>,
    },
}

impl FieldsShape {
    #[inline]
    pub fn count(&self) -> usize {
        match *self {
            FieldsShape::Primitive => 0,
            FieldsShape::Union(count) => count.get(),
            FieldsShape::Array { count, .. } => count.try_into().unwrap(),
            FieldsShape::Arbitrary { ref offsets, .. } => offsets.len(),
        }
    }

    #[inline]
    pub fn offset(&self, i: usize) -> Size {
        match *self {
            FieldsShape::Primitive => {
                unreachable!("FieldsShape::offset: `Primitive`s have no fields")
            }
            FieldsShape::Union(count) => {
                assert!(
                    i < count.get(),
                    "tried to access field {} of union with {} fields",
                    i,
                    count
                );
                Size::ZERO
            }
            FieldsShape::Array { stride, count } => {
                let i = u64::try_from(i).unwrap();
                assert!(i < count);
                stride * i
            }
            FieldsShape::Arbitrary { ref offsets, .. } => offsets[i],
        }
    }

    #[inline]
    pub fn memory_index(&self, i: usize) -> usize {
        match *self {
            FieldsShape::Primitive => {
                unreachable!("FieldsShape::memory_index: `Primitive`s have no fields")
            }
            FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
            FieldsShape::Arbitrary { ref memory_index, .. } => memory_index[i].try_into().unwrap(),
        }
    }

    /// Gets source indices of the fields by increasing offsets.
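    ///
    /// An illustrative sketch (not a doctest): with two fields stored in
    /// reverse source order (`memory_index = [1, 0]`), iteration yields the
    /// source indices `1, 0`, i.e. the second source field comes first in memory:
    ///
    /// ```ignore (illustrative)
    /// let fields = FieldsShape::Arbitrary {
    ///     offsets: vec![Size::from_bytes(8), Size::ZERO],
    ///     memory_index: vec![1, 0],
    /// };
    /// assert_eq!(fields.index_by_increasing_offset().collect::<Vec<_>>(), vec![1, 0]);
    /// ```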
    #[inline]
    pub fn index_by_increasing_offset<'a>(&'a self) -> impl Iterator<Item = usize> + 'a {
        let mut inverse_small = [0u8; 64];
        let mut inverse_big = vec![];
        let use_small = self.count() <= inverse_small.len();

        // We have to write this logic twice in order to keep the array small.
        if let FieldsShape::Arbitrary { ref memory_index, .. } = *self {
            if use_small {
                for i in 0..self.count() {
                    inverse_small[memory_index[i] as usize] = i as u8;
                }
            } else {
                inverse_big = vec![0; self.count()];
                for i in 0..self.count() {
                    inverse_big[memory_index[i] as usize] = i as u32;
                }
            }
        }

        (0..self.count()).map(move |i| match *self {
            FieldsShape::Primitive | FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
            FieldsShape::Arbitrary { .. } => {
                if use_small {
                    inverse_small[i] as usize
                } else {
                    inverse_big[i] as usize
                }
            }
        })
    }
}

/// An identifier that specifies the address space that some operation
/// should operate on. Special address spaces have an effect on code generation,
/// depending on the target and the address spaces it implements.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct AddressSpace(pub u32);

impl AddressSpace {
    /// The default address space, corresponding to data space.
    pub const DATA: Self = AddressSpace(0);
}

/// Describes how values of the type are passed by target ABIs,
/// in terms of categories of C types there are ABI rules for.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum Abi {
    Uninhabited,
    Scalar(Scalar),
    ScalarPair(Scalar, Scalar),
    Vector {
        element: Scalar,
        count: u64,
    },
    Aggregate {
        /// If true, the size is exact, otherwise it's only a lower bound.
        sized: bool,
    },
}

impl Abi {
    /// Returns `true` if the layout corresponds to an unsized type.
    #[inline]
    pub fn is_unsized(&self) -> bool {
        match *self {
            Abi::Uninhabited | Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
            Abi::Aggregate { sized } => !sized,
        }
    }

    /// Returns `true` if this is a single signed integer scalar.
    #[inline]
    pub fn is_signed(&self) -> bool {
        match self {
            Abi::Scalar(scal) => match scal.value {
                Primitive::Int(_, signed) => signed,
                _ => false,
            },
            _ => panic!("`is_signed` on non-scalar ABI {:?}", self),
        }
    }

    /// Returns `true` if this is an uninhabited type.
    #[inline]
    pub fn is_uninhabited(&self) -> bool {
        matches!(*self, Abi::Uninhabited)
    }

    /// Returns `true` if this is a scalar type.
    #[inline]
    pub fn is_scalar(&self) -> bool {
        matches!(*self, Abi::Scalar(_))
    }
}

rustc_index::newtype_index! {
    pub struct VariantIdx {
        derive [HashStable_Generic]
    }
}

#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum Variants {
    /// Single enum variants, structs/tuples, unions, and all non-ADTs.
    Single { index: VariantIdx },

    /// Enum-likes with more than one inhabited variant: each variant comes with
    /// a *discriminant* (usually the same as the variant index but the user can
    /// assign explicit discriminant values). That discriminant is encoded
    /// as a *tag* on the machine. The layout of each variant is
    /// a struct, and they all have space reserved for the tag.
    /// For enums, the tag is the sole field of the layout.
    Multiple {
        tag: Scalar,
        tag_encoding: TagEncoding,
        tag_field: usize,
        variants: IndexVec<VariantIdx, Layout>,
    },
}

#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub enum TagEncoding {
    /// The tag directly stores the discriminant, but possibly with a smaller layout
    /// (so converting the tag to the discriminant can require sign extension).
    Direct,

    /// Niche (values invalid for a type) encoding the discriminant:
    /// Discriminant and variant index coincide.
    /// The variant `dataful_variant` contains a niche at an arbitrary
    /// offset (field `tag_field` of the enum), which for a variant with
    /// discriminant `d` is set to
    /// `(d - niche_variants.start).wrapping_add(niche_start)`.
    ///
    /// For example, `Option<(usize, &T)>` is represented such that
    /// `None` has a null pointer for the second tuple field, and
    /// `Some` is the identity function (with a non-null reference).
    Niche {
        dataful_variant: VariantIdx,
        niche_variants: RangeInclusive<VariantIdx>,
        niche_start: u128,
    },
}

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct Niche {
    pub offset: Size,
    pub scalar: Scalar,
}

impl Niche {
    pub fn from_scalar<C: HasDataLayout>(cx: &C, offset: Size, scalar: Scalar) -> Option<Self> {
        let niche = Niche { offset, scalar };
        if niche.available(cx) > 0 { Some(niche) } else { None }
    }

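    /// Returns the number of values outside `valid_range`, i.e. how much
    /// room the niche has. For a `bool`-like scalar (`Int(I8, false)` with
    /// valid range `0..=1`), 254 values are free. An illustrative sketch,
    /// assuming `bool_scalar` and a layout context `cx` are in scope:
    ///
    /// ```ignore (illustrative)
    /// let niche = Niche { offset: Size::ZERO, scalar: bool_scalar };
    /// assert_eq!(niche.available(&cx), 254); // 2..=255 are unused
    /// ```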
    pub fn available<C: HasDataLayout>(&self, cx: &C) -> u128 {
        let Scalar { value, valid_range: v } = self.scalar;
        let size = value.size(cx);
        assert!(size.bits() <= 128);
        let max_value = size.unsigned_int_max();

        // Find out how many values are outside the valid range.
        let niche = v.end.wrapping_add(1)..v.start;
        niche.end.wrapping_sub(niche.start) & max_value
    }

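    /// Reserves `count` consecutive values from the unused part of the range,
    /// returning the first reserved value together with the widened scalar.
    /// An illustrative sketch, assuming `niche` wraps a `bool`-like scalar
    /// (valid range `0..=1` in an 8-bit integer) and `cx` is in scope:
    ///
    /// ```ignore (illustrative)
    /// // One reserved value extends the valid range to 0..=2, so e.g.
    /// // `Option<bool>` can encode `None` as the value 2.
    /// let (start, widened) = niche.reserve(&cx, 1).unwrap();
    /// assert_eq!(start, 2);
    /// ```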
    pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
        assert!(count > 0);

        let Scalar { value, valid_range: v } = self.scalar;
        let size = value.size(cx);
        assert!(size.bits() <= 128);
        let max_value = size.unsigned_int_max();

        let niche = v.end.wrapping_add(1)..v.start;
        let available = niche.end.wrapping_sub(niche.start) & max_value;
        if count > available {
            return None;
        }

        // Extend the range of valid values being reserved by moving either the `v.start` or
        // `v.end` bound. Given an eventual `Option<T>`, we try to maximize the chance for
        // `None` to occupy the niche of zero. This is accomplished by preferring enums with
        // 2 variants (`count == 1`) and always taking the shortest path to niche zero.
        // Having `None` in niche zero can enable some special optimizations.
        //
        // Bound selection criteria:
        // 1. Select the bound closest to zero given wrapping semantics.
        // 2. Avoid moving past zero if possible.
        //
        // In practice this means that enums with `count > 1` are unlikely to claim niche zero,
        // since they have to fit perfectly. If niche zero is already reserved, the selection
        // of bounds is of little interest.
        let move_start = |v: WrappingRange| {
            let start = v.start.wrapping_sub(count) & max_value;
            Some((start, Scalar { value, valid_range: v.with_start(start) }))
        };
        let move_end = |v: WrappingRange| {
            let start = v.end.wrapping_add(1) & max_value;
            let end = v.end.wrapping_add(count) & max_value;
            Some((start, Scalar { value, valid_range: v.with_end(end) }))
        };
        let distance_end_zero = max_value - v.end;
        if v.start > v.end {
            // Zero is unavailable because wrapping occurs.
            move_end(v)
        } else if v.start <= distance_end_zero {
            if count <= v.start {
                move_start(v)
            } else {
                // Moved past zero, use the other bound.
                move_end(v)
            }
        } else {
            let end = v.end.wrapping_add(count) & max_value;
            let overshot_zero = (1..=v.end).contains(&end);
            if overshot_zero {
                // Moved past zero, use the other bound.
                move_start(v)
            } else {
                move_end(v)
            }
        }
    }
}

#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
pub struct Layout {
    /// Says where the fields are located within the layout.
    pub fields: FieldsShape,

    /// Encodes information about multi-variant layouts.
    /// Even with `Multiple` variants, a layout still has its own fields! Those are then
    /// shared between all variants. One of them will be the discriminant,
    /// but e.g. generators can have more.
    ///
    /// To access all fields of this layout, both `fields` and the fields of the active variant
    /// must be taken into account.
    pub variants: Variants,

    /// The `abi` defines how this data is passed between functions, and it defines
    /// value restrictions via `valid_range`.
    ///
    /// Note that this is entirely orthogonal to the recursive structure defined by
    /// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has
    /// `Abi::ScalarPair`! So, even with non-`Aggregate` `abi`, `fields` and `variants`
    /// have to be taken into account to find all fields of this layout.
    pub abi: Abi,

    /// The leaf scalar with the largest number of invalid values
    /// (i.e. outside of its `valid_range`), if it exists.
    pub largest_niche: Option<Niche>,

    pub align: AbiAndPrefAlign,
    pub size: Size,
}

impl Layout {
    pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
        let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar);
        let size = scalar.value.size(cx);
        let align = scalar.value.align(cx);
        Layout {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Primitive,
            abi: Abi::Scalar(scalar),
            largest_niche,
            size,
            align,
        }
    }
}

/// The layout of a type, alongside the type itself.
/// Provides various type traversal APIs (e.g., recursing into fields).
///
/// Note that the layout is NOT guaranteed to always be identical
/// to that obtained from `layout_of(ty)`, as we need to produce
/// layouts for which Rust types do not exist, such as enum variants
/// or synthetic fields of enums (i.e., discriminants) and fat pointers.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, HashStable_Generic)]
pub struct TyAndLayout<'a, Ty> {
    pub ty: Ty,
    pub layout: &'a Layout,
}

impl<'a, Ty> Deref for TyAndLayout<'a, Ty> {
    type Target = &'a Layout;
    fn deref(&self) -> &&'a Layout {
        &self.layout
    }
}

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum PointerKind {
    /// Most general case, we know no restrictions to tell LLVM.
    Shared,

    /// `&T` where `T` contains no `UnsafeCell`, is `noalias` and `readonly`.
    Frozen,

    /// `&mut T` which is `noalias` but not `readonly`.
    UniqueBorrowed,

    /// `Box<T>`, unlike `UniqueBorrowed`, it also has `noalias` on returns.
    UniqueOwned,
}

#[derive(Copy, Clone, Debug)]
pub struct PointeeInfo {
    pub size: Size,
    pub align: Align,
    pub safe: Option<PointerKind>,
    pub address_space: AddressSpace,
}

/// Trait that needs to be implemented by the higher-level type representation
/// (e.g. `rustc_middle::ty::Ty`), to provide `rustc_target::abi` functionality.
pub trait TyAbiInterface<'a, C>: Sized {
    fn ty_and_layout_for_variant(
        this: TyAndLayout<'a, Self>,
        cx: &C,
        variant_index: VariantIdx,
    ) -> TyAndLayout<'a, Self>;
    fn ty_and_layout_field(this: TyAndLayout<'a, Self>, cx: &C, i: usize) -> TyAndLayout<'a, Self>;
    fn ty_and_layout_pointee_info_at(
        this: TyAndLayout<'a, Self>,
        cx: &C,
        offset: Size,
    ) -> Option<PointeeInfo>;
}

impl<'a, Ty> TyAndLayout<'a, Ty> {
    pub fn for_variant<C>(self, cx: &C, variant_index: VariantIdx) -> Self
    where
        Ty: TyAbiInterface<'a, C>,
    {
        Ty::ty_and_layout_for_variant(self, cx, variant_index)
    }

    pub fn field<C>(self, cx: &C, i: usize) -> Self
    where
        Ty: TyAbiInterface<'a, C>,
    {
        Ty::ty_and_layout_field(self, cx, i)
    }

    pub fn pointee_info_at<C>(self, cx: &C, offset: Size) -> Option<PointeeInfo>
    where
        Ty: TyAbiInterface<'a, C>,
    {
        Ty::ty_and_layout_pointee_info_at(self, cx, offset)
    }
}

impl<'a, Ty> TyAndLayout<'a, Ty> {
    /// Returns `true` if the layout corresponds to an unsized type.
    pub fn is_unsized(&self) -> bool {
        self.abi.is_unsized()
    }

    /// Returns `true` if the type is a ZST and not unsized.
    pub fn is_zst(&self) -> bool {
        match self.abi {
            Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
            Abi::Uninhabited => self.size.bytes() == 0,
            Abi::Aggregate { sized } => sized && self.size.bytes() == 0,
        }
    }

    /// Determines if this type permits "raw" initialization by just transmuting some
    /// memory into an instance of `T`.
    /// `zero` indicates if the memory is zero-initialized, or alternatively
    /// left entirely uninitialized.
    /// This is conservative: in doubt, it will answer `true`.
    ///
    /// FIXME: Once we removed all the conservatism, we could alternatively
    /// create an all-0/all-undef constant and run the const value validator to see if
    /// this is a valid value for the given type.
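    ///
    /// For instance (a sketch of the intent): `u32` permits both zero and
    /// fully-uninitialized memory; `bool` permits zero-initialization but not
    /// uninitialized memory (its valid range `0..=1` does not cover all of
    /// `u8`); and a `NonZeroU32`-like scalar rejects even zero-initialization.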
    pub fn might_permit_raw_init<C>(self, cx: &C, zero: bool) -> bool
    where
        Self: Copy,
        Ty: TyAbiInterface<'a, C>,
        C: HasDataLayout,
    {
        let scalar_allows_raw_init = move |s: Scalar| -> bool {
            if zero {
                // The range must contain 0.
                s.valid_range.contains(0)
            } else {
                // The range must include all values.
                s.is_always_valid(cx)
            }
        };

        // Check the ABI.
        let valid = match self.abi {
            Abi::Uninhabited => false, // definitely UB
            Abi::Scalar(s) => scalar_allows_raw_init(s),
            Abi::ScalarPair(s1, s2) => scalar_allows_raw_init(s1) && scalar_allows_raw_init(s2),
            Abi::Vector { element: s, count } => count == 0 || scalar_allows_raw_init(s),
            Abi::Aggregate { .. } => true, // Fields are checked below.
        };
        if !valid {
            // This is definitely not okay.
            return false;
        }

        // If we have not found an error yet, we need to recursively descend into fields.
        match &self.fields {
            FieldsShape::Primitive | FieldsShape::Union { .. } => {}
            FieldsShape::Array { .. } => {
                // FIXME(#66151): For now, we are conservative and do not check arrays.
            }
            FieldsShape::Arbitrary { offsets, .. } => {
                for idx in 0..offsets.len() {
                    if !self.field(cx, idx).might_permit_raw_init(cx, zero) {
                        // We found a field that is unhappy with this kind of initialization.
                        return false;
                    }
                }
            }
        }

        // FIXME(#66151): For now, we are conservative and do not check `self.variants`.
        true
    }
}