diff --git a/crates/hir-def/src/adt.rs b/crates/hir-def/src/adt.rs
index 938db032fbc8..62efc4098684 100644
--- a/crates/hir-def/src/adt.rs
+++ b/crates/hir-def/src/adt.rs
@@ -1,6 +1,6 @@
 //! Defines hir-level representation of structs, enums and unions
 
-use std::{num::NonZeroU32, sync::Arc};
+use std::sync::Arc;
 
 use base_db::CrateId;
 use either::Either;
@@ -18,6 +18,7 @@ use crate::{
     db::DefDatabase,
     intern::Interned,
     item_tree::{AttrOwner, Field, FieldAstId, Fields, ItemTree, ModItem, RawVisibilityId},
+    layout::{Align, ReprFlags, ReprOptions},
     nameres::diagnostics::DefDiagnostic,
     src::HasChildSource,
     src::HasSource,
@@ -34,7 +35,7 @@ use cfg::CfgOptions;
 pub struct StructData {
     pub name: Name,
     pub variant_data: Arc<VariantData>,
-    pub repr: Option<ReprData>,
+    pub repr: Option<ReprOptions>,
     pub visibility: RawVisibility,
 }
 
@@ -42,7 +43,7 @@ pub struct StructData {
 pub struct EnumData {
     pub name: Name,
     pub variants: Arena<EnumVariantData>,
-    pub repr: Option<ReprData>,
+    pub repr: Option<ReprOptions>,
     pub visibility: RawVisibility,
 }
 
@@ -67,80 +68,74 @@ pub struct FieldData {
     pub visibility: RawVisibility,
 }
 
-#[derive(Copy, Debug, Clone, PartialEq, Eq)]
-pub enum ReprKind {
-    C,
-    BuiltinInt { builtin: Either<BuiltinInt, BuiltinUint>, is_c: bool },
-    Transparent,
-    Default,
-}
-
-#[derive(Copy, Debug, Clone, PartialEq, Eq)]
-pub struct ReprData {
-    pub kind: ReprKind,
-    pub packed: bool,
-    pub align: Option<NonZeroU32>,
-}
-
 fn repr_from_value(
     db: &dyn DefDatabase,
     krate: CrateId,
     item_tree: &ItemTree,
     of: AttrOwner,
-) -> Option<ReprData> {
+) -> Option<ReprOptions> {
     item_tree.attrs(db, krate, of).by_key("repr").tt_values().find_map(parse_repr_tt)
 }
 
-fn parse_repr_tt(tt: &Subtree) -> Option<ReprData> {
+fn parse_repr_tt(tt: &Subtree) -> Option<ReprOptions> {
     match tt.delimiter {
         Some(Delimiter { kind: DelimiterKind::Parenthesis, .. }) => {}
         _ => return None,
     }
 
-    let mut data = ReprData { kind: ReprKind::Default, packed: false, align: None };
+    let mut flags = ReprFlags::empty();
+    let mut int = None;
+    let mut max_align: Option<Align> = None;
+    let mut min_pack: Option<Align> = None;
 
     let mut tts = tt.token_trees.iter().peekable();
     while let Some(tt) = tts.next() {
         if let TokenTree::Leaf(Leaf::Ident(ident)) = tt {
-            match &*ident.text {
+            flags.insert(match &*ident.text {
                 "packed" => {
-                    data.packed = true;
-                    if let Some(TokenTree::Subtree(_)) = tts.peek() {
+                    let pack = if let Some(TokenTree::Subtree(tt)) = tts.peek() {
                         tts.next();
-                    }
+                        if let Some(TokenTree::Leaf(Leaf::Literal(lit))) = tt.token_trees.first() {
+                            lit.text.parse().unwrap_or_default()
+                        } else {
+                            0
+                        }
+                    } else {
+                        0
+                    };
+                    let pack = Align::from_bytes(pack).unwrap();
+                    min_pack =
+                        Some(if let Some(min_pack) = min_pack { min_pack.min(pack) } else { pack });
+                    ReprFlags::empty()
                 }
                 "align" => {
                     if let Some(TokenTree::Subtree(tt)) = tts.peek() {
                         tts.next();
                         if let Some(TokenTree::Leaf(Leaf::Literal(lit))) = tt.token_trees.first() {
                             if let Ok(align) = lit.text.parse() {
-                                data.align = Some(align);
+                                let align = Align::from_bytes(align).ok();
+                                max_align = max_align.max(align);
                             }
                         }
                     }
+                    ReprFlags::empty()
                 }
-                "C" => {
-                    if let ReprKind::BuiltinInt { is_c, .. } = &mut data.kind {
-                        *is_c = true;
-                    } else {
-                        data.kind = ReprKind::C;
-                    }
-                }
-                "transparent" => data.kind = ReprKind::Transparent,
+                "C" => ReprFlags::IS_C,
+                "transparent" => ReprFlags::IS_TRANSPARENT,
                 repr => {
-                    let is_c = matches!(data.kind, ReprKind::C);
                     if let Some(builtin) = BuiltinInt::from_suffix(repr)
                         .map(Either::Left)
                         .or_else(|| BuiltinUint::from_suffix(repr).map(Either::Right))
                     {
-                        data.kind = ReprKind::BuiltinInt { builtin, is_c };
+                        int = Some(builtin);
                     }
+                    ReprFlags::empty()
                 }
-            }
+            })
         }
     }
 
-    Some(data)
+    Some(ReprOptions { int, align: max_align, pack: min_pack, flags })
 }
 
 impl StructData {
@@ -283,7 +278,7 @@ impl EnumData {
 
     pub fn variant_body_type(&self) -> Either<BuiltinInt, BuiltinUint> {
         match self.repr {
-            Some(ReprData { kind: ReprKind::BuiltinInt { builtin, .. }, .. }) => builtin,
+            Some(ReprOptions { int: Some(builtin), .. }) => builtin,
            _ => Either::Left(BuiltinInt::Isize),
         }
     }
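// ---------------------------------------------------------------------------
// [Editor's illustration — not part of the patch.] What the rewritten
// `parse_repr_tt` produces for a typical attribute, expressed with the
// `ReprOptions`/`ReprFlags`/`Align` types that the new `hir-def` `layout`
// module (below) defines. `#[repr(C, packed(2))]` parses to roughly:
fn example_repr_c_packed_2() -> hir_def::layout::ReprOptions {
    use hir_def::layout::{Align, ReprFlags, ReprOptions};
    ReprOptions {
        int: None,                                 // no `repr(u8)`-style integer hint
        align: None,                               // no `align(..)` attribute
        pack: Some(Align::from_bytes(2).unwrap()), // from `packed(2)`
        flags: ReprFlags::IS_C,                    // from `C`
    }
}
// ---------------------------------------------------------------------------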
diff --git a/crates/hir-def/src/layout.rs b/crates/hir-def/src/layout.rs
new file mode 100644
index 000000000000..cc8177376f7e
--- /dev/null
+++ b/crates/hir-def/src/layout.rs
@@ -0,0 +1,1173 @@
+//! Definitions related to binary representations of types
+
+use bitflags::bitflags;
+use either::Either;
+use std::{
+    cmp, fmt,
+    num::NonZeroUsize,
+    ops::{Add, AddAssign, Mul, Sub},
+};
+
+use crate::{
+    builtin_type::{BuiltinInt, BuiltinUint},
+    LocalEnumVariantId,
+};
+use la_arena::ArenaMap;
+
+/// Size of a type in bytes.
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct Size {
+    raw: u64,
+}
+
+// This is debug-printed a lot in larger structs, don't waste too much space there
+impl fmt::Debug for Size {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "Size({} bytes)", self.raw)
+    }
+}
+
+// Panicking addition, subtraction and multiplication for convenience.
+// Avoid during layout computation, return `LayoutError` instead.
+
+impl Add for Size {
+    type Output = Size;
+    #[inline]
+    fn add(self, other: Size) -> Size {
+        Size::from_bytes(self.bytes().checked_add(other.bytes()).unwrap_or_else(|| {
+            panic!("Size::add: {} + {} doesn't fit in u64", self.bytes(), other.bytes())
+        }))
+    }
+}
+
+impl Sub for Size {
+    type Output = Size;
+    #[inline]
+    fn sub(self, other: Size) -> Size {
+        Size::from_bytes(self.bytes().checked_sub(other.bytes()).unwrap_or_else(|| {
+            panic!("Size::sub: {} - {} would result in negative size", self.bytes(), other.bytes())
+        }))
+    }
+}
+
+impl Mul<Size> for u64 {
+    type Output = Size;
+    #[inline]
+    fn mul(self, size: Size) -> Size {
+        size * self
+    }
+}
+
+impl Mul<u64> for Size {
+    type Output = Size;
+    #[inline]
+    fn mul(self, count: u64) -> Size {
+        match self.bytes().checked_mul(count) {
+            Some(bytes) => Size::from_bytes(bytes),
+            None => panic!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count),
+        }
+    }
+}
+
+impl AddAssign for Size {
+    #[inline]
+    fn add_assign(&mut self, other: Size) {
+        *self = *self + other;
+    }
+}
+
+impl Size {
+    pub const ZERO: Size = Size { raw: 0 };
+
+    /// Rounds `bits` up to the next-higher byte boundary, if `bits` is
+    /// not a multiple of 8.
+    pub fn from_bits(bits: impl TryInto<u64>) -> Size {
+        let bits = bits.try_into().ok().unwrap();
+        // Avoid potential overflow from `bits + 7`.
+        Size { raw: bits / 8 + ((bits % 8) + 7) / 8 }
+    }
+
+    #[inline]
+    pub fn from_bytes(bytes: impl TryInto<u64>) -> Size {
+        let bytes: u64 = bytes.try_into().ok().unwrap();
+        Size { raw: bytes }
+    }
+
+    #[inline]
+    pub fn bytes(self) -> u64 {
+        self.raw
+    }
+
+    #[inline]
+    pub fn bytes_usize(self) -> usize {
+        self.bytes().try_into().unwrap()
+    }
+
+    #[inline]
+    pub fn bits(self) -> u64 {
+        #[cold]
+        fn overflow(bytes: u64) -> ! {
+            panic!("Size::bits: {} bytes in bits doesn't fit in u64", bytes)
+        }
+
+        self.bytes().checked_mul(8).unwrap_or_else(|| overflow(self.bytes()))
+    }
+
+    #[inline]
+    pub fn bits_usize(self) -> usize {
+        self.bits().try_into().unwrap()
+    }
+
+    #[inline]
+    pub fn checked_add(self, offset: Size, dl: &TargetDataLayout) -> Option<Size> {
+        let bytes = self.bytes().checked_add(offset.bytes())?;
+
+        if bytes < dl.obj_size_bound() {
+            Some(Size::from_bytes(bytes))
+        } else {
+            None
+        }
+    }
+
+    #[inline]
+    pub fn checked_mul(self, count: u64, dl: &TargetDataLayout) -> Option<Size> {
+        let bytes = self.bytes().checked_mul(count)?;
+        if bytes < dl.obj_size_bound() {
+            Some(Size::from_bytes(bytes))
+        } else {
+            None
+        }
+    }
+
+    #[inline]
+    pub fn align_to(self, align: Align) -> Size {
+        let mask = align.bytes() - 1;
+        Size::from_bytes((self.bytes() + mask) & !mask)
+    }
+
+    #[inline]
+    pub fn is_aligned(self, align: Align) -> bool {
+        let mask = align.bytes() - 1;
+        self.bytes() & mask == 0
+    }
+
+    /// Truncates `value` to `self` bits and then sign-extends it to 128 bits
+    /// (i.e., if it is negative, fill with 1's on the left).
+    #[inline]
+    pub fn sign_extend(self, value: u128) -> u128 {
+        let size = self.bits();
+        if size == 0 {
+            // Truncated until nothing is left.
+            return 0;
+        }
+        // Sign-extend it.
+        let shift = 128 - size;
+        // Shift the unsigned value to the left, then shift back to the right as signed
+        // (essentially fills with sign bit on the left).
+        (((value << shift) as i128) >> shift) as u128
+    }
+
+    /// Truncates `value` to `self` bits.
+    #[inline]
+    pub fn truncate(self, value: u128) -> u128 {
+        let size = self.bits();
+        if size == 0 {
+            // Truncated until nothing is left.
+            return 0;
+        }
+        let shift = 128 - size;
+        // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
+        (value << shift) >> shift
+    }
+
+    #[inline]
+    pub fn signed_int_min(&self) -> i128 {
+        self.sign_extend(1_u128 << (self.bits() - 1)) as i128
+    }
+
+    #[inline]
+    pub fn signed_int_max(&self) -> i128 {
+        i128::MAX >> (128 - self.bits())
+    }
+
+    #[inline]
+    pub fn unsigned_int_max(&self) -> u128 {
+        u128::MAX >> (128 - self.bits())
+    }
+}
+
+#[derive(Copy, Clone, Debug)]
+pub enum StructKind {
+    /// A tuple, closure, or univariant which cannot be coerced to unsized.
+    AlwaysSized,
+    /// A univariant, the last field of which may be coerced to unsized.
+    MaybeUnsized,
+    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
+    Prefixed(Size, Align),
+}
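// ---------------------------------------------------------------------------
// [Editor's illustration — not part of the patch.] A few data points for the
// `Size` helpers above, written as a hypothetical test against this crate:
#[test]
fn size_arithmetic_examples() {
    use hir_def::layout::{Align, Size};
    assert_eq!(Size::from_bits(12u64).bytes(), 2); // 12 bits round up to 2 bytes
    let four = Align::from_bytes(4).unwrap();
    assert_eq!(Size::from_bytes(5u64).align_to(four).bytes(), 8); // next 4-byte boundary
    assert_eq!(Size::from_bytes(1u64).sign_extend(0xFF), u128::MAX); // -1i8, sign-extended
    assert_eq!(Size::from_bytes(1u64).truncate(0x1FF), 0xFF); // keep the low 8 bits
}
// ---------------------------------------------------------------------------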
+/// Describes how the fields of a type are located in memory.
+#[derive(PartialEq, Eq, Hash, Debug, Clone)]
+pub enum FieldsShape {
+    /// Scalar primitives and `!`, which never have fields.
+    Primitive,
+
+    /// All fields start at no offset. The `usize` is the field count.
+    Union(NonZeroUsize),
+
+    /// Array/vector-like placement, with all fields of identical types.
+    Array { stride: Size, count: u64 },
+
+    /// Struct-like placement, with precomputed offsets.
+    ///
+    /// Fields are guaranteed to not overlap, but note that gaps
+    /// before, between and after all the fields are NOT always
+    /// padding, and as such their contents may not be discarded.
+    /// For example, enum variants leave a gap at the start,
+    /// where the discriminant field in the enum layout goes.
+    Arbitrary {
+        /// Offsets for the first byte of each field,
+        /// ordered to match the source definition order.
+        /// This vector does not go in increasing order.
+        // FIXME(eddyb) use small vector optimization for the common case.
+        offsets: Vec<Size>,
+
+        /// Maps source order field indices to memory order indices,
+        /// depending on how the fields were reordered (if at all).
+        /// This is a permutation, with both the source order and the
+        /// memory order using the same (0..n) index ranges.
+        ///
+        /// Note that during computation of `memory_index`, sometimes
+        /// it is easier to operate on the inverse mapping (that is,
+        /// from memory order to source order), and that is usually
+        /// named `inverse_memory_index`.
+        ///
+        // FIXME(eddyb) build a better abstraction for permutations, if possible.
+        // FIXME(camlorn) also consider small vector optimization here.
+        memory_index: Vec<u32>,
+    },
+}
+
+impl FieldsShape {
+    #[inline]
+    pub fn count(&self) -> usize {
+        match *self {
+            FieldsShape::Primitive => 0,
+            FieldsShape::Union(count) => count.get(),
+            FieldsShape::Array { count, .. } => count.try_into().unwrap(),
+            FieldsShape::Arbitrary { ref offsets, .. } => offsets.len(),
+        }
+    }
+
+    #[inline]
+    pub fn offset(&self, i: usize, dl: &TargetDataLayout) -> Size {
+        match *self {
+            FieldsShape::Primitive => {
+                unreachable!("FieldsShape::offset: `Primitive`s have no fields")
+            }
+            FieldsShape::Union(count) => {
+                assert!(
+                    i < count.get(),
+                    "tried to access field {} of union with {} fields",
+                    i,
+                    count
+                );
+                Size::ZERO
+            }
+            FieldsShape::Array { stride, count } => {
+                let i = u64::try_from(i).unwrap();
+                assert!(i < count);
+                stride.checked_mul(i, dl).unwrap()
+            }
+            FieldsShape::Arbitrary { ref offsets, .. } => offsets[i],
+        }
+    }
+
+    #[inline]
+    pub fn memory_index(&self, i: usize) -> usize {
+        match *self {
+            FieldsShape::Primitive => {
+                unreachable!("FieldsShape::memory_index: `Primitive`s have no fields")
+            }
+            FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
+            FieldsShape::Arbitrary { ref memory_index, .. } => memory_index[i].try_into().unwrap(),
+        }
+    }
+
+    /// Gets source indices of the fields by increasing offsets.
+    #[inline]
+    pub fn index_by_increasing_offset<'a>(&'a self) -> impl Iterator<Item = usize> + 'a {
+        let mut inverse_small = [0u8; 64];
+        let mut inverse_big = vec![];
+        let use_small = self.count() <= inverse_small.len();
+
+        // We have to write this logic twice in order to keep the array small.
+        if let FieldsShape::Arbitrary { ref memory_index, .. } = *self {
+            if use_small {
+                for i in 0..self.count() {
+                    inverse_small[memory_index[i] as usize] = i as u8;
+                }
+            } else {
+                inverse_big = vec![0; self.count()];
+                for i in 0..self.count() {
+                    inverse_big[memory_index[i] as usize] = i as u32;
+                }
+            }
+        }
+
+        (0..self.count()).map(move |i| match *self {
+            FieldsShape::Primitive | FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
+            FieldsShape::Arbitrary { .. } => {
+                if use_small {
+                    inverse_small[i] as usize
+                } else {
+                    inverse_big[i] as usize
+                }
+            }
+        })
+    }
+}
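// ---------------------------------------------------------------------------
// [Editor's illustration — not part of the patch.] A hypothetical struct whose
// three fields were reordered in memory as (field 1, field 2, field 0):
// `memory_index` maps source index -> memory position, and
// `index_by_increasing_offset()` walks the inverse permutation.
#[test]
fn fields_shape_permutation_example() {
    use hir_def::layout::{FieldsShape, Size};
    let shape = FieldsShape::Arbitrary {
        // source-order offsets: field 0 at byte 8, field 1 at 0, field 2 at 4
        offsets: vec![Size::from_bytes(8u64), Size::ZERO, Size::from_bytes(4u64)],
        memory_index: vec![2, 0, 1],
    };
    let by_offset: Vec<usize> = shape.index_by_increasing_offset().collect();
    assert_eq!(by_offset, vec![1, 2, 0]); // source indices, by increasing offset
}
// ---------------------------------------------------------------------------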
+/// Integers, also used for enum discriminants.
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
+pub enum Integer {
+    I8,
+    I16,
+    I32,
+    I64,
+    I128,
+}
+
+impl Integer {
+    #[inline]
+    pub fn size(self) -> Size {
+        match self {
+            Integer::I8 => Size::from_bytes(1),
+            Integer::I16 => Size::from_bytes(2),
+            Integer::I32 => Size::from_bytes(4),
+            Integer::I64 => Size::from_bytes(8),
+            Integer::I128 => Size::from_bytes(16),
+        }
+    }
+
+    pub fn align(self, dl: &TargetDataLayout) -> AbiAndPrefAlign {
+        match self {
+            Integer::I8 => dl.i8_align,
+            Integer::I16 => dl.i16_align,
+            Integer::I32 => dl.i32_align,
+            Integer::I64 => dl.i64_align,
+            Integer::I128 => dl.i128_align,
+        }
+    }
+
+    /// Finds the smallest integer with the given alignment.
+    pub fn for_align(dl: &TargetDataLayout, wanted: Align) -> Option<Integer> {
+        use Integer::*;
+        for candidate in [I8, I16, I32, I64, I128] {
+            if wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes() {
+                return Some(candidate);
+            }
+        }
+        None
+    }
+
+    /// Finds the smallest Integer type which can represent the signed value.
+    #[inline]
+    pub fn fit_signed(x: i128) -> Integer {
+        match x {
+            -0x0000_0000_0000_0080..=0x0000_0000_0000_007f => Integer::I8,
+            -0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => Integer::I16,
+            -0x0000_0000_8000_0000..=0x0000_0000_7fff_ffff => Integer::I32,
+            -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => Integer::I64,
+            _ => Integer::I128,
+        }
+    }
+
+    /// Finds the smallest Integer type which can represent the unsigned value.
+    #[inline]
+    pub fn fit_unsigned(x: u128) -> Integer {
+        match x {
+            0..=0x0000_0000_0000_00ff => Integer::I8,
+            0..=0x0000_0000_0000_ffff => Integer::I16,
+            0..=0x0000_0000_ffff_ffff => Integer::I32,
+            0..=0xffff_ffff_ffff_ffff => Integer::I64,
+            _ => Integer::I128,
+        }
+    }
+
+    /// Gets the Integer type from an attr::IntType.
+    pub fn from_attr(dl: &TargetDataLayout, ity: Either<BuiltinInt, BuiltinUint>) -> Integer {
+        match ity {
+            Either::Left(BuiltinInt::I8) | Either::Right(BuiltinUint::U8) => Integer::I8,
+            Either::Left(BuiltinInt::I16) | Either::Right(BuiltinUint::U16) => Integer::I16,
+            Either::Left(BuiltinInt::I32) | Either::Right(BuiltinUint::U32) => Integer::I32,
+            Either::Left(BuiltinInt::I64) | Either::Right(BuiltinUint::U64) => Integer::I64,
+            Either::Left(BuiltinInt::I128) | Either::Right(BuiltinUint::U128) => Integer::I128,
+            Either::Left(BuiltinInt::Isize) | Either::Right(BuiltinUint::Usize) => {
+                dl.ptr_sized_integer()
+            }
+        }
+    }
+
+    /// Finds the appropriate Integer type and signedness for the given
+    /// signed discriminant range and `#[repr]` attribute.
+    /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
+    /// that shouldn't affect anything, other than maybe debuginfo.
+    pub fn repr_discr(
+        dl: &TargetDataLayout,
+        repr: &ReprOptions,
+        min: i128,
+        max: i128,
+    ) -> Result<(Integer, bool), LayoutError> {
+        // Theoretically, negative values could be larger in unsigned representation
+        // than the unsigned representation of the signed minimum. However, if there
+        // are any negative values, the only valid unsigned representation is u128
+        // which can fit all i128 values, so the result remains unaffected.
+        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
+        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
+
+        if let Some(ity) = repr.int {
+            let discr = Integer::from_attr(dl, ity);
+            let fit = if ity.is_left() { signed_fit } else { unsigned_fit };
+            if discr < fit {
+                return Err(LayoutError::UserError(
+                    "Integer::repr_discr: `#[repr]` hint too small for \
+                      discriminant range of enum "
+                        .to_string(),
+                ));
+            }
+            return Ok((discr, ity.is_left()));
+        }
+
+        let at_least = if repr.c() {
+            // This is usually I32, however it can be different on some platforms,
+            // notably hexagon and arm-none/thumb-none
+            dl.c_enum_min_size
+        } else {
+            // repr(Rust) enums try to be as small as possible
+            Integer::I8
+        };
+
+        // If there are no negative values, we can use the unsigned fit.
+        Ok(if min >= 0 {
+            (cmp::max(unsigned_fit, at_least), false)
+        } else {
+            (cmp::max(signed_fit, at_least), true)
+        })
+    }
+}
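// ---------------------------------------------------------------------------
// [Editor's illustration — not part of the patch.] `fit_signed`/`fit_unsigned`
// pick the narrowest tag candidate; `repr_discr` then widens it to the
// `#[repr]`-mandated minimum. A few concrete data points:
#[test]
fn integer_fit_examples() {
    use hir_def::layout::Integer;
    assert_eq!(Integer::fit_signed(-1), Integer::I8); // -1..=0 fits in i8
    assert_eq!(Integer::fit_signed(128), Integer::I16); // 128 overflows i8
    assert_eq!(Integer::fit_unsigned(255), Integer::I8);
    assert_eq!(Integer::fit_unsigned(256), Integer::I16);
}
// ---------------------------------------------------------------------------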
+/// Endianness of the target, which must match cfg(target_endian).
+#[derive(Copy, Clone, PartialEq, Eq)]
+pub enum Endian {
+    Little,
+    Big,
+}
+
+impl Endian {
+    pub fn as_str(&self) -> &'static str {
+        match self {
+            Self::Little => "little",
+            Self::Big => "big",
+        }
+    }
+}
+
+impl fmt::Debug for Endian {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.write_str(self.as_str())
+    }
+}
+
+/// An identifier that specifies the address space that some operation
+/// should operate on. Special address spaces have an effect on code generation,
+/// depending on the target and the address spaces it implements.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
+pub struct AddressSpace(pub u32);
+
+/// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
+/// for a target, which contains everything needed to compute layouts.
+#[derive(Debug, PartialEq, Eq)]
+pub struct TargetDataLayout {
+    pub endian: Endian,
+    pub i1_align: AbiAndPrefAlign,
+    pub i8_align: AbiAndPrefAlign,
+    pub i16_align: AbiAndPrefAlign,
+    pub i32_align: AbiAndPrefAlign,
+    pub i64_align: AbiAndPrefAlign,
+    pub i128_align: AbiAndPrefAlign,
+    pub f32_align: AbiAndPrefAlign,
+    pub f64_align: AbiAndPrefAlign,
+    pub pointer_size: Size,
+    pub pointer_align: AbiAndPrefAlign,
+    pub aggregate_align: AbiAndPrefAlign,
+
+    /// Alignments for vector types.
+    pub vector_align: Vec<(Size, AbiAndPrefAlign)>,
+
+    pub instruction_address_space: AddressSpace,
+
+    /// Minimum size of #[repr(C)] enums (default I32)
+    pub c_enum_min_size: Integer,
+}
+
+impl TargetDataLayout {
+    /// Returns exclusive upper bound on object size.
+    ///
+    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
+    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
+    /// index every address within an object along with one byte past the end, along with allowing
+    /// `isize` to store the difference between any two pointers into an object.
+    ///
+    /// The upper bound on 64-bit currently needs to be lower because LLVM uses a 64-bit integer
+    /// to represent object size in bits. It would need to be 1 << 61 to account for this, but is
+    /// currently conservatively bounded to 1 << 47 as that is enough to cover the current usable
+    /// address space on 64-bit ARMv8 and x86_64.
+    #[inline]
+    pub fn obj_size_bound(&self) -> u64 {
+        match self.pointer_size.bits() {
+            16 => 1 << 15,
+            32 => 1 << 31,
+            64 => 1 << 47,
+            bits => panic!("obj_size_bound: unknown pointer bit size {}", bits),
+        }
+    }
+
+    #[inline]
+    pub fn ptr_sized_integer(&self) -> Integer {
+        match self.pointer_size.bits() {
+            16 => Integer::I16,
+            32 => Integer::I32,
+            64 => Integer::I64,
+            bits => panic!("ptr_sized_integer: unknown pointer bit size {}", bits),
+        }
+    }
+}
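// ---------------------------------------------------------------------------
// [Editor's illustration — not part of the patch.] Assuming `dl` is the
// `TargetDataLayout` of a typical 64-bit target (8-byte pointers), the
// helpers above give:
//
//     dl.ptr_sized_integer() == Integer::I64
//     dl.obj_size_bound()    == 1 << 47   // conservative bound, see doc above
// ---------------------------------------------------------------------------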
+/// Fundamental unit of memory access and layout.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+pub enum Primitive {
+    /// The `bool` is the signedness of the `Integer` type.
+    ///
+    /// One would think we would not care about such details this low down,
+    /// but some ABIs are described in terms of C types and ISAs where the
+    /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
+    /// a negative integer passed by zero-extension will appear positive in
+    /// the callee, and most operations on it will produce the wrong values.
+    Int(Integer, bool),
+    F32,
+    F64,
+    Pointer,
+}
+
+impl Primitive {
+    pub fn size(self, dl: &TargetDataLayout) -> Size {
+        match self {
+            Primitive::Int(i, _) => i.size(),
+            Primitive::F32 => Size::from_bits(32),
+            Primitive::F64 => Size::from_bits(64),
+            Primitive::Pointer => dl.pointer_size,
+        }
+    }
+
+    pub fn align(self, dl: &TargetDataLayout) -> AbiAndPrefAlign {
+        match self {
+            Primitive::Int(i, _) => i.align(dl),
+            Primitive::F32 => dl.f32_align,
+            Primitive::F64 => dl.f64_align,
+            Primitive::Pointer => dl.pointer_align,
+        }
+    }
+}
+
+/// Inclusive wrap-around range of valid values, that is, if
+/// start > end, it represents `start..=MAX`, followed by `0..=end`.
+///
+/// That is, for an i8 primitive, a range of `254..=2` means the following
+/// sequence:
+///
+///    254 (-2), 255 (-1), 0, 1, 2
+///
+/// This is intended specifically to mirror LLVM's `!range` metadata semantics.
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+pub struct WrappingRange {
+    pub start: u128,
+    pub end: u128,
+}
+
+impl WrappingRange {
+    pub fn full(size: Size) -> Self {
+        Self { start: 0, end: size.unsigned_int_max() }
+    }
+
+    /// Returns `true` if `v` is contained in the range.
+    #[inline(always)]
+    pub fn contains(&self, v: u128) -> bool {
+        if self.start <= self.end {
+            self.start <= v && v <= self.end
+        } else {
+            self.start <= v || v <= self.end
+        }
+    }
+
+    /// Returns `self` with replaced `start`
+    #[inline(always)]
+    pub fn with_start(mut self, start: u128) -> Self {
+        self.start = start;
+        self
+    }
+
+    /// Returns `self` with replaced `end`
+    #[inline(always)]
+    pub fn with_end(mut self, end: u128) -> Self {
+        self.end = end;
+        self
+    }
+
+    /// Returns `true` if `size` completely fills the range.
+    #[inline]
+    pub fn is_full_for(&self, size: Size) -> bool {
+        let max_value = size.unsigned_int_max();
+        debug_assert!(self.start <= max_value && self.end <= max_value);
+        self.start == (self.end.wrapping_add(1) & max_value)
+    }
+}
+
+impl fmt::Debug for WrappingRange {
+    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+        if self.start > self.end {
+            write!(fmt, "(..={}) | ({}..)", self.end, self.start)?;
+        } else {
+            write!(fmt, "{}..={}", self.start, self.end)?;
+        }
+        Ok(())
+    }
+}
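// ---------------------------------------------------------------------------
// [Editor's illustration — not part of the patch.] The wrap-around semantics
// in practice, for the `254..=2` example from the doc comment above:
#[test]
fn wrapping_range_examples() {
    use hir_def::layout::WrappingRange;
    // On an i8 this covers 254 (-2), 255 (-1), 0, 1, 2.
    let r = WrappingRange { start: 254, end: 2 };
    assert!(r.contains(255));
    assert!(r.contains(0));
    assert!(!r.contains(100));
}
// ---------------------------------------------------------------------------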
+/// Information about one scalar component of a Rust type.
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
+pub enum Scalar {
+    Initialized {
+        value: Primitive,
+
+        // FIXME(eddyb) always use the shortest range, e.g., by finding
+        // the largest space between two consecutive valid values and
+        // taking everything else as the (shortest) valid range.
+        valid_range: WrappingRange,
+    },
+    Union {
+        /// Even for unions, we need to use the correct registers for the kind of
+        /// values inside the union, so we keep the `Primitive` type around. We
+        /// also use it to compute the size of the scalar.
+        /// However, unions never have niches and even allow undef,
+        /// so there is no `valid_range`.
+        value: Primitive,
+    },
+}
+
+impl Scalar {
+    #[inline]
+    pub fn is_bool(&self) -> bool {
+        matches!(
+            self,
+            Scalar::Initialized {
+                value: Primitive::Int(Integer::I8, false),
+                valid_range: WrappingRange { start: 0, end: 1 }
+            }
+        )
+    }
+
+    /// Get the primitive representation of this type, ignoring the valid range and whether the
+    /// value is allowed to be undefined (due to being a union).
+    pub fn primitive(&self) -> Primitive {
+        match *self {
+            Scalar::Initialized { value, .. } | Scalar::Union { value } => value,
+        }
+    }
+
+    pub fn align(self, cx: &TargetDataLayout) -> AbiAndPrefAlign {
+        self.primitive().align(cx)
+    }
+
+    pub fn size(self, cx: &TargetDataLayout) -> Size {
+        self.primitive().size(cx)
+    }
+
+    #[inline]
+    pub fn to_union(&self) -> Self {
+        Self::Union { value: self.primitive() }
+    }
+
+    #[inline]
+    pub fn valid_range(&self, cx: &TargetDataLayout) -> WrappingRange {
+        match *self {
+            Scalar::Initialized { valid_range, .. } => valid_range,
+            Scalar::Union { value } => WrappingRange::full(value.size(cx)),
+        }
+    }
+
+    #[inline]
+    /// Allows the caller to mutate the valid range. This operation will panic if attempted on a union.
+    pub fn valid_range_mut(&mut self) -> &mut WrappingRange {
+        match self {
+            Scalar::Initialized { valid_range, .. } => valid_range,
+            Scalar::Union { .. } => panic!("cannot change the valid range of a union"),
+        }
+    }
+
+    /// Returns `true` if all possible numbers are valid, i.e. `valid_range` covers the whole
+    /// layout.
+    #[inline]
+    pub fn is_always_valid(&self, cx: &TargetDataLayout) -> bool {
+        match *self {
+            Scalar::Initialized { valid_range, .. } => valid_range.is_full_for(self.size(cx)),
+            Scalar::Union { .. } => true,
+        }
+    }
+
+    /// Returns `true` if this type can be left uninit.
+    #[inline]
+    pub fn is_uninit_valid(&self) -> bool {
+        match *self {
+            Scalar::Initialized { .. } => false,
+            Scalar::Union { .. } => true,
+        }
+    }
+}
+
+/// Describes how values of the type are passed by target ABIs,
+/// in terms of categories of C types there are ABI rules for.
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
+pub enum Abi {
+    Uninhabited,
+    Scalar(Scalar),
+    ScalarPair(Scalar, Scalar),
+    Vector {
+        element: Scalar,
+        count: u64,
+    },
+    Aggregate {
+        /// If true, the size is exact, otherwise it's only a lower bound.
+        sized: bool,
+    },
+}
+
+impl Abi {
+    /// Returns `true` if the layout corresponds to an unsized type.
+    #[inline]
+    pub fn is_unsized(&self) -> bool {
+        match *self {
+            Abi::Uninhabited | Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
+            Abi::Aggregate { sized } => !sized,
+        }
+    }
+
+    /// Returns `true` if this is an uninhabited type
+    #[inline]
+    pub fn is_uninhabited(&self) -> bool {
+        matches!(*self, Abi::Uninhabited)
+    }
+
+    /// Returns `true` if this is a scalar type
+    #[inline]
+    pub fn is_scalar(&self) -> bool {
+        matches!(*self, Abi::Scalar(_))
+    }
+}
+
+/// Alignment of a type in bytes (always a power of two).
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct Align {
+    pow2: u8,
+}
+
+// This is debug-printed a lot in larger structs, don't waste too much space there
+impl fmt::Debug for Align {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "Align({} bytes)", self.bytes())
+    }
+}
+
+impl Align {
+    pub const ONE: Align = Align { pow2: 0 };
+    pub const MAX: Align = Align { pow2: 29 };
+
+    #[inline]
+    pub fn from_bytes(align: u64) -> Result<Align, String> {
+        // Treat an alignment of 0 bytes like 1-byte alignment.
+        if align == 0 {
+            return Ok(Align::ONE);
+        }
+
+        #[cold]
+        fn not_power_of_2(align: u64) -> String {
+            format!("`{}` is not a power of 2", align)
+        }
+
+        #[cold]
+        fn too_large(align: u64) -> String {
+            format!("`{}` is too large", align)
+        }
+
+        let mut bytes = align;
+        let mut pow2: u8 = 0;
+        while (bytes & 1) == 0 {
+            pow2 += 1;
+            bytes >>= 1;
+        }
+        if bytes != 1 {
+            return Err(not_power_of_2(align));
+        }
+        if pow2 > Self::MAX.pow2 {
+            return Err(too_large(align));
+        }
+
+        Ok(Align { pow2 })
+    }
+
+    #[inline]
+    pub fn bytes(self) -> u64 {
+        1 << self.pow2
+    }
+
+    #[inline]
+    pub fn bits(self) -> u64 {
+        self.bytes() * 8
+    }
+
+    /// Computes the best alignment possible for the given offset
+    /// (the largest power of two that the offset is a multiple of).
+    ///
+    /// N.B., for an offset of `0`, this happens to return `2^64`.
+    #[inline]
+    pub fn max_for_offset(offset: Size) -> Align {
+        Align { pow2: offset.bytes().trailing_zeros() as u8 }
+    }
+
+    /// Lower the alignment, if necessary, such that the given offset
+    /// is aligned to it (the offset is a multiple of the alignment).
+    #[inline]
+    pub fn restrict_for_offset(self, offset: Size) -> Align {
+        self.min(Align::max_for_offset(offset))
+    }
+}
+
+/// A pair of alignments, ABI-mandated and preferred.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+pub struct AbiAndPrefAlign {
+    pub abi: Align,
+    pub pref: Align,
+}
+
+impl AbiAndPrefAlign {
+    #[inline]
+    pub fn new(align: Align) -> AbiAndPrefAlign {
+        AbiAndPrefAlign { abi: align, pref: align }
+    }
+
+    #[inline]
+    pub fn min(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
+        AbiAndPrefAlign { abi: self.abi.min(other.abi), pref: self.pref.min(other.pref) }
+    }
+
+    #[inline]
+    pub fn max(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
+        AbiAndPrefAlign { abi: self.abi.max(other.abi), pref: self.pref.max(other.pref) }
+    }
+}
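// ---------------------------------------------------------------------------
// [Editor's illustration — not part of the patch.] `Align` stores alignments
// as a power-of-two exponent, so a few behaviors worth knowing:
#[test]
fn align_examples() {
    use hir_def::layout::{Align, Size};
    assert_eq!(Align::from_bytes(8).unwrap().bytes(), 8);
    assert!(Align::from_bytes(24).is_err()); // not a power of two
    // An 8-byte alignment restricted to offset 4 drops to 4-byte alignment.
    let eight = Align::from_bytes(8).unwrap();
    assert_eq!(eight.restrict_for_offset(Size::from_bytes(4u64)).bytes(), 4);
}
// ---------------------------------------------------------------------------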
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
+pub struct Niche {
+    pub offset: Size,
+    pub value: Primitive,
+    pub valid_range: WrappingRange,
+}
+
+impl Niche {
+    pub fn from_scalar(cx: &TargetDataLayout, offset: Size, scalar: Scalar) -> Option<Niche> {
+        let (value, valid_range) = match scalar {
+            Scalar::Initialized { value, valid_range } => (value, valid_range),
+            _ => return None,
+        };
+        let niche = Niche { offset, value, valid_range };
+        if niche.available(cx) > 0 {
+            Some(niche)
+        } else {
+            None
+        }
+    }
+
+    pub fn available(&self, cx: &TargetDataLayout) -> u128 {
+        let Self { value, valid_range: v, .. } = *self;
+        let size = value.size(cx);
+        assert!(size.bits() <= 128);
+        let max_value = size.unsigned_int_max();
+
+        // Find out how many values are outside the valid range.
+        let niche = v.end.wrapping_add(1)..v.start;
+        niche.end.wrapping_sub(niche.start) & max_value
+    }
+
+    pub fn reserve(&self, cx: &TargetDataLayout, count: u128) -> Option<(u128, Scalar)> {
+        assert!(count > 0);
+
+        let Self { value, valid_range: v, .. } = *self;
+        let size = value.size(cx);
+        assert!(size.bits() <= 128);
+        let max_value = size.unsigned_int_max();
+
+        let niche = v.end.wrapping_add(1)..v.start;
+        let available = niche.end.wrapping_sub(niche.start) & max_value;
+        if count > available {
+            return None;
+        }
+
+        // Extend the range of valid values being reserved by moving either `v.start` or `v.end`
+        // bound. Given an eventual `Option<T>`, we try to maximize the chance for `None` to
+        // occupy the niche of zero. This is accomplished by preferring enums with 2 variants
+        // (`count == 1`) and always taking the shortest path to niche zero. Having `None` in
+        // niche zero can enable some special optimizations.
+        //
+        // Bound selection criteria:
+        // 1. Select closest to zero given wrapping semantics.
+        // 2. Avoid moving past zero if possible.
+        //
+        // In practice this means that enums with `count > 1` are unlikely to claim niche zero,
+        // since they have to fit perfectly. If niche zero is already reserved, the selection of
+        // bounds is of little interest.
+        let move_start = |v: WrappingRange| {
+            let start = v.start.wrapping_sub(count) & max_value;
+            Some((start, Scalar::Initialized { value, valid_range: v.with_start(start) }))
+        };
+        let move_end = |v: WrappingRange| {
+            let start = v.end.wrapping_add(1) & max_value;
+            let end = v.end.wrapping_add(count) & max_value;
+            Some((start, Scalar::Initialized { value, valid_range: v.with_end(end) }))
+        };
+        let distance_end_zero = max_value - v.end;
+        if v.start > v.end {
+            // zero is unavailable because wrapping occurs
+            move_end(v)
+        } else if v.start <= distance_end_zero {
+            if count <= v.start {
+                move_start(v)
+            } else {
+                // moved past zero, use other bound
+                move_end(v)
+            }
+        } else {
+            let end = v.end.wrapping_add(count) & max_value;
+            let overshot_zero = (1..=v.end).contains(&end);
+            if overshot_zero {
+                // moved past zero, use other bound
+                move_start(v)
+            } else {
+                move_end(v)
+            }
+        }
+    }
+}
+
+#[derive(PartialEq, Eq, Hash, Debug, Clone)]
+pub enum TagEncoding {
+    /// The tag directly stores the discriminant, but possibly with a smaller layout
+    /// (so converting the tag to the discriminant can require sign extension).
+    Direct,
+
+    /// Niche (values invalid for a type) encoding the discriminant:
+    /// Discriminant and variant index coincide.
+    /// The variant `untagged_variant` contains a niche at an arbitrary
+    /// offset (field `tag_field` of the enum), which for a variant with
+    /// discriminant `d` is set to
+    /// `(d - niche_variants.start).wrapping_add(niche_start)`.
+    ///
+    /// For example, `Option<(usize, &T)>` is represented such that
+    /// `None` has a null pointer for the second tuple field, and
+    /// `Some` is the identity function (with a non-null reference).
+    Niche { untagged_variant: LocalEnumVariantId, niche_start: u128 },
+}
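// ---------------------------------------------------------------------------
// [Editor's illustration — not part of the patch.] For `bool` (valid range
// 0..=1 in one byte) there are 254 spare values, so `Option<bool>` can encode
// `None` without an extra tag byte. A sketch, assuming a `TargetDataLayout`
// value `dl` is available:
//
//     let scalar = Scalar::Initialized {
//         value: Primitive::Int(Integer::I8, false),
//         valid_range: WrappingRange { start: 0, end: 1 },
//     };
//     let niche = Niche::from_scalar(&dl, Size::ZERO, scalar).unwrap();
//     assert_eq!(niche.available(&dl), 254);
//     // Reserving one value for `None` extends the valid range to 0..=2,
//     // and `None` is encoded as the byte 2.
//     let (none_value, _) = niche.reserve(&dl, 1).unwrap();
//     assert_eq!(none_value, 2);
// ---------------------------------------------------------------------------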
+#[derive(PartialEq, Eq, Hash, Debug, Clone)]
+pub enum Variants {
+    /// Single enum variants, structs/tuples, unions, and all non-ADTs.
+    Single,
+
+    /// Enum-likes with more than one inhabited variant: each variant comes with
+    /// a *discriminant* (usually the same as the variant index but the user can
+    /// assign explicit discriminant values). That discriminant is encoded
+    /// as a *tag* on the machine. The layout of each variant is
+    /// a struct, and they all have space reserved for the tag.
+    /// For enums, the tag is the sole field of the layout.
+    Multiple {
+        tag: Scalar,
+        tag_encoding: TagEncoding,
+        tag_field: usize,
+        variants: ArenaMap<LocalEnumVariantId, Layout>,
+    },
+}
+
+bitflags! {
+    #[derive(Default)]
+    pub struct ReprFlags: u8 {
+        const IS_C               = 1 << 0;
+        const IS_SIMD            = 1 << 1;
+        const IS_TRANSPARENT     = 1 << 2;
+        // Internal only for now. If true, don't reorder fields.
+        const IS_LINEAR          = 1 << 3;
+        // Any of these flags being set prevent field reordering optimisation.
+        const IS_UNOPTIMISABLE   = ReprFlags::IS_C.bits
+            | ReprFlags::IS_SIMD.bits
+            | ReprFlags::IS_LINEAR.bits;
+    }
+}
+
+/// Represents the repr options provided by the user.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, Default)]
+pub struct ReprOptions {
+    pub int: Option<Either<BuiltinInt, BuiltinUint>>,
+    pub align: Option<Align>,
+    pub pack: Option<Align>,
+    pub flags: ReprFlags,
+}
+
+impl ReprOptions {
+    #[inline]
+    pub fn simd(&self) -> bool {
+        self.flags.contains(ReprFlags::IS_SIMD)
+    }
+
+    #[inline]
+    pub fn c(&self) -> bool {
+        self.flags.contains(ReprFlags::IS_C)
+    }
+
+    #[inline]
+    pub fn packed(&self) -> bool {
+        self.pack.is_some()
+    }
+
+    #[inline]
+    pub fn transparent(&self) -> bool {
+        self.flags.contains(ReprFlags::IS_TRANSPARENT)
+    }
+
+    #[inline]
+    pub fn linear(&self) -> bool {
+        self.flags.contains(ReprFlags::IS_LINEAR)
+    }
+
+    /// Returns the discriminant type, given these `repr` options.
+    /// This must only be called on enums!
+    pub fn discr_type(&self) -> Either<BuiltinInt, BuiltinUint> {
+        self.int.unwrap_or(Either::Left(BuiltinInt::Isize))
+    }
+
+    /// Returns `true` if this `#[repr()]` should inhibit "smart enum
+    /// layout" optimizations, such as representing `Foo<&T>` as a
+    /// single pointer.
+    pub fn inhibit_enum_layout_opt(&self) -> bool {
+        self.c() || self.int.is_some()
+    }
+
+    /// Returns `true` if this `#[repr()]` should inhibit struct field reordering
+    /// optimizations, such as with `repr(C)`, `repr(packed(1))`, or `repr(<int>)`.
+    pub fn inhibit_struct_field_reordering_opt(&self) -> bool {
+        if let Some(pack) = self.pack {
+            if pack.bytes() == 1 {
+                return true;
+            }
+        }
+
+        self.flags.intersects(ReprFlags::IS_UNOPTIMISABLE) || self.int.is_some()
+    }
+
+    /// Returns `true` if this `#[repr()]` should inhibit union ABI optimisations.
+    pub fn inhibit_union_abi_opt(&self) -> bool {
+        self.c()
+    }
+}
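// ---------------------------------------------------------------------------
// [Editor's illustration — not part of the patch.] How the predicates above
// interact, as a hypothetical test against this crate:
#[test]
fn repr_options_examples() {
    use hir_def::layout::{Align, ReprFlags, ReprOptions};
    // `#[repr(C)]` blocks both field reordering and enum layout tricks.
    let repr_c = ReprOptions { flags: ReprFlags::IS_C, ..ReprOptions::default() };
    assert!(repr_c.inhibit_struct_field_reordering_opt());
    assert!(repr_c.inhibit_enum_layout_opt());
    // `#[repr(packed)]` (pack = 1) also pins the field order.
    let packed = ReprOptions { pack: Some(Align::ONE), ..ReprOptions::default() };
    assert!(packed.packed() && packed.inhibit_struct_field_reordering_opt());
}
// ---------------------------------------------------------------------------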
+#[derive(PartialEq, Eq, Hash, Clone)]
+pub struct Layout {
+    /// Says where the fields are located within the layout.
+    pub fields: FieldsShape,
+
+    /// Encodes information about multi-variant layouts.
+    /// Even with `Multiple` variants, a layout still has its own fields! Those are then
+    /// shared between all variants. One of them will be the discriminant,
+    /// but e.g. generators can have more.
+    ///
+    /// To access all fields of this layout, both `fields` and the fields of the active variant
+    /// must be taken into account.
+    pub variants: Variants,
+
+    /// The `abi` defines how this data is passed between functions, and it defines
+    /// value restrictions via `valid_range`.
+    ///
+    /// Note that this is entirely orthogonal to the recursive structure defined by
+    /// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has
+    /// `Abi::ScalarPair`! So, even with non-`Aggregate` `abi`, `fields` and `variants`
+    /// have to be taken into account to find all fields of this layout.
+    pub abi: Abi,
+
+    /// The leaf scalar with the largest number of invalid values
+    /// (i.e. outside of its `valid_range`), if it exists.
+    pub largest_niche: Option<Niche>,
+
+    pub align: AbiAndPrefAlign,
+    pub size: Size,
+}
+
+impl Layout {
+    pub fn scalar(dl: &TargetDataLayout, scalar: Scalar) -> Self {
+        let largest_niche = Niche::from_scalar(dl, Size::ZERO, scalar);
+        let size = scalar.size(dl);
+        let align = scalar.align(dl);
+        Layout {
+            variants: Variants::Single,
+            fields: FieldsShape::Primitive,
+            abi: Abi::Scalar(scalar),
+            largest_niche,
+            size,
+            align,
+        }
+    }
+}
+
+impl fmt::Debug for Layout {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // This is how `Layout` used to print before it became
+        // `Interned`. We print it like this to avoid having to update
+        // expected output in a lot of tests.
+        let Layout { size, align, abi, fields, largest_niche, variants } = self;
+        f.debug_struct("Layout")
+            .field("size", size)
+            .field("align", align)
+            .field("abi", abi)
+            .field("fields", fields)
+            .field("largest_niche", largest_niche)
+            .field("variants", variants)
+            .finish()
+    }
+}
+
+impl Layout {
+    pub fn is_unsized(&self) -> bool {
+        self.abi.is_unsized()
+    }
+
+    /// Returns `true` if the type is a ZST and not unsized.
+    pub fn is_zst(&self) -> bool {
+        match self.abi {
+            Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
+            Abi::Uninhabited => self.size.bytes() == 0,
+            Abi::Aggregate { sized } => sized && self.size.bytes() == 0,
+        }
+    }
+}
+
+#[derive(Debug, PartialEq, Eq, Clone)]
+pub enum LayoutError {
+    UserError(String),
+    SizeOverflow,
+    HasPlaceholder,
+    NotImplemented,
+}
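// ---------------------------------------------------------------------------
// [Editor's illustration — not part of the patch.] `Layout::scalar` is the
// base case the rest of the crate builds on. For `bool` (an `I8` restricted
// to 0..=1), assuming any `TargetDataLayout` value `dl`:
//
//     let bool_layout = Layout::scalar(&dl, Scalar::Initialized {
//         value: Primitive::Int(Integer::I8, false),
//         valid_range: WrappingRange { start: 0, end: 1 },
//     });
//     assert_eq!(bool_layout.size.bytes(), 1);
//     assert!(bool_layout.largest_niche.is_some()); // values 2..=255 are spare
// ---------------------------------------------------------------------------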
diff --git a/crates/hir-def/src/lib.rs b/crates/hir-def/src/lib.rs
index 5c7aa72349f6..8267ef09cb0a 100644
--- a/crates/hir-def/src/lib.rs
+++ b/crates/hir-def/src/lib.rs
@@ -34,6 +34,7 @@ pub mod adt;
 pub mod data;
 pub mod generics;
 pub mod lang_item;
+pub mod layout;
 
 pub mod expr;
 pub mod body;
diff --git a/crates/hir-expand/src/name.rs b/crates/hir-expand/src/name.rs
index 259fe1327f88..beff3f6ad962 100644
--- a/crates/hir-expand/src/name.rs
+++ b/crates/hir-expand/src/name.rs
@@ -419,6 +419,7 @@ pub mod known {
         shr,
         sub_assign,
         sub,
+        unsafe_cell,
         va_list
     );
diff --git a/crates/hir-ty/src/db.rs b/crates/hir-ty/src/db.rs
index 932fce83563d..e80f18811f29 100644
--- a/crates/hir-ty/src/db.rs
+++ b/crates/hir-ty/src/db.rs
@@ -6,8 +6,11 @@ use std::sync::Arc;
 use arrayvec::ArrayVec;
 use base_db::{impl_intern_key, salsa, CrateId, Upcast};
 use hir_def::{
-    db::DefDatabase, expr::ExprId, BlockId, ConstId, ConstParamId, DefWithBodyId, EnumVariantId,
-    FunctionId, GenericDefId, ImplId, LifetimeParamId, LocalFieldId, TypeOrConstParamId, VariantId,
+    db::DefDatabase,
+    expr::ExprId,
+    layout::{Layout, LayoutError, TargetDataLayout},
+    AdtId, BlockId, ConstId, ConstParamId, DefWithBodyId, EnumVariantId, FunctionId, GenericDefId,
+    ImplId, LifetimeParamId, LocalFieldId, TypeOrConstParamId, VariantId,
 };
 use la_arena::ArenaMap;
 
@@ -16,7 +19,7 @@ use crate::{
     consteval::{ComputedExpr, ConstEvalError},
     method_resolution::{InherentImpls, TraitImpls, TyFingerprint},
     Binders, CallableDefId, FnDefId, GenericArg, ImplTraitId, InferenceResult, Interner, PolyFnSig,
-    QuantifiedWhereClause, ReturnTypeImplTraits, TraitRef, Ty, TyDefId, ValueTyDefId,
+    QuantifiedWhereClause, ReturnTypeImplTraits, Substitution, TraitRef, Ty, TyDefId, ValueTyDefId,
 };
 use hir_expand::name::Name;
 
@@ -57,6 +60,13 @@ pub trait HirDatabase: DefDatabase + Upcast<dyn DefDatabase> {
     #[salsa::invoke(crate::lower::field_types_query)]
     fn field_types(&self, var: VariantId) -> Arc<ArenaMap<LocalFieldId, Binders<Ty>>>;
 
+    #[salsa::invoke(crate::layout::layout_of_adt_query)]
+    #[salsa::cycle(crate::layout::layout_of_adt_recover)]
+    fn layout_of_adt(&self, def: AdtId, subst: Substitution) -> Result<Layout, LayoutError>;
+
+    #[salsa::invoke(crate::layout::current_target_data_layout_query)]
+    fn current_target_data_layout(&self) -> Arc<TargetDataLayout>;
+
     #[salsa::invoke(crate::lower::callable_item_sig)]
     fn callable_item_signature(&self, def: CallableDefId) -> PolyFnSig;
diff --git a/crates/hir-ty/src/diagnostics/match_check.rs b/crates/hir-ty/src/diagnostics/match_check.rs
index d51ad72bd27b..e0905e01b6a1 100644
--- a/crates/hir-ty/src/diagnostics/match_check.rs
+++ b/crates/hir-ty/src/diagnostics/match_check.rs
@@ -12,16 +12,16 @@ pub(crate) mod usefulness;
 use chalk_ir::Mutability;
 use hir_def::{
-    adt::VariantData, body::Body, expr::PatId, AdtId, EnumVariantId, HasModule, LocalFieldId,
-    VariantId,
+    adt::VariantData, body::Body, expr::PatId, AdtId, EnumVariantId, LocalFieldId, VariantId,
 };
-use hir_expand::name::{name, Name};
+use hir_expand::name::Name;
 use stdx::{always, never};
 
 use crate::{
     db::HirDatabase,
     display::{HirDisplay, HirDisplayError, HirFormatter},
     infer::BindingMode,
+    lang_items::is_box,
     InferenceResult, Interner, Substitution, Ty, TyExt, TyKind,
 };
 
@@ -405,13 +405,6 @@ where
     }
 }
 
-fn is_box(adt: AdtId, db: &dyn HirDatabase) -> bool {
-    let owned_box = name![owned_box].to_smol_str();
-    let krate = adt.module(db.upcast()).krate();
-    let box_adt = db.lang_item(krate, owned_box).and_then(|it| it.as_struct()).map(AdtId::from);
-    Some(adt) == box_adt
-}
-
 pub(crate) trait PatternFoldable: Sized {
     fn fold_with<F: PatternFolder>(&self, folder: &mut F) -> Self {
         self.super_fold_with(folder)
diff --git a/crates/hir-ty/src/lang_items.rs b/crates/hir-ty/src/lang_items.rs
new file mode 100644
index 000000000000..afc54e729f9c
--- /dev/null
+++ b/crates/hir-ty/src/lang_items.rs
@@ -0,0 +1,20 @@
+//! Functions to detect special lang items
+
+use hir_def::{AdtId, HasModule};
+use hir_expand::name;
+
+use crate::db::HirDatabase;
+
+pub fn is_box(adt: AdtId, db: &dyn HirDatabase) -> bool {
+    let owned_box = name![owned_box].to_smol_str();
+    let krate = adt.module(db.upcast()).krate();
+    let box_adt = db.lang_item(krate, owned_box).and_then(|it| it.as_struct()).map(AdtId::from);
+    Some(adt) == box_adt
+}
+
+pub fn is_unsafe_cell(adt: AdtId, db: &dyn HirDatabase) -> bool {
+    let unsafe_cell = name![unsafe_cell].to_smol_str();
+    let krate = adt.module(db.upcast()).krate();
+    let cell_adt = db.lang_item(krate, unsafe_cell).and_then(|it| it.as_struct()).map(AdtId::from);
+    Some(adt) == cell_adt
+}
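// ---------------------------------------------------------------------------
// [Editor's illustration — not part of the patch.] With the two new salsa
// queries wired into `HirDatabase`, a caller that already has an `AdtId` and
// a `Substitution` (hypothetical `db`, `adt_id`, `subst`) can ask:
//
//     let layout = db.layout_of_adt(adt_id, subst)?;
//     eprintln!("size = {}, align = {}",
//         layout.size.bytes(), layout.align.abi.bytes());
//
// The `#[salsa::cycle(...)]` attribute routes query cycles (e.g. a struct
// containing itself with no indirection) to `layout_of_adt_recover` instead
// of recursing forever.
// ---------------------------------------------------------------------------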
user_error { + ($x: expr) => { + return Err(LayoutError::UserError(format!($x))) + }; +} + +mod adt; +mod target; + +fn scalar_unit(dl: &TargetDataLayout, value: Primitive) -> Scalar { + Scalar::Initialized { value, valid_range: WrappingRange::full(value.size(dl)) } +} + +fn scalar(dl: &TargetDataLayout, value: Primitive) -> Layout { + Layout::scalar(dl, scalar_unit(dl, value)) +} + +fn scalar_pair(dl: &TargetDataLayout, a: Scalar, b: Scalar) -> Layout { + let b_align = b.align(dl); + let align = a.align(dl).max(b_align).max(dl.aggregate_align); + let b_offset = a.size(dl).align_to(b_align.abi); + let size = b_offset.checked_add(b.size(dl), dl).unwrap().align_to(align.abi); + + // HACK(nox): We iter on `b` and then `a` because `max_by_key` + // returns the last maximum. + let largest_niche = Niche::from_scalar(dl, b_offset, b) + .into_iter() + .chain(Niche::from_scalar(dl, Size::ZERO, a)) + .max_by_key(|niche| niche.available(dl)); + + Layout { + variants: Variants::Single, + fields: FieldsShape::Arbitrary { + offsets: vec![Size::ZERO, b_offset], + memory_index: vec![0, 1], + }, + abi: Abi::ScalarPair(a, b), + largest_niche, + align, + size, + } +} + +pub fn layout_of_ty(db: &dyn HirDatabase, ty: &Ty) -> Result { + let dl = &*db.current_target_data_layout(); + Ok(match ty.kind(Interner) { + TyKind::Adt(AdtId(def), subst) => db.layout_of_adt(*def, subst.clone())?, + TyKind::Scalar(s) => match s { + chalk_ir::Scalar::Bool => Layout::scalar( + dl, + Scalar::Initialized { + value: Primitive::Int(Integer::I8, false), + valid_range: WrappingRange { start: 0, end: 1 }, + }, + ), + chalk_ir::Scalar::Char => Layout::scalar( + dl, + Scalar::Initialized { + value: Primitive::Int(Integer::I32, false), + valid_range: WrappingRange { start: 0, end: 0x10FFFF }, + }, + ), + chalk_ir::Scalar::Int(i) => scalar( + dl, + Primitive::Int( + match i { + chalk_ir::IntTy::Isize => dl.ptr_sized_integer(), + chalk_ir::IntTy::I8 => Integer::I8, + chalk_ir::IntTy::I16 => Integer::I16, + chalk_ir::IntTy::I32 => Integer::I32, + chalk_ir::IntTy::I64 => Integer::I64, + chalk_ir::IntTy::I128 => Integer::I128, + }, + false, + ), + ), + chalk_ir::Scalar::Uint(i) => scalar( + dl, + Primitive::Int( + match i { + chalk_ir::UintTy::Usize => dl.ptr_sized_integer(), + chalk_ir::UintTy::U8 => Integer::I8, + chalk_ir::UintTy::U16 => Integer::I16, + chalk_ir::UintTy::U32 => Integer::I32, + chalk_ir::UintTy::U64 => Integer::I64, + chalk_ir::UintTy::U128 => Integer::I128, + }, + true, + ), + ), + chalk_ir::Scalar::Float(f) => scalar( + dl, + match f { + chalk_ir::FloatTy::F32 => Primitive::F32, + chalk_ir::FloatTy::F64 => Primitive::F64, + }, + ), + }, + TyKind::Tuple(len, tys) => { + let kind = if *len == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized }; + + univariant( + dl, + &tys.iter(Interner) + .map(|k| layout_of_ty(db, k.assert_ty_ref(Interner))) + .collect::, _>>()?, + &ReprOptions::default(), + kind, + )? 
+pub fn layout_of_ty(db: &dyn HirDatabase, ty: &Ty) -> Result<Layout, LayoutError> {
+    let dl = &*db.current_target_data_layout();
+    Ok(match ty.kind(Interner) {
+        TyKind::Adt(AdtId(def), subst) => db.layout_of_adt(*def, subst.clone())?,
+        TyKind::Scalar(s) => match s {
+            chalk_ir::Scalar::Bool => Layout::scalar(
+                dl,
+                Scalar::Initialized {
+                    value: Primitive::Int(Integer::I8, false),
+                    valid_range: WrappingRange { start: 0, end: 1 },
+                },
+            ),
+            chalk_ir::Scalar::Char => Layout::scalar(
+                dl,
+                Scalar::Initialized {
+                    value: Primitive::Int(Integer::I32, false),
+                    valid_range: WrappingRange { start: 0, end: 0x10FFFF },
+                },
+            ),
+            chalk_ir::Scalar::Int(i) => scalar(
+                dl,
+                Primitive::Int(
+                    match i {
+                        chalk_ir::IntTy::Isize => dl.ptr_sized_integer(),
+                        chalk_ir::IntTy::I8 => Integer::I8,
+                        chalk_ir::IntTy::I16 => Integer::I16,
+                        chalk_ir::IntTy::I32 => Integer::I32,
+                        chalk_ir::IntTy::I64 => Integer::I64,
+                        chalk_ir::IntTy::I128 => Integer::I128,
+                    },
+                    false,
+                ),
+            ),
+            chalk_ir::Scalar::Uint(i) => scalar(
+                dl,
+                Primitive::Int(
+                    match i {
+                        chalk_ir::UintTy::Usize => dl.ptr_sized_integer(),
+                        chalk_ir::UintTy::U8 => Integer::I8,
+                        chalk_ir::UintTy::U16 => Integer::I16,
+                        chalk_ir::UintTy::U32 => Integer::I32,
+                        chalk_ir::UintTy::U64 => Integer::I64,
+                        chalk_ir::UintTy::U128 => Integer::I128,
+                    },
+                    true,
+                ),
+            ),
+            chalk_ir::Scalar::Float(f) => scalar(
+                dl,
+                match f {
+                    chalk_ir::FloatTy::F32 => Primitive::F32,
+                    chalk_ir::FloatTy::F64 => Primitive::F64,
+                },
+            ),
+        },
+        TyKind::Tuple(len, tys) => {
+            let kind = if *len == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
+
+            univariant(
+                dl,
+                &tys.iter(Interner)
+                    .map(|k| layout_of_ty(db, k.assert_ty_ref(Interner)))
+                    .collect::<Result<Vec<_>, _>>()?,
+                &ReprOptions::default(),
+                kind,
+            )?
+        }
+        TyKind::Array(element, count) => {
+            let count = match count.data(Interner).value {
+                chalk_ir::ConstValue::Concrete(c) => match c.interned {
+                    hir_def::type_ref::ConstScalar::Int(x) => x as u64,
+                    hir_def::type_ref::ConstScalar::UInt(x) => x as u64,
+                    hir_def::type_ref::ConstScalar::Unknown => {
+                        user_error!("unknown const generic parameter")
+                    }
+                    _ => user_error!("mismatched type of const generic parameter"),
+                },
+                _ => return Err(LayoutError::HasPlaceholder),
+            };
+            let element = layout_of_ty(db, element)?;
+            let size = element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow)?;
+
+            let abi = if count != 0 && matches!(element.abi, Abi::Uninhabited) {
+                Abi::Uninhabited
+            } else {
+                Abi::Aggregate { sized: true }
+            };
+
+            let largest_niche = if count != 0 { element.largest_niche } else { None };
+
+            Layout {
+                variants: Variants::Single,
+                fields: FieldsShape::Array { stride: element.size, count },
+                abi,
+                largest_niche,
+                align: element.align,
+                size,
+            }
+        }
+        TyKind::Slice(element) => {
+            let element = layout_of_ty(db, element)?;
+            Layout {
+                variants: Variants::Single,
+                fields: FieldsShape::Array { stride: element.size, count: 0 },
+                abi: Abi::Aggregate { sized: false },
+                largest_niche: None,
+                align: element.align,
+                size: Size::ZERO,
+            }
+        }
+        // Potentially-wide pointers.
+        TyKind::Ref(_, _, pointee) | TyKind::Raw(_, pointee) => {
+            let mut data_ptr = scalar_unit(dl, Primitive::Pointer);
+            if matches!(ty.kind(Interner), TyKind::Ref(..)) {
+                data_ptr.valid_range_mut().start = 1;
+            }
+
+            // let pointee = tcx.normalize_erasing_regions(param_env, pointee);
+            // if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
+            //     return Ok(tcx.intern_layout(LayoutS::scalar(cx, data_ptr)));
+            // }
+
+            let unsized_part = struct_tail_erasing_lifetimes(db, pointee.clone());
+            let metadata = match unsized_part.kind(Interner) {
+                TyKind::Slice(_) | TyKind::Str => {
+                    scalar_unit(dl, Primitive::Int(dl.ptr_sized_integer(), false))
+                }
+                TyKind::Dyn(..) => {
+                    let mut vtable = scalar_unit(dl, Primitive::Pointer);
+                    vtable.valid_range_mut().start = 1;
+                    vtable
+                }
+                _ => {
+                    // pointee is sized
+                    return Ok(Layout::scalar(dl, data_ptr));
+                }
+            };
+
+            // Effectively a (ptr, meta) tuple.
+            scalar_pair(dl, data_ptr, metadata)
+        }
+        TyKind::FnDef(_, _) => {
+            univariant(dl, &[], &ReprOptions::default(), StructKind::AlwaysSized)?
+        }
+        TyKind::Str => Layout {
+            variants: Variants::Single,
+            fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
+            abi: Abi::Aggregate { sized: false },
+            largest_niche: None,
+            align: dl.i8_align,
+            size: Size::ZERO,
+        },
+        TyKind::Never => Layout {
+            variants: Variants::Single,
+            fields: FieldsShape::Primitive,
+            abi: Abi::Uninhabited,
+            largest_niche: None,
+            align: dl.i8_align,
+            size: Size::ZERO,
+        },
+        TyKind::Dyn(_) | TyKind::Foreign(_) => {
+            let mut unit = univariant(dl, &[], &ReprOptions::default(), StructKind::AlwaysSized)?;
+            match unit.abi {
+                Abi::Aggregate { ref mut sized } => *sized = false,
+                _ => user_error!("bug"),
+            }
+            unit
+        }
+        TyKind::Function(_) => {
+            let mut ptr = scalar_unit(dl, Primitive::Pointer);
+            ptr.valid_range_mut().start = 1;
+            Layout::scalar(dl, ptr)
+        }
+        TyKind::Closure(_, _)
+        | TyKind::OpaqueType(_, _)
+        | TyKind::Generator(_, _)
+        | TyKind::GeneratorWitness(_, _) => return Err(LayoutError::NotImplemented),
+        TyKind::AssociatedType(_, _)
+        | TyKind::Error
+        | TyKind::Alias(_)
+        | TyKind::Placeholder(_)
+        | TyKind::BoundVar(_)
+        | TyKind::InferenceVar(_, _) => return Err(LayoutError::HasPlaceholder),
+    })
+}
+
+fn struct_tail_erasing_lifetimes(db: &dyn HirDatabase, pointee: Ty) -> Ty {
+    match pointee.kind(Interner) {
+        TyKind::Adt(AdtId(adt), subst) => match adt {
+            &hir_def::AdtId::StructId(i) => {
+                let data = db.struct_data(i);
+                let mut it = data.variant_data.fields().iter().rev();
+                match it.next() {
+                    Some((f, _)) => field_ty(db, i.into(), f, subst),
+                    None => pointee,
+                }
+            }
+            _ => pointee,
+        },
+        _ => pointee,
+    }
+}
+
+fn field_ty(
+    db: &dyn HirDatabase,
+    def: hir_def::VariantId,
+    fd: LocalFieldId,
+    subst: &Substitution,
+) -> Ty {
+    db.field_types(def)[fd].clone().substitute(Interner, subst)
+}
+
+#[cfg(test)]
+mod tests;
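// ---------------------------------------------------------------------------
// [Editor's illustration — not part of the patch.] The `Ref`/`Raw` branch
// above reproduces the familiar wide-pointer shapes on a 64-bit target:
//
//     &u8        -> one pointer,              size 8
//     &str       -> (ptr, len) scalar pair,   size 16
//     &dyn Trait -> (ptr, vtable) scalar pair, size 16
//
// and a `&T` data pointer gets `valid_range.start = 1`, which is exactly the
// null niche that `Option<&T>` later exploits.
// ---------------------------------------------------------------------------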
diff --git a/crates/hir-ty/src/layout/adt.rs b/crates/hir-ty/src/layout/adt.rs
new file mode 100644
index 000000000000..e353034eb99f
--- /dev/null
+++ b/crates/hir-ty/src/layout/adt.rs
@@ -0,0 +1,900 @@
+//! Compute the binary representation of structs, unions and enums
+
+use std::{
+    cmp::{self, Ordering},
+    iter,
+    num::NonZeroUsize,
+};
+
+use chalk_ir::TyKind;
+use hir_def::{
+    adt::VariantData,
+    layout::{
+        Abi, AbiAndPrefAlign, Align, FieldsShape, Integer, Layout, LayoutError, Niche, Primitive,
+        ReprOptions, Scalar, Size, StructKind, TagEncoding, TargetDataLayout, Variants,
+        WrappingRange,
+    },
+    AdtId, EnumVariantId, LocalEnumVariantId, UnionId, VariantId,
+};
+use la_arena::{ArenaMap, RawIdx};
+
+use crate::{
+    db::HirDatabase,
+    lang_items::is_unsafe_cell,
+    layout::{field_ty, scalar_unit},
+    Interner, Substitution,
+};
+
+use super::layout_of_ty;
+
+pub fn layout_of_adt_query(
+    db: &dyn HirDatabase,
+    def: AdtId,
+    subst: Substitution,
+) -> Result<Layout, LayoutError> {
+    let handle_variant = |def: VariantId, var: &VariantData| {
+        var.fields()
+            .iter()
+            .map(|(fd, _)| layout_of_ty(db, &field_ty(db, def, fd, &subst)))
+            .collect::<Result<Vec<_>, _>>()
+    };
+    fn struct_variant_idx() -> LocalEnumVariantId {
+        LocalEnumVariantId::from_raw(RawIdx::from(0))
+    }
+    let (variants, is_enum, repr) = match def {
+        AdtId::StructId(s) => {
+            let data = db.struct_data(s);
+            let mut r = ArenaMap::new();
+            r.insert(struct_variant_idx(), handle_variant(s.into(), &data.variant_data)?);
+            (r, false, data.repr.unwrap_or_default())
+        }
+        AdtId::UnionId(id) => return layout_of_union(db, id, &subst),
+        AdtId::EnumId(e) => {
+            let data = db.enum_data(e);
+            let r = data
+                .variants
+                .iter()
+                .map(|(idx, v)| {
+                    Ok((
+                        idx,
+                        handle_variant(
+                            EnumVariantId { parent: e, local_id: idx }.into(),
+                            &v.variant_data,
+                        )?,
+                    ))
+                })
+                .collect::<Result<_, _>>()?;
+            (r, true, data.repr.unwrap_or_default())
+        }
+    };
+
+    // A variant is absent if it's uninhabited and only has ZST fields.
+    // Present uninhabited variants only require space for their fields,
+    // but *not* an encoding of the discriminant (e.g., a tag value).
+    // See issue #49298 for more details on the need to leave space
+    // for non-ZST uninhabited data (mostly partial initialization).
+    let absent = |fields: &[Layout]| {
+        let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
+        let is_zst = fields.iter().all(|f| f.is_zst());
+        uninhabited && is_zst
+    };
+    let (present_first, present_second) = {
+        let mut present_variants =
+            variants.iter().filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
+        (present_variants.next(), present_variants.next())
+    };
+    let present_first = match present_first {
+        Some(present_first) => present_first,
+        // Uninhabited because it has no variants, or only absent ones.
+        None if is_enum => return layout_of_ty(db, &TyKind::Never.intern(Interner)),
+        // If it's a struct, still compute a layout so that we can still compute the
+        // field offsets.
+        None => struct_variant_idx(),
+    };
+
+    let is_univariant = !is_enum ||
+        // Only one variant is present.
+        (present_second.is_none() &&
+        // Representation optimizations are allowed.
+        !repr.inhibit_enum_layout_opt());
+    let dl = &*db.current_target_data_layout();
+
+    if is_univariant {
+        // Struct, or univariant enum equivalent to a struct.
+        // (Typechecking will reject discriminant-sizing attrs.)
+
+        let v = present_first;
+        let kind = if is_enum || variants[v].is_empty() {
+            StructKind::AlwaysSized
+        } else {
+            let always_sized = !variants[v].last().unwrap().is_unsized();
+            if !always_sized {
+                StructKind::MaybeUnsized
+            } else {
+                StructKind::AlwaysSized
+            }
+        };
+
+        let mut st = univariant(dl, &variants[v], &repr, kind)?;
+        st.variants = Variants::Single;
+
+        if is_unsafe_cell(def, db) {
+            let hide_niches = |scalar: &mut _| match scalar {
+                Scalar::Initialized { value, valid_range } => {
+                    *valid_range = WrappingRange::full(value.size(dl))
+                }
+                // Already doesn't have any niches
+                Scalar::Union { .. } => {}
+            };
+            match &mut st.abi {
+                Abi::Uninhabited => {}
+                Abi::Scalar(scalar) => hide_niches(scalar),
+                Abi::ScalarPair(a, b) => {
+                    hide_niches(a);
+                    hide_niches(b);
+                }
+                Abi::Vector { element, count: _ } => hide_niches(element),
+                Abi::Aggregate { sized: _ } => {}
+            }
+            st.largest_niche = None;
+        }
+        return Ok(st);
+    }
+
+    // Until we've decided whether to use the tagged or
+    // niche filling LayoutS, we don't want to intern the
+    // variant layouts, so we can't store them in the
+    // overall LayoutS. Store the overall LayoutS
+    // and the variant LayoutSs here until then.
+    struct TmpLayout {
+        layout: Layout,
+        variants: ArenaMap<LocalEnumVariantId, Layout>,
+    }
+
+    let calculate_niche_filling_layout = || -> Result<Option<TmpLayout>, LayoutError> {
+        // The current code for niche-filling relies on variant indices
+        // instead of actual discriminants, so enums with
+        // explicit discriminants (RFC #2363) would misbehave.
+        if repr.inhibit_enum_layout_opt()
+        // FIXME: bring these codes back
+        // || def
+        //     .variants()
+        //     .iter_enumerated()
+        //     .any(|(i, v)| v.discr != ty::VariantDiscr::Relative(i.as_u32()))
+        {
+            return Ok(None);
+        }
+
+        if variants.iter().count() < 2 {
+            return Ok(None);
+        }
+
+        let mut align = dl.aggregate_align;
+        let mut variant_layouts = variants
+            .iter()
+            .map(|(j, v)| {
+                let mut st = univariant(dl, v, &repr, StructKind::AlwaysSized)?;
+                st.variants = Variants::Single;
+
+                align = align.max(st.align);
+
+                Ok((j, st))
+            })
+            .collect::<Result<ArenaMap<_, _>, _>>()?;
+
+        let largest_variant_index = match variant_layouts
+            .iter()
+            .max_by_key(|(_i, layout)| layout.size.bytes())
+            .map(|(i, _layout)| i)
+        {
+            None => return Ok(None),
+            Some(i) => i,
+        };
+
+        let count = variants
+            .iter()
+            .map(|(i, _)| i)
+            .filter(|x| *x != largest_variant_index && !absent(&variants[*x]))
+            .count() as u128;
+
+        // Find the field with the largest niche
+        let (field_index, niche, (niche_start, niche_scalar)) = match variants
+            [largest_variant_index]
+            .iter()
+            .enumerate()
+            .filter_map(|(j, field)| Some((j, field.largest_niche?)))
+            .max_by_key(|(_, niche)| niche.available(dl))
+            .and_then(|(j, niche)| Some((j, niche, niche.reserve(dl, count)?)))
+        {
+            None => return Ok(None),
+            Some(x) => x,
+        };
+
+        let niche_offset =
+            niche.offset + variant_layouts[largest_variant_index].fields.offset(field_index, dl);
+        let niche_size = niche.value.size(dl);
+        let size = variant_layouts[largest_variant_index].size.align_to(align.abi);
+
+        let all_variants_fit = variant_layouts.iter_mut().all(|(i, layout)| {
+            if i == largest_variant_index {
+                return true;
+            }
+
+            layout.largest_niche = None;
+
+            if layout.size <= niche_offset {
+                // This variant will fit before the niche.
+                return true;
+            }
+
+            // Determine if it'll fit after the niche.
+
+            // Determine if it'll fit after the niche.
+            let this_align = layout.align.abi;
+            let this_offset = (niche_offset + niche_size).align_to(this_align);
+
+            if this_offset + layout.size > size {
+                return false;
+            }
+
+            // It'll fit, but we need to make some adjustments.
+            match layout.fields {
+                FieldsShape::Arbitrary { ref mut offsets, .. } => {
+                    for (j, offset) in offsets.iter_mut().enumerate() {
+                        if !variants[i][j].is_zst() {
+                            *offset += this_offset;
+                        }
+                    }
+                }
+                _ => {
+                    panic!("Layout of fields should be Arbitrary for variants")
+                }
+            }
+
+            // It can't be a Scalar or ScalarPair because the offset isn't 0.
+            if !layout.abi.is_uninhabited() {
+                layout.abi = Abi::Aggregate { sized: true };
+            }
+            layout.size += this_offset;
+
+            true
+        });
+
+        if !all_variants_fit {
+            return Ok(None);
+        }
+
+        let largest_niche = Niche::from_scalar(dl, niche_offset, niche_scalar);
+
+        let others_zst = variant_layouts
+            .iter()
+            .all(|(i, layout)| i == largest_variant_index || layout.size == Size::ZERO);
+        let same_size = size == variant_layouts[largest_variant_index].size;
+        let same_align = align == variant_layouts[largest_variant_index].align;
+
+        let abi = if variant_layouts.iter().all(|(_, v)| v.abi.is_uninhabited()) {
+            Abi::Uninhabited
+        } else if same_size && same_align && others_zst {
+            match variant_layouts[largest_variant_index].abi {
+                // When the total alignment and size match, we can use the
+                // same ABI as the scalar variant with the reserved niche.
+                Abi::Scalar(_) => Abi::Scalar(niche_scalar),
+                Abi::ScalarPair(first, second) => {
+                    // Only the niche is guaranteed to be initialised,
+                    // so use union layouts for the other primitive.
+                    if niche_offset == Size::ZERO {
+                        Abi::ScalarPair(niche_scalar, second.to_union())
+                    } else {
+                        Abi::ScalarPair(first.to_union(), niche_scalar)
+                    }
+                }
+                _ => Abi::Aggregate { sized: true },
+            }
+        } else {
+            Abi::Aggregate { sized: true }
+        };
+
+        let layout = Layout {
+            variants: Variants::Multiple {
+                tag: niche_scalar,
+                tag_encoding: TagEncoding::Niche {
+                    untagged_variant: largest_variant_index,
+                    niche_start,
+                },
+                tag_field: 0,
+                variants: ArenaMap::new(),
+            },
+            fields: FieldsShape::Arbitrary { offsets: vec![niche_offset], memory_index: vec![0] },
+            abi,
+            largest_niche,
+            size,
+            align,
+        };
+
+        Ok(Some(TmpLayout { layout, variants: variant_layouts }))
+    };
+
+    let niche_filling_layout = calculate_niche_filling_layout()?;
+
+    let (mut min, mut max) = (i128::MAX, i128::MIN);
+    // FIXME: bring these back
+    // let discr_type = repr.discr_type();
+    // let bits = Integer::from_attr(dl, discr_type).size().bits();
+    // for (i, discr) in def.discriminants(tcx) {
+    //     if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
+    //         continue;
+    //     }
+    //     let mut x = discr.val as i128;
+    //     if discr_type.is_signed() {
+    //         // sign extend the raw representation to be an i128
+    //         x = (x << (128 - bits)) >> (128 - bits);
+    //     }
+    //     if x < min {
+    //         min = x;
+    //     }
+    //     if x > max {
+    //         max = x;
+    //     }
+    // }
+    // We might have no inhabited variants, so pretend there's at least one.
+    if (min, max) == (i128::MAX, i128::MIN) {
+        min = 0;
+        max = 0;
+    }
+    assert!(min <= max, "discriminant range is {}...{}", min, max);
+    let (min_ity, signed) = Integer::repr_discr(dl, &repr, min, max)?;
+
+    let mut align = dl.aggregate_align;
+    let mut size = Size::ZERO;
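+
+    // E.g. a C-like `enum E { A, B, C }` has discriminants 0..=2, so
+    // `repr_discr` picks an unsigned byte-sized tag whose valid range is
+    // 0..=2; the spare values 3..=255 are what later allow `Option<E>` to
+    // stay a single byte.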
+
+    // We're interested in the smallest alignment, so start large.
+    let mut start_align = Align::from_bytes(256).unwrap();
+    assert_eq!(Integer::for_align(dl, start_align), None);
+
+    // repr(C) on an enum tells us to make a (tag, union) layout,
+    // so we need to grow the prefix alignment to be at least
+    // the alignment of the union. (This value is used both for
+    // determining the alignment of the overall enum, and for
+    // determining the alignment of the payload after the tag.)
+    let mut prefix_align = min_ity.align(dl).abi;
+    if repr.c() {
+        for (_, fields) in variants.iter() {
+            for field in fields {
+                prefix_align = prefix_align.max(field.align.abi);
+            }
+        }
+    }
+
+    // Create the set of structs that represent each variant.
+    let mut layout_variants = variants
+        .iter()
+        .map(|(i, field_layouts)| {
+            let mut st = univariant(
+                dl,
+                &field_layouts,
+                &repr,
+                StructKind::Prefixed(min_ity.size(), prefix_align),
+            )?;
+            st.variants = Variants::Single;
+            // Find the first field we can't move later
+            // to make room for a larger discriminant.
+            for field in st.fields.index_by_increasing_offset().map(|j| &field_layouts[j]) {
+                if !field.is_zst() || field.align.abi.bytes() != 1 {
+                    start_align = start_align.min(field.align.abi);
+                    break;
+                }
+            }
+            size = cmp::max(size, st.size);
+            align = align.max(st.align);
+            Ok((i, st))
+        })
+        .collect::<Result<ArenaMap<_, _>, _>>()?;
+
+    // Align the maximum variant size to the largest alignment.
+    size = size.align_to(align.abi);
+
+    if size.bytes() >= dl.obj_size_bound() {
+        return Err(LayoutError::SizeOverflow);
+    }
+
+    // Check to see if we should use a different type for the
+    // discriminant. We can safely use a type with the same size
+    // as the alignment of the first field of each variant.
+    // We increase the size of the discriminant to avoid LLVM copying
+    // padding when it doesn't need to. This normally causes unaligned
+    // load/stores and excessive memcpy/memset operations. By using a
+    // bigger integer size, LLVM can be sure about its contents and
+    // won't be so conservative.
+
+    // Use the initial field alignment
+    let mut ity = if repr.c() || repr.int.is_some() {
+        min_ity
+    } else {
+        Integer::for_align(dl, start_align).unwrap_or(min_ity)
+    };
+
+    // If the alignment is not larger than the chosen discriminant size,
+    // don't use the alignment as the final size.
+    if ity <= min_ity {
+        ity = min_ity;
+    } else {
+        // Patch up the variants' first few fields.
+        let old_ity_size = min_ity.size();
+        let new_ity_size = ity.size();
+        for (_, variant) in layout_variants.iter_mut() {
+            match variant.fields {
+                FieldsShape::Arbitrary { ref mut offsets, .. } => {
+                    for i in offsets {
+                        if *i <= old_ity_size {
+                            assert_eq!(*i, old_ity_size);
+                            *i = new_ity_size;
+                        }
+                    }
+                    // We might be making the struct larger.
+                    if variant.size <= old_ity_size {
+                        variant.size = new_ity_size;
+                    }
+                }
+                _ => user_error!("bug"),
+            }
+        }
+    }
+
+    let tag_mask = ity.size().unsigned_int_max();
+    let tag = Scalar::Initialized {
+        value: Primitive::Int(ity, signed),
+        valid_range: WrappingRange {
+            start: (min as u128 & tag_mask),
+            end: (max as u128 & tag_mask),
+        },
+    };
+    let mut abi = Abi::Aggregate { sized: true };
+
+    if layout_variants.iter().all(|(_, v)| v.abi.is_uninhabited()) {
+        abi = Abi::Uninhabited;
+    } else if tag.size(dl) == size {
+        // Make sure we only use scalar layout when the enum is entirely its
+        // own tag (i.e. it has no padding nor any non-ZST variant fields).
+        abi = Abi::Scalar(tag);
+    } else {
+        // Try to use a ScalarPair for all tagged enums.
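+        // E.g. `enum E { A(u32), B(u32) }`: both variants keep their `u32`
+        // at the same offset after the tag, so the whole enum can become
+        // `ScalarPair(tag, u32)` (8 bytes, align 4) instead of an opaque
+        // aggregate.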
+        let mut common_prim = None;
+        let mut common_prim_initialized_in_all_variants = true;
+        for ((_, field_layouts), (_, layout_variant)) in
+            iter::zip(variants.iter(), layout_variants.iter())
+        {
+            let offsets = match layout_variant.fields {
+                FieldsShape::Arbitrary { ref offsets, .. } => offsets,
+                _ => user_error!("bug"),
+            };
+            let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
+            let (field, offset) = match (fields.next(), fields.next()) {
+                (None, None) => {
+                    common_prim_initialized_in_all_variants = false;
+                    continue;
+                }
+                (Some(pair), None) => pair,
+                _ => {
+                    common_prim = None;
+                    break;
+                }
+            };
+            let prim = match field.abi {
+                Abi::Scalar(scalar) => {
+                    common_prim_initialized_in_all_variants &=
+                        matches!(scalar, Scalar::Initialized { .. });
+                    scalar.primitive()
+                }
+                _ => {
+                    common_prim = None;
+                    break;
+                }
+            };
+            if let Some(pair) = common_prim {
+                // This is pretty conservative. We could go fancier
+                // by conflating things like i32 and u32, or even
+                // realising that (u8, u8) could just cohabit with
+                // u16 or even u32.
+                if pair != (prim, offset) {
+                    common_prim = None;
+                    break;
+                }
+            } else {
+                common_prim = Some((prim, offset));
+            }
+        }
+        if let Some((prim, offset)) = common_prim {
+            let prim_scalar = if common_prim_initialized_in_all_variants {
+                scalar_unit(dl, prim)
+            } else {
+                // Common prim might be uninit.
+                Scalar::Union { value: prim }
+            };
+            let pair = scalar_pair(dl, tag, prim_scalar);
+            let pair_offsets = match pair.fields {
+                FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
+                    assert_eq!(memory_index, &[0, 1]);
+                    offsets
+                }
+                _ => user_error!("bug"),
+            };
+            if pair_offsets[0] == Size::ZERO
+                && pair_offsets[1] == *offset
+                && align == pair.align
+                && size == pair.size
+            {
+                // We can use `ScalarPair` only when it matches our
+                // already computed layout (including `#[repr(C)]`).
+                abi = pair.abi;
+            }
+        }
+    }
+
+    // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
+    // variants to ensure they are consistent. This is because a downcast is
+    // semantically a NOP, and thus should not affect layout.
+    if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
+        for (_, variant) in layout_variants.iter_mut() {
+            // We only do this for variants with fields; the others are not accessed anyway.
+            // Also do not overwrite any already existing "clever" ABIs.
+            if variant.fields.count() > 0 && matches!(variant.abi, Abi::Aggregate { .. }) {
+                variant.abi = abi;
+                // Also need to bump up the size and alignment, so that the entire
+                // value fits in here.
+                variant.size = cmp::max(variant.size, size);
+                variant.align.abi = cmp::max(variant.align.abi, align.abi);
+            }
+        }
+    }
+
+    let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
+
+    let tagged_layout = Layout {
+        variants: Variants::Multiple {
+            tag,
+            tag_encoding: TagEncoding::Direct,
+            tag_field: 0,
+            variants: ArenaMap::new(),
+        },
+        fields: FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] },
+        largest_niche,
+        abi,
+        align,
+        size,
+    };
+
+    let tagged_layout = TmpLayout { layout: tagged_layout, variants: layout_variants };
+
+    let mut best_layout = match (tagged_layout, niche_filling_layout) {
+        (tl, Some(nl)) => {
+            // Pick the smaller layout; otherwise,
+            // pick the layout with the larger niche; otherwise,
+            // pick tagged as it has simpler codegen.
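+            // E.g. `Option<&i32>` keeps the niche-filled form (8 bytes vs.
+            // 16 tagged), while an enum whose variants expose no niches
+            // falls back to the tagged form here.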
+            use Ordering::*;
+            let niche_size =
+                |tmp_l: &TmpLayout| tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl));
+            match (tl.layout.size.cmp(&nl.layout.size), niche_size(&tl).cmp(&niche_size(&nl))) {
+                (Greater, _) => nl,
+                (Equal, Less) => nl,
+                _ => tl,
+            }
+        }
+        (tl, None) => tl,
+    };
+
+    // Now we can intern the variant layouts and store them in the enum layout.
+    best_layout.layout.variants = match best_layout.layout.variants {
+        Variants::Multiple { tag, tag_encoding, tag_field, .. } => {
+            Variants::Multiple { tag, tag_encoding, tag_field, variants: best_layout.variants }
+        }
+        _ => user_error!("bug"),
+    };
+
+    Ok(best_layout.layout)
+}
+
+pub fn layout_of_adt_recover(
+    _: &dyn HirDatabase,
+    _: &[String],
+    _: &AdtId,
+    _: &Substitution,
+) -> Result<Layout, LayoutError> {
+    user_error!("infinite sized recursive type");
+}
+
+pub(crate) fn univariant(
+    dl: &TargetDataLayout,
+    fields: &[Layout],
+    repr: &ReprOptions,
+    kind: StructKind,
+) -> Result<Layout, LayoutError> {
+    let pack = repr.pack;
+    if pack.is_some() && repr.align.is_some() {
+        user_error!("Struct cannot be packed and aligned");
+    }
+
+    let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
+
+    let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
+
+    let optimize = !repr.inhibit_struct_field_reordering_opt();
+    if optimize {
+        let end = if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
+        let optimizing = &mut inverse_memory_index[..end];
+        let field_align = |f: &Layout| {
+            if let Some(pack) = pack {
+                f.align.abi.min(pack)
+            } else {
+                f.align.abi
+            }
+        };
+
+        match kind {
+            StructKind::AlwaysSized | StructKind::MaybeUnsized => {
+                optimizing.sort_by_key(|&x| {
+                    // Place ZSTs first to avoid "interesting offsets",
+                    // especially with only one or two non-ZST fields.
+                    let f = &fields[x as usize];
+                    (!f.is_zst(), cmp::Reverse(field_align(f)))
+                });
+            }
+
+            StructKind::Prefixed(..) => {
+                // Sort in ascending alignment so that the layout stays optimal
+                // regardless of the prefix.
+                optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
+            }
+        }
+    }
+
+    // inverse_memory_index holds field indices by increasing memory offset.
+    // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
+    // We now write field offsets to the corresponding offset slot;
+    // field 5 with offset 0 puts 0 in offsets[5].
+    // At the bottom of this function, we invert `inverse_memory_index` to
+    // produce `memory_index` (see `invert_mapping`).
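+    //
+    // E.g. for `struct S(u8, u32, u8)` the sort above yields memory order
+    // [1, 0, 2] (the `u32` first, then the two `u8`s), giving `S` a size of
+    // 8 instead of the 12 that source order would need with padding.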
+
+    let mut sized = true;
+    let mut offsets = vec![Size::ZERO; fields.len()];
+    let mut offset = Size::ZERO;
+    let mut largest_niche = None;
+    let mut largest_niche_available = 0;
+
+    if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
+        let prefix_align =
+            if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
+        align = align.max(AbiAndPrefAlign::new(prefix_align));
+        offset = prefix_size.align_to(prefix_align);
+    }
+
+    for &i in &inverse_memory_index {
+        let field = &fields[i as usize];
+        if !sized {
+            user_error!("Unsized field is not last field");
+        }
+
+        if field.is_unsized() {
+            sized = false;
+        }
+
+        // Invariant: offset < dl.obj_size_bound() <= 1<<61
+        let field_align = if let Some(pack) = pack {
+            field.align.min(AbiAndPrefAlign::new(pack))
+        } else {
+            field.align
+        };
+        offset = offset.align_to(field_align.abi);
+        align = align.max(field_align);
+
+        offsets[i as usize] = offset;
+
+        if let Some(mut niche) = field.largest_niche {
+            let available = niche.available(dl);
+            if available > largest_niche_available {
+                largest_niche_available = available;
+                niche.offset =
+                    niche.offset.checked_add(offset, dl).ok_or(LayoutError::SizeOverflow)?;
+                largest_niche = Some(niche);
+            }
+        }
+
+        offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow)?;
+    }
+
+    if let Some(repr_align) = repr.align {
+        align = align.max(AbiAndPrefAlign::new(repr_align));
+    }
+
+    let min_size = offset;
+
+    // As stated above, inverse_memory_index holds field indices by increasing offset.
+    // This makes it an already-sorted view of the offsets vec.
+    // To invert it, consider:
+    // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
+    // Field 5 would be the first element, so memory_index is i:
+    // Note: if we didn't optimize, it's already right.
+
+    let memory_index =
+        if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };
+
+    let size = min_size.align_to(align.abi);
+    let mut abi = Abi::Aggregate { sized };
+
+    // Unpack newtype ABIs and find scalar pairs.
+    if sized && size.bytes() > 0 {
+        // All other fields must be ZSTs.
+        let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
+
+        match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
+            // We have exactly one non-ZST field.
+            (Some((i, field)), None, None) => {
+                // Field fills the struct and it has a scalar or scalar pair ABI.
+                if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size {
+                    match field.abi {
+                        // For plain scalars, or vectors of them, we can't unpack
+                        // newtypes for `#[repr(C)]`, as that affects C ABIs.
+                        Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
+                            abi = field.abi;
+                        }
+                        // But scalar pairs are Rust-specific and get
+                        // treated as aggregates by C ABIs anyway.
+                        Abi::ScalarPair(..) => {
+                            abi = field.abi;
+                        }
+                        _ => {}
+                    }
+                }
+            }
+
+            // Two non-ZST fields, and they're both scalars.
+            (Some((i, a)), Some((j, b)), None) => {
+                match (a.abi, b.abi) {
+                    (Abi::Scalar(a), Abi::Scalar(b)) => {
+                        // Order by the memory placement, not source order.
+                        let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
+                            ((i, a), (j, b))
+                        } else {
+                            ((j, b), (i, a))
+                        };
+                        let pair = scalar_pair(dl, a, b);
+                        let pair_offsets = match pair.fields {
+                            FieldsShape::Arbitrary { ref offsets, .. } => offsets,
+                            _ => unreachable!(),
+                        };
+                        if offsets[i] == pair_offsets[0]
+                            && offsets[j] == pair_offsets[1]
+                            && align == pair.align
+                            && size == pair.size
+                        {
+                            // We can use `ScalarPair` only when it matches our
+                            // already computed layout (including `#[repr(C)]`).
+                            abi = pair.abi;
+                        }
+                    }
+                    _ => {}
+                }
+            }
+
+            _ => {}
+        }
+    }
+
+    if fields.iter().any(|f| f.abi.is_uninhabited()) {
+        abi = Abi::Uninhabited;
+    }
+
+    Ok(Layout {
+        variants: Variants::Single,
+        fields: FieldsShape::Arbitrary { offsets, memory_index },
+        abi,
+        largest_niche,
+        align,
+        size,
+    })
+}
+
+fn layout_of_union(
+    db: &dyn HirDatabase,
+    id: UnionId,
+    subst: &Substitution,
+) -> Result<Layout, LayoutError> {
+    let dl = &*db.current_target_data_layout();
+
+    let union_data = db.union_data(id);
+
+    let repr = union_data.repr.unwrap_or_default();
+    let fields = union_data.variant_data.fields();
+
+    if repr.pack.is_some() && repr.align.is_some() {
+        user_error!("union cannot be packed and aligned");
+    }
+
+    let mut align = if repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };
+    if let Some(repr_align) = repr.align {
+        align = align.max(AbiAndPrefAlign::new(repr_align));
+    }
+
+    let optimize = !repr.inhibit_union_abi_opt();
+    let mut size = Size::ZERO;
+    let mut abi = Abi::Aggregate { sized: true };
+    for (fd, _) in fields.iter() {
+        let field_ty = field_ty(db, id.into(), fd, subst);
+        let field = layout_of_ty(db, &field_ty)?;
+        if field.is_unsized() {
+            user_error!("unsized union field");
+        }
+        // If all non-ZST fields have the same ABI, forward this ABI
+        if optimize && !field.is_zst() {
+            // Discard valid range information and allow undef
+            let field_abi = match field.abi {
+                Abi::Scalar(x) => Abi::Scalar(x.to_union()),
+                Abi::ScalarPair(x, y) => Abi::ScalarPair(x.to_union(), y.to_union()),
+                Abi::Vector { element: x, count } => Abi::Vector { element: x.to_union(), count },
+                Abi::Uninhabited | Abi::Aggregate { .. } => Abi::Aggregate { sized: true },
+            };
+
+            if size == Size::ZERO {
+                // first non ZST: initialize 'abi'
+                abi = field_abi;
+            } else if abi != field_abi {
+                // different fields have different ABI: reset to Aggregate
+                abi = Abi::Aggregate { sized: true };
+            }
+        }
+
+        size = cmp::max(size, field.size);
+    }
+
+    if let Some(pack) = repr.pack {
+        align = align.min(AbiAndPrefAlign::new(pack));
+    }
+
+    Ok(Layout {
+        variants: Variants::Single,
+        fields: FieldsShape::Union(
+            NonZeroUsize::new(fields.len())
+                .ok_or(LayoutError::UserError("union with zero fields".to_string()))?,
+        ),
+        abi,
+        largest_niche: None,
+        align,
+        size: size.align_to(align.abi),
+    })
+}
+
+// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
+// This is used to go between `memory_index` (source field order to memory order)
+// and `inverse_memory_index` (memory order to source field order).
+// See also `FieldsShape::Arbitrary::memory_index` for more details.
+// FIXME(eddyb) build a better abstraction for permutations, if possible.
+fn invert_mapping(map: &[u32]) -> Vec<u32> {
+    let mut inverse = vec![0; map.len()];
+    for i in 0..map.len() {
+        inverse[map[i] as usize] = i as u32;
+    }
+    inverse
+}
+
+fn scalar_pair(dl: &TargetDataLayout, a: Scalar, b: Scalar) -> Layout {
+    let b_align = b.align(dl);
+    let align = a.align(dl).max(b_align).max(dl.aggregate_align);
+    let b_offset = a.size(dl).align_to(b_align.abi);
+    let size = b_offset.checked_add(b.size(dl), dl).unwrap().align_to(align.abi);
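+
+    // E.g. pairing a one-byte tag with a `u32`: `b_offset` = 4, `size` = 8,
+    // `align` = 4.
+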
+    // HACK(nox): We iter on `b` and then `a` because `max_by_key`
+    // returns the last maximum.
+    let largest_niche = Niche::from_scalar(dl, b_offset, b)
+        .into_iter()
+        .chain(Niche::from_scalar(dl, Size::ZERO, a))
+        .max_by_key(|niche| niche.available(dl));
+
+    Layout {
+        variants: Variants::Single,
+        fields: FieldsShape::Arbitrary {
+            offsets: vec![Size::ZERO, b_offset],
+            memory_index: vec![0, 1],
+        },
+        abi: Abi::ScalarPair(a, b),
+        largest_niche,
+        align,
+        size,
+    }
+}
diff --git a/crates/hir-ty/src/layout/target.rs b/crates/hir-ty/src/layout/target.rs
new file mode 100644
index 000000000000..ba810b12b1fb
--- /dev/null
+++ b/crates/hir-ty/src/layout/target.rs
@@ -0,0 +1,44 @@
+//! Target dependent parameters needed for layouts
+
+use std::sync::Arc;
+
+use hir_def::layout::TargetDataLayout;
+
+use crate::db::HirDatabase;
+
+use super::{AbiAndPrefAlign, AddressSpace, Align, Endian, Integer, Size};
+
+pub fn current_target_data_layout_query(db: &dyn HirDatabase) -> Arc<TargetDataLayout> {
+    let crate_graph = db.crate_graph();
+    let cfg_options = &crate_graph[crate_graph.iter().next().unwrap()].cfg_options;
+    let endian = match cfg_options.get_cfg_values("target_endian").next() {
+        Some(x) if x.as_str() == "big" => Endian::Big,
+        _ => Endian::Little,
+    };
+    let pointer_size =
+        Size::from_bytes(match cfg_options.get_cfg_values("target_pointer_width").next() {
+            Some(x) => match x.as_str() {
+                "16" => 2,
+                "32" => 4,
+                _ => 8,
+            },
+            _ => 8,
+        });
+    Arc::new(TargetDataLayout {
+        endian,
+        i1_align: AbiAndPrefAlign::new(Align::from_bytes(1).unwrap()),
+        i8_align: AbiAndPrefAlign::new(Align::from_bytes(1).unwrap()),
+        i16_align: AbiAndPrefAlign::new(Align::from_bytes(2).unwrap()),
+        i32_align: AbiAndPrefAlign::new(Align::from_bytes(4).unwrap()),
+        i64_align: AbiAndPrefAlign::new(Align::from_bytes(8).unwrap()),
+        i128_align: AbiAndPrefAlign::new(Align::from_bytes(8).unwrap()),
+        f32_align: AbiAndPrefAlign::new(Align::from_bytes(4).unwrap()),
+        f64_align: AbiAndPrefAlign::new(Align::from_bytes(8).unwrap()),
+        pointer_size,
+        pointer_align: AbiAndPrefAlign::new(Align::from_bytes(8).unwrap()),
+        aggregate_align: AbiAndPrefAlign::new(Align::from_bytes(1).unwrap()),
+        vector_align: vec![],
+        instruction_address_space: AddressSpace(0),
+        c_enum_min_size: Integer::I32,
+    })
+}
diff --git a/crates/hir-ty/src/layout/tests.rs b/crates/hir-ty/src/layout/tests.rs
new file mode 100644
index 000000000000..9543b4dcbc6b
--- /dev/null
+++ b/crates/hir-ty/src/layout/tests.rs
@@ -0,0 +1,167 @@
+use base_db::fixture::WithFixture;
+use chalk_ir::{AdtId, TyKind};
+use hir_def::{
+    db::DefDatabase,
+    layout::{Layout, LayoutError},
+};
+
+use crate::{test_db::TestDB, Interner, Substitution};
+
+use super::layout_of_ty;
+
+fn eval_goal(ra_fixture: &str) -> Result<Layout, LayoutError> {
+    let (db, file_id) = TestDB::with_single_file(ra_fixture);
+    let module_id = db.module_for_file(file_id);
+    let def_map = module_id.def_map(&db);
+    let scope = &def_map[module_id.local_id].scope;
+    let adt_id = scope
+        .declarations()
+        .into_iter()
+        .find_map(|x| match x {
+            hir_def::ModuleDefId::AdtId(x) => {
+                let name = match x {
+                    hir_def::AdtId::StructId(x) => db.struct_data(x).name.to_string(),
+                    hir_def::AdtId::UnionId(x) => db.union_data(x).name.to_string(),
+                    hir_def::AdtId::EnumId(x) => db.enum_data(x).name.to_string(),
+                };
+                if name == "Goal" {
+                    Some(x)
+                } else {
+                    None
+                }
+            }
+            _ => None,
+        })
+        .unwrap();
+    let goal_ty = TyKind::Adt(AdtId(adt_id), Substitution::empty(Interner)).intern(Interner);
+    layout_of_ty(&db, &goal_ty)
+}
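+
+// E.g. `eval_goal(r#"struct Goal(i32);"#)` computes the layout of `Goal`
+// against the default test target above; `size.bytes()` and
+// `align.abi.bytes()` are then both 4.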
+
+fn check_size_and_align(ra_fixture: &str, size: u64, align: u64) {
+    let l = eval_goal(ra_fixture).unwrap();
+    assert_eq!(l.size.bytes(), size);
+    assert_eq!(l.align.abi.bytes(), align);
+}
+
+fn check_fail(ra_fixture: &str, e: LayoutError) {
+    let r = eval_goal(ra_fixture);
+    assert_eq!(r, Err(e));
+}
+
+macro_rules! size_and_align {
+    ($($t:tt)*) => {
+        {
+            #[allow(dead_code)]
+            $($t)*
+            check_size_and_align(
+                stringify!($($t)*),
+                ::std::mem::size_of::<Goal>() as u64,
+                ::std::mem::align_of::<Goal>() as u64,
+            );
+        }
+    };
+}
+
+#[test]
+fn hello_world() {
+    size_and_align! {
+        struct Goal(i32);
+    }
+    //check_size_and_align(r#"struct Goal(i32)"#, 4, 4);
+}
+
+#[test]
+fn field_order_optimization() {
+    size_and_align! {
+        struct Goal(u8, i32, u8);
+    }
+    size_and_align! {
+        #[repr(C)]
+        struct Goal(u8, i32, u8);
+    }
+}
+
+#[test]
+fn recursive() {
+    size_and_align! {
+        struct Goal {
+            left: &'static Goal,
+            right: &'static Goal,
+        }
+    }
+    size_and_align! {
+        struct BoxLike<T: ?Sized>(*mut T);
+        struct Goal(BoxLike<Goal>);
+    }
+    check_fail(
+        r#"struct Goal(Goal);"#,
+        LayoutError::UserError("infinite sized recursive type".to_string()),
+    );
+    check_fail(
+        r#"
+        struct Foo<T>(Foo<T>);
+        struct Goal(Foo<i32>);
+        "#,
+        LayoutError::UserError("infinite sized recursive type".to_string()),
+    );
+}
+
+#[test]
+fn generic() {
+    size_and_align! {
+        struct Pair<A, B>(A, B);
+        struct Goal(Pair<Pair<i32, u64>, i64>);
+    }
+    size_and_align! {
+        struct X<const N: usize> {
+            field1: [i32; N],
+            field2: [u8; N],
+        }
+        struct Goal(X<1000>);
+    }
+}
+
+#[test]
+fn enums() {
+    size_and_align! {
+        enum Goal {
+            Quit,
+            Move { x: i32, y: i32 },
+            ChangeColor(i32, i32, i32),
+        }
+    }
+}
+
+#[test]
+fn primitives() {
+    size_and_align! {
+        struct Goal(i32, i128, isize, usize, f32, f64, bool, char);
+    }
+}
+
+#[test]
+fn tuple() {
+    size_and_align! {
+        struct Goal((), (i32, u64, bool));
+    }
+}
+
+#[test]
+fn niche_optimization() {
+    check_size_and_align(
+        r#"
+        //- minicore: option
+        struct Goal(Option<&i32>);
+        "#,
+        8,
+        8,
+    );
+    check_size_and_align(
+        r#"
+        //- minicore: option
+        struct Goal(Option<Option<bool>>);
+        "#,
+        1,
+        1,
+    );
+}
diff --git a/crates/hir-ty/src/lib.rs b/crates/hir-ty/src/lib.rs
index 39514fc44e6c..2a41cafba98a 100644
--- a/crates/hir-ty/src/lib.rs
+++ b/crates/hir-ty/src/lib.rs
@@ -27,6 +27,8 @@ pub mod display;
 pub mod method_resolution;
 pub mod primitive;
 pub mod traits;
+pub mod layout;
+pub mod lang_items;
 
 #[cfg(test)]
 mod tests;
diff --git a/crates/hir/src/lib.rs b/crates/hir/src/lib.rs
index cbbcaebb4285..42b7c0781bf4 100644
--- a/crates/hir/src/lib.rs
+++ b/crates/hir/src/lib.rs
@@ -39,12 +39,13 @@ use arrayvec::ArrayVec;
 use base_db::{CrateDisplayName, CrateId, CrateOrigin, Edition, FileId, ProcMacroKind};
 use either::Either;
 use hir_def::{
-    adt::{ReprData, VariantData},
+    adt::VariantData,
     body::{BodyDiagnostic, SyntheticSyntax},
     expr::{BindingAnnotation, LabelId, Pat, PatId},
     generics::{TypeOrConstParamData, TypeParamProvenance},
     item_tree::ItemTreeNode,
     lang_item::LangItemTarget,
+    layout::{Layout, LayoutError, ReprOptions},
     nameres::{self, diagnostics::DefDiagnostic},
     per_ns::PerNs,
     resolver::{HasResolver, Resolver},
@@ -59,6 +60,7 @@ use hir_ty::{
     all_super_traits, autoderef,
     consteval::{unknown_const_as_generic, ComputedExpr, ConstEvalError, ConstExt},
     diagnostics::BodyValidationDiagnostic,
+    layout::layout_of_ty,
     method_resolution::{self, TyFingerprint},
     primitive::UintTy,
     traits::FnTrait,
@@ -844,6 +846,10 @@ impl Field {
         self.parent.variant_data(db).fields()[self.id].name.clone()
     }
 
+    pub fn index(&self) -> usize {
+        u32::from(self.id.into_raw()) as usize
+    }
+
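+    // E.g. for `struct Foo { a: u8, b: i32 }`, the `b` field's `index()` is
+    // 1; hover rendering uses it as the key into `FieldsShape::offset` to
+    // show the field's offset.
+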
     /// Returns the type as in the signature of the struct (i.e., with
     /// placeholder types for type parameters). Only use this in the context of
     /// the field definition.
@@ -859,6 +865,10 @@ impl Field {
         Type::new(db, var_id, ty)
     }
 
+    pub fn layout(&self, db: &dyn HirDatabase) -> Result<Layout, LayoutError> {
+        layout_of_ty(db, &self.ty(db).ty)
+    }
+
     pub fn parent_def(&self, _db: &dyn HirDatabase) -> VariantDef {
         self.parent
     }
@@ -900,7 +910,7 @@ impl Struct {
         Type::from_def(db, self.id)
     }
 
-    pub fn repr(self, db: &dyn HirDatabase) -> Option<ReprData> {
+    pub fn repr(self, db: &dyn HirDatabase) -> Option<ReprOptions> {
         db.struct_data(self.id).repr.clone()
     }
 
@@ -1076,6 +1086,13 @@ impl Adt {
         })
     }
 
+    pub fn layout(self, db: &dyn HirDatabase) -> Result<Layout, LayoutError> {
+        if db.generic_params(self.into()).iter().count() != 0 {
+            return Err(LayoutError::HasPlaceholder);
+        }
+        db.layout_of_adt(self.into(), Substitution::empty(Interner))
+    }
+
     /// Turns this ADT into a type. Any type parameters of the ADT will be
     /// turned into unknown types, which is good for e.g. finding the most
     /// general set of completions, but will not look very nice when printed.
@@ -3031,7 +3048,7 @@ impl Type {
         let adt = adt_id.into();
         match adt {
-            Adt::Struct(s) => matches!(s.repr(db), Some(ReprData { packed: true, .. })),
+            Adt::Struct(s) => s.repr(db).unwrap_or_default().pack.is_some(),
             _ => false,
         }
     }
diff --git a/crates/ide/src/hover/render.rs b/crates/ide/src/hover/render.rs
index fb00a40f9619..470c6626f9d9 100644
--- a/crates/ide/src/hover/render.rs
+++ b/crates/ide/src/hover/render.rs
@@ -2,7 +2,10 @@
 use std::fmt::Display;
 
 use either::Either;
-use hir::{AsAssocItem, AttributeTemplate, HasAttrs, HasSource, HirDisplay, Semantics, TypeInfo};
+use hir::{
+    db::HirDatabase, Adt, AsAssocItem, AttributeTemplate, HasAttrs, HasSource, HirDisplay,
+    Semantics, TypeInfo,
+};
 use ide_db::{
     base_db::SourceDatabase,
     defs::Definition,
@@ -388,10 +391,30 @@ pub(super) fn definition(
     let mod_path = definition_mod_path(db, &def);
     let (label, docs) = match def {
         Definition::Macro(it) => label_and_docs(db, it),
-        Definition::Field(it) => label_and_docs(db, it),
+        Definition::Field(it) => label_and_layout_info_and_docs(db, it, |&it| {
+            let var_def = it.parent_def(db);
+            let id = it.index();
+            let layout = it.layout(db).ok()?;
+            let offset = match var_def {
+                hir::VariantDef::Struct(s) => {
+                    let layout = Adt::from(s).layout(db).ok()?;
+                    layout.fields.offset(id, &db.current_target_data_layout())
+                }
+                _ => return None,
+            };
+            Some(format!(
+                "size = {}, align = {}, offset = {}",
+                layout.size.bytes(),
+                layout.align.abi.bytes(),
+                offset.bytes()
+            ))
+        }),
         Definition::Module(it) => label_and_docs(db, it),
         Definition::Function(it) => label_and_docs(db, it),
-        Definition::Adt(it) => label_and_docs(db, it),
+        Definition::Adt(it) => label_and_layout_info_and_docs(db, it, |&it| {
+            let layout = it.layout(db).ok()?;
+            Some(format!("size = {}, align = {}", layout.size.bytes(), layout.align.abi.bytes()))
+        }),
         Definition::Variant(it) => label_value_and_docs(db, it, |&it| {
             if !it.parent_enum(db).is_data_carrying(db) {
                 match it.eval(db) {
@@ -489,6 +512,25 @@ where
     (label, docs)
 }
 
+fn label_and_layout_info_and_docs<D, E, V>(
+    db: &RootDatabase,
+    def: D,
+    value_extractor: E,
+) -> (String, Option<Documentation>)
+where
+    D: HasAttrs + HirDisplay,
+    E: Fn(&D) -> Option<V>,
+    V: Display,
+{
+    let label = if let Some(value) = value_extractor(&def) {
+        format!("{} // {}", def.display(db), value)
+    } else {
+        def.display(db).to_string()
+    };
+    let docs = def.attrs(db).docs();
+    (label, docs)
+}
+
 fn label_value_and_docs<D, E, V>(
     db: &RootDatabase,
     def: D,
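The new `label_and_layout_info_and_docs` helper only changes how the label string is assembled; the layout info rides along as a trailing comment. A rough standalone sketch of that composition (with hypothetical `display`/`value` strings, not the real `hir` types):

    fn compose_label(display: &str, value: Option<&str>) -> String {
        match value {
            // With layout info: "struct Bar // size = 0, align = 1"
            Some(v) => format!("{display} // {v}"),
            // Without: fall back to the plain display string
            None => display.to_string(),
        }
    }

So a `None` from the extractor (e.g. a generic ADT, whose layout errs with `HasPlaceholder`) silently degrades to the old hover label, which is what the updated tests below exercise.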
diff --git a/crates/ide/src/hover/tests.rs b/crates/ide/src/hover/tests.rs
index f8be4cfb04c2..f630c3b36dc6 100644
--- a/crates/ide/src/hover/tests.rs
+++ b/crates/ide/src/hover/tests.rs
@@ -522,6 +522,27 @@ fn main() { }
     );
 }
 
+#[test]
+fn hover_field_offset() {
+    // Hovering over the field when instantiating
+    check(
+        r#"
+struct Foo { fiel$0d_a: u8, field_b: i32, field_c: i16 }
+"#,
+        expect![[r#"
+            *field_a*
+
+            ```rust
+            test::Foo
+            ```
+
+            ```rust
+            field_a: u8 // size = 1, align = 1, offset = 6
+            ```
+        "#]],
+    );
+}
+
 #[test]
 fn hover_shows_struct_field_info() {
     // Hovering over the field when instantiating
@@ -534,16 +555,16 @@ fn main() {
 }
 "#,
         expect![[r#"
-        *field_a*
+            *field_a*
 
-        ```rust
-        test::Foo
-        ```
+            ```rust
+            test::Foo
+            ```
 
-        ```rust
-        field_a: u32
-        ```
-    "#]],
+            ```rust
+            field_a: u32 // size = 4, align = 4, offset = 0
+            ```
+        "#]],
     );
 
     // Hovering over the field in the definition
@@ -556,16 +577,16 @@ fn main() {
 }
 "#,
         expect![[r#"
-        *field_a*
+            *field_a*
 
-        ```rust
-        test::Foo
-        ```
+            ```rust
+            test::Foo
+            ```
 
-        ```rust
-        field_a: u32
-        ```
-    "#]],
+            ```rust
+            field_a: u32 // size = 4, align = 4, offset = 0
+            ```
+        "#]],
     );
 }
@@ -1508,30 +1529,30 @@ struct Bar;
 fn foo() { let bar = Ba$0r; }
 "#,
-        expect![[r##"
-        *Bar*
+        expect![[r#"
+            *Bar*
 
-        ```rust
-        test
-        ```
+            ```rust
+            test
+            ```
 
-        ```rust
-        struct Bar
-        ```
+            ```rust
+            struct Bar // size = 0, align = 1
+            ```
 
-        ---
+            ---
 
-        This is an example
-        multiline doc
+            This is an example
+            multiline doc
 
-        # Example
+            # Example
 
-        ```
-        let five = 5;
+            ```
+            let five = 5;
 
-        assert_eq!(6, my_crate::add_one(5));
-        ```
-    "##]],
+            assert_eq!(6, my_crate::add_one(5));
+            ```
+        "#]],
     );
 }
@@ -1545,20 +1566,20 @@ struct Bar;
 fn foo() { let bar = Ba$0r; }
 "#,
         expect![[r#"
-        *Bar*
+            *Bar*
 
-        ```rust
-        test
-        ```
+            ```rust
+            test
+            ```
 
-        ```rust
-        struct Bar
-        ```
+            ```rust
+            struct Bar // size = 0, align = 1
+            ```
 
-        ---
+            ---
 
-        bar docs
-    "#]],
+            bar docs
+        "#]],
     );
 }
@@ -1574,22 +1595,22 @@ struct Bar;
 fn foo() { let bar = Ba$0r; }
 "#,
         expect![[r#"
-        *Bar*
+            *Bar*
 
-        ```rust
-        test
-        ```
+            ```rust
+            test
+            ```
 
-        ```rust
-        struct Bar
-        ```
+            ```rust
+            struct Bar // size = 0, align = 1
+            ```
 
-        ---
+            ---
 
-        bar docs 0
-        bar docs 1
-        bar docs 2
-    "#]],
+            bar docs 0
+            bar docs 1
+            bar docs 2
+        "#]],
     );
 }
@@ -1602,20 +1623,20 @@ pub struct Foo;
 pub struct B$0ar
 "#,
         expect![[r#"
-        *Bar*
+            *Bar*
 
-        ```rust
-        test
-        ```
+            ```rust
+            test
+            ```
 
-        ```rust
-        pub struct Bar
-        ```
+            ```rust
+            pub struct Bar // size = 0, align = 1
+            ```
 
-        ---
+            ---
 
-        [external](https://www.google.com)
-    "#]],
+            [external](https://www.google.com)
+        "#]],
     );
 }
@@ -1629,20 +1650,20 @@ pub struct Foo;
 pub struct B$0ar
 "#,
         expect![[r#"
-        *Bar*
+            *Bar*
 
-        ```rust
-        test
-        ```
+            ```rust
+            test
+            ```
 
-        ```rust
-        pub struct Bar
-        ```
+            ```rust
+            pub struct Bar // size = 0, align = 1
+            ```
 
-        ---
+            ---
 
-        [baz](Baz)
-    "#]],
+            [baz](Baz)
+        "#]],
     );
 }
@@ -2960,7 +2981,7 @@ fn main() {
             ```
 
             ```rust
-            f: i32
+            f: i32 // size = 4, align = 4, offset = 0
             ```
         "#]],
     );
@@ -4203,20 +4224,20 @@ pub fn gimme() -> theitem::TheItem {
 }
 "#,
         expect![[r#"
-        *[`TheItem`]*
+            *[`TheItem`]*
 
-        ```rust
-        test::theitem
-        ```
+            ```rust
+            test::theitem
+            ```
 
-        ```rust
-        pub struct TheItem
-        ```
+            ```rust
+            pub struct TheItem // size = 0, align = 1
+            ```
 
-        ---
+            ---
 
-        This is the item. Cool!
-    "#]],
+            This is the item. Cool!
+        "#]],
     );
 }
@@ -4351,20 +4372,20 @@ mod string {
 }
 "#,
         expect![[r#"
-        *String*
+            *String*
 
-        ```rust
-        main
-        ```
+            ```rust
+            main
+            ```
 
-        ```rust
-        struct String
-        ```
+            ```rust
+            struct String // size = 0, align = 1
+            ```
 
-        ---
+            ---
 
-        Custom `String` type.
- "#]], + Custom `String` type. + "#]], ) } @@ -5025,7 +5046,7 @@ foo_macro!( ``` ```rust - pub struct Foo + pub struct Foo // size = 0, align = 1 ``` --- @@ -5040,7 +5061,7 @@ fn hover_intra_in_attr() { check( r#" #[doc = "Doc comment for [`Foo$0`]"] -pub struct Foo; +pub struct Foo(i32); "#, expect![[r#" *[`Foo`]* @@ -5050,7 +5071,7 @@ pub struct Foo; ``` ```rust - pub struct Foo + pub struct Foo // size = 4, align = 4 ``` --- diff --git a/lib/la-arena/src/map.rs b/lib/la-arena/src/map.rs index 5f347e274500..b9d491da3c0c 100644 --- a/lib/la-arena/src/map.rs +++ b/lib/la-arena/src/map.rs @@ -86,6 +86,14 @@ impl ArenaMap, V> { self.v.iter().enumerate().filter_map(|(idx, o)| Some((Self::from_idx(idx), o.as_ref()?))) } + /// Returns an iterator over the arena indexes and values in the map. + pub fn iter_mut(&mut self) -> impl Iterator, &mut V)> { + self.v + .iter_mut() + .enumerate() + .filter_map(|(idx, o)| Some((Self::from_idx(idx), o.as_mut()?))) + } + /// Gets the given key's corresponding entry in the map for in-place manipulation. pub fn entry(&mut self, idx: Idx) -> Entry<'_, Idx, V> { let idx = Self::to_idx(idx);