Auto merge of rust-lang#115915 - cjgillot:const-pair, r=<try>
WIP Replace ConstValue::Slice by ConstValue::ScalarPair

Both the interpreter and codegen have dedicated paths for scalar pairs. Having one in ConstValue makes it able to represent all "immediate" values.

To mitigate the increase in the size of the ConstValue struct, a first commit interns it.
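For orientation, the shape this diff matches on everywhere below (constructors such as ConstValue::from_scalar/from_pair, reads through .kind()) is roughly the following. This is a minimal standalone model inferred from the hunks, not the compiler's actual definitions; in particular, rustc interns the kind in the 'tcx arena rather than using Rc:

```rust
// Minimal standalone model of the new representation, inferred from the
// hunks in this commit; not the real compiler types.
use std::rc::Rc;

#[derive(Clone, Copy, Debug)]
#[allow(dead_code)]
enum Scalar {
    Int(u128),
    Ptr { alloc_id: u64, offset: u64 },
}

#[derive(Debug)]
#[allow(dead_code)]
enum ConstValueKind {
    ZeroSized,
    Scalar(Scalar),
    // Replaces the old `Slice { data, start, end }`: a slice reference is
    // now an ordinary "immediate" (pointer, length) pair.
    ScalarPair(Scalar, Scalar),
    Indirect { alloc_id: u64, offset: u64 },
}

// `ConstValue` becomes a cheap handle around an interned kind, which is why
// the first commit interns it: the handle stays small even though
// `ScalarPair` enlarges the kind itself.
#[derive(Clone)]
struct ConstValue(Rc<ConstValueKind>); // rustc arena-interns; Rc is a stand-in

impl ConstValue {
    fn from_scalar(s: Scalar) -> Self {
        ConstValue(Rc::new(ConstValueKind::Scalar(s)))
    }
    fn from_pair(a: Scalar, b: Scalar) -> Self {
        ConstValue(Rc::new(ConstValueKind::ScalarPair(a, b)))
    }
    fn kind(&self) -> &ConstValueKind {
        &self.0
    }
    // Mirrors the `try_to_scalar` that `asm_const_to_str` switches to below.
    fn try_to_scalar(&self) -> Option<Scalar> {
        match *self.kind() {
            ConstValueKind::Scalar(s) => Some(s),
            _ => None,
        }
    }
}

fn main() {
    // A constant `&str` of length 2 modeled as a (pointer, length) pair.
    let s = ConstValue::from_pair(Scalar::Ptr { alloc_id: 1, offset: 0 }, Scalar::Int(2));
    assert!(s.try_to_scalar().is_none()); // a pair is not a single scalar
    assert!(matches!(s.kind(), ConstValueKind::ScalarPair(..)));
}
```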
bors committed Sep 17, 2023
2 parents 8ed1d4a + e91604e commit 5b77e18
Showing 96 changed files with 643 additions and 577 deletions.
189 changes: 91 additions & 98 deletions compiler/rustc_codegen_cranelift/src/constant.rs
@@ -3,7 +3,7 @@
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::mir::interpret::{
read_target_uint, AllocId, ConstValue, ErrorHandled, GlobalAlloc, Scalar,
read_target_uint, AllocId, ConstValue, ConstValueKind, ErrorHandled, GlobalAlloc, Scalar,
};

use cranelift_module::*;
@@ -103,6 +103,91 @@ pub(crate) fn codegen_constant_operand<'tcx>(
codegen_const_value(fx, const_val, ty)
}

pub(crate) fn codegen_const_scalar<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
scalar: Scalar,
layout: TyAndLayout<'tcx>,
) -> CValue<'tcx> {
match scalar {
Scalar::Int(int) => {
if fx.clif_type(layout.ty).is_some() {
return CValue::const_val(fx, layout, int);
} else {
let raw_val = int.to_bits(int.size()).unwrap();
let val = match int.size().bytes() {
1 => fx.bcx.ins().iconst(types::I8, raw_val as i64),
2 => fx.bcx.ins().iconst(types::I16, raw_val as i64),
4 => fx.bcx.ins().iconst(types::I32, raw_val as i64),
8 => fx.bcx.ins().iconst(types::I64, raw_val as i64),
16 => {
let lsb = fx.bcx.ins().iconst(types::I64, raw_val as u64 as i64);
let msb = fx.bcx.ins().iconst(types::I64, (raw_val >> 64) as u64 as i64);
fx.bcx.ins().iconcat(lsb, msb)
}
_ => unreachable!(),
};

// FIXME avoid this extra copy to the stack and directly write to the final
// destination
let place = CPlace::new_stack_slot(fx, layout);
place.to_ptr().store(fx, val, MemFlags::trusted());
place.to_cvalue(fx)
}
}
Scalar::Ptr(ptr, _size) => {
let (alloc_id, offset) = ptr.into_parts(); // we know the `offset` is relative
let base_addr = match fx.tcx.global_alloc(alloc_id) {
GlobalAlloc::Memory(alloc) => {
let data_id = data_id_for_alloc_id(
&mut fx.constants_cx,
fx.module,
alloc_id,
alloc.inner().mutability,
);
let local_data_id = fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
if fx.clif_comments.enabled() {
fx.add_comment(local_data_id, format!("{:?}", alloc_id));
}
fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
}
GlobalAlloc::Function(instance) => {
let func_id = crate::abi::import_function(fx.tcx, fx.module, instance);
let local_func_id = fx.module.declare_func_in_func(func_id, &mut fx.bcx.func);
fx.bcx.ins().func_addr(fx.pointer_type, local_func_id)
}
GlobalAlloc::VTable(ty, trait_ref) => {
let alloc_id = fx.tcx.vtable_allocation((ty, trait_ref));
let alloc = fx.tcx.global_alloc(alloc_id).unwrap_memory();
// FIXME: factor this common code with the `Memory` arm into a function?
let data_id = data_id_for_alloc_id(
&mut fx.constants_cx,
fx.module,
alloc_id,
alloc.inner().mutability,
);
let local_data_id = fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
}
GlobalAlloc::Static(def_id) => {
assert!(fx.tcx.is_static(def_id));
let data_id = data_id_for_static(fx.tcx, fx.module, def_id, false);
let local_data_id = fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
if fx.clif_comments.enabled() {
fx.add_comment(local_data_id, format!("{:?}", def_id));
}
fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
}
};
let val = if offset.bytes() != 0 {
fx.bcx.ins().iadd_imm(base_addr, i64::try_from(offset.bytes()).unwrap())
} else {
base_addr
};
CValue::by_val(val, layout)
}
}
}

pub(crate) fn codegen_const_value<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
const_val: ConstValue<'tcx>,
@@ -115,107 +200,15 @@ pub(crate) fn codegen_const_value<'tcx>(
return CValue::by_ref(crate::Pointer::dangling(layout.align.pref), layout);
}

match const_val {
ConstValue::ZeroSized => unreachable!(), // we already handled ZST above
ConstValue::Scalar(x) => match x {
Scalar::Int(int) => {
if fx.clif_type(layout.ty).is_some() {
return CValue::const_val(fx, layout, int);
} else {
let raw_val = int.to_bits(int.size()).unwrap();
let val = match int.size().bytes() {
1 => fx.bcx.ins().iconst(types::I8, raw_val as i64),
2 => fx.bcx.ins().iconst(types::I16, raw_val as i64),
4 => fx.bcx.ins().iconst(types::I32, raw_val as i64),
8 => fx.bcx.ins().iconst(types::I64, raw_val as i64),
16 => {
let lsb = fx.bcx.ins().iconst(types::I64, raw_val as u64 as i64);
let msb =
fx.bcx.ins().iconst(types::I64, (raw_val >> 64) as u64 as i64);
fx.bcx.ins().iconcat(lsb, msb)
}
_ => unreachable!(),
};

// FIXME avoid this extra copy to the stack and directly write to the final
// destination
let place = CPlace::new_stack_slot(fx, layout);
place.to_ptr().store(fx, val, MemFlags::trusted());
place.to_cvalue(fx)
}
}
Scalar::Ptr(ptr, _size) => {
let (alloc_id, offset) = ptr.into_parts(); // we know the `offset` is relative
let base_addr = match fx.tcx.global_alloc(alloc_id) {
GlobalAlloc::Memory(alloc) => {
let data_id = data_id_for_alloc_id(
&mut fx.constants_cx,
fx.module,
alloc_id,
alloc.inner().mutability,
);
let local_data_id =
fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
if fx.clif_comments.enabled() {
fx.add_comment(local_data_id, format!("{:?}", alloc_id));
}
fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
}
GlobalAlloc::Function(instance) => {
let func_id = crate::abi::import_function(fx.tcx, fx.module, instance);
let local_func_id =
fx.module.declare_func_in_func(func_id, &mut fx.bcx.func);
fx.bcx.ins().func_addr(fx.pointer_type, local_func_id)
}
GlobalAlloc::VTable(ty, trait_ref) => {
let alloc_id = fx.tcx.vtable_allocation((ty, trait_ref));
let alloc = fx.tcx.global_alloc(alloc_id).unwrap_memory();
// FIXME: factor this common code with the `Memory` arm into a function?
let data_id = data_id_for_alloc_id(
&mut fx.constants_cx,
fx.module,
alloc_id,
alloc.inner().mutability,
);
let local_data_id =
fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
}
GlobalAlloc::Static(def_id) => {
assert!(fx.tcx.is_static(def_id));
let data_id = data_id_for_static(fx.tcx, fx.module, def_id, false);
let local_data_id =
fx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
if fx.clif_comments.enabled() {
fx.add_comment(local_data_id, format!("{:?}", def_id));
}
fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
}
};
let val = if offset.bytes() != 0 {
fx.bcx.ins().iadd_imm(base_addr, i64::try_from(offset.bytes()).unwrap())
} else {
base_addr
};
CValue::by_val(val, layout)
}
},
ConstValue::Indirect { alloc_id, offset } => CValue::by_ref(
match *const_val.kind() {
ConstValueKind::ZeroSized => unreachable!(), // we already handled ZST above
ConstValueKind::Scalar(x) => codegen_const_scalar(fx, x, layout),
ConstValueKind::Indirect { alloc_id, offset } => CValue::by_ref(
pointer_for_allocation(fx, alloc_id)
.offset_i64(fx, i64::try_from(offset.bytes()).unwrap()),
layout,
),
ConstValue::Slice { data, start, end } => {
let alloc_id = fx.tcx.reserve_and_set_memory_alloc(data);
let ptr = pointer_for_allocation(fx, alloc_id)
.offset_i64(fx, i64::try_from(start).unwrap())
.get_addr(fx);
let len = fx
.bcx
.ins()
.iconst(fx.pointer_type, i64::try_from(end.checked_sub(start).unwrap()).unwrap());
CValue::by_val_pair(ptr, len, layout)
}
ConstValueKind::ScalarPair(a, b) => todo!(),
}
}

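A side note on `codegen_const_scalar` above: Cranelift's `iconst` only goes up to I64, so 128-bit immediates are split into two 64-bit halves and rebuilt with `iconcat`. A standalone check of that arithmetic (plain Rust; no Cranelift involved):

```rust
// Mirrors the 16-byte case in `codegen_const_scalar`: split a u128 into
// lsb/msb halves, which `iconcat(lsb, msb)` then reassembles.
fn main() {
    let raw_val: u128 = 0x1122_3344_5566_7788_99aa_bbcc_ddee_ff00;
    let lsb = raw_val as u64; // `raw_val as u64 as i64` in the diff
    let msb = (raw_val >> 64) as u64; // `(raw_val >> 64) as u64 as i64`
    assert_eq!(((msb as u128) << 64) | (lsb as u128), raw_val);
    println!("lsb = {lsb:#018x}, msb = {msb:#018x}");
}
```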
4 changes: 2 additions & 2 deletions compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs
@@ -171,8 +171,8 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
let idx_const = crate::constant::mir_operand_get_const_val(fx, idx)
.expect("simd_shuffle idx not const");

let idx_bytes = match idx_const {
ConstValue::Indirect { alloc_id, offset } => {
let idx_bytes = match *idx_const.kind() {
ConstValueKind::Indirect { alloc_id, offset } => {
let alloc = fx.tcx.global_alloc(alloc_id).unwrap_memory();
let size = Size::from_bytes(
4 * ret_lane_count, /* size_of([u32; ret_lane_count]) */
2 changes: 1 addition & 1 deletion compiler/rustc_codegen_ssa/src/common.rs
@@ -197,7 +197,7 @@ pub fn asm_const_to_str<'tcx>(
const_value: ConstValue<'tcx>,
ty_and_layout: TyAndLayout<'tcx>,
) -> String {
let ConstValue::Scalar(scalar) = const_value else {
let Some(scalar) = const_value.try_to_scalar() else {
span_bug!(sp, "expected Scalar for promoted asm const, but got {:#?}", const_value)
};
let value = scalar.assert_bits(ty_and_layout.size);
29 changes: 13 additions & 16 deletions compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -7,7 +7,7 @@ use crate::traits::*;
use crate::MemFlags;

use rustc_middle::mir;
use rustc_middle::mir::interpret::{alloc_range, ConstValue, Pointer, Scalar};
use rustc_middle::mir::interpret::{alloc_range, ConstValue, ConstValueKind};
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::Ty;
use rustc_target::abi::{self, Abi, Align, Size};
@@ -91,35 +91,32 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
) -> Self {
let layout = bx.layout_of(ty);

let val = match val {
ConstValue::Scalar(x) => {
let val = match *val.kind() {
ConstValueKind::Scalar(x) => {
let Abi::Scalar(scalar) = layout.abi else {
bug!("from_const: invalid ByVal layout: {:#?}", layout);
};
let llval = bx.scalar_to_backend(x, scalar, bx.immediate_backend_type(layout));
OperandValue::Immediate(llval)
}
ConstValue::ZeroSized => return OperandRef::zero_sized(layout),
ConstValue::Slice { data, start, end } => {
let Abi::ScalarPair(a_scalar, _) = layout.abi else {
bug!("from_const: invalid ScalarPair layout: {:#?}", layout);
ConstValueKind::ZeroSized => return OperandRef::zero_sized(layout),
ConstValueKind::ScalarPair(a, b) => {
let Abi::ScalarPair(a_scalar, b_scalar) = layout.abi else {
bug!("from_const: invalid ByVal layout: {:#?}", layout);
};
let a = Scalar::from_pointer(
Pointer::new(
bx.tcx().reserve_and_set_memory_alloc(data),
Size::from_bytes(start),
),
&bx.tcx(),
);
let a_llval = bx.scalar_to_backend(
a,
a_scalar,
bx.scalar_pair_element_backend_type(layout, 0, true),
);
let b_llval = bx.const_usize((end - start) as u64);
let b_llval = bx.scalar_to_backend(
b,
b_scalar,
bx.scalar_pair_element_backend_type(layout, 1, true),
);
OperandValue::Pair(a_llval, b_llval)
}
ConstValue::Indirect { alloc_id, offset } => {
ConstValueKind::Indirect { alloc_id, offset } => {
let alloc = bx.tcx().global_alloc(alloc_id).unwrap_memory();
return Self::from_const_alloc(bx, layout, alloc, offset);
}
34 changes: 10 additions & 24 deletions compiler/rustc_const_eval/src/const_eval/eval_queries.rs
@@ -111,23 +111,24 @@ pub(super) fn op_to_const<'tcx>(
ecx: &CompileTimeEvalContext<'_, 'tcx>,
op: &OpTy<'tcx>,
) -> ConstValue<'tcx> {
let tcx = *ecx.tcx;
// Handle ZST consistently and early.
if op.layout.is_zst() {
return ConstValue::ZeroSized;
return ConstValue::zero_sized(tcx);
}

// All scalar types should be stored as `ConstValue::Scalar`. This is needed to make
// `ConstValue::try_to_scalar` efficient; we want that to work for *all* constants of scalar
// All scalar types should be stored as `ConstValueKind::Scalar`. This is needed to make
// `ConstValueKind::try_to_scalar` efficient; we want that to work for *all* constants of scalar
// type (it's used throughout the compiler and having it work just on literals is not enough)
// and we want it to be fast (i.e., don't go to an `Allocation` and reconstruct the `Scalar`
// from its byte-serialized form).
let force_as_immediate = match op.layout.abi {
Abi::Scalar(abi::Scalar::Initialized { .. }) => true,
// We don't *force* `ConstValue::Slice` for `ScalarPair`. This has the advantage that if the
// We don't *force* `ConstValueKind::Slice` for `ScalarPair`. This has the advantage that if the
// input `op` is a place, then turning it into a `ConstValue` and back into a `OpTy` will
// not have to generate any duplicate allocations (we preserve the original `AllocId` in
// `ConstValue::Indirect`). It means accessing the contents of a slice can be slow (since
// they can be stored as `ConstValue::Indirect`), but that's not relevant since we barely
// `ConstValueKind::Indirect`). It means accessing the contents of a slice can be slow (since
// they can be stored as `ConstValueKind::Indirect`), but that's not relevant since we barely
// ever have to do this. (`try_get_slice_bytes_for_diagnostics` exists to provide this
// functionality.)
_ => false,
@@ -145,27 +146,12 @@ pub(super) fn op_to_const<'tcx>(
// We know `offset` is relative to the allocation, so we can use `into_parts`.
let (alloc_id, offset) = mplace.ptr().into_parts();
let alloc_id = alloc_id.expect("cannot have `fake` place for non-ZST type");
ConstValue::Indirect { alloc_id, offset }
ConstValue::from_memory(tcx, alloc_id, offset)
}
// see comment on `let force_as_immediate` above
Right(imm) => match *imm {
Immediate::Scalar(x) => ConstValue::Scalar(x),
Immediate::ScalarPair(a, b) => {
debug!("ScalarPair(a: {:?}, b: {:?})", a, b);
// FIXME: assert that this has an appropriate type.
// Currently we actually get here for non-[u8] slices during valtree construction!
let msg = "`op_to_const` on an immediate scalar pair must only be used on slice references to actually allocated memory";
// We know `offset` is relative to the allocation, so we can use `into_parts`.
// We use `ConstValue::Slice` so that we don't have to generate an allocation for
// `ConstValue::Indirect` here.
let (alloc_id, offset) = a.to_pointer(ecx).expect(msg).into_parts();
let alloc_id = alloc_id.expect(msg);
let data = ecx.tcx.global_alloc(alloc_id).unwrap_memory();
let start = offset.bytes_usize();
let len = b.to_target_usize(ecx).expect(msg);
let len: usize = len.try_into().unwrap();
ConstValue::Slice { data, start, end: start + len }
}
Immediate::Scalar(x) => ConstValue::from_scalar(tcx, x),
Immediate::ScalarPair(a, b) => ConstValue::from_pair(tcx, a, b),
Immediate::Uninit => bug!("`Uninit` is not a valid value for {}", op.layout.ty),
},
}
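The `op_to_const` hunk above is the heart of the change: an immediate scalar pair no longer has to be decoded into a `Slice` (which required proving the first half was a pointer into actually-allocated memory and, per the removed FIXME, was also reached for non-[u8] slices during valtree construction); it now maps directly onto a constructor. A toy model of that collapse, with stand-in types rather than the real rustc_const_eval/rustc_middle ones:

```rust
// Stand-in types for illustration; `Immediate` and `ConstValue` really live
// in rustc_const_eval / rustc_middle.
#[derive(Clone, Copy, Debug)]
struct Scalar(u128);

#[allow(dead_code)]
enum Immediate {
    Scalar(Scalar),
    ScalarPair(Scalar, Scalar),
}

#[derive(Debug)]
enum ConstValueKind {
    Scalar(Scalar),
    ScalarPair(Scalar, Scalar),
}

fn op_to_const_immediate(imm: Immediate) -> ConstValueKind {
    match imm {
        Immediate::Scalar(x) => ConstValueKind::Scalar(x),
        // The old code decoded (ptr, len) into `Slice { data, start, end }`
        // here; any pair now round-trips unchanged.
        Immediate::ScalarPair(a, b) => ConstValueKind::ScalarPair(a, b),
    }
}

fn main() {
    let pair = Immediate::ScalarPair(Scalar(0x1000), Scalar(3));
    println!("{:?}", op_to_const_immediate(pair));
}
```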
2 changes: 1 addition & 1 deletion compiler/rustc_const_eval/src/const_eval/mod.rs
@@ -30,7 +30,7 @@ pub(crate) fn const_caller_location(
if intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &loc_place).is_err() {
bug!("intern_const_alloc_recursive should not error in this case")
}
ConstValue::Scalar(Scalar::from_maybe_pointer(loc_place.ptr(), &tcx))
ConstValue::from_scalar(tcx, Scalar::from_maybe_pointer(loc_place.ptr(), &tcx))
}

// We forbid type-level constants that contain more than `VALTREE_MAX_NODES` nodes.
4 changes: 2 additions & 2 deletions compiler/rustc_const_eval/src/const_eval/valtrees.rs
@@ -219,10 +219,10 @@ pub fn valtree_to_const_value<'tcx>(
match ty.kind() {
ty::FnDef(..) => {
assert!(valtree.unwrap_branch().is_empty());
ConstValue::ZeroSized
ConstValue::zero_sized(tcx)
}
ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => match valtree {
ty::ValTree::Leaf(scalar_int) => ConstValue::Scalar(Scalar::Int(scalar_int)),
ty::ValTree::Leaf(scalar_int) => ConstValue::from_scalar(tcx, Scalar::Int(scalar_int)),
ty::ValTree::Branch(_) => bug!(
"ValTrees for Bool, Int, Uint, Float or Char should have the form ValTree::Leaf"
),
(Diff truncated; the remaining changed files are not shown.)