diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
index 36da9037e43d9..39755169e6cae 100644
--- a/compiler/rustc_const_eval/src/interpret/operand.rs
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -310,7 +310,7 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
         let ty = tcx.ty_ordering_enum(None);
         let layout =
             tcx.layout_of(ty::TypingEnv::fully_monomorphized().as_query_input(ty)).unwrap();
-        Self::from_scalar(Scalar::from_i8(c as i8), layout)
+        Self::from_scalar(Scalar::Int(c.into()), layout)
     }
 
     pub fn from_pair(a: Self, b: Self, cx: &(impl HasTypingEnv<'tcx> + HasTyCtxt<'tcx>)) -> Self {
diff --git a/compiler/rustc_middle/src/mir/interpret/value.rs b/compiler/rustc_middle/src/mir/interpret/value.rs
index 9d462093b9ea1..7ba0e5b5e07e5 100644
--- a/compiler/rustc_middle/src/mir/interpret/value.rs
+++ b/compiler/rustc_middle/src/mir/interpret/value.rs
@@ -180,27 +180,27 @@ impl Scalar {
     #[inline]
     pub fn from_i8(i: i8) -> Self {
-        Self::from_int(i, Size::from_bits(8))
+        Self::Int(i.into())
     }
 
     #[inline]
     pub fn from_i16(i: i16) -> Self {
-        Self::from_int(i, Size::from_bits(16))
+        Self::Int(i.into())
     }
 
     #[inline]
     pub fn from_i32(i: i32) -> Self {
-        Self::from_int(i, Size::from_bits(32))
+        Self::Int(i.into())
     }
 
     #[inline]
     pub fn from_i64(i: i64) -> Self {
-        Self::from_int(i, Size::from_bits(64))
+        Self::Int(i.into())
     }
 
     #[inline]
     pub fn from_i128(i: i128) -> Self {
-        Self::from_int(i, Size::from_bits(128))
+        Self::Int(i.into())
     }
 
     #[inline]
diff --git a/compiler/rustc_middle/src/ty/consts/int.rs b/compiler/rustc_middle/src/ty/consts/int.rs
index 9f5e31d894cb3..9c9cd6953392a 100644
--- a/compiler/rustc_middle/src/ty/consts/int.rs
+++ b/compiler/rustc_middle/src/ty/consts/int.rs
@@ -422,9 +422,9 @@ macro_rules! from_scalar_int_for_x {
             impl From<ScalarInt> for $ty {
                 #[inline]
                 fn from(int: ScalarInt) -> Self {
-                    // The `unwrap` cannot fail because to_bits (if it succeeds)
+                    // The `unwrap` cannot fail because to_uint (if it succeeds)
                     // is guaranteed to return a value that fits into the size.
-                    int.to_bits(Size::from_bytes(size_of::<$ty>()))
+                    int.to_uint(Size::from_bytes(size_of::<$ty>()))
                         .try_into().unwrap()
                 }
             }
@@ -450,6 +450,49 @@ impl From<char> for ScalarInt {
     }
 }
 
+macro_rules! from_x_for_scalar_int_signed {
+    ($($ty:ty),*) => {
+        $(
+            impl From<$ty> for ScalarInt {
+                #[inline]
+                fn from(u: $ty) -> Self {
+                    Self {
+                        data: u128::from(u.cast_unsigned()), // go via the unsigned type of the same size
+                        size: NonZero::new(size_of::<$ty>() as u8).unwrap(),
+                    }
+                }
+            }
+        )*
+    }
+}
+
+macro_rules! from_scalar_int_for_x_signed {
+    ($($ty:ty),*) => {
+        $(
+            impl From<ScalarInt> for $ty {
+                #[inline]
+                fn from(int: ScalarInt) -> Self {
+                    // The `unwrap` cannot fail because to_int (if it succeeds)
+                    // is guaranteed to return a value that fits into the size.
+                    int.to_int(Size::from_bytes(size_of::<$ty>()))
+                        .try_into().unwrap()
+                }
+            }
+        )*
+    }
+}
+
+from_x_for_scalar_int_signed!(i8, i16, i32, i64, i128);
+from_scalar_int_for_x_signed!(i8, i16, i32, i64, i128);
+
+impl From<std::cmp::Ordering> for ScalarInt {
+    #[inline]
+    fn from(c: std::cmp::Ordering) -> Self {
+        // Here we rely on `Ordering` having the same values in host and target!
+        ScalarInt::from(c as i8)
+    }
+}
+
 /// Error returned when a conversion from ScalarInt to char fails.
 #[derive(Debug)]
 pub struct CharTryFromScalarInt;
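Not part of the patch: the new `From` impls above store a signed value by going through the unsigned type of the same width, zero-extending into the 128-bit `data` field, and recover it with a sign-extending read (`to_int`). Below is a minimal standalone sketch of that round trip for one width, with hypothetical `store_i16`/`load_i16` helpers standing in for `ScalarInt`'s internals:

```rust
fn store_i16(i: i16) -> u128 {
    // Go via the unsigned type of the same size, then zero-extend,
    // mirroring `u128::from(u.cast_unsigned())` in the macro above.
    u128::from(i.cast_unsigned())
}

fn load_i16(data: u128) -> i16 {
    // Truncate back to 16 bits and reinterpret as signed — the value
    // a sign-extending `to_int` ultimately yields at this width.
    (data as u16).cast_signed()
}

fn main() {
    assert_eq!(store_i16(-1), 0xffff); // -1i16 zero-extends to 0x…0000_ffff
    assert_eq!(load_i16(store_i16(-12345)), -12345); // round-trips exactly
    // The `Ordering` impl rides on the same path via `as i8`:
    assert_eq!(std::cmp::Ordering::Less as i8, -1);
}
```

The `cast_unsigned` detour matters because `data` must hold the value with all unused high bits zero; a plain `as u128` sign-extension of a negative value would set them.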
diff --git a/library/core/src/intrinsics/mod.rs b/library/core/src/intrinsics/mod.rs
index 23bafa778bc6b..439c8c1f9a1e6 100644
--- a/library/core/src/intrinsics/mod.rs
+++ b/library/core/src/intrinsics/mod.rs
@@ -1,7 +1,10 @@
 //! Compiler intrinsics.
 //!
-//! The corresponding definitions are in <https://github.com/rust-lang/rust/blob/master/compiler/rustc_codegen_llvm/src/intrinsic.rs>.
-//! The corresponding const implementations are in <https://github.com/rust-lang/rust/blob/master/compiler/rustc_const_eval/src/interpret/intrinsics.rs>.
+//! These are the imports making intrinsics available to Rust code. The actual implementations live in the compiler.
+//! Some of these intrinsics are lowered to MIR in <https://github.com/rust-lang/rust/blob/master/compiler/rustc_mir_transform/src/lower_intrinsics.rs>.
+//! The remaining intrinsics are implemented for the LLVM backend in <https://github.com/rust-lang/rust/blob/master/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs>
+//! and <https://github.com/rust-lang/rust/blob/master/compiler/rustc_codegen_llvm/src/intrinsic.rs>,
+//! and for const evaluation in <https://github.com/rust-lang/rust/blob/master/compiler/rustc_const_eval/src/interpret/intrinsics.rs>.
 //!
 //! # Const intrinsics
 //!
@@ -20,28 +23,14 @@
 //!
 //! The volatile intrinsics provide operations intended to act on I/O
 //! memory, which are guaranteed to not be reordered by the compiler
-//! across other volatile intrinsics. See the LLVM documentation on
-//! [[volatile]].
-//!
-//! [volatile]: https://llvm.org/docs/LangRef.html#volatile-memory-accesses
+//! across other volatile intrinsics. See [`read_volatile`][ptr::read_volatile]
+//! and [`write_volatile`][ptr::write_volatile].
 //!
 //! # Atomics
 //!
 //! The atomic intrinsics provide common atomic operations on machine
-//! words, with multiple possible memory orderings. They obey the same
-//! semantics as C++11. See the LLVM documentation on [[atomics]].
-//!
-//! [atomics]: https://llvm.org/docs/Atomics.html
-//!
-//! A quick refresher on memory ordering:
-//!
-//! * Acquire - a barrier for acquiring a lock. Subsequent reads and writes
-//!   take place after the barrier.
-//! * Release - a barrier for releasing a lock. Preceding reads and writes
-//!   take place before the barrier.
-//! * Sequentially consistent - sequentially consistent operations are
-//!   guaranteed to happen in order. This is the standard mode for working
-//!   with atomic types and is equivalent to Java's `volatile`.
+//! words, with multiple possible memory orderings. See the
+//! [atomic types][crate::sync::atomic] docs for details.
 //!
 //! # Unwinding
 //!
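Not part of the patch: the rewritten module docs now defer to the `sync::atomic` documentation rather than restating C++11 ordering rules. For reference, a minimal release/acquire publication example of the kind those docs cover, using only stable `std` APIs (no intrinsics):

```rust
use std::sync::atomic::{AtomicBool, AtomicU32, Ordering};
use std::thread;

static DATA: AtomicU32 = AtomicU32::new(0);
static READY: AtomicBool = AtomicBool::new(false);

fn main() {
    let writer = thread::spawn(|| {
        DATA.store(42, Ordering::Relaxed);
        // `Release` makes the DATA store visible to any thread that
        // later observes READY == true with an `Acquire` load.
        READY.store(true, Ordering::Release);
    });
    while !READY.load(Ordering::Acquire) {
        std::hint::spin_loop(); // wait for the flag to be published
    }
    // Acquire/Release synchronization guarantees we see the DATA write.
    assert_eq!(DATA.load(Ordering::Relaxed), 42);
    writer.join().unwrap();
}
```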