From c5d1764eb646e4e29f44f08c6c39740e377f0ab0 Mon Sep 17 00:00:00 2001 From: Joshua Liebow-Feeser Date: Fri, 10 May 2024 15:39:01 -0700 Subject: [PATCH 1/2] Move tests to more appropriate locations Since our tests in the crate root were written, some of the production code they test has moved to other modules. This commit moves the tests to those modules. --- src/layout.rs | 833 ++++++++++++++++++++++++++++++++- src/lib.rs | 1228 ------------------------------------------------- src/ref.rs | 426 +++++++++++++++++ 3 files changed, 1255 insertions(+), 1232 deletions(-) diff --git a/src/layout.rs b/src/layout.rs index f0c460b898..266883c300 100644 --- a/src/layout.rs +++ b/src/layout.rs @@ -9,6 +9,8 @@ use core::{mem, num::NonZeroUsize}; +use crate::util; + /// The target pointer width, counted in bits. const POINTER_WIDTH_BITS: usize = mem::size_of::() * 8; @@ -221,7 +223,7 @@ impl DstLayout { #[must_use] #[inline] pub const fn extend(self, field: DstLayout, repr_packed: Option) -> Self { - use crate::util::{max, min, padding_needed_for}; + use util::{max, min, padding_needed_for}; // If `repr_packed` is `None`, there are no alignment constraints, and // the value can be defaulted to `THEORETICAL_MAX_ALIGN`. @@ -361,7 +363,7 @@ impl DstLayout { #[must_use] #[inline] pub const fn pad_to_align(self) -> Self { - use crate::util::padding_needed_for; + use util::padding_needed_for; let size_info = match self.size_info { // For sized layouts, we add the minimum amount of trailing padding @@ -529,7 +531,7 @@ impl DstLayout { // multiple of the alignment, or will be larger than // `bytes_len`. let max_total_bytes = - crate::util::round_down_to_next_multiple_of_alignment(bytes_len, self.align); + util::round_down_to_next_multiple_of_alignment(bytes_len, self.align); // Calculate the maximum number of bytes that could be consumed // by the trailing slice. // @@ -575,7 +577,7 @@ impl DstLayout { // `self_bytes` up to `max_total_bytes`. 
#[allow(clippy::arithmetic_side_effects)] let self_bytes = - without_padding + crate::util::padding_needed_for(without_padding, self.align); + without_padding + util::padding_needed_for(without_padding, self.align); (elems, self_bytes) } }; @@ -596,3 +598,826 @@ impl DstLayout { Ok((elems, split_at)) } } + +// TODO(#67): For some reason, on our MSRV toolchain, this `allow` isn't +// enforced despite having `#![allow(unknown_lints)]` at the crate root, but +// putting it here works. Once our MSRV is high enough that this bug has been +// fixed, remove this `allow`. +#[allow(unknown_lints)] +#[cfg(test)] +mod tests { + use super::*; + + /// Tests of when a sized `DstLayout` is extended with a sized field. + #[allow(clippy::decimal_literal_representation)] + #[test] + fn test_dst_layout_extend_sized_with_sized() { + // This macro constructs a layout corresponding to a `u8` and extends it + // with a zero-sized trailing field of given alignment `n`. The macro + // tests that the resulting layout has both size and alignment `min(n, + // P)` for all valid values of `repr(packed(P))`. + macro_rules! 
test_align_is_size { + ($n:expr) => { + let base = DstLayout::for_type::(); + let trailing_field = DstLayout::for_type::>(); + + let packs = + core::iter::once(None).chain((0..29).map(|p| NonZeroUsize::new(2usize.pow(p)))); + + for pack in packs { + let composite = base.extend(trailing_field, pack); + let max_align = pack.unwrap_or(DstLayout::CURRENT_MAX_ALIGN); + let align = $n.min(max_align.get()); + assert_eq!( + composite, + DstLayout { + align: NonZeroUsize::new(align).unwrap(), + size_info: SizeInfo::Sized { size: align } + } + ) + } + }; + } + + test_align_is_size!(1); + test_align_is_size!(2); + test_align_is_size!(4); + test_align_is_size!(8); + test_align_is_size!(16); + test_align_is_size!(32); + test_align_is_size!(64); + test_align_is_size!(128); + test_align_is_size!(256); + test_align_is_size!(512); + test_align_is_size!(1024); + test_align_is_size!(2048); + test_align_is_size!(4096); + test_align_is_size!(8192); + test_align_is_size!(16384); + test_align_is_size!(32768); + test_align_is_size!(65536); + test_align_is_size!(131072); + test_align_is_size!(262144); + test_align_is_size!(524288); + test_align_is_size!(1048576); + test_align_is_size!(2097152); + test_align_is_size!(4194304); + test_align_is_size!(8388608); + test_align_is_size!(16777216); + test_align_is_size!(33554432); + test_align_is_size!(67108864); + test_align_is_size!(33554432); + test_align_is_size!(134217728); + test_align_is_size!(268435456); + } + + /// Tests of when a sized `DstLayout` is extended with a DST field. + #[test] + fn test_dst_layout_extend_sized_with_dst() { + // Test that for all combinations of real-world alignments and + // `repr_packed` values, that the extension of a sized `DstLayout`` with + // a DST field correctly computes the trailing offset in the composite + // layout. 
+ + let aligns = (0..29).map(|p| NonZeroUsize::new(2usize.pow(p)).unwrap()); + let packs = core::iter::once(None).chain(aligns.clone().map(Some)); + + for align in aligns { + for pack in packs.clone() { + let base = DstLayout::for_type::(); + let elem_size = 42; + let trailing_field_offset = 11; + + let trailing_field = DstLayout { + align, + size_info: SizeInfo::SliceDst(TrailingSliceLayout { elem_size, offset: 11 }), + }; + + let composite = base.extend(trailing_field, pack); + + let max_align = pack.unwrap_or(DstLayout::CURRENT_MAX_ALIGN).get(); + + let align = align.get().min(max_align); + + assert_eq!( + composite, + DstLayout { + align: NonZeroUsize::new(align).unwrap(), + size_info: SizeInfo::SliceDst(TrailingSliceLayout { + elem_size, + offset: align + trailing_field_offset, + }), + } + ) + } + } + } + + /// Tests that calling `pad_to_align` on a sized `DstLayout` adds the + /// expected amount of trailing padding. + #[test] + fn test_dst_layout_pad_to_align_with_sized() { + // For all valid alignments `align`, construct a one-byte layout aligned + // to `align`, call `pad_to_align`, and assert that the size of the + // resulting layout is equal to `align`. + for align in (0..29).map(|p| NonZeroUsize::new(2usize.pow(p)).unwrap()) { + let layout = DstLayout { align, size_info: SizeInfo::Sized { size: 1 } }; + + assert_eq!( + layout.pad_to_align(), + DstLayout { align, size_info: SizeInfo::Sized { size: align.get() } } + ); + } + + // Test explicitly-provided combinations of unpadded and padded + // counterparts. + + macro_rules! 
test { + (unpadded { size: $unpadded_size:expr, align: $unpadded_align:expr } + => padded { size: $padded_size:expr, align: $padded_align:expr }) => { + let unpadded = DstLayout { + align: NonZeroUsize::new($unpadded_align).unwrap(), + size_info: SizeInfo::Sized { size: $unpadded_size }, + }; + let padded = unpadded.pad_to_align(); + + assert_eq!( + padded, + DstLayout { + align: NonZeroUsize::new($padded_align).unwrap(), + size_info: SizeInfo::Sized { size: $padded_size }, + } + ); + }; + } + + test!(unpadded { size: 0, align: 4 } => padded { size: 0, align: 4 }); + test!(unpadded { size: 1, align: 4 } => padded { size: 4, align: 4 }); + test!(unpadded { size: 2, align: 4 } => padded { size: 4, align: 4 }); + test!(unpadded { size: 3, align: 4 } => padded { size: 4, align: 4 }); + test!(unpadded { size: 4, align: 4 } => padded { size: 4, align: 4 }); + test!(unpadded { size: 5, align: 4 } => padded { size: 8, align: 4 }); + test!(unpadded { size: 6, align: 4 } => padded { size: 8, align: 4 }); + test!(unpadded { size: 7, align: 4 } => padded { size: 8, align: 4 }); + test!(unpadded { size: 8, align: 4 } => padded { size: 8, align: 4 }); + + let current_max_align = DstLayout::CURRENT_MAX_ALIGN.get(); + + test!(unpadded { size: 1, align: current_max_align } + => padded { size: current_max_align, align: current_max_align }); + + test!(unpadded { size: current_max_align + 1, align: current_max_align } + => padded { size: current_max_align * 2, align: current_max_align }); + } + + /// Tests that calling `pad_to_align` on a DST `DstLayout` is a no-op. 
+ #[test] + fn test_dst_layout_pad_to_align_with_dst() { + for align in (0..29).map(|p| NonZeroUsize::new(2usize.pow(p)).unwrap()) { + for offset in 0..10 { + for elem_size in 0..10 { + let layout = DstLayout { + align, + size_info: SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }), + }; + assert_eq!(layout.pad_to_align(), layout); + } + } + } + } + + // This test takes a long time when running under Miri, so we skip it in + // that case. This is acceptable because this is a logic test that doesn't + // attempt to expose UB. + #[test] + #[cfg_attr(miri, ignore)] + fn test_validate_cast_and_convert_metadata() { + #[allow(non_local_definitions)] + impl From for SizeInfo { + fn from(size: usize) -> SizeInfo { + SizeInfo::Sized { size } + } + } + + #[allow(non_local_definitions)] + impl From<(usize, usize)> for SizeInfo { + fn from((offset, elem_size): (usize, usize)) -> SizeInfo { + SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }) + } + } + + fn layout>(s: S, align: usize) -> DstLayout { + DstLayout { size_info: s.into(), align: NonZeroUsize::new(align).unwrap() } + } + + /// This macro accepts arguments in the form of: + /// + /// layout(_, _, _).validate(_, _, _), Ok(Some((_, _))) + /// | | | | | | | | + /// base_size ----+ | | | | | | | + /// align -----------+ | | | | | | + /// trailing_size ------+ | | | | | + /// addr ---------------------------+ | | | | + /// bytes_len -------------------------+ | | | + /// cast_type ----------------------------+ | | + /// elems ---------------------------------------------+ | + /// split_at ---------------------------------------------+ + /// + /// `.validate` is shorthand for `.validate_cast_and_convert_metadata` + /// for brevity. + /// + /// Each argument can either be an iterator or a wildcard. Each + /// wildcarded variable is implicitly replaced by an iterator over a + /// representative sample of values for that variable. 
Each `test!` + /// invocation iterates over every combination of values provided by + /// each variable's iterator (ie, the cartesian product) and validates + /// that the results are expected. + /// + /// The final argument uses the same syntax, but it has a different + /// meaning: + /// - If it is `Ok(pat)`, then the pattern `pat` is supplied to + /// `assert_matches!` to validate the computed result for each + /// combination of input values. + /// - If it is `Err(Some(msg) | None)`, then `test!` validates that the + /// call to `validate_cast_and_convert_metadata` panics with the given + /// panic message or, if the current Rust toolchain version is too + /// early to support panicking in `const fn`s, panics with *some* + /// message. In the latter case, the `const_panic!` macro is used, + /// which emits code which causes a non-panicking error at const eval + /// time, but which does panic when invoked at runtime. Thus, it is + /// merely difficult to predict the *value* of this panic. We deem + /// that testing against the real panic strings on stable and nightly + /// toolchains is enough to ensure correctness. + /// + /// Note that the meta-variables that match these variables have the + /// `tt` type, and some valid expressions are not valid `tt`s (such as + /// `a..b`). In this case, wrap the expression in parentheses, and it + /// will become valid `tt`. + macro_rules! test { + ($(:$sizes:expr =>)? + layout($size:tt, $align:tt) + .validate($addr:tt, $bytes_len:tt, $cast_type:tt), $expect:pat $(,)? + ) => { + itertools::iproduct!( + test!(@generate_size $size), + test!(@generate_align $align), + test!(@generate_usize $addr), + test!(@generate_usize $bytes_len), + test!(@generate_cast_type $cast_type) + ).for_each(|(size_info, align, addr, bytes_len, cast_type)| { + // Temporarily disable the panic hook installed by the test + // harness. If we don't do this, all panic messages will be + // kept in an internal log. 
On its own, this isn't a + // problem, but if a non-caught panic ever happens (ie, in + // code later in this test not in this macro), all of the + // previously-buffered messages will be dumped, hiding the + // real culprit. + let previous_hook = std::panic::take_hook(); + // I don't understand why, but this seems to be required in + // addition to the previous line. + std::panic::set_hook(Box::new(|_| {})); + let actual = std::panic::catch_unwind(|| { + layout(size_info, align).validate_cast_and_convert_metadata(addr, bytes_len, cast_type) + }).map_err(|d| { + let msg = d.downcast::<&'static str>().ok().map(|s| *s.as_ref()); + assert!(msg.is_some() || cfg!(not(zerocopy_panic_in_const)), "non-string panic messages are not permitted when `--cfg zerocopy_panic_in_const` is set"); + msg + }); + std::panic::set_hook(previous_hook); + + assert_matches::assert_matches!( + actual, $expect, + "layout({:?}, {}).validate_cast_and_convert_metadata({}, {}, {:?})" ,size_info, align, addr, bytes_len, cast_type + ); + }); + }; + (@generate_usize _) => { 0..8 }; + // Generate sizes for both Sized and !Sized types. + (@generate_size _) => { + test!(@generate_size (_)).chain(test!(@generate_size (_, _))) + }; + // Generate sizes for both Sized and !Sized types by chaining + // specified iterators for each. + (@generate_size ($sized_sizes:tt | $unsized_sizes:tt)) => { + test!(@generate_size ($sized_sizes)).chain(test!(@generate_size $unsized_sizes)) + }; + // Generate sizes for Sized types. + (@generate_size (_)) => { test!(@generate_size (0..8)) }; + (@generate_size ($sizes:expr)) => { $sizes.into_iter().map(Into::::into) }; + // Generate sizes for !Sized types. 
+ (@generate_size ($min_sizes:tt, $elem_sizes:tt)) => { + itertools::iproduct!( + test!(@generate_min_size $min_sizes), + test!(@generate_elem_size $elem_sizes) + ).map(Into::::into) + }; + (@generate_fixed_size _) => { (0..8).into_iter().map(Into::::into) }; + (@generate_min_size _) => { 0..8 }; + (@generate_elem_size _) => { 1..8 }; + (@generate_align _) => { [1, 2, 4, 8, 16] }; + (@generate_opt_usize _) => { [None].into_iter().chain((0..8).map(Some).into_iter()) }; + (@generate_cast_type _) => { [CastType::Prefix, CastType::Suffix] }; + (@generate_cast_type $variant:ident) => { [CastType::$variant] }; + // Some expressions need to be wrapped in parentheses in order to be + // valid `tt`s (required by the top match pattern). See the comment + // below for more details. This arm removes these parentheses to + // avoid generating an `unused_parens` warning. + (@$_:ident ($vals:expr)) => { $vals }; + (@$_:ident $vals:expr) => { $vals }; + } + + const EVENS: [usize; 8] = [0, 2, 4, 6, 8, 10, 12, 14]; + const ODDS: [usize; 8] = [1, 3, 5, 7, 9, 11, 13, 15]; + + // base_size is too big for the memory region. 
+ test!( + layout(((1..8) | ((1..8), (1..8))), _).validate([0], [0], _), + Ok(Err(MetadataCastError::Size)) + ); + test!( + layout(((2..8) | ((2..8), (2..8))), _).validate([0], [1], Prefix), + Ok(Err(MetadataCastError::Size)) + ); + test!( + layout(((2..8) | ((2..8), (2..8))), _).validate([0x1000_0000 - 1], [1], Suffix), + Ok(Err(MetadataCastError::Size)) + ); + + // addr is unaligned for prefix cast + test!(layout(_, [2]).validate(ODDS, _, Prefix), Ok(Err(MetadataCastError::Alignment))); + test!(layout(_, [2]).validate(ODDS, _, Prefix), Ok(Err(MetadataCastError::Alignment))); + + // addr is aligned, but end of buffer is unaligned for suffix cast + test!(layout(_, [2]).validate(EVENS, ODDS, Suffix), Ok(Err(MetadataCastError::Alignment))); + test!(layout(_, [2]).validate(EVENS, ODDS, Suffix), Ok(Err(MetadataCastError::Alignment))); + + // Unfortunately, these constants cannot easily be used in the + // implementation of `validate_cast_and_convert_metadata`, since + // `panic!` consumes a string literal, not an expression. + // + // It's important that these messages be in a separate module. If they + // were at the function's top level, we'd pass them to `test!` as, e.g., + // `Err(TRAILING)`, which would run into a subtle Rust footgun - the + // `TRAILING` identifier would be treated as a pattern to match rather + // than a value to check for equality. 
+ mod msgs { + pub(super) const TRAILING: &str = + "attempted to cast to slice type with zero-sized element"; + pub(super) const OVERFLOW: &str = "`addr` + `bytes_len` > usize::MAX"; + } + + // casts with ZST trailing element types are unsupported + test!(layout((_, [0]), _).validate(_, _, _), Err(Some(msgs::TRAILING) | None),); + + // addr + bytes_len must not overflow usize + test!(layout(_, _).validate([usize::MAX], (1..100), _), Err(Some(msgs::OVERFLOW) | None)); + test!(layout(_, _).validate((1..100), [usize::MAX], _), Err(Some(msgs::OVERFLOW) | None)); + test!( + layout(_, _).validate( + [usize::MAX / 2 + 1, usize::MAX], + [usize::MAX / 2 + 1, usize::MAX], + _ + ), + Err(Some(msgs::OVERFLOW) | None) + ); + + // Validates that `validate_cast_and_convert_metadata` satisfies its own + // documented safety postconditions, and also a few other properties + // that aren't documented but we want to guarantee anyway. + fn validate_behavior( + (layout, addr, bytes_len, cast_type): (DstLayout, usize, usize, CastType), + ) { + if let Ok((elems, split_at)) = + layout.validate_cast_and_convert_metadata(addr, bytes_len, cast_type) + { + let (size_info, align) = (layout.size_info, layout.align); + let debug_str = format!( + "layout({:?}, {}).validate_cast_and_convert_metadata({}, {}, {:?}) => ({}, {})", + size_info, align, addr, bytes_len, cast_type, elems, split_at + ); + + // If this is a sized type (no trailing slice), then `elems` is + // meaningless, but in practice we set it to 0. Callers are not + // allowed to rely on this, but a lot of math is nicer if + // they're able to, and some callers might accidentally do that. + let sized = matches!(layout.size_info, SizeInfo::Sized { .. 
}); + assert!(!(sized && elems != 0), "{}", debug_str); + + let resulting_size = match layout.size_info { + SizeInfo::Sized { size } => size, + SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }) => { + let padded_size = |elems| { + let without_padding = offset + elems * elem_size; + without_padding + util::padding_needed_for(without_padding, align) + }; + + let resulting_size = padded_size(elems); + // Test that `validate_cast_and_convert_metadata` + // computed the largest possible value that fits in the + // given range. + assert!(padded_size(elems + 1) > bytes_len, "{}", debug_str); + resulting_size + } + }; + + // Test safety postconditions guaranteed by + // `validate_cast_and_convert_metadata`. + assert!(resulting_size <= bytes_len, "{}", debug_str); + match cast_type { + CastType::Prefix => { + assert_eq!(addr % align, 0, "{}", debug_str); + assert_eq!(resulting_size, split_at, "{}", debug_str); + } + CastType::Suffix => { + assert_eq!(split_at, bytes_len - resulting_size, "{}", debug_str); + assert_eq!((addr + split_at) % align, 0, "{}", debug_str); + } + } + } else { + let min_size = match layout.size_info { + SizeInfo::Sized { size } => size, + SizeInfo::SliceDst(TrailingSliceLayout { offset, .. }) => { + offset + util::padding_needed_for(offset, layout.align) + } + }; + + // If a cast is invalid, it is either because... + // 1. there are insufficent bytes at the given region for type: + let insufficient_bytes = bytes_len < min_size; + // 2. 
performing the cast would misalign type: + let base = match cast_type { + CastType::Prefix => 0, + CastType::Suffix => bytes_len, + }; + let misaligned = (base + addr) % layout.align != 0; + + assert!(insufficient_bytes || misaligned); + } + } + + let sizes = 0..8; + let elem_sizes = 1..8; + let size_infos = sizes + .clone() + .map(Into::::into) + .chain(itertools::iproduct!(sizes, elem_sizes).map(Into::::into)); + let layouts = itertools::iproduct!(size_infos, [1, 2, 4, 8, 16, 32]) + .filter(|(size_info, align)| !matches!(size_info, SizeInfo::Sized { size } if size % align != 0)) + .map(|(size_info, align)| layout(size_info, align)); + itertools::iproduct!(layouts, 0..8, 0..8, [CastType::Prefix, CastType::Suffix]) + .for_each(validate_behavior); + } + + #[test] + #[cfg(__INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS)] + fn test_validate_rust_layout() { + use crate::util::testutil::*; + use core::{ + convert::TryInto as _, + ptr::{self, NonNull}, + }; + + // This test synthesizes pointers with various metadata and uses Rust's + // built-in APIs to confirm that Rust makes decisions about type layout + // which are consistent with what we believe is guaranteed by the + // language. If this test fails, it doesn't just mean our code is wrong + // - it means we're misunderstanding the language's guarantees. + + #[derive(Debug)] + struct MacroArgs { + offset: usize, + align: NonZeroUsize, + elem_size: Option, + } + + /// # Safety + /// + /// `test` promises to only call `addr_of_slice_field` on a `NonNull` + /// which points to a valid `T`. + /// + /// `with_elems` must produce a pointer which points to a valid `T`. 
+ fn test NonNull>( + args: MacroArgs, + with_elems: W, + addr_of_slice_field: Option) -> NonNull>, + ) { + let dst = args.elem_size.is_some(); + let layout = { + let size_info = match args.elem_size { + Some(elem_size) => { + SizeInfo::SliceDst(TrailingSliceLayout { offset: args.offset, elem_size }) + } + None => SizeInfo::Sized { + // Rust only supports types whose sizes are a multiple + // of their alignment. If the macro created a type like + // this: + // + // #[repr(C, align(2))] + // struct Foo([u8; 1]); + // + // ...then Rust will automatically round the type's size + // up to 2. + size: args.offset + util::padding_needed_for(args.offset, args.align), + }, + }; + DstLayout { size_info, align: args.align } + }; + + for elems in 0..128 { + let ptr = with_elems(elems); + + if let Some(addr_of_slice_field) = addr_of_slice_field { + let slc_field_ptr = addr_of_slice_field(ptr).as_ptr(); + // SAFETY: Both `slc_field_ptr` and `ptr` are pointers to + // the same valid Rust object. + #[allow(clippy::incompatible_msrv)] + // Work around https://github.com/rust-lang/rust-clippy/issues/12280 + let offset: usize = + unsafe { slc_field_ptr.byte_offset_from(ptr.as_ptr()).try_into().unwrap() }; + assert_eq!(offset, args.offset); + } + + // SAFETY: `ptr` points to a valid `T`. + let (size, align) = unsafe { + (mem::size_of_val_raw(ptr.as_ptr()), mem::align_of_val_raw(ptr.as_ptr())) + }; + + // Avoid expensive allocation when running under Miri. + let assert_msg = if !cfg!(miri) { + format!("\n{:?}\nsize:{}, align:{}", args, size, align) + } else { + String::new() + }; + + let without_padding = + args.offset + args.elem_size.map(|elem_size| elems * elem_size).unwrap_or(0); + assert!(size >= without_padding, "{}", assert_msg); + assert_eq!(align, args.align.get(), "{}", assert_msg); + + // This encodes the most important part of the test: our + // understanding of how Rust determines the layout of repr(C) + // types. 
Sized repr(C) types are trivial, but DST types have + // some subtlety. Note that: + // - For sized types, `without_padding` is just the size of the + // type that we constructed for `Foo`. Since we may have + // requested a larger alignment, `Foo` may actually be larger + // than this, hence `padding_needed_for`. + // - For unsized types, `without_padding` is dynamically + // computed from the offset, the element size, and element + // count. We expect that the size of the object should be + // `offset + elem_size * elems` rounded up to the next + // alignment. + let expected_size = + without_padding + util::padding_needed_for(without_padding, args.align); + assert_eq!(expected_size, size, "{}", assert_msg); + + // For zero-sized element types, + // `validate_cast_and_convert_metadata` just panics, so we skip + // testing those types. + if args.elem_size.map(|elem_size| elem_size > 0).unwrap_or(true) { + let addr = ptr.addr().get(); + let (got_elems, got_split_at) = layout + .validate_cast_and_convert_metadata(addr, size, CastType::Prefix) + .unwrap(); + // Avoid expensive allocation when running under Miri. + let assert_msg = if !cfg!(miri) { + format!( + "{}\nvalidate_cast_and_convert_metadata({}, {})", + assert_msg, addr, size, + ) + } else { + String::new() + }; + assert_eq!(got_split_at, size, "{}", assert_msg); + if dst { + assert!(got_elems >= elems, "{}", assert_msg); + if got_elems != elems { + // If `validate_cast_and_convert_metadata` + // returned more elements than `elems`, that + // means that `elems` is not the maximum number + // of elements that can fit in `size` - in other + // words, there is enough padding at the end of + // the value to fit at least one more element. + // If we use this metadata to synthesize a + // pointer, despite having a different element + // count, we still expect it to have the same + // size. + let got_ptr = with_elems(got_elems); + // SAFETY: `got_ptr` is a pointer to a valid `T`. 
+ let size_of_got_ptr = unsafe { mem::size_of_val_raw(got_ptr.as_ptr()) }; + assert_eq!(size_of_got_ptr, size, "{}", assert_msg); + } + } else { + // For sized casts, the returned element value is + // technically meaningless, and we don't guarantee any + // particular value. In practice, it's always zero. + assert_eq!(got_elems, 0, "{}", assert_msg) + } + } + } + } + + macro_rules! validate_against_rust { + ($offset:literal, $align:literal $(, $elem_size:literal)?) => {{ + #[repr(C, align($align))] + struct Foo([u8; $offset]$(, [[u8; $elem_size]])?); + + let args = MacroArgs { + offset: $offset, + align: $align.try_into().unwrap(), + elem_size: { + #[allow(unused)] + let ret = None::; + $(let ret = Some($elem_size);)? + ret + } + }; + + #[repr(C, align($align))] + struct FooAlign; + // Create an aligned buffer to use in order to synthesize + // pointers to `Foo`. We don't ever load values from these + // pointers - we just do arithmetic on them - so having a "real" + // block of memory as opposed to a validly-aligned-but-dangling + // pointer is only necessary to make Miri happy since we run it + // with "strict provenance" checking enabled. + let aligned_buf = Align::<_, FooAlign>::new([0u8; 1024]); + let with_elems = |elems| { + let slc = NonNull::slice_from_raw_parts(NonNull::from(&aligned_buf.t), elems); + #[allow(clippy::as_conversions)] + NonNull::new(slc.as_ptr() as *mut Foo).unwrap() + }; + let addr_of_slice_field = { + #[allow(unused)] + let f = None::) -> NonNull>; + $( + // SAFETY: `test` promises to only call `f` with a `ptr` + // to a valid `Foo`. + let f: Option) -> NonNull> = Some(|ptr: NonNull| unsafe { + NonNull::new(ptr::addr_of_mut!((*ptr.as_ptr()).1)).unwrap().cast::() + }); + let _ = $elem_size; + )? 
+ f + }; + + test::(args, with_elems, addr_of_slice_field); + }}; + } + + // Every permutation of: + // - offset in [0, 4] + // - align in [1, 16] + // - elem_size in [0, 4] (plus no elem_size) + validate_against_rust!(0, 1); + validate_against_rust!(0, 1, 0); + validate_against_rust!(0, 1, 1); + validate_against_rust!(0, 1, 2); + validate_against_rust!(0, 1, 3); + validate_against_rust!(0, 1, 4); + validate_against_rust!(0, 2); + validate_against_rust!(0, 2, 0); + validate_against_rust!(0, 2, 1); + validate_against_rust!(0, 2, 2); + validate_against_rust!(0, 2, 3); + validate_against_rust!(0, 2, 4); + validate_against_rust!(0, 4); + validate_against_rust!(0, 4, 0); + validate_against_rust!(0, 4, 1); + validate_against_rust!(0, 4, 2); + validate_against_rust!(0, 4, 3); + validate_against_rust!(0, 4, 4); + validate_against_rust!(0, 8); + validate_against_rust!(0, 8, 0); + validate_against_rust!(0, 8, 1); + validate_against_rust!(0, 8, 2); + validate_against_rust!(0, 8, 3); + validate_against_rust!(0, 8, 4); + validate_against_rust!(0, 16); + validate_against_rust!(0, 16, 0); + validate_against_rust!(0, 16, 1); + validate_against_rust!(0, 16, 2); + validate_against_rust!(0, 16, 3); + validate_against_rust!(0, 16, 4); + validate_against_rust!(1, 1); + validate_against_rust!(1, 1, 0); + validate_against_rust!(1, 1, 1); + validate_against_rust!(1, 1, 2); + validate_against_rust!(1, 1, 3); + validate_against_rust!(1, 1, 4); + validate_against_rust!(1, 2); + validate_against_rust!(1, 2, 0); + validate_against_rust!(1, 2, 1); + validate_against_rust!(1, 2, 2); + validate_against_rust!(1, 2, 3); + validate_against_rust!(1, 2, 4); + validate_against_rust!(1, 4); + validate_against_rust!(1, 4, 0); + validate_against_rust!(1, 4, 1); + validate_against_rust!(1, 4, 2); + validate_against_rust!(1, 4, 3); + validate_against_rust!(1, 4, 4); + validate_against_rust!(1, 8); + validate_against_rust!(1, 8, 0); + validate_against_rust!(1, 8, 1); + validate_against_rust!(1, 8, 2); + 
validate_against_rust!(1, 8, 3); + validate_against_rust!(1, 8, 4); + validate_against_rust!(1, 16); + validate_against_rust!(1, 16, 0); + validate_against_rust!(1, 16, 1); + validate_against_rust!(1, 16, 2); + validate_against_rust!(1, 16, 3); + validate_against_rust!(1, 16, 4); + validate_against_rust!(2, 1); + validate_against_rust!(2, 1, 0); + validate_against_rust!(2, 1, 1); + validate_against_rust!(2, 1, 2); + validate_against_rust!(2, 1, 3); + validate_against_rust!(2, 1, 4); + validate_against_rust!(2, 2); + validate_against_rust!(2, 2, 0); + validate_against_rust!(2, 2, 1); + validate_against_rust!(2, 2, 2); + validate_against_rust!(2, 2, 3); + validate_against_rust!(2, 2, 4); + validate_against_rust!(2, 4); + validate_against_rust!(2, 4, 0); + validate_against_rust!(2, 4, 1); + validate_against_rust!(2, 4, 2); + validate_against_rust!(2, 4, 3); + validate_against_rust!(2, 4, 4); + validate_against_rust!(2, 8); + validate_against_rust!(2, 8, 0); + validate_against_rust!(2, 8, 1); + validate_against_rust!(2, 8, 2); + validate_against_rust!(2, 8, 3); + validate_against_rust!(2, 8, 4); + validate_against_rust!(2, 16); + validate_against_rust!(2, 16, 0); + validate_against_rust!(2, 16, 1); + validate_against_rust!(2, 16, 2); + validate_against_rust!(2, 16, 3); + validate_against_rust!(2, 16, 4); + validate_against_rust!(3, 1); + validate_against_rust!(3, 1, 0); + validate_against_rust!(3, 1, 1); + validate_against_rust!(3, 1, 2); + validate_against_rust!(3, 1, 3); + validate_against_rust!(3, 1, 4); + validate_against_rust!(3, 2); + validate_against_rust!(3, 2, 0); + validate_against_rust!(3, 2, 1); + validate_against_rust!(3, 2, 2); + validate_against_rust!(3, 2, 3); + validate_against_rust!(3, 2, 4); + validate_against_rust!(3, 4); + validate_against_rust!(3, 4, 0); + validate_against_rust!(3, 4, 1); + validate_against_rust!(3, 4, 2); + validate_against_rust!(3, 4, 3); + validate_against_rust!(3, 4, 4); + validate_against_rust!(3, 8); + 
validate_against_rust!(3, 8, 0); + validate_against_rust!(3, 8, 1); + validate_against_rust!(3, 8, 2); + validate_against_rust!(3, 8, 3); + validate_against_rust!(3, 8, 4); + validate_against_rust!(3, 16); + validate_against_rust!(3, 16, 0); + validate_against_rust!(3, 16, 1); + validate_against_rust!(3, 16, 2); + validate_against_rust!(3, 16, 3); + validate_against_rust!(3, 16, 4); + validate_against_rust!(4, 1); + validate_against_rust!(4, 1, 0); + validate_against_rust!(4, 1, 1); + validate_against_rust!(4, 1, 2); + validate_against_rust!(4, 1, 3); + validate_against_rust!(4, 1, 4); + validate_against_rust!(4, 2); + validate_against_rust!(4, 2, 0); + validate_against_rust!(4, 2, 1); + validate_against_rust!(4, 2, 2); + validate_against_rust!(4, 2, 3); + validate_against_rust!(4, 2, 4); + validate_against_rust!(4, 4); + validate_against_rust!(4, 4, 0); + validate_against_rust!(4, 4, 1); + validate_against_rust!(4, 4, 2); + validate_against_rust!(4, 4, 3); + validate_against_rust!(4, 4, 4); + validate_against_rust!(4, 8); + validate_against_rust!(4, 8, 0); + validate_against_rust!(4, 8, 1); + validate_against_rust!(4, 8, 2); + validate_against_rust!(4, 8, 3); + validate_against_rust!(4, 8, 4); + validate_against_rust!(4, 16); + validate_against_rust!(4, 16, 0); + validate_against_rust!(4, 16, 1); + validate_against_rust!(4, 16, 2); + validate_against_rust!(4, 16, 3); + validate_against_rust!(4, 16, 4); + } +} diff --git a/src/lib.rs b/src/lib.rs index 51db83eb2b..e6ef934e5b 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -5872,8 +5872,6 @@ pub use alloc_support::*; #[cfg(test)] #[allow(clippy::assertions_on_result_states, clippy::unreadable_literal)] mod tests { - use core::convert::TryInto as _; - use static_assertions::assert_impl_all; use super::*; @@ -5900,815 +5898,6 @@ mod tests { } } - /// Tests of when a sized `DstLayout` is extended with a sized field. 
- #[allow(clippy::decimal_literal_representation)] - #[test] - fn test_dst_layout_extend_sized_with_sized() { - // This macro constructs a layout corresponding to a `u8` and extends it - // with a zero-sized trailing field of given alignment `n`. The macro - // tests that the resulting layout has both size and alignment `min(n, - // P)` for all valid values of `repr(packed(P))`. - macro_rules! test_align_is_size { - ($n:expr) => { - let base = DstLayout::for_type::(); - let trailing_field = DstLayout::for_type::>(); - - let packs = - core::iter::once(None).chain((0..29).map(|p| NonZeroUsize::new(2usize.pow(p)))); - - for pack in packs { - let composite = base.extend(trailing_field, pack); - let max_align = pack.unwrap_or(DstLayout::CURRENT_MAX_ALIGN); - let align = $n.min(max_align.get()); - assert_eq!( - composite, - DstLayout { - align: NonZeroUsize::new(align).unwrap(), - size_info: SizeInfo::Sized { size: align } - } - ) - } - }; - } - - test_align_is_size!(1); - test_align_is_size!(2); - test_align_is_size!(4); - test_align_is_size!(8); - test_align_is_size!(16); - test_align_is_size!(32); - test_align_is_size!(64); - test_align_is_size!(128); - test_align_is_size!(256); - test_align_is_size!(512); - test_align_is_size!(1024); - test_align_is_size!(2048); - test_align_is_size!(4096); - test_align_is_size!(8192); - test_align_is_size!(16384); - test_align_is_size!(32768); - test_align_is_size!(65536); - test_align_is_size!(131072); - test_align_is_size!(262144); - test_align_is_size!(524288); - test_align_is_size!(1048576); - test_align_is_size!(2097152); - test_align_is_size!(4194304); - test_align_is_size!(8388608); - test_align_is_size!(16777216); - test_align_is_size!(33554432); - test_align_is_size!(67108864); - test_align_is_size!(33554432); - test_align_is_size!(134217728); - test_align_is_size!(268435456); - } - - /// Tests of when a sized `DstLayout` is extended with a DST field. 
- #[test] - fn test_dst_layout_extend_sized_with_dst() { - // Test that for all combinations of real-world alignments and - // `repr_packed` values, that the extension of a sized `DstLayout`` with - // a DST field correctly computes the trailing offset in the composite - // layout. - - let aligns = (0..29).map(|p| NonZeroUsize::new(2usize.pow(p)).unwrap()); - let packs = core::iter::once(None).chain(aligns.clone().map(Some)); - - for align in aligns { - for pack in packs.clone() { - let base = DstLayout::for_type::(); - let elem_size = 42; - let trailing_field_offset = 11; - - let trailing_field = DstLayout { - align, - size_info: SizeInfo::SliceDst(TrailingSliceLayout { elem_size, offset: 11 }), - }; - - let composite = base.extend(trailing_field, pack); - - let max_align = pack.unwrap_or(DstLayout::CURRENT_MAX_ALIGN).get(); - - let align = align.get().min(max_align); - - assert_eq!( - composite, - DstLayout { - align: NonZeroUsize::new(align).unwrap(), - size_info: SizeInfo::SliceDst(TrailingSliceLayout { - elem_size, - offset: align + trailing_field_offset, - }), - } - ) - } - } - } - - /// Tests that calling `pad_to_align` on a sized `DstLayout` adds the - /// expected amount of trailing padding. - #[test] - fn test_dst_layout_pad_to_align_with_sized() { - // For all valid alignments `align`, construct a one-byte layout aligned - // to `align`, call `pad_to_align`, and assert that the size of the - // resulting layout is equal to `align`. - for align in (0..29).map(|p| NonZeroUsize::new(2usize.pow(p)).unwrap()) { - let layout = DstLayout { align, size_info: SizeInfo::Sized { size: 1 } }; - - assert_eq!( - layout.pad_to_align(), - DstLayout { align, size_info: SizeInfo::Sized { size: align.get() } } - ); - } - - // Test explicitly-provided combinations of unpadded and padded - // counterparts. - - macro_rules! 
test { - (unpadded { size: $unpadded_size:expr, align: $unpadded_align:expr } - => padded { size: $padded_size:expr, align: $padded_align:expr }) => { - let unpadded = DstLayout { - align: NonZeroUsize::new($unpadded_align).unwrap(), - size_info: SizeInfo::Sized { size: $unpadded_size }, - }; - let padded = unpadded.pad_to_align(); - - assert_eq!( - padded, - DstLayout { - align: NonZeroUsize::new($padded_align).unwrap(), - size_info: SizeInfo::Sized { size: $padded_size }, - } - ); - }; - } - - test!(unpadded { size: 0, align: 4 } => padded { size: 0, align: 4 }); - test!(unpadded { size: 1, align: 4 } => padded { size: 4, align: 4 }); - test!(unpadded { size: 2, align: 4 } => padded { size: 4, align: 4 }); - test!(unpadded { size: 3, align: 4 } => padded { size: 4, align: 4 }); - test!(unpadded { size: 4, align: 4 } => padded { size: 4, align: 4 }); - test!(unpadded { size: 5, align: 4 } => padded { size: 8, align: 4 }); - test!(unpadded { size: 6, align: 4 } => padded { size: 8, align: 4 }); - test!(unpadded { size: 7, align: 4 } => padded { size: 8, align: 4 }); - test!(unpadded { size: 8, align: 4 } => padded { size: 8, align: 4 }); - - let current_max_align = DstLayout::CURRENT_MAX_ALIGN.get(); - - test!(unpadded { size: 1, align: current_max_align } - => padded { size: current_max_align, align: current_max_align }); - - test!(unpadded { size: current_max_align + 1, align: current_max_align } - => padded { size: current_max_align * 2, align: current_max_align }); - } - - /// Tests that calling `pad_to_align` on a DST `DstLayout` is a no-op. 
- #[test] - fn test_dst_layout_pad_to_align_with_dst() { - for align in (0..29).map(|p| NonZeroUsize::new(2usize.pow(p)).unwrap()) { - for offset in 0..10 { - for elem_size in 0..10 { - let layout = DstLayout { - align, - size_info: SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }), - }; - assert_eq!(layout.pad_to_align(), layout); - } - } - } - } - - // This test takes a long time when running under Miri, so we skip it in - // that case. This is acceptable because this is a logic test that doesn't - // attempt to expose UB. - #[test] - #[cfg_attr(miri, ignore)] - fn test_validate_cast_and_convert_metadata() { - #[allow(non_local_definitions)] - impl From for SizeInfo { - fn from(size: usize) -> SizeInfo { - SizeInfo::Sized { size } - } - } - - #[allow(non_local_definitions)] - impl From<(usize, usize)> for SizeInfo { - fn from((offset, elem_size): (usize, usize)) -> SizeInfo { - SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }) - } - } - - fn layout>(s: S, align: usize) -> DstLayout { - DstLayout { size_info: s.into(), align: NonZeroUsize::new(align).unwrap() } - } - - /// This macro accepts arguments in the form of: - /// - /// layout(_, _, _).validate(_, _, _), Ok(Some((_, _))) - /// | | | | | | | | - /// base_size ----+ | | | | | | | - /// align -----------+ | | | | | | - /// trailing_size ------+ | | | | | - /// addr ---------------------------+ | | | | - /// bytes_len -------------------------+ | | | - /// cast_type ----------------------------+ | | - /// elems ---------------------------------------------+ | - /// split_at ---------------------------------------------+ - /// - /// `.validate` is shorthand for `.validate_cast_and_convert_metadata` - /// for brevity. - /// - /// Each argument can either be an iterator or a wildcard. Each - /// wildcarded variable is implicitly replaced by an iterator over a - /// representative sample of values for that variable. 
Each `test!` - /// invocation iterates over every combination of values provided by - /// each variable's iterator (ie, the cartesian product) and validates - /// that the results are expected. - /// - /// The final argument uses the same syntax, but it has a different - /// meaning: - /// - If it is `Ok(pat)`, then the pattern `pat` is supplied to - /// `assert_matches!` to validate the computed result for each - /// combination of input values. - /// - If it is `Err(Some(msg) | None)`, then `test!` validates that the - /// call to `validate_cast_and_convert_metadata` panics with the given - /// panic message or, if the current Rust toolchain version is too - /// early to support panicking in `const fn`s, panics with *some* - /// message. In the latter case, the `const_panic!` macro is used, - /// which emits code which causes a non-panicking error at const eval - /// time, but which does panic when invoked at runtime. Thus, it is - /// merely difficult to predict the *value* of this panic. We deem - /// that testing against the real panic strings on stable and nightly - /// toolchains is enough to ensure correctness. - /// - /// Note that the meta-variables that match these variables have the - /// `tt` type, and some valid expressions are not valid `tt`s (such as - /// `a..b`). In this case, wrap the expression in parentheses, and it - /// will become valid `tt`. - macro_rules! test { - ($(:$sizes:expr =>)? - layout($size:tt, $align:tt) - .validate($addr:tt, $bytes_len:tt, $cast_type:tt), $expect:pat $(,)? - ) => { - itertools::iproduct!( - test!(@generate_size $size), - test!(@generate_align $align), - test!(@generate_usize $addr), - test!(@generate_usize $bytes_len), - test!(@generate_cast_type $cast_type) - ).for_each(|(size_info, align, addr, bytes_len, cast_type)| { - // Temporarily disable the panic hook installed by the test - // harness. If we don't do this, all panic messages will be - // kept in an internal log. 
On its own, this isn't a - // problem, but if a non-caught panic ever happens (ie, in - // code later in this test not in this macro), all of the - // previously-buffered messages will be dumped, hiding the - // real culprit. - let previous_hook = std::panic::take_hook(); - // I don't understand why, but this seems to be required in - // addition to the previous line. - std::panic::set_hook(Box::new(|_| {})); - let actual = std::panic::catch_unwind(|| { - layout(size_info, align).validate_cast_and_convert_metadata(addr, bytes_len, cast_type) - }).map_err(|d| { - let msg = d.downcast::<&'static str>().ok().map(|s| *s.as_ref()); - assert!(msg.is_some() || cfg!(not(zerocopy_panic_in_const)), "non-string panic messages are not permitted when `--cfg zerocopy_panic_in_const` is set"); - msg - }); - std::panic::set_hook(previous_hook); - - assert_matches::assert_matches!( - actual, $expect, - "layout({:?}, {}).validate_cast_and_convert_metadata({}, {}, {:?})" ,size_info, align, addr, bytes_len, cast_type - ); - }); - }; - (@generate_usize _) => { 0..8 }; - // Generate sizes for both Sized and !Sized types. - (@generate_size _) => { - test!(@generate_size (_)).chain(test!(@generate_size (_, _))) - }; - // Generate sizes for both Sized and !Sized types by chaining - // specified iterators for each. - (@generate_size ($sized_sizes:tt | $unsized_sizes:tt)) => { - test!(@generate_size ($sized_sizes)).chain(test!(@generate_size $unsized_sizes)) - }; - // Generate sizes for Sized types. - (@generate_size (_)) => { test!(@generate_size (0..8)) }; - (@generate_size ($sizes:expr)) => { $sizes.into_iter().map(Into::::into) }; - // Generate sizes for !Sized types. 
- (@generate_size ($min_sizes:tt, $elem_sizes:tt)) => { - itertools::iproduct!( - test!(@generate_min_size $min_sizes), - test!(@generate_elem_size $elem_sizes) - ).map(Into::::into) - }; - (@generate_fixed_size _) => { (0..8).into_iter().map(Into::::into) }; - (@generate_min_size _) => { 0..8 }; - (@generate_elem_size _) => { 1..8 }; - (@generate_align _) => { [1, 2, 4, 8, 16] }; - (@generate_opt_usize _) => { [None].into_iter().chain((0..8).map(Some).into_iter()) }; - (@generate_cast_type _) => { [CastType::Prefix, CastType::Suffix] }; - (@generate_cast_type $variant:ident) => { [CastType::$variant] }; - // Some expressions need to be wrapped in parentheses in order to be - // valid `tt`s (required by the top match pattern). See the comment - // below for more details. This arm removes these parentheses to - // avoid generating an `unused_parens` warning. - (@$_:ident ($vals:expr)) => { $vals }; - (@$_:ident $vals:expr) => { $vals }; - } - - const EVENS: [usize; 8] = [0, 2, 4, 6, 8, 10, 12, 14]; - const ODDS: [usize; 8] = [1, 3, 5, 7, 9, 11, 13, 15]; - - // base_size is too big for the memory region. 
- test!( - layout(((1..8) | ((1..8), (1..8))), _).validate([0], [0], _), - Ok(Err(MetadataCastError::Size)) - ); - test!( - layout(((2..8) | ((2..8), (2..8))), _).validate([0], [1], Prefix), - Ok(Err(MetadataCastError::Size)) - ); - test!( - layout(((2..8) | ((2..8), (2..8))), _).validate([0x1000_0000 - 1], [1], Suffix), - Ok(Err(MetadataCastError::Size)) - ); - - // addr is unaligned for prefix cast - test!(layout(_, [2]).validate(ODDS, _, Prefix), Ok(Err(MetadataCastError::Alignment))); - test!(layout(_, [2]).validate(ODDS, _, Prefix), Ok(Err(MetadataCastError::Alignment))); - - // addr is aligned, but end of buffer is unaligned for suffix cast - test!(layout(_, [2]).validate(EVENS, ODDS, Suffix), Ok(Err(MetadataCastError::Alignment))); - test!(layout(_, [2]).validate(EVENS, ODDS, Suffix), Ok(Err(MetadataCastError::Alignment))); - - // Unfortunately, these constants cannot easily be used in the - // implementation of `validate_cast_and_convert_metadata`, since - // `panic!` consumes a string literal, not an expression. - // - // It's important that these messages be in a separate module. If they - // were at the function's top level, we'd pass them to `test!` as, e.g., - // `Err(TRAILING)`, which would run into a subtle Rust footgun - the - // `TRAILING` identifier would be treated as a pattern to match rather - // than a value to check for equality. 
- mod msgs { - pub(super) const TRAILING: &str = - "attempted to cast to slice type with zero-sized element"; - pub(super) const OVERFLOW: &str = "`addr` + `bytes_len` > usize::MAX"; - } - - // casts with ZST trailing element types are unsupported - test!(layout((_, [0]), _).validate(_, _, _), Err(Some(msgs::TRAILING) | None),); - - // addr + bytes_len must not overflow usize - test!(layout(_, _).validate([usize::MAX], (1..100), _), Err(Some(msgs::OVERFLOW) | None)); - test!(layout(_, _).validate((1..100), [usize::MAX], _), Err(Some(msgs::OVERFLOW) | None)); - test!( - layout(_, _).validate( - [usize::MAX / 2 + 1, usize::MAX], - [usize::MAX / 2 + 1, usize::MAX], - _ - ), - Err(Some(msgs::OVERFLOW) | None) - ); - - // Validates that `validate_cast_and_convert_metadata` satisfies its own - // documented safety postconditions, and also a few other properties - // that aren't documented but we want to guarantee anyway. - fn validate_behavior( - (layout, addr, bytes_len, cast_type): (DstLayout, usize, usize, CastType), - ) { - if let Ok((elems, split_at)) = - layout.validate_cast_and_convert_metadata(addr, bytes_len, cast_type) - { - let (size_info, align) = (layout.size_info, layout.align); - let debug_str = format!( - "layout({:?}, {}).validate_cast_and_convert_metadata({}, {}, {:?}) => ({}, {})", - size_info, align, addr, bytes_len, cast_type, elems, split_at - ); - - // If this is a sized type (no trailing slice), then `elems` is - // meaningless, but in practice we set it to 0. Callers are not - // allowed to rely on this, but a lot of math is nicer if - // they're able to, and some callers might accidentally do that. - let sized = matches!(layout.size_info, SizeInfo::Sized { .. 
}); - assert!(!(sized && elems != 0), "{}", debug_str); - - let resulting_size = match layout.size_info { - SizeInfo::Sized { size } => size, - SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }) => { - let padded_size = |elems| { - let without_padding = offset + elems * elem_size; - without_padding + util::padding_needed_for(without_padding, align) - }; - - let resulting_size = padded_size(elems); - // Test that `validate_cast_and_convert_metadata` - // computed the largest possible value that fits in the - // given range. - assert!(padded_size(elems + 1) > bytes_len, "{}", debug_str); - resulting_size - } - }; - - // Test safety postconditions guaranteed by - // `validate_cast_and_convert_metadata`. - assert!(resulting_size <= bytes_len, "{}", debug_str); - match cast_type { - CastType::Prefix => { - assert_eq!(addr % align, 0, "{}", debug_str); - assert_eq!(resulting_size, split_at, "{}", debug_str); - } - CastType::Suffix => { - assert_eq!(split_at, bytes_len - resulting_size, "{}", debug_str); - assert_eq!((addr + split_at) % align, 0, "{}", debug_str); - } - } - } else { - let min_size = match layout.size_info { - SizeInfo::Sized { size } => size, - SizeInfo::SliceDst(TrailingSliceLayout { offset, .. }) => { - offset + util::padding_needed_for(offset, layout.align) - } - }; - - // If a cast is invalid, it is either because... - // 1. there are insufficent bytes at the given region for type: - let insufficient_bytes = bytes_len < min_size; - // 2. 
performing the cast would misalign type: - let base = match cast_type { - CastType::Prefix => 0, - CastType::Suffix => bytes_len, - }; - let misaligned = (base + addr) % layout.align != 0; - - assert!(insufficient_bytes || misaligned); - } - } - - let sizes = 0..8; - let elem_sizes = 1..8; - let size_infos = sizes - .clone() - .map(Into::::into) - .chain(itertools::iproduct!(sizes, elem_sizes).map(Into::::into)); - let layouts = itertools::iproduct!(size_infos, [1, 2, 4, 8, 16, 32]) - .filter(|(size_info, align)| !matches!(size_info, SizeInfo::Sized { size } if size % align != 0)) - .map(|(size_info, align)| layout(size_info, align)); - itertools::iproduct!(layouts, 0..8, 0..8, [CastType::Prefix, CastType::Suffix]) - .for_each(validate_behavior); - } - - #[test] - #[cfg(__INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS)] - fn test_validate_rust_layout() { - use core::ptr::NonNull; - - // This test synthesizes pointers with various metadata and uses Rust's - // built-in APIs to confirm that Rust makes decisions about type layout - // which are consistent with what we believe is guaranteed by the - // language. If this test fails, it doesn't just mean our code is wrong - // - it means we're misunderstanding the language's guarantees. - - #[derive(Debug)] - struct MacroArgs { - offset: usize, - align: NonZeroUsize, - elem_size: Option, - } - - /// # Safety - /// - /// `test` promises to only call `addr_of_slice_field` on a `NonNull` - /// which points to a valid `T`. - /// - /// `with_elems` must produce a pointer which points to a valid `T`. - fn test NonNull>( - args: MacroArgs, - with_elems: W, - addr_of_slice_field: Option) -> NonNull>, - ) { - let dst = args.elem_size.is_some(); - let layout = { - let size_info = match args.elem_size { - Some(elem_size) => { - SizeInfo::SliceDst(TrailingSliceLayout { offset: args.offset, elem_size }) - } - None => SizeInfo::Sized { - // Rust only supports types whose sizes are a multiple - // of their alignment. 
If the macro created a type like - // this: - // - // #[repr(C, align(2))] - // struct Foo([u8; 1]); - // - // ...then Rust will automatically round the type's size - // up to 2. - size: args.offset + util::padding_needed_for(args.offset, args.align), - }, - }; - DstLayout { size_info, align: args.align } - }; - - for elems in 0..128 { - let ptr = with_elems(elems); - - if let Some(addr_of_slice_field) = addr_of_slice_field { - let slc_field_ptr = addr_of_slice_field(ptr).as_ptr(); - // SAFETY: Both `slc_field_ptr` and `ptr` are pointers to - // the same valid Rust object. - #[allow(clippy::incompatible_msrv)] - // Work around https://github.com/rust-lang/rust-clippy/issues/12280 - let offset: usize = - unsafe { slc_field_ptr.byte_offset_from(ptr.as_ptr()).try_into().unwrap() }; - assert_eq!(offset, args.offset); - } - - // SAFETY: `ptr` points to a valid `T`. - let (size, align) = unsafe { - (mem::size_of_val_raw(ptr.as_ptr()), mem::align_of_val_raw(ptr.as_ptr())) - }; - - // Avoid expensive allocation when running under Miri. - let assert_msg = if !cfg!(miri) { - format!("\n{:?}\nsize:{}, align:{}", args, size, align) - } else { - String::new() - }; - - let without_padding = - args.offset + args.elem_size.map(|elem_size| elems * elem_size).unwrap_or(0); - assert!(size >= without_padding, "{}", assert_msg); - assert_eq!(align, args.align.get(), "{}", assert_msg); - - // This encodes the most important part of the test: our - // understanding of how Rust determines the layout of repr(C) - // types. Sized repr(C) types are trivial, but DST types have - // some subtlety. Note that: - // - For sized types, `without_padding` is just the size of the - // type that we constructed for `Foo`. Since we may have - // requested a larger alignment, `Foo` may actually be larger - // than this, hence `padding_needed_for`. - // - For unsized types, `without_padding` is dynamically - // computed from the offset, the element size, and element - // count. 
We expect that the size of the object should be - // `offset + elem_size * elems` rounded up to the next - // alignment. - let expected_size = - without_padding + util::padding_needed_for(without_padding, args.align); - assert_eq!(expected_size, size, "{}", assert_msg); - - // For zero-sized element types, - // `validate_cast_and_convert_metadata` just panics, so we skip - // testing those types. - if args.elem_size.map(|elem_size| elem_size > 0).unwrap_or(true) { - let addr = ptr.addr().get(); - let (got_elems, got_split_at) = layout - .validate_cast_and_convert_metadata(addr, size, CastType::Prefix) - .unwrap(); - // Avoid expensive allocation when running under Miri. - let assert_msg = if !cfg!(miri) { - format!( - "{}\nvalidate_cast_and_convert_metadata({}, {})", - assert_msg, addr, size, - ) - } else { - String::new() - }; - assert_eq!(got_split_at, size, "{}", assert_msg); - if dst { - assert!(got_elems >= elems, "{}", assert_msg); - if got_elems != elems { - // If `validate_cast_and_convert_metadata` - // returned more elements than `elems`, that - // means that `elems` is not the maximum number - // of elements that can fit in `size` - in other - // words, there is enough padding at the end of - // the value to fit at least one more element. - // If we use this metadata to synthesize a - // pointer, despite having a different element - // count, we still expect it to have the same - // size. - let got_ptr = with_elems(got_elems); - // SAFETY: `got_ptr` is a pointer to a valid `T`. - let size_of_got_ptr = unsafe { mem::size_of_val_raw(got_ptr.as_ptr()) }; - assert_eq!(size_of_got_ptr, size, "{}", assert_msg); - } - } else { - // For sized casts, the returned element value is - // technically meaningless, and we don't guarantee any - // particular value. In practice, it's always zero. - assert_eq!(got_elems, 0, "{}", assert_msg) - } - } - } - } - - macro_rules! validate_against_rust { - ($offset:literal, $align:literal $(, $elem_size:literal)?) 
=> {{ - #[repr(C, align($align))] - struct Foo([u8; $offset]$(, [[u8; $elem_size]])?); - - let args = MacroArgs { - offset: $offset, - align: $align.try_into().unwrap(), - elem_size: { - #[allow(unused)] - let ret = None::; - $(let ret = Some($elem_size);)? - ret - } - }; - - #[repr(C, align($align))] - struct FooAlign; - // Create an aligned buffer to use in order to synthesize - // pointers to `Foo`. We don't ever load values from these - // pointers - we just do arithmetic on them - so having a "real" - // block of memory as opposed to a validly-aligned-but-dangling - // pointer is only necessary to make Miri happy since we run it - // with "strict provenance" checking enabled. - let aligned_buf = Align::<_, FooAlign>::new([0u8; 1024]); - let with_elems = |elems| { - let slc = NonNull::slice_from_raw_parts(NonNull::from(&aligned_buf.t), elems); - #[allow(clippy::as_conversions)] - NonNull::new(slc.as_ptr() as *mut Foo).unwrap() - }; - let addr_of_slice_field = { - #[allow(unused)] - let f = None::) -> NonNull>; - $( - // SAFETY: `test` promises to only call `f` with a `ptr` - // to a valid `Foo`. - let f: Option) -> NonNull> = Some(|ptr: NonNull| unsafe { - NonNull::new(ptr::addr_of_mut!((*ptr.as_ptr()).1)).unwrap().cast::() - }); - let _ = $elem_size; - )? 
- f - }; - - test::(args, with_elems, addr_of_slice_field); - }}; - } - - // Every permutation of: - // - offset in [0, 4] - // - align in [1, 16] - // - elem_size in [0, 4] (plus no elem_size) - validate_against_rust!(0, 1); - validate_against_rust!(0, 1, 0); - validate_against_rust!(0, 1, 1); - validate_against_rust!(0, 1, 2); - validate_against_rust!(0, 1, 3); - validate_against_rust!(0, 1, 4); - validate_against_rust!(0, 2); - validate_against_rust!(0, 2, 0); - validate_against_rust!(0, 2, 1); - validate_against_rust!(0, 2, 2); - validate_against_rust!(0, 2, 3); - validate_against_rust!(0, 2, 4); - validate_against_rust!(0, 4); - validate_against_rust!(0, 4, 0); - validate_against_rust!(0, 4, 1); - validate_against_rust!(0, 4, 2); - validate_against_rust!(0, 4, 3); - validate_against_rust!(0, 4, 4); - validate_against_rust!(0, 8); - validate_against_rust!(0, 8, 0); - validate_against_rust!(0, 8, 1); - validate_against_rust!(0, 8, 2); - validate_against_rust!(0, 8, 3); - validate_against_rust!(0, 8, 4); - validate_against_rust!(0, 16); - validate_against_rust!(0, 16, 0); - validate_against_rust!(0, 16, 1); - validate_against_rust!(0, 16, 2); - validate_against_rust!(0, 16, 3); - validate_against_rust!(0, 16, 4); - validate_against_rust!(1, 1); - validate_against_rust!(1, 1, 0); - validate_against_rust!(1, 1, 1); - validate_against_rust!(1, 1, 2); - validate_against_rust!(1, 1, 3); - validate_against_rust!(1, 1, 4); - validate_against_rust!(1, 2); - validate_against_rust!(1, 2, 0); - validate_against_rust!(1, 2, 1); - validate_against_rust!(1, 2, 2); - validate_against_rust!(1, 2, 3); - validate_against_rust!(1, 2, 4); - validate_against_rust!(1, 4); - validate_against_rust!(1, 4, 0); - validate_against_rust!(1, 4, 1); - validate_against_rust!(1, 4, 2); - validate_against_rust!(1, 4, 3); - validate_against_rust!(1, 4, 4); - validate_against_rust!(1, 8); - validate_against_rust!(1, 8, 0); - validate_against_rust!(1, 8, 1); - validate_against_rust!(1, 8, 2); - 
validate_against_rust!(1, 8, 3); - validate_against_rust!(1, 8, 4); - validate_against_rust!(1, 16); - validate_against_rust!(1, 16, 0); - validate_against_rust!(1, 16, 1); - validate_against_rust!(1, 16, 2); - validate_against_rust!(1, 16, 3); - validate_against_rust!(1, 16, 4); - validate_against_rust!(2, 1); - validate_against_rust!(2, 1, 0); - validate_against_rust!(2, 1, 1); - validate_against_rust!(2, 1, 2); - validate_against_rust!(2, 1, 3); - validate_against_rust!(2, 1, 4); - validate_against_rust!(2, 2); - validate_against_rust!(2, 2, 0); - validate_against_rust!(2, 2, 1); - validate_against_rust!(2, 2, 2); - validate_against_rust!(2, 2, 3); - validate_against_rust!(2, 2, 4); - validate_against_rust!(2, 4); - validate_against_rust!(2, 4, 0); - validate_against_rust!(2, 4, 1); - validate_against_rust!(2, 4, 2); - validate_against_rust!(2, 4, 3); - validate_against_rust!(2, 4, 4); - validate_against_rust!(2, 8); - validate_against_rust!(2, 8, 0); - validate_against_rust!(2, 8, 1); - validate_against_rust!(2, 8, 2); - validate_against_rust!(2, 8, 3); - validate_against_rust!(2, 8, 4); - validate_against_rust!(2, 16); - validate_against_rust!(2, 16, 0); - validate_against_rust!(2, 16, 1); - validate_against_rust!(2, 16, 2); - validate_against_rust!(2, 16, 3); - validate_against_rust!(2, 16, 4); - validate_against_rust!(3, 1); - validate_against_rust!(3, 1, 0); - validate_against_rust!(3, 1, 1); - validate_against_rust!(3, 1, 2); - validate_against_rust!(3, 1, 3); - validate_against_rust!(3, 1, 4); - validate_against_rust!(3, 2); - validate_against_rust!(3, 2, 0); - validate_against_rust!(3, 2, 1); - validate_against_rust!(3, 2, 2); - validate_against_rust!(3, 2, 3); - validate_against_rust!(3, 2, 4); - validate_against_rust!(3, 4); - validate_against_rust!(3, 4, 0); - validate_against_rust!(3, 4, 1); - validate_against_rust!(3, 4, 2); - validate_against_rust!(3, 4, 3); - validate_against_rust!(3, 4, 4); - validate_against_rust!(3, 8); - 
validate_against_rust!(3, 8, 0); - validate_against_rust!(3, 8, 1); - validate_against_rust!(3, 8, 2); - validate_against_rust!(3, 8, 3); - validate_against_rust!(3, 8, 4); - validate_against_rust!(3, 16); - validate_against_rust!(3, 16, 0); - validate_against_rust!(3, 16, 1); - validate_against_rust!(3, 16, 2); - validate_against_rust!(3, 16, 3); - validate_against_rust!(3, 16, 4); - validate_against_rust!(4, 1); - validate_against_rust!(4, 1, 0); - validate_against_rust!(4, 1, 1); - validate_against_rust!(4, 1, 2); - validate_against_rust!(4, 1, 3); - validate_against_rust!(4, 1, 4); - validate_against_rust!(4, 2); - validate_against_rust!(4, 2, 0); - validate_against_rust!(4, 2, 1); - validate_against_rust!(4, 2, 2); - validate_against_rust!(4, 2, 3); - validate_against_rust!(4, 2, 4); - validate_against_rust!(4, 4); - validate_against_rust!(4, 4, 0); - validate_against_rust!(4, 4, 1); - validate_against_rust!(4, 4, 2); - validate_against_rust!(4, 4, 3); - validate_against_rust!(4, 4, 4); - validate_against_rust!(4, 8); - validate_against_rust!(4, 8, 0); - validate_against_rust!(4, 8, 1); - validate_against_rust!(4, 8, 2); - validate_against_rust!(4, 8, 3); - validate_against_rust!(4, 8, 4); - validate_against_rust!(4, 16); - validate_against_rust!(4, 16, 0); - validate_against_rust!(4, 16, 1); - validate_against_rust!(4, 16, 2); - validate_against_rust!(4, 16, 3); - validate_against_rust!(4, 16, 4); - } - #[test] fn test_known_layout() { // Test that `$ty` and `ManuallyDrop<$ty>` have the expected layout. @@ -7368,311 +6557,6 @@ mod tests { assert_eq!(AS_I32, i32::from_ne_bytes([b'a', b'b', b'c', b'd'])); } - #[test] - fn test_address() { - // Test that the `Deref` and `DerefMut` implementations return a - // reference which points to the right region of memory. 
- - let buf = [0]; - let r = Ref::<_, u8>::from(&buf[..]).unwrap(); - let buf_ptr = buf.as_ptr(); - let deref_ptr: *const u8 = r.deref(); - assert_eq!(buf_ptr, deref_ptr); - - let buf = [0]; - let r = Ref::<_, [u8]>::from(&buf[..]).unwrap(); - let buf_ptr = buf.as_ptr(); - let deref_ptr = r.deref().as_ptr(); - assert_eq!(buf_ptr, deref_ptr); - } - - // Verify that values written to a `Ref` are properly shared between the - // typed and untyped representations, that reads via `deref` and `read` - // behave the same, and that writes via `deref_mut` and `write` behave the - // same. - fn test_new_helper(mut r: Ref<&mut [u8], AU64>) { - // assert that the value starts at 0 - assert_eq!(*r, AU64(0)); - assert_eq!(r.read(), AU64(0)); - - // Assert that values written to the typed value are reflected in the - // byte slice. - const VAL1: AU64 = AU64(0xFF00FF00FF00FF00); - *r = VAL1; - assert_eq!(r.bytes(), &VAL1.to_bytes()); - *r = AU64(0); - r.write(VAL1); - assert_eq!(r.bytes(), &VAL1.to_bytes()); - - // Assert that values written to the byte slice are reflected in the - // typed value. - const VAL2: AU64 = AU64(!VAL1.0); // different from `VAL1` - r.bytes_mut().copy_from_slice(&VAL2.to_bytes()[..]); - assert_eq!(*r, VAL2); - assert_eq!(r.read(), VAL2); - } - - // Verify that values written to a `Ref` are properly shared between the - // typed and untyped representations; pass a value with `typed_len` `AU64`s - // backed by an array of `typed_len * 8` bytes. - fn test_new_helper_slice(mut r: Ref<&mut [u8], [AU64]>, typed_len: usize) { - // Assert that the value starts out zeroed. - assert_eq!(&*r, vec![AU64(0); typed_len].as_slice()); - - // Check the backing storage is the exact same slice. - let untyped_len = typed_len * 8; - assert_eq!(r.bytes().len(), untyped_len); - assert_eq!(r.bytes().as_ptr(), r.as_ptr().cast::()); - - // Assert that values written to the typed value are reflected in the - // byte slice. 
- const VAL1: AU64 = AU64(0xFF00FF00FF00FF00); - for typed in &mut *r { - *typed = VAL1; - } - assert_eq!(r.bytes(), VAL1.0.to_ne_bytes().repeat(typed_len).as_slice()); - - // Assert that values written to the byte slice are reflected in the - // typed value. - const VAL2: AU64 = AU64(!VAL1.0); // different from VAL1 - r.bytes_mut().copy_from_slice(&VAL2.0.to_ne_bytes().repeat(typed_len)); - assert!(r.iter().copied().all(|x| x == VAL2)); - } - - // Verify that values written to a `Ref` are properly shared between the - // typed and untyped representations, that reads via `deref` and `read` - // behave the same, and that writes via `deref_mut` and `write` behave the - // same. - fn test_new_helper_unaligned(mut r: Ref<&mut [u8], [u8; 8]>) { - // assert that the value starts at 0 - assert_eq!(*r, [0; 8]); - assert_eq!(r.read(), [0; 8]); - - // Assert that values written to the typed value are reflected in the - // byte slice. - const VAL1: [u8; 8] = [0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00]; - *r = VAL1; - assert_eq!(r.bytes(), &VAL1); - *r = [0; 8]; - r.write(VAL1); - assert_eq!(r.bytes(), &VAL1); - - // Assert that values written to the byte slice are reflected in the - // typed value. - const VAL2: [u8; 8] = [0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF]; // different from VAL1 - r.bytes_mut().copy_from_slice(&VAL2[..]); - assert_eq!(*r, VAL2); - assert_eq!(r.read(), VAL2); - } - - // Verify that values written to a `Ref` are properly shared between the - // typed and untyped representations; pass a value with `len` `u8`s backed - // by an array of `len` bytes. - fn test_new_helper_slice_unaligned(mut r: Ref<&mut [u8], [u8]>, len: usize) { - // Assert that the value starts out zeroed. - assert_eq!(&*r, vec![0u8; len].as_slice()); - - // Check the backing storage is the exact same slice. 
-        assert_eq!(r.bytes().len(), len);
-        assert_eq!(r.bytes().as_ptr(), r.as_ptr());
-
-        // Assert that values written to the typed value are reflected in the
-        // byte slice.
-        let mut expected_bytes = [0xFF, 0x00].iter().copied().cycle().take(len).collect::<Vec<_>>();
-        r.copy_from_slice(&expected_bytes);
-        assert_eq!(r.bytes(), expected_bytes.as_slice());
-
-        // Assert that values written to the byte slice are reflected in the
-        // typed value.
-        for byte in &mut expected_bytes {
-            *byte = !*byte; // different from `expected_len`
-        }
-        r.bytes_mut().copy_from_slice(&expected_bytes);
-        assert_eq!(&*r, expected_bytes.as_slice());
-    }
-
-    #[test]
-    fn test_new_aligned_sized() {
-        // Test that a properly-aligned, properly-sized buffer works for new,
-        // new_from_prefix, and new_from_suffix, and that new_from_prefix and
-        // new_from_suffix return empty slices. Test that a properly-aligned
-        // buffer whose length is a multiple of the element size works for
-        // new_slice.
-
-        // A buffer with an alignment of 8.
-        let mut buf = Align::<[u8; 8], AU64>::default();
-        // `buf.t` should be aligned to 8, so this should always succeed.
-        test_new_helper(Ref::<_, AU64>::from(&mut buf.t[..]).unwrap());
-        {
-            // In a block so that `r` and `suffix` don't live too long.
-            buf.set_default();
-            let (r, suffix) = Ref::<_, AU64>::from_prefix(&mut buf.t[..]).unwrap();
-            assert!(suffix.is_empty());
-            test_new_helper(r);
-        }
-        {
-            buf.set_default();
-            let (prefix, r) = Ref::<_, AU64>::from_suffix(&mut buf.t[..]).unwrap();
-            assert!(prefix.is_empty());
-            test_new_helper(r);
-        }
-
-        // A buffer with alignment 8 and length 24. We choose this length very
-        // intentionally: if we instead used length 16, then the prefix and
-        // suffix lengths would be identical. In the past, we used length 16,
-        // which resulted in this test failing to discover the bug uncovered in
-        // #506.
-        let mut buf = Align::<[u8; 24], AU64>::default();
-        // `buf.t` should be aligned to 8 and have a length which is a multiple
-        // of `size_of::<AU64>()`, so this should always succeed.
-        test_new_helper_slice(Ref::<_, [AU64]>::from(&mut buf.t[..]).unwrap(), 3);
-        let ascending: [u8; 24] = (0..24).collect::<Vec<_>>().try_into().unwrap();
-        // 16 ascending bytes followed by 8 zeros.
-        let mut ascending_prefix = ascending;
-        ascending_prefix[16..].copy_from_slice(&[0, 0, 0, 0, 0, 0, 0, 0]);
-        // 8 zeros followed by 16 ascending bytes.
-        let mut ascending_suffix = ascending;
-        ascending_suffix[..8].copy_from_slice(&[0, 0, 0, 0, 0, 0, 0, 0]);
-
-        {
-            buf.t = ascending_suffix;
-            let (r, suffix) = Ref::<_, [AU64]>::from_prefix_with_elems(&mut buf.t[..], 1).unwrap();
-            assert_eq!(suffix, &ascending[8..]);
-            test_new_helper_slice(r, 1);
-        }
-        {
-            buf.t = ascending_prefix;
-            let (prefix, r) = Ref::<_, [AU64]>::from_suffix_with_elems(&mut buf.t[..], 1).unwrap();
-            assert_eq!(prefix, &ascending[..16]);
-            test_new_helper_slice(r, 1);
-        }
-    }
-
-    #[test]
-    fn test_new_unaligned_sized() {
-        // Test that an unaligned, properly-sized buffer works for
-        // `new_unaligned`, `new_unaligned_from_prefix`, and
-        // `new_unaligned_from_suffix`, and that `new_unaligned_from_prefix`
-        // `new_unaligned_from_suffix` return empty slices. Test that an
-        // unaligned buffer whose length is a multiple of the element size works
-        // for `new_slice`.
-
-        let mut buf = [0u8; 8];
-        test_new_helper_unaligned(Ref::<_, [u8; 8]>::unaligned_from(&mut buf[..]).unwrap());
-        {
-            // In a block so that `r` and `suffix` don't live too long.
-            buf = [0u8; 8];
-            let (r, suffix) = Ref::<_, [u8; 8]>::unaligned_from_prefix(&mut buf[..]).unwrap();
-            assert!(suffix.is_empty());
-            test_new_helper_unaligned(r);
-        }
-        {
-            buf = [0u8; 8];
-            let (prefix, r) = Ref::<_, [u8; 8]>::unaligned_from_suffix(&mut buf[..]).unwrap();
-            assert!(prefix.is_empty());
-            test_new_helper_unaligned(r);
-        }
-
-        let mut buf = [0u8; 16];
-        // `buf.t` should be aligned to 8 and have a length which is a multiple
-        // of `size_of::<AU64>()`, so this should always succeed.
-        test_new_helper_slice_unaligned(Ref::<_, [u8]>::unaligned_from(&mut buf[..]).unwrap(), 16);
-
-        {
-            buf = [0u8; 16];
-            let (r, suffix) =
-                Ref::<_, [u8]>::unaligned_from_prefix_with_elems(&mut buf[..], 8).unwrap();
-            assert_eq!(suffix, [0; 8]);
-            test_new_helper_slice_unaligned(r, 8);
-        }
-        {
-            buf = [0u8; 16];
-            let (prefix, r) =
-                Ref::<_, [u8]>::unaligned_from_suffix_with_elems(&mut buf[..], 8).unwrap();
-            assert_eq!(prefix, [0; 8]);
-            test_new_helper_slice_unaligned(r, 8);
-        }
-    }
-
-    #[test]
-    fn test_new_oversized() {
-        // Test that a properly-aligned, overly-sized buffer works for
-        // `new_from_prefix` and `new_from_suffix`, and that they return the
-        // remainder and prefix of the slice respectively.
-
-        let mut buf = Align::<[u8; 16], AU64>::default();
-        {
-            // In a block so that `r` and `suffix` don't live too long. `buf.t`
-            // should be aligned to 8, so this should always succeed.
-            let (r, suffix) = Ref::<_, AU64>::from_prefix(&mut buf.t[..]).unwrap();
-            assert_eq!(suffix.len(), 8);
-            test_new_helper(r);
-        }
-        {
-            buf.set_default();
-            // `buf.t` should be aligned to 8, so this should always succeed.
- let (prefix, r) = Ref::<_, AU64>::from_suffix(&mut buf.t[..]).unwrap(); - assert_eq!(prefix.len(), 8); - test_new_helper(r); - } - } - - #[test] - fn test_new_unaligned_oversized() { - // Test than an unaligned, overly-sized buffer works for - // `new_unaligned_from_prefix` and `new_unaligned_from_suffix`, and that - // they return the remainder and prefix of the slice respectively. - - let mut buf = [0u8; 16]; - { - // In a block so that `r` and `suffix` don't live too long. - let (r, suffix) = Ref::<_, [u8; 8]>::unaligned_from_prefix(&mut buf[..]).unwrap(); - assert_eq!(suffix.len(), 8); - test_new_helper_unaligned(r); - } - { - buf = [0u8; 16]; - let (prefix, r) = Ref::<_, [u8; 8]>::unaligned_from_suffix(&mut buf[..]).unwrap(); - assert_eq!(prefix.len(), 8); - test_new_helper_unaligned(r); - } - } - - #[test] - fn test_ref_from_mut_from() { - // Test `FromBytes::{ref_from, mut_from}{,_prefix,Suffix}` success cases - // Exhaustive coverage for these methods is covered by the `Ref` tests above, - // which these helper methods defer to. - - let mut buf = - Align::<[u8; 16], AU64>::new([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); - - assert_eq!( - AU64::ref_from(&buf.t[8..]).unwrap().0.to_ne_bytes(), - [8, 9, 10, 11, 12, 13, 14, 15] - ); - let suffix = AU64::mut_from(&mut buf.t[8..]).unwrap(); - suffix.0 = 0x0101010101010101; - // The `[u8:9]` is a non-half size of the full buffer, which would catch - // `from_prefix` having the same implementation as `from_suffix` (issues #506, #511). 
- assert_eq!( - <[u8; 9]>::ref_from_suffix(&buf.t[..]).unwrap(), - (&[0, 1, 2, 3, 4, 5, 6][..], &[7u8, 1, 1, 1, 1, 1, 1, 1, 1]) - ); - let (prefix, suffix) = AU64::mut_from_suffix(&mut buf.t[1..]).unwrap(); - assert_eq!(prefix, &mut [1u8, 2, 3, 4, 5, 6, 7][..]); - suffix.0 = 0x0202020202020202; - let (prefix, suffix) = <[u8; 10]>::mut_from_suffix(&mut buf.t[..]).unwrap(); - assert_eq!(prefix, &mut [0u8, 1, 2, 3, 4, 5][..]); - suffix[0] = 42; - assert_eq!( - <[u8; 9]>::ref_from_prefix(&buf.t[..]).unwrap(), - (&[0u8, 1, 2, 3, 4, 5, 42, 7, 2], &[2u8, 2, 2, 2, 2, 2, 2][..]) - ); - <[u8; 2]>::mut_from_prefix(&mut buf.t[..]).unwrap().0[1] = 30; - assert_eq!(buf.t, [0, 30, 2, 3, 4, 5, 42, 7, 2, 2, 2, 2, 2, 2, 2, 2]); - } - #[test] fn test_ref_from_mut_from_error() { // Test `FromBytes::{ref_from, mut_from}{,_prefix,Suffix}` error cases. @@ -7712,79 +6596,6 @@ mod tests { assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_err()); } - #[test] - #[allow(clippy::cognitive_complexity)] - fn test_new_error() { - // Fail because the buffer is too large. - - // A buffer with an alignment of 8. - let buf = Align::<[u8; 16], AU64>::default(); - // `buf.t` should be aligned to 8, so only the length check should fail. - assert!(Ref::<_, AU64>::from(&buf.t[..]).is_err()); - assert!(Ref::<_, [u8; 8]>::unaligned_from(&buf.t[..]).is_err()); - - // Fail because the buffer is too small. - - // A buffer with an alignment of 8. - let buf = Align::<[u8; 4], AU64>::default(); - // `buf.t` should be aligned to 8, so only the length check should fail. 
- assert!(Ref::<_, AU64>::from(&buf.t[..]).is_err()); - assert!(Ref::<_, [u8; 8]>::unaligned_from(&buf.t[..]).is_err()); - assert!(Ref::<_, AU64>::from_prefix(&buf.t[..]).is_err()); - assert!(Ref::<_, AU64>::from_suffix(&buf.t[..]).is_err()); - assert!(Ref::<_, [u8; 8]>::unaligned_from_prefix(&buf.t[..]).is_err()); - assert!(Ref::<_, [u8; 8]>::unaligned_from_suffix(&buf.t[..]).is_err()); - - // Fail because the length is not a multiple of the element size. - - let buf = Align::<[u8; 12], AU64>::default(); - // `buf.t` has length 12, but element size is 8. - assert!(Ref::<_, [AU64]>::from(&buf.t[..]).is_err()); - assert!(Ref::<_, [[u8; 8]]>::unaligned_from(&buf.t[..]).is_err()); - - // Fail because the buffer is too short. - let buf = Align::<[u8; 12], AU64>::default(); - // `buf.t` has length 12, but the element size is 8 (and we're expecting - // two of them). - assert!(Ref::<_, [AU64]>::from_prefix_with_elems(&buf.t[..], 2).is_err()); - assert!(Ref::<_, [AU64]>::from_suffix_with_elems(&buf.t[..], 2).is_err()); - assert!(Ref::<_, [[u8; 8]]>::unaligned_from_prefix_with_elems(&buf.t[..], 2).is_err()); - assert!(Ref::<_, [[u8; 8]]>::unaligned_from_suffix_with_elems(&buf.t[..], 2).is_err()); - - // Fail because the alignment is insufficient. - - // A buffer with an alignment of 8. An odd buffer size is chosen so that - // the last byte of the buffer has odd alignment. - let buf = Align::<[u8; 13], AU64>::default(); - // Slicing from 1, we get a buffer with size 12 (so the length check - // should succeed) but an alignment of only 1, which is insufficient. 
-        assert!(Ref::<_, AU64>::from(&buf.t[1..]).is_err());
-        assert!(Ref::<_, AU64>::from_prefix(&buf.t[1..]).is_err());
-        assert!(Ref::<_, [AU64]>::from(&buf.t[1..]).is_err());
-        assert!(Ref::<_, [AU64]>::from_prefix_with_elems(&buf.t[1..], 1).is_err());
-        assert!(Ref::<_, [AU64]>::from_suffix_with_elems(&buf.t[1..], 1).is_err());
-        // Slicing is unnecessary here because `new_from_suffix` uses the suffix
-        // of the slice, which has odd alignment.
-        assert!(Ref::<_, AU64>::from_suffix(&buf.t[..]).is_err());
-
-        // Fail due to arithmetic overflow.
-
-        let buf = Align::<[u8; 16], AU64>::default();
-        let unreasonable_len = usize::MAX / mem::size_of::<AU64>() + 1;
-        assert!(Ref::<_, [AU64]>::from_prefix_with_elems(&buf.t[..], unreasonable_len).is_err());
-        assert!(Ref::<_, [AU64]>::from_suffix_with_elems(&buf.t[..], unreasonable_len).is_err());
-        assert!(Ref::<_, [[u8; 8]]>::unaligned_from_prefix_with_elems(
-            &buf.t[..],
-            unreasonable_len
-        )
-        .is_err());
-        assert!(Ref::<_, [[u8; 8]]>::unaligned_from_suffix_with_elems(
-            &buf.t[..],
-            unreasonable_len
-        )
-        .is_err());
-    }
-
     #[test]
     fn test_to_methods() {
         /// Run a series of tests by calling `IntoBytes` methods on `t`.
@@ -7892,45 +6703,6 @@ mod tests { assert_eq!(foo.as_bytes(), &expected[..]); } - #[test] - fn test_display_debug() { - let buf = Align::<[u8; 8], u64>::default(); - let r = Ref::<_, u64>::from(&buf.t[..]).unwrap(); - assert_eq!(format!("{}", r), "0"); - assert_eq!(format!("{:?}", r), "Ref(0)"); - - let buf = Align::<[u8; 8], u64>::default(); - let r = Ref::<_, [u64]>::from(&buf.t[..]).unwrap(); - assert_eq!(format!("{:?}", r), "Ref([0])"); - } - - #[test] - fn test_eq() { - let buf1 = 0_u64; - let r1 = Ref::<_, u64>::from(buf1.as_bytes()).unwrap(); - let buf2 = 0_u64; - let r2 = Ref::<_, u64>::from(buf2.as_bytes()).unwrap(); - assert_eq!(r1, r2); - } - - #[test] - fn test_ne() { - let buf1 = 0_u64; - let r1 = Ref::<_, u64>::from(buf1.as_bytes()).unwrap(); - let buf2 = 1_u64; - let r2 = Ref::<_, u64>::from(buf2.as_bytes()).unwrap(); - assert_ne!(r1, r2); - } - - #[test] - fn test_ord() { - let buf1 = 0_u64; - let r1 = Ref::<_, u64>::from(buf1.as_bytes()).unwrap(); - let buf2 = 1_u64; - let r2 = Ref::<_, u64>::from(buf2.as_bytes()).unwrap(); - assert!(r1 < r2); - } - #[test] fn test_new_zeroed() { assert!(!bool::new_zeroed()); diff --git a/src/ref.rs b/src/ref.rs index a16dc2acbf..116438a754 100644 --- a/src/ref.rs +++ b/src/ref.rs @@ -902,3 +902,429 @@ where inner.partial_cmp(other_inner) } } + +#[cfg(test)] +#[allow(clippy::assertions_on_result_states)] +mod tests { + use core::convert::TryInto as _; + + use super::*; + use crate::util::testutil::*; + + #[test] + fn test_address() { + // Test that the `Deref` and `DerefMut` implementations return a + // reference which points to the right region of memory. 
+
+        let buf = [0];
+        let r = Ref::<_, u8>::from(&buf[..]).unwrap();
+        let buf_ptr = buf.as_ptr();
+        let deref_ptr: *const u8 = r.deref();
+        assert_eq!(buf_ptr, deref_ptr);
+
+        let buf = [0];
+        let r = Ref::<_, [u8]>::from(&buf[..]).unwrap();
+        let buf_ptr = buf.as_ptr();
+        let deref_ptr = r.deref().as_ptr();
+        assert_eq!(buf_ptr, deref_ptr);
+    }
+
+    // Verify that values written to a `Ref` are properly shared between the
+    // typed and untyped representations, that reads via `deref` and `read`
+    // behave the same, and that writes via `deref_mut` and `write` behave the
+    // same.
+    fn test_new_helper(mut r: Ref<&mut [u8], AU64>) {
+        // assert that the value starts at 0
+        assert_eq!(*r, AU64(0));
+        assert_eq!(r.read(), AU64(0));
+
+        // Assert that values written to the typed value are reflected in the
+        // byte slice.
+        const VAL1: AU64 = AU64(0xFF00FF00FF00FF00);
+        *r = VAL1;
+        assert_eq!(r.bytes(), &VAL1.to_bytes());
+        *r = AU64(0);
+        r.write(VAL1);
+        assert_eq!(r.bytes(), &VAL1.to_bytes());
+
+        // Assert that values written to the byte slice are reflected in the
+        // typed value.
+        const VAL2: AU64 = AU64(!VAL1.0); // different from `VAL1`
+        r.bytes_mut().copy_from_slice(&VAL2.to_bytes()[..]);
+        assert_eq!(*r, VAL2);
+        assert_eq!(r.read(), VAL2);
+    }
+
+    // Verify that values written to a `Ref` are properly shared between the
+    // typed and untyped representations; pass a value with `typed_len` `AU64`s
+    // backed by an array of `typed_len * 8` bytes.
+    fn test_new_helper_slice(mut r: Ref<&mut [u8], [AU64]>, typed_len: usize) {
+        // Assert that the value starts out zeroed.
+        assert_eq!(&*r, vec![AU64(0); typed_len].as_slice());
+
+        // Check the backing storage is the exact same slice.
+        let untyped_len = typed_len * 8;
+        assert_eq!(r.bytes().len(), untyped_len);
+        assert_eq!(r.bytes().as_ptr(), r.as_ptr().cast::<u8>());
+
+        // Assert that values written to the typed value are reflected in the
+        // byte slice.
+ const VAL1: AU64 = AU64(0xFF00FF00FF00FF00); + for typed in &mut *r { + *typed = VAL1; + } + assert_eq!(r.bytes(), VAL1.0.to_ne_bytes().repeat(typed_len).as_slice()); + + // Assert that values written to the byte slice are reflected in the + // typed value. + const VAL2: AU64 = AU64(!VAL1.0); // different from VAL1 + r.bytes_mut().copy_from_slice(&VAL2.0.to_ne_bytes().repeat(typed_len)); + assert!(r.iter().copied().all(|x| x == VAL2)); + } + + // Verify that values written to a `Ref` are properly shared between the + // typed and untyped representations, that reads via `deref` and `read` + // behave the same, and that writes via `deref_mut` and `write` behave the + // same. + fn test_new_helper_unaligned(mut r: Ref<&mut [u8], [u8; 8]>) { + // assert that the value starts at 0 + assert_eq!(*r, [0; 8]); + assert_eq!(r.read(), [0; 8]); + + // Assert that values written to the typed value are reflected in the + // byte slice. + const VAL1: [u8; 8] = [0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00]; + *r = VAL1; + assert_eq!(r.bytes(), &VAL1); + *r = [0; 8]; + r.write(VAL1); + assert_eq!(r.bytes(), &VAL1); + + // Assert that values written to the byte slice are reflected in the + // typed value. + const VAL2: [u8; 8] = [0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF]; // different from VAL1 + r.bytes_mut().copy_from_slice(&VAL2[..]); + assert_eq!(*r, VAL2); + assert_eq!(r.read(), VAL2); + } + + // Verify that values written to a `Ref` are properly shared between the + // typed and untyped representations; pass a value with `len` `u8`s backed + // by an array of `len` bytes. + fn test_new_helper_slice_unaligned(mut r: Ref<&mut [u8], [u8]>, len: usize) { + // Assert that the value starts out zeroed. + assert_eq!(&*r, vec![0u8; len].as_slice()); + + // Check the backing storage is the exact same slice. 
+        assert_eq!(r.bytes().len(), len);
+        assert_eq!(r.bytes().as_ptr(), r.as_ptr());
+
+        // Assert that values written to the typed value are reflected in the
+        // byte slice.
+        let mut expected_bytes = [0xFF, 0x00].iter().copied().cycle().take(len).collect::<Vec<_>>();
+        r.copy_from_slice(&expected_bytes);
+        assert_eq!(r.bytes(), expected_bytes.as_slice());
+
+        // Assert that values written to the byte slice are reflected in the
+        // typed value.
+        for byte in &mut expected_bytes {
+            *byte = !*byte; // different from `expected_bytes`
+        }
+        r.bytes_mut().copy_from_slice(&expected_bytes);
+        assert_eq!(&*r, expected_bytes.as_slice());
+    }
+
+    #[test]
+    fn test_new_aligned_sized() {
+        // Test that a properly-aligned, properly-sized buffer works for new,
+        // new_from_prefix, and new_from_suffix, and that new_from_prefix and
+        // new_from_suffix return empty slices. Test that a properly-aligned
+        // buffer whose length is a multiple of the element size works for
+        // new_slice.
+
+        // A buffer with an alignment of 8.
+        let mut buf = Align::<[u8; 8], AU64>::default();
+        // `buf.t` should be aligned to 8, so this should always succeed.
+        test_new_helper(Ref::<_, AU64>::from(&mut buf.t[..]).unwrap());
+        {
+            // In a block so that `r` and `suffix` don't live too long.
+            buf.set_default();
+            let (r, suffix) = Ref::<_, AU64>::from_prefix(&mut buf.t[..]).unwrap();
+            assert!(suffix.is_empty());
+            test_new_helper(r);
+        }
+        {
+            buf.set_default();
+            let (prefix, r) = Ref::<_, AU64>::from_suffix(&mut buf.t[..]).unwrap();
+            assert!(prefix.is_empty());
+            test_new_helper(r);
+        }
+
+        // A buffer with alignment 8 and length 24. We choose this length very
+        // intentionally: if we instead used length 16, then the prefix and
+        // suffix lengths would be identical. In the past, we used length 16,
+        // which resulted in this test failing to discover the bug uncovered in
+        // #506.
+        let mut buf = Align::<[u8; 24], AU64>::default();
+        // `buf.t` should be aligned to 8 and have a length which is a multiple
+        // of `size_of::<AU64>()`, so this should always succeed.
+        test_new_helper_slice(Ref::<_, [AU64]>::from(&mut buf.t[..]).unwrap(), 3);
+        let ascending: [u8; 24] = (0..24).collect::<Vec<_>>().try_into().unwrap();
+        // 16 ascending bytes followed by 8 zeros.
+        let mut ascending_prefix = ascending;
+        ascending_prefix[16..].copy_from_slice(&[0, 0, 0, 0, 0, 0, 0, 0]);
+        // 8 zeros followed by 16 ascending bytes.
+        let mut ascending_suffix = ascending;
+        ascending_suffix[..8].copy_from_slice(&[0, 0, 0, 0, 0, 0, 0, 0]);
+
+        {
+            buf.t = ascending_suffix;
+            let (r, suffix) = Ref::<_, [AU64]>::from_prefix_with_elems(&mut buf.t[..], 1).unwrap();
+            assert_eq!(suffix, &ascending[8..]);
+            test_new_helper_slice(r, 1);
+        }
+        {
+            buf.t = ascending_prefix;
+            let (prefix, r) = Ref::<_, [AU64]>::from_suffix_with_elems(&mut buf.t[..], 1).unwrap();
+            assert_eq!(prefix, &ascending[..16]);
+            test_new_helper_slice(r, 1);
+        }
+    }
+
+    #[test]
+    fn test_new_unaligned_sized() {
+        // Test that an unaligned, properly-sized buffer works for
+        // `new_unaligned`, `new_unaligned_from_prefix`, and
+        // `new_unaligned_from_suffix`, and that `new_unaligned_from_prefix`
+        // `new_unaligned_from_suffix` return empty slices. Test that an
+        // unaligned buffer whose length is a multiple of the element size works
+        // for `new_slice`.
+
+        let mut buf = [0u8; 8];
+        test_new_helper_unaligned(Ref::<_, [u8; 8]>::unaligned_from(&mut buf[..]).unwrap());
+        {
+            // In a block so that `r` and `suffix` don't live too long.
+            buf = [0u8; 8];
+            let (r, suffix) = Ref::<_, [u8; 8]>::unaligned_from_prefix(&mut buf[..]).unwrap();
+            assert!(suffix.is_empty());
+            test_new_helper_unaligned(r);
+        }
+        {
+            buf = [0u8; 8];
+            let (prefix, r) = Ref::<_, [u8; 8]>::unaligned_from_suffix(&mut buf[..]).unwrap();
+            assert!(prefix.is_empty());
+            test_new_helper_unaligned(r);
+        }
+
+        let mut buf = [0u8; 16];
+        // `buf.t` should be aligned to 8 and have a length which is a multiple
+        // of `size_of::<AU64>()`, so this should always succeed.
+        test_new_helper_slice_unaligned(Ref::<_, [u8]>::unaligned_from(&mut buf[..]).unwrap(), 16);
+
+        {
+            buf = [0u8; 16];
+            let (r, suffix) =
+                Ref::<_, [u8]>::unaligned_from_prefix_with_elems(&mut buf[..], 8).unwrap();
+            assert_eq!(suffix, [0; 8]);
+            test_new_helper_slice_unaligned(r, 8);
+        }
+        {
+            buf = [0u8; 16];
+            let (prefix, r) =
+                Ref::<_, [u8]>::unaligned_from_suffix_with_elems(&mut buf[..], 8).unwrap();
+            assert_eq!(prefix, [0; 8]);
+            test_new_helper_slice_unaligned(r, 8);
+        }
+    }
+
+    #[test]
+    fn test_new_oversized() {
+        // Test that a properly-aligned, overly-sized buffer works for
+        // `new_from_prefix` and `new_from_suffix`, and that they return the
+        // remainder and prefix of the slice respectively.
+
+        let mut buf = Align::<[u8; 16], AU64>::default();
+        {
+            // In a block so that `r` and `suffix` don't live too long. `buf.t`
+            // should be aligned to 8, so this should always succeed.
+            let (r, suffix) = Ref::<_, AU64>::from_prefix(&mut buf.t[..]).unwrap();
+            assert_eq!(suffix.len(), 8);
+            test_new_helper(r);
+        }
+        {
+            buf.set_default();
+            // `buf.t` should be aligned to 8, so this should always succeed.
+            let (prefix, r) = Ref::<_, AU64>::from_suffix(&mut buf.t[..]).unwrap();
+            assert_eq!(prefix.len(), 8);
+            test_new_helper(r);
+        }
+    }
+
+    #[test]
+    fn test_new_unaligned_oversized() {
+        // Test that an unaligned, overly-sized buffer works for
+        // `new_unaligned_from_prefix` and `new_unaligned_from_suffix`, and that
+        // they return the remainder and prefix of the slice respectively.
+
+        let mut buf = [0u8; 16];
+        {
+            // In a block so that `r` and `suffix` don't live too long.
+            let (r, suffix) = Ref::<_, [u8; 8]>::unaligned_from_prefix(&mut buf[..]).unwrap();
+            assert_eq!(suffix.len(), 8);
+            test_new_helper_unaligned(r);
+        }
+        {
+            buf = [0u8; 16];
+            let (prefix, r) = Ref::<_, [u8; 8]>::unaligned_from_suffix(&mut buf[..]).unwrap();
+            assert_eq!(prefix.len(), 8);
+            test_new_helper_unaligned(r);
+        }
+    }
+
+    #[test]
+    fn test_ref_from_mut_from() {
+        // Test `FromBytes::{ref_from, mut_from}{,_prefix,Suffix}` success cases
+        // Exhaustive coverage for these methods is covered by the `Ref` tests above,
+        // which these helper methods defer to.
+
+        let mut buf =
+            Align::<[u8; 16], AU64>::new([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
+
+        assert_eq!(
+            AU64::ref_from(&buf.t[8..]).unwrap().0.to_ne_bytes(),
+            [8, 9, 10, 11, 12, 13, 14, 15]
+        );
+        let suffix = AU64::mut_from(&mut buf.t[8..]).unwrap();
+        suffix.0 = 0x0101010101010101;
+        // The `[u8; 9]` is a non-half size of the full buffer, which would catch
+        // `from_prefix` having the same implementation as `from_suffix` (issues #506, #511).
+ assert_eq!( + <[u8; 9]>::ref_from_suffix(&buf.t[..]).unwrap(), + (&[0, 1, 2, 3, 4, 5, 6][..], &[7u8, 1, 1, 1, 1, 1, 1, 1, 1]) + ); + let (prefix, suffix) = AU64::mut_from_suffix(&mut buf.t[1..]).unwrap(); + assert_eq!(prefix, &mut [1u8, 2, 3, 4, 5, 6, 7][..]); + suffix.0 = 0x0202020202020202; + let (prefix, suffix) = <[u8; 10]>::mut_from_suffix(&mut buf.t[..]).unwrap(); + assert_eq!(prefix, &mut [0u8, 1, 2, 3, 4, 5][..]); + suffix[0] = 42; + assert_eq!( + <[u8; 9]>::ref_from_prefix(&buf.t[..]).unwrap(), + (&[0u8, 1, 2, 3, 4, 5, 42, 7, 2], &[2u8, 2, 2, 2, 2, 2, 2][..]) + ); + <[u8; 2]>::mut_from_prefix(&mut buf.t[..]).unwrap().0[1] = 30; + assert_eq!(buf.t, [0, 30, 2, 3, 4, 5, 42, 7, 2, 2, 2, 2, 2, 2, 2, 2]); + } + + #[test] + #[allow(clippy::cognitive_complexity)] + fn test_new_error() { + // Fail because the buffer is too large. + + // A buffer with an alignment of 8. + let buf = Align::<[u8; 16], AU64>::default(); + // `buf.t` should be aligned to 8, so only the length check should fail. + assert!(Ref::<_, AU64>::from(&buf.t[..]).is_err()); + assert!(Ref::<_, [u8; 8]>::unaligned_from(&buf.t[..]).is_err()); + + // Fail because the buffer is too small. + + // A buffer with an alignment of 8. + let buf = Align::<[u8; 4], AU64>::default(); + // `buf.t` should be aligned to 8, so only the length check should fail. + assert!(Ref::<_, AU64>::from(&buf.t[..]).is_err()); + assert!(Ref::<_, [u8; 8]>::unaligned_from(&buf.t[..]).is_err()); + assert!(Ref::<_, AU64>::from_prefix(&buf.t[..]).is_err()); + assert!(Ref::<_, AU64>::from_suffix(&buf.t[..]).is_err()); + assert!(Ref::<_, [u8; 8]>::unaligned_from_prefix(&buf.t[..]).is_err()); + assert!(Ref::<_, [u8; 8]>::unaligned_from_suffix(&buf.t[..]).is_err()); + + // Fail because the length is not a multiple of the element size. + + let buf = Align::<[u8; 12], AU64>::default(); + // `buf.t` has length 12, but element size is 8. 
+ assert!(Ref::<_, [AU64]>::from(&buf.t[..]).is_err()); + assert!(Ref::<_, [[u8; 8]]>::unaligned_from(&buf.t[..]).is_err()); + + // Fail because the buffer is too short. + let buf = Align::<[u8; 12], AU64>::default(); + // `buf.t` has length 12, but the element size is 8 (and we're expecting + // two of them). + assert!(Ref::<_, [AU64]>::from_prefix_with_elems(&buf.t[..], 2).is_err()); + assert!(Ref::<_, [AU64]>::from_suffix_with_elems(&buf.t[..], 2).is_err()); + assert!(Ref::<_, [[u8; 8]]>::unaligned_from_prefix_with_elems(&buf.t[..], 2).is_err()); + assert!(Ref::<_, [[u8; 8]]>::unaligned_from_suffix_with_elems(&buf.t[..], 2).is_err()); + + // Fail because the alignment is insufficient. + + // A buffer with an alignment of 8. An odd buffer size is chosen so that + // the last byte of the buffer has odd alignment. + let buf = Align::<[u8; 13], AU64>::default(); + // Slicing from 1, we get a buffer with size 12 (so the length check + // should succeed) but an alignment of only 1, which is insufficient. + assert!(Ref::<_, AU64>::from(&buf.t[1..]).is_err()); + assert!(Ref::<_, AU64>::from_prefix(&buf.t[1..]).is_err()); + assert!(Ref::<_, [AU64]>::from(&buf.t[1..]).is_err()); + assert!(Ref::<_, [AU64]>::from_prefix_with_elems(&buf.t[1..], 1).is_err()); + assert!(Ref::<_, [AU64]>::from_suffix_with_elems(&buf.t[1..], 1).is_err()); + // Slicing is unnecessary here because `new_from_suffix` uses the suffix + // of the slice, which has odd alignment. + assert!(Ref::<_, AU64>::from_suffix(&buf.t[..]).is_err()); + + // Fail due to arithmetic overflow. 
+
+        let buf = Align::<[u8; 16], AU64>::default();
+        let unreasonable_len = usize::MAX / mem::size_of::<AU64>() + 1;
+        assert!(Ref::<_, [AU64]>::from_prefix_with_elems(&buf.t[..], unreasonable_len).is_err());
+        assert!(Ref::<_, [AU64]>::from_suffix_with_elems(&buf.t[..], unreasonable_len).is_err());
+        assert!(Ref::<_, [[u8; 8]]>::unaligned_from_prefix_with_elems(
+            &buf.t[..],
+            unreasonable_len
+        )
+        .is_err());
+        assert!(Ref::<_, [[u8; 8]]>::unaligned_from_suffix_with_elems(
+            &buf.t[..],
+            unreasonable_len
+        )
+        .is_err());
+    }
+
+    #[test]
+    fn test_display_debug() {
+        let buf = Align::<[u8; 8], u64>::default();
+        let r = Ref::<_, u64>::from(&buf.t[..]).unwrap();
+        assert_eq!(format!("{}", r), "0");
+        assert_eq!(format!("{:?}", r), "Ref(0)");
+
+        let buf = Align::<[u8; 8], u64>::default();
+        let r = Ref::<_, [u64]>::from(&buf.t[..]).unwrap();
+        assert_eq!(format!("{:?}", r), "Ref([0])");
+    }
+
+    #[test]
+    fn test_eq() {
+        let buf1 = 0_u64;
+        let r1 = Ref::<_, u64>::from(buf1.as_bytes()).unwrap();
+        let buf2 = 0_u64;
+        let r2 = Ref::<_, u64>::from(buf2.as_bytes()).unwrap();
+        assert_eq!(r1, r2);
+    }
+
+    #[test]
+    fn test_ne() {
+        let buf1 = 0_u64;
+        let r1 = Ref::<_, u64>::from(buf1.as_bytes()).unwrap();
+        let buf2 = 1_u64;
+        let r2 = Ref::<_, u64>::from(buf2.as_bytes()).unwrap();
+        assert_ne!(r1, r2);
+    }
+
+    #[test]
+    fn test_ord() {
+        let buf1 = 0_u64;
+        let r1 = Ref::<_, u64>::from(buf1.as_bytes()).unwrap();
+        let buf2 = 1_u64;
+        let r2 = Ref::<_, u64>::from(buf2.as_bytes()).unwrap();
+        assert!(r1 < r2);
+    }
+}

From 7231fe8349393e367b15502dd32a8c9e195d4e3d Mon Sep 17 00:00:00 2001
From: Joshua Liebow-Feeser <joshlf@google.com>
Date: Fri, 10 May 2024 16:44:46 -0700
Subject: [PATCH 2/2] [impls] Move trait impls from crate root

---
 src/impls.rs | 1870 ++++++++++++++++++++++++++++++++++++++++++++++++++
 src/lib.rs   | 1856 +------------------------------------------------
 2 files changed, 1871 insertions(+), 1855 deletions(-)
 create mode 100644 src/impls.rs

diff --git a/src/impls.rs
b/src/impls.rs new file mode 100644 index 0000000000..22488f240c --- /dev/null +++ b/src/impls.rs @@ -0,0 +1,1870 @@ +// Copyright 2024 The Fuchsia Authors +// +// Licensed under the 2-Clause BSD License , Apache License, Version 2.0 +// , or the MIT +// license , at your option. +// This file may not be copied, modified, or distributed except according to +// those terms. + +use super::*; + +safety_comment! { + /// SAFETY: + /// Per the reference [1], "the unit tuple (`()`) ... is guaranteed as a + /// zero-sized type to have a size of 0 and an alignment of 1." + /// - `Immutable`: `()` self-evidently does not contain any `UnsafeCell`s. + /// - `TryFromBytes` (with no validator), `FromZeros`, `FromBytes`: There is + /// only one possible sequence of 0 bytes, and `()` is inhabited. + /// - `IntoBytes`: Since `()` has size 0, it contains no padding bytes. + /// - `Unaligned`: `()` has alignment 1. + /// + /// [1] https://doc.rust-lang.org/reference/type-layout.html#tuple-layout + unsafe_impl!((): Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); + assert_unaligned!(()); +} + +safety_comment! { + /// SAFETY: + /// - `Immutable`: These types self-evidently do not contain any + /// `UnsafeCell`s. + /// - `TryFromBytes` (with no validator), `FromZeros`, `FromBytes`: all bit + /// patterns are valid for numeric types [1] + /// - `IntoBytes`: numeric types have no padding bytes [1] + /// - `Unaligned` (`u8` and `i8` only): The reference [2] specifies the size + /// of `u8` and `i8` as 1 byte. We also know that: + /// - Alignment is >= 1 [3] + /// - Size is an integer multiple of alignment [4] + /// - The only value >= 1 for which 1 is an integer multiple is 1 + /// Therefore, the only possible alignment for `u8` and `i8` is 1. + /// + /// [1] Per https://doc.rust-lang.org/beta/reference/types/numeric.html#bit-validity: + /// + /// For every numeric type, `T`, the bit validity of `T` is equivalent to + /// the bit validity of `[u8; size_of::()]`. 
An uninitialized byte is + /// not a valid `u8`. + /// + /// TODO(https://github.com/rust-lang/reference/pull/1392): Once this text + /// is available on the Stable docs, cite those instead. + /// + /// [2] https://doc.rust-lang.org/reference/type-layout.html#primitive-data-layout + /// + /// [3] Per https://doc.rust-lang.org/reference/type-layout.html#size-and-alignment: + /// + /// Alignment is measured in bytes, and must be at least 1. + /// + /// [4] Per https://doc.rust-lang.org/reference/type-layout.html#size-and-alignment: + /// + /// The size of a value is always a multiple of its alignment. + /// + /// TODO(#278): Once we've updated the trait docs to refer to `u8`s rather + /// than bits or bytes, update this comment, especially the reference to + /// [1]. + unsafe_impl!(u8: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); + unsafe_impl!(i8: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); + assert_unaligned!(u8, i8); + unsafe_impl!(u16: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(i16: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(u32: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(i32: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(u64: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(i64: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(u128: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(i128: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(usize: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(isize: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(f32: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(f64: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); +} + +safety_comment! 
{ + /// SAFETY: + /// - `Immutable`: `bool` self-evidently does not contain any `UnsafeCell`s. + /// - `FromZeros`: Valid since "[t]he value false has the bit pattern 0x00" + /// [1]. + /// - `IntoBytes`: Since "the boolean type has a size and alignment of 1 + /// each" and "The value false has the bit pattern 0x00 and the value true + /// has the bit pattern 0x01" [1]. Thus, the only byte of the bool is + /// always initialized. + /// - `Unaligned`: Per the reference [1], "[a]n object with the boolean type + /// has a size and alignment of 1 each." + /// + /// [1] https://doc.rust-lang.org/reference/types/boolean.html + unsafe_impl!(bool: Immutable, FromZeros, IntoBytes, Unaligned); + assert_unaligned!(bool); + /// SAFETY: + /// - The safety requirements for `unsafe_impl!` with an `is_bit_valid` + /// closure: + /// - Given `t: *mut bool` and `let r = *mut u8`, `r` refers to an object + /// of the same size as that referred to by `t`. This is true because + /// `bool` and `u8` have the same size (1 byte) [1]. Neither `r` nor `t` + /// contain `UnsafeCell`s because neither `bool` nor `u8` do [4]. + /// - Since the closure takes a `&u8` argument, given a `Maybe<'a, + /// bool>` which satisfies the preconditions of + /// `TryFromBytes::::is_bit_valid`, it must be guaranteed that the + /// memory referenced by that `MaybeValid` always contains a valid `u8`. + /// Since `bool`'s single byte is always initialized, `is_bit_valid`'s + /// precondition requires that the same is true of its argument. Since + /// `u8`'s only bit validity invariant is that its single byte must be + /// initialized, this memory is guaranteed to contain a valid `u8`. + /// - The impl must only return `true` for its argument if the original + /// `Maybe` refers to a valid `bool`. We only return true if + /// the `u8` value is 0 or 1, and both of these are valid values for + /// `bool`. 
[3] + /// + /// [1] Per https://doc.rust-lang.org/reference/type-layout.html#primitive-data-layout: + /// + /// The size of most primitives is given in this table. + /// + /// | Type | `size_of::() ` | + /// |-----------|----------------------| + /// | `bool` | 1 | + /// | `u8`/`i8` | 1 | + /// + /// [2] Per https://doc.rust-lang.org/reference/type-layout.html#size-and-alignment: + /// + /// The size of a value is always a multiple of its alignment. + /// + /// [3] Per https://doc.rust-lang.org/reference/types/boolean.html: + /// + /// The value false has the bit pattern 0x00 and the value true has the + /// bit pattern 0x01. + /// + /// [4] TODO(#429): Justify this claim. + unsafe_impl!(bool: TryFromBytes; |byte: MaybeAligned| *byte.unaligned_as_ref() < 2); +} +safety_comment! { + /// SAFETY: + /// - `Immutable`: `char` self-evidently does not contain any `UnsafeCell`s. + /// - `FromZeros`: Per reference [1], "[a] value of type char is a Unicode + /// scalar value (i.e. a code point that is not a surrogate), represented + /// as a 32-bit unsigned word in the 0x0000 to 0xD7FF or 0xE000 to + /// 0x10FFFF range" which contains 0x0000. + /// - `IntoBytes`: `char` is per reference [1] "represented as a 32-bit + /// unsigned word" (`u32`) which is `IntoBytes`. Note that unlike `u32`, + /// not all bit patterns are valid for `char`. + /// + /// [1] https://doc.rust-lang.org/reference/types/textual.html + unsafe_impl!(char: Immutable, FromZeros, IntoBytes); + /// SAFETY: + /// - The safety requirements for `unsafe_impl!` with an `is_bit_valid` + /// closure: + /// - Given `t: *mut char` and `let r = *mut u32`, `r` refers to an object + /// of the same size as that referred to by `t`. This is true because + /// `char` and `u32` have the same size [1]. Neither `r` nor `t` contain + /// `UnsafeCell`s because neither `char` nor `u32` do [4]. 
+ /// - Since the closure takes a `&u32` argument, given a `Maybe<'a, + /// char>` which satisfies the preconditions of + /// `TryFromBytes::::is_bit_valid`, it must be guaranteed that the + /// memory referenced by that `MaybeValid` always contains a valid + /// `u32`. Since `char`'s bytes are always initialized [2], + /// `is_bit_valid`'s precondition requires that the same is true of its + /// argument. Since `u32`'s only bit validity invariant is that its + /// bytes must be initialized, this memory is guaranteed to contain a + /// valid `u32`. + /// - The impl must only return `true` for its argument if the original + /// `Maybe` refers to a valid `char`. `char::from_u32` + /// guarantees that it returns `None` if its input is not a valid + /// `char`. [3] + /// + /// [1] Per https://doc.rust-lang.org/nightly/reference/types/textual.html#layout-and-bit-validity: + /// + /// `char` is guaranteed to have the same size and alignment as `u32` on + /// all platforms. + /// + /// [2] Per https://doc.rust-lang.org/core/primitive.char.html#method.from_u32: + /// + /// Every byte of a `char` is guaranteed to be initialized. + /// + /// [3] Per https://doc.rust-lang.org/core/primitive.char.html#method.from_u32: + /// + /// `from_u32()` will return `None` if the input is not a valid value for + /// a `char`. + /// + /// [4] TODO(#429): Justify this claim. + unsafe_impl!(char: TryFromBytes; |candidate: MaybeAligned| { + let candidate = candidate.read_unaligned(); + char::from_u32(candidate).is_some() + }); +} +safety_comment! { + /// SAFETY: + /// Per the Reference [1], `str` has the same layout as `[u8]`. + /// - `Immutable`: `[u8]` does not contain any `UnsafeCell`s. + /// - `FromZeros`, `IntoBytes`, `Unaligned`: `[u8]` is `FromZeros`, + /// `IntoBytes`, and `Unaligned`. + /// + /// Note that we don't `assert_unaligned!(str)` because `assert_unaligned!` + /// uses `align_of`, which only works for `Sized` types. 
+ /// + /// TODO(#429): + /// - Add quotes from documentation. + /// - Improve safety proof for `FromZeros` and `IntoBytes`; having the same + /// layout as `[u8]` isn't sufficient. + /// + /// [1] https://doc.rust-lang.org/reference/type-layout.html#str-layout + unsafe_impl!(str: Immutable, FromZeros, IntoBytes, Unaligned); + /// SAFETY: + /// - The safety requirements for `unsafe_impl!` with an `is_bit_valid` + /// closure: + /// - Given `t: *mut str` and `let r = *mut [u8]`, `r` refers to an object + /// of the same size as that referred to by `t`. This is true because + /// `str` and `[u8]` have the same representation. [1] Neither `t` nor + /// `r` contain `UnsafeCell`s because `[u8]` doesn't, and both `t` and + /// `r` have that representation. + /// - Since the closure takes a `&[u8]` argument, given a `Maybe<'a, + /// str>` which satisfies the preconditions of + /// `TryFromBytes::::is_bit_valid`, it must be guaranteed that the + /// memory referenced by that `MaybeValid` always contains a valid + /// `[u8]`. Since `str`'s bytes are always initialized [1], + /// `is_bit_valid`'s precondition requires that the same is true of its + /// argument. Since `[u8]`'s only bit validity invariant is that its + /// bytes must be initialized, this memory is guaranteed to contain a + /// valid `[u8]`. + /// - The impl must only return `true` for its argument if the original + /// `Maybe` refers to a valid `str`. `str::from_utf8` + /// guarantees that it returns `Err` if its input is not a valid `str`. + /// [2] + /// + /// [1] Per https://doc.rust-lang.org/reference/types/textual.html: + /// + /// A value of type `str` is represented the same was as `[u8]`. + /// + /// [2] Per https://doc.rust-lang.org/core/str/fn.from_utf8.html#errors: + /// + /// Returns `Err` if the slice is not UTF-8. 
+ unsafe_impl!(str: TryFromBytes; |candidate: MaybeAligned<[u8]>| { + let candidate = candidate.unaligned_as_ref(); + core::str::from_utf8(candidate).is_ok() + }); +} + +safety_comment! { + // `NonZeroXxx` is `IntoBytes`, but not `FromZeros` or `FromBytes`. + // + /// SAFETY: + /// - `IntoBytes`: `NonZeroXxx` has the same layout as its associated + /// primitive. Since it is the same size, this guarantees it has no + /// padding - integers have no padding, and there's no room for padding + /// if it can represent all of the same values except 0. + /// - `Unaligned`: `NonZeroU8` and `NonZeroI8` document that + /// `Option` and `Option` both have size 1. [1] [2] + /// This is worded in a way that makes it unclear whether it's meant as a + /// guarantee, but given the purpose of those types, it's virtually + /// unthinkable that that would ever change. `Option` cannot be smaller + /// than its contained type, which implies that, and `NonZeroX8` are of + /// size 1 or 0. `NonZeroX8` can represent multiple states, so they cannot + /// be 0 bytes, which means that they must be 1 byte. The only valid + /// alignment for a 1-byte type is 1. + /// + /// TODO(#429): + /// - Add quotes from documentation. + /// - Add safety comment for `Immutable`. How can we prove that `NonZeroXxx` + /// doesn't contain any `UnsafeCell`s? It's obviously true, but it's not + /// clear how we'd prove it short of adding text to the stdlib docs that + /// says so explicitly, which likely wouldn't be accepted. + /// + /// [1] https://doc.rust-lang.org/stable/std/num/struct.NonZeroU8.html + /// [2] https://doc.rust-lang.org/stable/std/num/struct.NonZeroI8.html + /// TODO(https://github.com/rust-lang/rust/pull/104082): Cite documentation + /// that layout is the same as primitive layout. 
+ unsafe_impl!(NonZeroU8: Immutable, IntoBytes, Unaligned); + unsafe_impl!(NonZeroI8: Immutable, IntoBytes, Unaligned); + assert_unaligned!(NonZeroU8, NonZeroI8); + unsafe_impl!(NonZeroU16: Immutable, IntoBytes); + unsafe_impl!(NonZeroI16: Immutable, IntoBytes); + unsafe_impl!(NonZeroU32: Immutable, IntoBytes); + unsafe_impl!(NonZeroI32: Immutable, IntoBytes); + unsafe_impl!(NonZeroU64: Immutable, IntoBytes); + unsafe_impl!(NonZeroI64: Immutable, IntoBytes); + unsafe_impl!(NonZeroU128: Immutable, IntoBytes); + unsafe_impl!(NonZeroI128: Immutable, IntoBytes); + unsafe_impl!(NonZeroUsize: Immutable, IntoBytes); + unsafe_impl!(NonZeroIsize: Immutable, IntoBytes); + /// SAFETY: + /// - The safety requirements for `unsafe_impl!` with an `is_bit_valid` + /// closure: + /// - Given `t: *mut NonZeroXxx` and `let r = *mut xxx`, `r` refers to an + /// object of the same size as that referred to by `t`. This is true + /// because `NonZeroXxx` and `xxx` have the same size. [1] Neither `r` + /// nor `t` refer to any `UnsafeCell`s because neither `NonZeroXxx` [2] + /// nor `xxx` do. + /// - Since the closure takes a `&xxx` argument, given a `Maybe<'a, + /// NonZeroXxx>` which satisfies the preconditions of + /// `TryFromBytes::::is_bit_valid`, it must be guaranteed + /// that the memory referenced by that `MabyeValid` always contains a + /// valid `xxx`. Since `NonZeroXxx`'s bytes are always initialized [1], + /// `is_bit_valid`'s precondition requires that the same is true of its + /// argument. Since `xxx`'s only bit validity invariant is that its + /// bytes must be initialized, this memory is guaranteed to contain a + /// valid `xxx`. + /// - The impl must only return `true` for its argument if the original + /// `Maybe` refers to a valid `NonZeroXxx`. The only + /// `xxx` which is not also a valid `NonZeroXxx` is 0. 
[1] + /// + /// [1] Per https://doc.rust-lang.org/core/num/struct.NonZeroU16.html: + /// + /// `NonZeroU16` is guaranteed to have the same layout and bit validity as + /// `u16` with the exception that `0` is not a valid instance. + /// + /// [2] TODO(#896): Write a safety proof for this before the next stable + /// release. + unsafe_impl!(NonZeroU8: TryFromBytes; |n: MaybeAligned| NonZeroU8::new(n.read_unaligned()).is_some()); + unsafe_impl!(NonZeroI8: TryFromBytes; |n: MaybeAligned| NonZeroI8::new(n.read_unaligned()).is_some()); + unsafe_impl!(NonZeroU16: TryFromBytes; |n: MaybeAligned| NonZeroU16::new(n.read_unaligned()).is_some()); + unsafe_impl!(NonZeroI16: TryFromBytes; |n: MaybeAligned| NonZeroI16::new(n.read_unaligned()).is_some()); + unsafe_impl!(NonZeroU32: TryFromBytes; |n: MaybeAligned| NonZeroU32::new(n.read_unaligned()).is_some()); + unsafe_impl!(NonZeroI32: TryFromBytes; |n: MaybeAligned| NonZeroI32::new(n.read_unaligned()).is_some()); + unsafe_impl!(NonZeroU64: TryFromBytes; |n: MaybeAligned| NonZeroU64::new(n.read_unaligned()).is_some()); + unsafe_impl!(NonZeroI64: TryFromBytes; |n: MaybeAligned| NonZeroI64::new(n.read_unaligned()).is_some()); + unsafe_impl!(NonZeroU128: TryFromBytes; |n: MaybeAligned| NonZeroU128::new(n.read_unaligned()).is_some()); + unsafe_impl!(NonZeroI128: TryFromBytes; |n: MaybeAligned| NonZeroI128::new(n.read_unaligned()).is_some()); + unsafe_impl!(NonZeroUsize: TryFromBytes; |n: MaybeAligned| NonZeroUsize::new(n.read_unaligned()).is_some()); + unsafe_impl!(NonZeroIsize: TryFromBytes; |n: MaybeAligned| NonZeroIsize::new(n.read_unaligned()).is_some()); +} +safety_comment! { + /// SAFETY: + /// - `TryFromBytes` (with no validator), `FromZeros`, `FromBytes`, + /// `IntoBytes`: The Rust compiler reuses `0` value to represent `None`, + /// so `size_of::>() == size_of::()`; see + /// `NonZeroXxx` documentation. + /// - `Unaligned`: `NonZeroU8` and `NonZeroI8` document that + /// `Option` and `Option` both have size 1. 
[1] [2] + /// This is worded in a way that makes it unclear whether it's meant as a + /// guarantee, but given the purpose of those types, it's virtually + /// unthinkable that that would ever change. The only valid alignment for + /// a 1-byte type is 1. + /// + /// TODO(#429): Add quotes from documentation. + /// + /// [1] https://doc.rust-lang.org/stable/std/num/struct.NonZeroU8.html + /// [2] https://doc.rust-lang.org/stable/std/num/struct.NonZeroI8.html + /// + /// TODO(https://github.com/rust-lang/rust/pull/104082): Cite documentation + /// for layout guarantees. + unsafe_impl!(Option: TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); + unsafe_impl!(Option: TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); + assert_unaligned!(Option, Option); + unsafe_impl!(Option: TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(Option: TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(Option: TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(Option: TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(Option: TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(Option: TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(Option: TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(Option: TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(Option: TryFromBytes, FromZeros, FromBytes, IntoBytes); + unsafe_impl!(Option: TryFromBytes, FromZeros, FromBytes, IntoBytes); +} + +safety_comment! { + /// SAFETY: + /// While it's not fully documented, the consensus is that `Box` does not + /// contain any `UnsafeCell`s for `T: Sized` [1]. + /// + /// [1] https://github.com/rust-lang/unsafe-code-guidelines/issues/492 + /// + /// TODO(#896): Write a more complete safety proof before the next stable + /// release. + #[cfg(feature = "alloc")] + unsafe_impl!( + #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] + T: Sized => Immutable for Box + ); +} + +safety_comment! 
{ + /// SAFETY: + /// The following types can be transmuted from `[0u8; size_of::()]`. [1] + /// + /// [1] Per https://doc.rust-lang.org/nightly/core/option/index.html#representation: + /// + /// Rust guarantees to optimize the following types `T` such that + /// [`Option`] has the same size and alignment as `T`. In some of these + /// cases, Rust further guarantees that `transmute::<_, Option>([0u8; + /// size_of::()])` is sound and produces `Option::::None`. These + /// cases are identified by the second column: + /// + /// | `T` | `transmute::<_, Option>([0u8; size_of::()])` sound? | + /// |-----------------------|-----------------------------------------------------------| + /// | [`Box`] | when `U: Sized` | + /// | `&U` | when `U: Sized` | + /// | `&mut U` | when `U: Sized` | + /// | [`ptr::NonNull`] | when `U: Sized` | + /// | `fn`, `extern "C" fn` | always | + /// + /// TODO(#429), TODO(https://github.com/rust-lang/rust/pull/115333): Cite + /// the Stable docs once they're available. 
+ #[cfg(feature = "alloc")] + unsafe_impl!( + #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] + T => TryFromBytes for Option>; + |c: Maybe>>| pointer::is_zeroed(c) + ); + #[cfg(feature = "alloc")] + unsafe_impl!( + #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] + T => FromZeros for Option> + ); + unsafe_impl!( + T => TryFromBytes for Option<&'_ T>; + |c: Maybe>| pointer::is_zeroed(c) + ); + unsafe_impl!(T => FromZeros for Option<&'_ T>); + unsafe_impl!( + T => TryFromBytes for Option<&'_ mut T>; + |c: Maybe>| pointer::is_zeroed(c) + ); + unsafe_impl!(T => FromZeros for Option<&'_ mut T>); + unsafe_impl!( + T => TryFromBytes for Option>; + |c: Maybe>>| pointer::is_zeroed(c) + ); + unsafe_impl!(T => FromZeros for Option>); + unsafe_impl_for_power_set!(A, B, C, D, E, F, G, H, I, J, K, L -> M => FromZeros for opt_fn!(...)); + unsafe_impl_for_power_set!( + A, B, C, D, E, F, G, H, I, J, K, L -> M => TryFromBytes for opt_fn!(...); + |c: Maybe| pointer::is_zeroed(c) + ); + unsafe_impl_for_power_set!(A, B, C, D, E, F, G, H, I, J, K, L -> M => FromZeros for opt_extern_c_fn!(...)); + unsafe_impl_for_power_set!( + A, B, C, D, E, F, G, H, I, J, K, L -> M => TryFromBytes for opt_extern_c_fn!(...); + |c: Maybe| pointer::is_zeroed(c) + ); +} + +safety_comment! { + /// SAFETY: + /// TODO(#896): Write this safety proof before the next stable release. + unsafe_impl_for_power_set!(A, B, C, D, E, F, G, H, I, J, K, L -> M => Immutable for opt_fn!(...)); + unsafe_impl_for_power_set!(A, B, C, D, E, F, G, H, I, J, K, L -> M => Immutable for opt_extern_c_fn!(...)); +} + +macro_rules! impl_traits_for_atomics { + ($($atomics:ident [$inners:ident]),* $(,)?) 
=> { + $( + impl_for_transparent_wrapper!(TryFromBytes for $atomics [UnsafeCell<$inners>]); + impl_for_transparent_wrapper!(FromZeros for $atomics [UnsafeCell<$inners>]); + impl_for_transparent_wrapper!(FromBytes for $atomics [UnsafeCell<$inners>]); + impl_for_transparent_wrapper!(IntoBytes for $atomics [UnsafeCell<$inners>]); + )* + }; +} + +#[rustfmt::skip] +impl_traits_for_atomics!( + AtomicBool [bool], + AtomicI16 [i16], AtomicI32 [i32], AtomicI8 [i8], AtomicIsize [isize], + AtomicU16 [u16], AtomicU32 [u32], AtomicU8 [u8], AtomicUsize [usize], +); + +safety_comment! { + /// SAFETY: + /// Per [1], `AtomicBool`, `AtomicU8`, and `AtomicI8` have the same size as + /// `bool`, `u8`, and `i8` respectively. Since a type's alignment cannot be + /// smaller than 1 [2], and since its alignment cannot be greater than its + /// size [3], the only possible value for the alignment is 1. Thus, it is + /// sound to implement `Unaligned`. + /// + /// [1] TODO(#896), TODO(https://github.com/rust-lang/rust/pull/121943): + /// Cite docs once they've landed. + /// + /// [2] Per https://doc.rust-lang.org/reference/type-layout.html#size-and-alignment: + /// + /// Alignment is measured in bytes, and must be at least 1. + /// + /// [3] Per https://doc.rust-lang.org/reference/type-layout.html#size-and-alignment: + /// + /// The size of a value is always a multiple of its alignment. + unsafe_impl!(AtomicBool: Unaligned); + unsafe_impl!(AtomicU8: Unaligned); + unsafe_impl!(AtomicI8: Unaligned); + assert_unaligned!(AtomicBool, AtomicU8, AtomicI8); +} + +safety_comment! { + /// SAFETY: + /// Per reference [1]: + /// "For all T, the following are guaranteed: + /// size_of::>() == 0 + /// align_of::>() == 1". + /// This gives: + /// - `Immutable`: `PhantomData` has no fields. + /// - `TryFromBytes` (with no validator), `FromZeros`, `FromBytes`: There is + /// only one possible sequence of 0 bytes, and `PhantomData` is inhabited. 
+ /// - `IntoBytes`: Since `PhantomData` has size 0, it contains no padding + /// bytes. + /// - `Unaligned`: Per the preceding reference, `PhantomData` has alignment + /// 1. + /// + /// [1] https://doc.rust-lang.org/std/marker/struct.PhantomData.html#layout-1 + unsafe_impl!(T: ?Sized => Immutable for PhantomData); + unsafe_impl!(T: ?Sized => TryFromBytes for PhantomData); + unsafe_impl!(T: ?Sized => FromZeros for PhantomData); + unsafe_impl!(T: ?Sized => FromBytes for PhantomData); + unsafe_impl!(T: ?Sized => IntoBytes for PhantomData); + unsafe_impl!(T: ?Sized => Unaligned for PhantomData); + assert_unaligned!(PhantomData<()>, PhantomData, PhantomData); +} + +impl_for_transparent_wrapper!(T: Immutable => Immutable for Wrapping); +impl_for_transparent_wrapper!(T: TryFromBytes => TryFromBytes for Wrapping); +impl_for_transparent_wrapper!(T: FromZeros => FromZeros for Wrapping); +impl_for_transparent_wrapper!(T: FromBytes => FromBytes for Wrapping); +impl_for_transparent_wrapper!(T: IntoBytes => IntoBytes for Wrapping); +impl_for_transparent_wrapper!(T: Unaligned => Unaligned for Wrapping); +assert_unaligned!(Wrapping<()>, Wrapping); + +safety_comment! { + /// SAFETY: + /// `TryFromBytes` (with no validator), `FromZeros`, `FromBytes`: + /// `MaybeUninit` has no restrictions on its contents. 
+ unsafe_impl!(T => TryFromBytes for MaybeUninit); + unsafe_impl!(T => FromZeros for MaybeUninit); + unsafe_impl!(T => FromBytes for MaybeUninit); +} + +impl_for_transparent_wrapper!(T: Immutable => Immutable for MaybeUninit); +impl_for_transparent_wrapper!(T: Unaligned => Unaligned for MaybeUninit); +assert_unaligned!(MaybeUninit<()>, MaybeUninit); + +impl_for_transparent_wrapper!(T: ?Sized + Immutable => Immutable for ManuallyDrop); +impl_for_transparent_wrapper!(T: ?Sized + TryFromBytes => TryFromBytes for ManuallyDrop); +impl_for_transparent_wrapper!(T: ?Sized + FromZeros => FromZeros for ManuallyDrop); +impl_for_transparent_wrapper!(T: ?Sized + FromBytes => FromBytes for ManuallyDrop); +impl_for_transparent_wrapper!(T: ?Sized + IntoBytes => IntoBytes for ManuallyDrop); +impl_for_transparent_wrapper!(T: ?Sized + Unaligned => Unaligned for ManuallyDrop); +assert_unaligned!(ManuallyDrop<()>, ManuallyDrop); + +// TODO(#5): Implement `FromZeros` and `FromBytes` when `T: ?Sized`. +impl_for_transparent_wrapper!(T: FromZeros => FromZeros for UnsafeCell); +impl_for_transparent_wrapper!(T: FromBytes => FromBytes for UnsafeCell); +impl_for_transparent_wrapper!(T: ?Sized + IntoBytes => IntoBytes for UnsafeCell); +impl_for_transparent_wrapper!(T: ?Sized + Unaligned => Unaligned for UnsafeCell); +assert_unaligned!(UnsafeCell<()>, UnsafeCell); + +// SAFETY: See safety comment in `is_bit_valid` impl. +// +// TODO(#5): Try to add `T: ?Sized` bound. +unsafe impl TryFromBytes for UnsafeCell { + #[allow(clippy::missing_inline_in_public_items)] + fn only_derive_is_allowed_to_implement_this_trait() + where + Self: Sized, + { + } + + #[inline] + fn is_bit_valid>( + candidate: Maybe<'_, Self, A>, + ) -> bool { + // The only way to implement this function is using an exclusive-aliased + // pointer. 
`UnsafeCell`s cannot be read via shared-aliased pointers + // (other than by using `unsafe` code, which we can't use since we can't + // guarantee how our users are accessing or modifying the `UnsafeCell`). + // + // `is_bit_valid` is documented as panicking or failing to monomorphize + // if called with a shared-aliased pointer on a type containing an + // `UnsafeCell`. In practice, it will always be a monorphization error. + // Since `is_bit_valid` is `#[doc(hidden)]` and only called directly + // from this crate, we only need to worry about our own code incorrectly + // calling `UnsafeCell::is_bit_valid`. The post-monomorphization error + // makes it easier to test that this is truly the case, and also means + // that if we make a mistake, it will cause downstream code to fail to + // compile, which will immediately surface the mistake and give us a + // chance to fix it quickly. + let c = candidate.into_exclusive_or_post_monomorphization_error(); + + // We wrap in `Unalign` here so that we can get a vanilla Rust reference + // below, which in turn allows us to call `UnsafeCell::get_mut`. + // + // SAFETY: + // - `.cast` preserves address. `Unalign` and `MaybeUninit` both have + // the same size as the types they wrap [1]. Thus, this cast will + // preserve the size of the pointer. As a result, the cast will + // address the same bytes as `c`. + // - `.cast` preserves provenance. + // - Since both the source and destination types are wrapped in + // `UnsafeCell`, all bytes of both types are inside of `UnsafeCell`s, + // and so the byte ranges covered by `UnsafeCell`s are identical in + // both types. Since the pointers refer to the same byte ranges, + // the same is true of the pointers' referents as well. + // + // [1] Per https://doc.rust-lang.org/stable/core/mem/union.MaybeUninit.html#layout-1: + // + // MaybeUninit is guaranteed to have the same size, alignment, and + // ABI as T. 
+ let c = unsafe { + c.cast_unsized(|c: *mut UnsafeCell| c.cast::>>>()) + }; + // SAFETY: `MaybeUninit` has no validity requirements. + let c = unsafe { c.assume_valid() }; + let c = c.bikeshed_recall_aligned(); + // This is the crucial step at which we use `UnsafeCell::get_mut` to go + // from `UnsafeCell` to `U` (where `U = Unalign>`). + // Now that we've gotten rid of the `UnsafeCell`, we can delegate to + // `T::is_bit_valid`. + let c: &mut Unalign> = c.as_mut().get_mut(); + // This converts from an aligned `Unalign>` pointer to an + // unaligned `MaybeUninit` pointer. + let c: Ptr<'_, MaybeUninit, _> = Ptr::from_mut(c).transparent_wrapper_into_inner(); + let c: Ptr<'_, T, _> = c.transparent_wrapper_into_inner(); + + // SAFETY: The original `candidate` argument has `Initialized` validity. + // None of the subsequent operations modify the memory itself, and so + // that guarantee is still upheld. + let c = unsafe { c.assume_initialized() }; + // Confirm that `Maybe` is a type alias for `Ptr` with the validity + // invariant `Initialized`. Our safety proof depends upon this + // invariant, and it might change at some point. If that happens, we + // want this function to stop compiling. + let _: Ptr<'_, UnsafeCell, (_, _, invariant::Initialized)> = candidate; + + // SAFETY: Since `UnsafeCell` and `T` have the same layout and bit + // validity, `UnsafeCell` is bit-valid exactly when its wrapped `T` + // is. Thus, this is a sound implementation of + // `UnsafeCell::is_bit_valid`. + T::is_bit_valid(c.forget_exclusive()) + } +} + +safety_comment! { + /// SAFETY: + /// Per the reference [1]: + /// + /// An array of `[T; N]` has a size of `size_of::() * N` and the same + /// alignment of `T`. Arrays are laid out so that the zero-based `nth` + /// element of the array is offset from the start of the array by `n * + /// size_of::()` bytes. + /// + /// ... + /// + /// Slices have the same layout as the section of the array they slice. 
+ /// + /// In other words, the layout of a `[T]` or `[T; N]` is a sequence of `T`s + /// laid out back-to-back with no bytes in between. Therefore, `[T]` or `[T; + /// N]` are `Immutable`, `TryFromBytes`, `FromZeros`, `FromBytes`, and + /// `IntoBytes` if `T` is (respectively). Furthermore, since an array/slice + /// has "the same alignment of `T`", `[T]` and `[T; N]` are `Unaligned` if + /// `T` is. + /// + /// Note that we don't `assert_unaligned!` for slice types because + /// `assert_unaligned!` uses `align_of`, which only works for `Sized` types. + /// + /// [1] https://doc.rust-lang.org/reference/type-layout.html#array-layout + unsafe_impl!(const N: usize, T: Immutable => Immutable for [T; N]); + unsafe_impl!(const N: usize, T: TryFromBytes => TryFromBytes for [T; N]; |c: Maybe<[T; N]>| { + // Note that this call may panic, but it would still be sound even if it + // did. `is_bit_valid` does not promise that it will not panic (in fact, + // it explicitly warns that it's a possibility), and we have not + // violated any safety invariants that we must fix before returning. + <[T] as TryFromBytes>::is_bit_valid(c.as_slice()) + }); + unsafe_impl!(const N: usize, T: FromZeros => FromZeros for [T; N]); + unsafe_impl!(const N: usize, T: FromBytes => FromBytes for [T; N]); + unsafe_impl!(const N: usize, T: IntoBytes => IntoBytes for [T; N]); + unsafe_impl!(const N: usize, T: Unaligned => Unaligned for [T; N]); + assert_unaligned!([(); 0], [(); 1], [u8; 0], [u8; 1]); + unsafe_impl!(T: Immutable => Immutable for [T]); + unsafe_impl!(T: TryFromBytes => TryFromBytes for [T]; |c: Maybe<[T]>| { + // SAFETY: Per the reference [1]: + // + // An array of `[T; N]` has a size of `size_of::() * N` and the + // same alignment of `T`. Arrays are laid out so that the zero-based + // `nth` element of the array is offset from the start of the array by + // `n * size_of::()` bytes. + // + // ... + // + // Slices have the same layout as the section of the array they slice. 
+ // + // In other words, the layout of a `[T] is a sequence of `T`s laid out + // back-to-back with no bytes in between. If all elements in `candidate` + // are `is_bit_valid`, so too is `candidate`. + // + // Note that any of the below calls may panic, but it would still be + // sound even if it did. `is_bit_valid` does not promise that it will + // not panic (in fact, it explicitly warns that it's a possibility), and + // we have not violated any safety invariants that we must fix before + // returning. + c.iter().all(::is_bit_valid) + }); + unsafe_impl!(T: FromZeros => FromZeros for [T]); + unsafe_impl!(T: FromBytes => FromBytes for [T]); + unsafe_impl!(T: IntoBytes => IntoBytes for [T]); + unsafe_impl!(T: Unaligned => Unaligned for [T]); +} +safety_comment! { + /// SAFETY: + /// - `Immutable`: Raw pointers do not contain any `UnsafeCell`s. + /// - `FromZeros`: For thin pointers (note that `T: Sized`), the zero + /// pointer is considered "null". [1] No operations which require + /// provenance are legal on null pointers, so this is not a footgun. + /// - `TryFromBytes`: By the same reasoning as for `FromZeroes`, we can + /// implement `TryFromBytes` for thin pointers provided that + /// [`TryFromByte::is_bit_valid`] only produces `true` for zeroed bytes. + /// + /// NOTE(#170): Implementing `FromBytes` and `IntoBytes` for raw pointers + /// would be sound, but carries provenance footguns. We want to support + /// `FromBytes` and `IntoBytes` for raw pointers eventually, but we are + /// holding off until we can figure out how to address those footguns. + /// + /// [1] TODO(https://github.com/rust-lang/rust/pull/116988): Cite the + /// documentation once this PR lands. 
+ unsafe_impl!(T: ?Sized => Immutable for *const T); + unsafe_impl!(T: ?Sized => Immutable for *mut T); + unsafe_impl!(T => TryFromBytes for *const T; |c: Maybe<*const T>| { + pointer::is_zeroed(c) + }); + unsafe_impl!(T => FromZeros for *const T); + unsafe_impl!(T => TryFromBytes for *mut T; |c: Maybe<*const T>| { + pointer::is_zeroed(c) + }); + unsafe_impl!(T => FromZeros for *mut T); +} + +safety_comment! { + /// SAFETY: + /// + /// TODO(#896): Write this safety proof before the next stable release. + unsafe_impl!(T: ?Sized => Immutable for NonNull); +} + +safety_comment! { + /// SAFETY: + /// Reference types do not contain any `UnsafeCell`s. + unsafe_impl!(T: ?Sized => Immutable for &'_ T); + unsafe_impl!(T: ?Sized => Immutable for &'_ mut T); +} + +safety_comment! { + /// SAFETY: + /// `Option` is not `#[non_exhaustive]` [1], which means that the types in + /// its variants cannot change, and no new variants can be added. + /// `Option` does not contain any `UnsafeCell`s outside of `T`. [1] + /// + /// [1] https://doc.rust-lang.org/core/option/enum.Option.html + unsafe_impl!(T: Immutable => Immutable for Option); +} + +// SIMD support +// +// Per the Unsafe Code Guidelines Reference [1]: +// +// Packed SIMD vector types are `repr(simd)` homogeneous tuple-structs +// containing `N` elements of type `T` where `N` is a power-of-two and the +// size and alignment requirements of `T` are equal: +// +// ```rust +// #[repr(simd)] +// struct Vector(T_0, ..., T_(N - 1)); +// ``` +// +// ... +// +// The size of `Vector` is `N * size_of::()` and its alignment is an +// implementation-defined function of `T` and `N` greater than or equal to +// `align_of::()`. +// +// ... 
+// +// Vector elements are laid out in source field order, enabling random access +// to vector elements by reinterpreting the vector as an array: +// +// ```rust +// union U { +// vec: Vector, +// arr: [T; N] +// } +// +// assert_eq!(size_of::>(), size_of::<[T; N]>()); +// assert!(align_of::>() >= align_of::<[T; N]>()); +// +// unsafe { +// let u = U { vec: Vector(t_0, ..., t_(N - 1)) }; +// +// assert_eq!(u.vec.0, u.arr[0]); +// // ... +// assert_eq!(u.vec.(N - 1), u.arr[N - 1]); +// } +// ``` +// +// Given this background, we can observe that: +// - The size and bit pattern requirements of a SIMD type are equivalent to the +// equivalent array type. Thus, for any SIMD type whose primitive `T` is +// `Immutable`, `TryFromBytes`, `FromZeros`, `FromBytes`, or `IntoBytes`, that +// SIMD type is also `Immutable`, `TryFromBytes`, `FromZeros`, `FromBytes`, or +// `IntoBytes` respectively. +// - Since no upper bound is placed on the alignment, no SIMD type can be +// guaranteed to be `Unaligned`. +// +// Also per [1]: +// +// This chapter represents the consensus from issue #38. The statements in +// here are not (yet) "guaranteed" not to change until an RFC ratifies them. +// +// See issue #38 [2]. While this behavior is not technically guaranteed, the +// likelihood that the behavior will change such that SIMD types are no longer +// `TryFromBytes`, `FromZeros`, `FromBytes`, or `IntoBytes` is next to zero, as +// that would defeat the entire purpose of SIMD types. Nonetheless, we put this +// behavior behind the `simd` Cargo feature, which requires consumers to opt +// into this stability hazard. 
+// +// [1] https://rust-lang.github.io/unsafe-code-guidelines/layout/packed-simd-vectors.html +// [2] https://github.com/rust-lang/unsafe-code-guidelines/issues/38 +#[cfg(feature = "simd")] +#[cfg_attr(doc_cfg, doc(cfg(feature = "simd")))] +mod simd { + /// Defines a module which implements `TryFromBytes`, `FromZeros`, + /// `FromBytes`, and `IntoBytes` for a set of types from a module in + /// `core::arch`. + /// + /// `$arch` is both the name of the defined module and the name of the + /// module in `core::arch`, and `$typ` is the list of items from that module + /// to implement `FromZeros`, `FromBytes`, and `IntoBytes` for. + #[allow(unused_macros)] // `allow(unused_macros)` is needed because some + // target/feature combinations don't emit any impls + // and thus don't use this macro. + macro_rules! simd_arch_mod { + (#[cfg $cfg:tt] $arch:ident, $mod:ident, $($typ:ident),*) => { + #[cfg $cfg] + #[cfg_attr(doc_cfg, doc(cfg $cfg))] + mod $mod { + use core::arch::$arch::{$($typ),*}; + + use crate::*; + impl_known_layout!($($typ),*); + safety_comment! { + /// SAFETY: + /// See comment on module definition for justification. 
+ $( unsafe_impl!($typ: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); )* + } + } + }; + } + + #[rustfmt::skip] + const _: () = { + simd_arch_mod!( + #[cfg(target_arch = "x86")] + x86, x86, __m128, __m128d, __m128i, __m256, __m256d, __m256i + ); + simd_arch_mod!( + #[cfg(all(feature = "simd-nightly", target_arch = "x86"))] + x86, x86_nightly, __m512bh, __m512, __m512d, __m512i + ); + simd_arch_mod!( + #[cfg(target_arch = "x86_64")] + x86_64, x86_64, __m128, __m128d, __m128i, __m256, __m256d, __m256i + ); + simd_arch_mod!( + #[cfg(all(feature = "simd-nightly", target_arch = "x86_64"))] + x86_64, x86_64_nightly, __m512bh, __m512, __m512d, __m512i + ); + simd_arch_mod!( + #[cfg(target_arch = "wasm32")] + wasm32, wasm32, v128 + ); + simd_arch_mod!( + #[cfg(all(feature = "simd-nightly", target_arch = "powerpc"))] + powerpc, powerpc, vector_bool_long, vector_double, vector_signed_long, vector_unsigned_long + ); + simd_arch_mod!( + #[cfg(all(feature = "simd-nightly", target_arch = "powerpc64"))] + powerpc64, powerpc64, vector_bool_long, vector_double, vector_signed_long, vector_unsigned_long + ); + #[cfg(zerocopy_aarch64_simd)] + simd_arch_mod!( + #[cfg(target_arch = "aarch64")] + aarch64, aarch64, float32x2_t, float32x4_t, float64x1_t, float64x2_t, int8x8_t, int8x8x2_t, + int8x8x3_t, int8x8x4_t, int8x16_t, int8x16x2_t, int8x16x3_t, int8x16x4_t, int16x4_t, + int16x8_t, int32x2_t, int32x4_t, int64x1_t, int64x2_t, poly8x8_t, poly8x8x2_t, poly8x8x3_t, + poly8x8x4_t, poly8x16_t, poly8x16x2_t, poly8x16x3_t, poly8x16x4_t, poly16x4_t, poly16x8_t, + poly64x1_t, poly64x2_t, uint8x8_t, uint8x8x2_t, uint8x8x3_t, uint8x8x4_t, uint8x16_t, + uint8x16x2_t, uint8x16x3_t, uint8x16x4_t, uint16x4_t, uint16x8_t, uint32x2_t, uint32x4_t, + uint64x1_t, uint64x2_t + ); + simd_arch_mod!( + #[cfg(all(feature = "simd-nightly", target_arch = "arm"))] + arm, arm, int8x4_t, uint8x4_t + ); + }; +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_impls() { + // A type that 
can supply test cases for testing + // `TryFromBytes::is_bit_valid`. All types passed to `assert_impls!` + // must implement this trait; that macro uses it to generate runtime + // tests for `TryFromBytes` impls. + // + // All `T: FromBytes` types are provided with a blanket impl. Other + // types must implement `TryFromBytesTestable` directly (ie using + // `impl_try_from_bytes_testable!`). + trait TryFromBytesTestable { + fn with_passing_test_cases)>(f: F); + fn with_failing_test_cases(f: F); + } + + impl TryFromBytesTestable for T { + fn with_passing_test_cases)>(f: F) { + // Test with a zeroed value. + f(Self::new_box_zeroed()); + + let ffs = { + let mut t = Self::new_zeroed(); + let ptr: *mut T = &mut t; + // SAFETY: `T: FromBytes` + unsafe { ptr::write_bytes(ptr.cast::(), 0xFF, mem::size_of::()) }; + t + }; + + // Test with a value initialized with 0xFF. + f(Box::new(ffs)); + } + + fn with_failing_test_cases(_f: F) {} + } + + macro_rules! impl_try_from_bytes_testable_for_null_pointer_optimization { + ($($tys:ty),*) => { + $( + impl TryFromBytesTestable for Option<$tys> { + fn with_passing_test_cases)>(f: F) { + // Test with a zeroed value. + f(Box::new(None)); + } + + fn with_failing_test_cases(f: F) { + for pos in 0..mem::size_of::() { + let mut bytes = [0u8; mem::size_of::()]; + bytes[pos] = 0x01; + f(&mut bytes[..]); + } + } + } + )* + }; + } + + // Implements `TryFromBytesTestable`. + macro_rules! impl_try_from_bytes_testable { + // Base case for recursion (when the list of types has run out). + (=> @success $($success_case:expr),* $(, @failure $($failure_case:expr),*)?) => {}; + // Implements for type(s) with no type parameters. + ($ty:ty $(,$tys:ty)* => @success $($success_case:expr),* $(, @failure $($failure_case:expr),*)?) => { + impl TryFromBytesTestable for $ty { + impl_try_from_bytes_testable!( + @methods @success $($success_case),* + $(, @failure $($failure_case),*)? 
+ ); + } + impl_try_from_bytes_testable!($($tys),* => @success $($success_case),* $(, @failure $($failure_case),*)?); + }; + // Implements for multiple types with no type parameters. + ($($($ty:ty),* => @success $($success_case:expr), * $(, @failure $($failure_case:expr),*)?;)*) => { + $( + impl_try_from_bytes_testable!($($ty),* => @success $($success_case),* $(, @failure $($failure_case),*)*); + )* + }; + // Implements only the methods; caller must invoke this from inside + // an impl block. + (@methods @success $($success_case:expr),* $(, @failure $($failure_case:expr),*)?) => { + fn with_passing_test_cases)>(_f: F) { + $( + _f(Box::::from($success_case));//.borrow()); + )* + } + + fn with_failing_test_cases(_f: F) { + $($( + // `unused_qualifications` is spuriously triggered on + // `Option::::None`. + #[allow(unused_qualifications)] + let mut case = $failure_case;//.as_mut_bytes(); + _f(case.as_mut_bytes()); + )*)? + } + }; + } + + impl_try_from_bytes_testable_for_null_pointer_optimization!( + Box>, + &'static UnsafeCell, + &'static mut UnsafeCell, + NonNull>, + fn(), + FnManyArgs, + extern "C" fn(), + ECFnManyArgs + ); + + macro_rules! bx { + ($e:expr) => { + Box::new($e) + }; + } + + // Note that these impls are only for types which are not `FromBytes`. + // `FromBytes` types are covered by a preceding blanket impl. 
+ impl_try_from_bytes_testable!( + bool => @success true, false, + @failure 2u8, 3u8, 0xFFu8; + char => @success '\u{0}', '\u{D7FF}', '\u{E000}', '\u{10FFFF}', + @failure 0xD800u32, 0xDFFFu32, 0x110000u32; + str => @success "", "hello", "โค๏ธ๐Ÿงก๐Ÿ’›๐Ÿ’š๐Ÿ’™๐Ÿ’œ", + @failure [0, 159, 146, 150]; + [u8] => @success vec![].into_boxed_slice(), vec![0, 1, 2].into_boxed_slice(); + NonZeroU8, NonZeroI8, NonZeroU16, NonZeroI16, NonZeroU32, + NonZeroI32, NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, + NonZeroUsize, NonZeroIsize + => @success Self::new(1).unwrap(), + // Doing this instead of `0` ensures that we always satisfy + // the size and alignment requirements of `Self` (whereas `0` + // may be any integer type with a different size or alignment + // than some `NonZeroXxx` types). + @failure Option::::None; + [bool; 0] => @success []; + [bool; 1] + => @success [true], [false], + @failure [2u8], [3u8], [0xFFu8]; + [bool] + => @success vec![true, false].into_boxed_slice(), vec![false, true].into_boxed_slice(), + @failure [2u8], [3u8], [0xFFu8], [0u8, 1u8, 2u8]; + Unalign + => @success Unalign::new(false), Unalign::new(true), + @failure 2u8, 0xFFu8; + ManuallyDrop + => @success ManuallyDrop::new(false), ManuallyDrop::new(true), + @failure 2u8, 0xFFu8; + ManuallyDrop<[u8]> + => @success bx!(ManuallyDrop::new([])), bx!(ManuallyDrop::new([0u8])), bx!(ManuallyDrop::new([0u8, 1u8])); + ManuallyDrop<[bool]> + => @success bx!(ManuallyDrop::new([])), bx!(ManuallyDrop::new([false])), bx!(ManuallyDrop::new([false, true])), + @failure [2u8], [3u8], [0xFFu8], [0u8, 1u8, 2u8]; + ManuallyDrop<[UnsafeCell]> + => @success bx!(ManuallyDrop::new([UnsafeCell::new(0)])), bx!(ManuallyDrop::new([UnsafeCell::new(0), UnsafeCell::new(1)])); + ManuallyDrop<[UnsafeCell]> + => @success bx!(ManuallyDrop::new([UnsafeCell::new(false)])), bx!(ManuallyDrop::new([UnsafeCell::new(false), UnsafeCell::new(true)])), + @failure [2u8], [3u8], [0xFFu8], [0u8, 1u8, 2u8]; + Wrapping + => @success 
Wrapping(false), Wrapping(true), + @failure 2u8, 0xFFu8; + *const NotZerocopy + => @success ptr::null::(), + @failure [0x01; mem::size_of::<*const NotZerocopy>()]; + *mut NotZerocopy + => @success ptr::null_mut::(), + @failure [0x01; mem::size_of::<*mut NotZerocopy>()]; + ); + + // Use the trick described in [1] to allow us to call methods + // conditional on certain trait bounds. + // + // In all of these cases, methods return `Option`, where `R` is the + // return type of the method we're conditionally calling. The "real" + // implementations (the ones defined in traits using `&self`) return + // `Some`, and the default implementations (the ones defined as inherent + // methods using `&mut self`) return `None`. + // + // [1] https://github.com/dtolnay/case-studies/blob/master/autoref-specialization/README.md + mod autoref_trick { + use super::*; + + pub(super) struct AutorefWrapper(pub(super) PhantomData); + + pub(super) trait TestIsBitValidShared { + #[allow(clippy::needless_lifetimes)] + fn test_is_bit_valid_shared< + 'ptr, + A: invariant::Aliasing + invariant::AtLeast, + >( + &self, + candidate: Maybe<'ptr, T, A>, + ) -> Option; + } + + impl TestIsBitValidShared for AutorefWrapper { + #[allow(clippy::needless_lifetimes)] + fn test_is_bit_valid_shared< + 'ptr, + A: invariant::Aliasing + invariant::AtLeast, + >( + &self, + candidate: Maybe<'ptr, T, A>, + ) -> Option { + Some(T::is_bit_valid(candidate)) + } + } + + pub(super) trait TestTryFromRef { + #[allow(clippy::needless_lifetimes)] + fn test_try_from_ref<'bytes>( + &self, + bytes: &'bytes [u8], + ) -> Option>; + + #[allow(clippy::needless_lifetimes)] + fn test_try_from_mut<'bytes>( + &self, + bytes: &'bytes mut [u8], + ) -> Option>; + } + + impl TestTryFromRef for AutorefWrapper { + #[allow(clippy::needless_lifetimes)] + fn test_try_from_ref<'bytes>( + &self, + bytes: &'bytes [u8], + ) -> Option> { + Some(T::try_ref_from(bytes).ok()) + } + + #[allow(clippy::needless_lifetimes)] + fn 
test_try_from_mut<'bytes>( + &self, + bytes: &'bytes mut [u8], + ) -> Option> { + Some(T::try_mut_from(bytes).ok()) + } + } + + pub(super) trait TestTryReadFrom { + fn test_try_read_from(&self, bytes: &[u8]) -> Option>; + } + + impl TestTryReadFrom for AutorefWrapper { + fn test_try_read_from(&self, bytes: &[u8]) -> Option> { + Some(T::try_read_from(bytes).ok()) + } + } + + pub(super) trait TestAsBytes { + #[allow(clippy::needless_lifetimes)] + fn test_as_bytes<'slf, 't>(&'slf self, t: &'t T) -> Option<&'t [u8]>; + } + + impl TestAsBytes for AutorefWrapper { + #[allow(clippy::needless_lifetimes)] + fn test_as_bytes<'slf, 't>(&'slf self, t: &'t T) -> Option<&'t [u8]> { + Some(t.as_bytes()) + } + } + } + + use autoref_trick::*; + + // Asserts that `$ty` is one of a list of types which are allowed to not + // provide a "real" implementation for `$fn_name`. Since the + // `autoref_trick` machinery fails silently, this allows us to ensure + // that the "default" impls are only being used for types which we + // expect. + // + // Note that, since this is a runtime test, it is possible to have an + // allowlist which is too restrictive if the function in question is + // never called for a particular type. For example, if `as_bytes` is not + // supported for a particular type, and so `test_as_bytes` returns + // `None`, methods such as `test_try_from_ref` may never be called for + // that type. As a result, it's possible that, for example, adding + // `as_bytes` support for a type would cause other allowlist assertions + // to fail. This means that allowlist assertion failures should not + // automatically be taken as a sign of a bug. + macro_rules! assert_on_allowlist { + ($fn_name:ident($ty:ty) $(: $($tys:ty),*)?) => {{ + use core::any::TypeId; + + let allowlist: &[TypeId] = &[ $($(TypeId::of::<$tys>()),*)? ]; + let allowlist_names: &[&str] = &[ $($(stringify!($tys)),*)? 
]; + + let id = TypeId::of::<$ty>(); + assert!(allowlist.contains(&id), "{} is not on allowlist for {}: {:?}", stringify!($ty), stringify!($fn_name), allowlist_names); + }}; + } + + // Asserts that `$ty` implements any `$trait` and doesn't implement any + // `!$trait`. Note that all `$trait`s must come before any `!$trait`s. + // + // For `T: TryFromBytes`, uses `TryFromBytesTestable` to test success + // and failure cases. + macro_rules! assert_impls { + ($ty:ty: TryFromBytes) => { + // "Default" implementations that match the "real" + // implementations defined in the `autoref_trick` module above. + #[allow(unused, non_local_definitions)] + impl AutorefWrapper<$ty> { + #[allow(clippy::needless_lifetimes)] + fn test_is_bit_valid_shared<'ptr, A: invariant::Aliasing + invariant::AtLeast>( + &mut self, + candidate: Maybe<'ptr, $ty, A>, + ) -> Option { + assert_on_allowlist!( + test_is_bit_valid_shared($ty): + ManuallyDrop>, + ManuallyDrop<[UnsafeCell]>, + ManuallyDrop<[UnsafeCell]>, + MaybeUninit, + MaybeUninit>, + Wrapping> + ); + + None + } + + #[allow(clippy::needless_lifetimes)] + fn test_try_from_ref<'bytes>(&mut self, _bytes: &'bytes [u8]) -> Option> { + assert_on_allowlist!( + test_try_from_ref($ty): + ManuallyDrop<[UnsafeCell]> + ); + + None + } + + #[allow(clippy::needless_lifetimes)] + fn test_try_from_mut<'bytes>(&mut self, _bytes: &'bytes mut [u8]) -> Option> { + assert_on_allowlist!( + test_try_from_mut($ty): + ManuallyDrop<[UnsafeCell]> + ); + + None + } + + fn test_try_read_from(&mut self, _bytes: &[u8]) -> Option> { + assert_on_allowlist!( + test_try_read_from($ty): + str, + ManuallyDrop<[u8]>, + ManuallyDrop<[bool]>, + ManuallyDrop<[UnsafeCell]>, + [u8], + [bool] + ); + + None + } + + fn test_as_bytes(&mut self, _t: &$ty) -> Option<&[u8]> { + assert_on_allowlist!( + test_as_bytes($ty): + Option<&'static UnsafeCell>, + Option<&'static mut UnsafeCell>, + Option>>, + Option>>, + Option, + Option, + Option, + Option, + MaybeUninit, + MaybeUninit, + 
MaybeUninit>, + ManuallyDrop>, + ManuallyDrop<[UnsafeCell]>, + ManuallyDrop<[UnsafeCell]>, + Wrapping>, + *const NotZerocopy, + *mut NotZerocopy + ); + + None + } + } + + <$ty as TryFromBytesTestable>::with_passing_test_cases(|mut val| { + // TODO(#494): These tests only get exercised for types + // which are `IntoBytes`. Once we implement #494, we should + // be able to support non-`IntoBytes` types by zeroing + // padding. + + // We define `w` and `ww` since, in the case of the inherent + // methods, Rust thinks they're both borrowed mutably at the + // same time (given how we use them below). If we just + // defined a single `w` and used it for multiple operations, + // this would conflict. + // + // We `#[allow(unused_mut]` for the cases where the "real" + // impls are used, which take `&self`. + #[allow(unused_mut)] + let (mut w, mut ww) = (AutorefWrapper::<$ty>(PhantomData), AutorefWrapper::<$ty>(PhantomData)); + + let c = Ptr::from_ref(&*val); + let c = c.forget_aligned(); + // SAFETY: TODO(#899): This is unsound. `$ty` is not + // necessarily `IntoBytes`, but that's the corner we've + // backed ourselves into by using `Ptr::from_ref`. + let c = unsafe { c.assume_initialized() }; + let res = w.test_is_bit_valid_shared(c); + if let Some(res) = res { + assert!(res, "{}::is_bit_valid({:?}) (shared `Ptr`): got false, expected true", stringify!($ty), val); + } + + let c = Ptr::from_mut(&mut *val); + let c = c.forget_aligned(); + // SAFETY: TODO(#899): This is unsound. `$ty` is not + // necessarily `IntoBytes`, but that's the corner we've + // backed ourselves into by using `Ptr::from_ref`. + let c = unsafe { c.assume_initialized() }; + let res = <$ty as TryFromBytes>::is_bit_valid(c); + assert!(res, "{}::is_bit_valid({:?}) (exclusive `Ptr`): got false, expected true", stringify!($ty), val); + + // `bytes` is `Some(val.as_bytes())` if `$ty: IntoBytes + + // Immutable` and `None` otherwise. 
+ let bytes = w.test_as_bytes(&*val); + + // The inner closure returns + // `Some($ty::try_ref_from(bytes))` if `$ty: Immutable` and + // `None` otherwise. + let res = bytes.and_then(|bytes| ww.test_try_from_ref(bytes)); + if let Some(res) = res { + assert!(res.is_some(), "{}::try_ref_from({:?}): got `None`, expected `Some`", stringify!($ty), val); + } + + if let Some(bytes) = bytes { + // We need to get a mutable byte slice, and so we clone + // into a `Vec`. However, we also need these bytes to + // satisfy `$ty`'s alignment requirement, which isn't + // guaranteed for `Vec`. In order to get around + // this, we create a `Vec` which is twice as long as we + // need. There is guaranteed to be an aligned byte range + // of size `size_of_val(val)` within that range. + let val = &*val; + let size = mem::size_of_val(val); + let align = mem::align_of_val(val); + + let mut vec = bytes.to_vec(); + vec.extend(bytes); + let slc = vec.as_slice(); + let offset = slc.as_ptr().align_offset(align); + let bytes_mut = &mut vec.as_mut_slice()[offset..offset+size]; + bytes_mut.copy_from_slice(bytes); + + let res = ww.test_try_from_mut(bytes_mut); + if let Some(res) = res { + assert!(res.is_some(), "{}::try_mut_from({:?}): got `None`, expected `Some`", stringify!($ty), val); + } + } + + let res = bytes.and_then(|bytes| ww.test_try_read_from(bytes)); + if let Some(res) = res { + assert!(res.is_some(), "{}::try_read_from({:?}): got `None`, expected `Some`", stringify!($ty), val); + } + }); + #[allow(clippy::as_conversions)] + <$ty as TryFromBytesTestable>::with_failing_test_cases(|c| { + #[allow(unused_mut)] // For cases where the "real" impls are used, which take `&self`. + let mut w = AutorefWrapper::<$ty>(PhantomData); + + // This is `Some($ty::try_ref_from(c))` if `$ty: Immutable` and + // `None` otherwise. 
+ let res = w.test_try_from_ref(c); + if let Some(res) = res { + assert!(res.is_none(), "{}::try_ref_from({:?}): got Some, expected None", stringify!($ty), c); + } + + let res = w.test_try_from_mut(c); + if let Some(res) = res { + assert!(res.is_none(), "{}::try_mut_from({:?}): got Some, expected None", stringify!($ty), c); + } + + let res = w.test_try_read_from(c); + if let Some(res) = res { + assert!(res.is_none(), "{}::try_read_from({:?}): got Some, expected None", stringify!($ty), c); + } + }); + + #[allow(dead_code)] + const _: () = { static_assertions::assert_impl_all!($ty: TryFromBytes); }; + }; + ($ty:ty: $trait:ident) => { + #[allow(dead_code)] + const _: () = { static_assertions::assert_impl_all!($ty: $trait); }; + }; + ($ty:ty: !$trait:ident) => { + #[allow(dead_code)] + const _: () = { static_assertions::assert_not_impl_any!($ty: $trait); }; + }; + ($ty:ty: $($trait:ident),* $(,)? $(!$negative_trait:ident),*) => { + $( + assert_impls!($ty: $trait); + )* + + $( + assert_impls!($ty: !$negative_trait); + )* + }; + } + + // NOTE: The negative impl assertions here are not necessarily + // prescriptive. They merely serve as change detectors to make sure + // we're aware of what trait impls are getting added with a given + // change. Of course, some impls would be invalid (e.g., `bool: + // FromBytes`), and so this change detection is very important. 
+ + assert_impls!( + (): KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + FromBytes, + IntoBytes, + Unaligned + ); + assert_impls!( + u8: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + FromBytes, + IntoBytes, + Unaligned + ); + assert_impls!( + i8: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + FromBytes, + IntoBytes, + Unaligned + ); + assert_impls!( + u16: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + FromBytes, + IntoBytes, + !Unaligned + ); + assert_impls!( + i16: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + FromBytes, + IntoBytes, + !Unaligned + ); + assert_impls!( + u32: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + FromBytes, + IntoBytes, + !Unaligned + ); + assert_impls!( + i32: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + FromBytes, + IntoBytes, + !Unaligned + ); + assert_impls!( + u64: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + FromBytes, + IntoBytes, + !Unaligned + ); + assert_impls!( + i64: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + FromBytes, + IntoBytes, + !Unaligned + ); + assert_impls!( + u128: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + FromBytes, + IntoBytes, + !Unaligned + ); + assert_impls!( + i128: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + FromBytes, + IntoBytes, + !Unaligned + ); + assert_impls!( + usize: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + FromBytes, + IntoBytes, + !Unaligned + ); + assert_impls!( + isize: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + FromBytes, + IntoBytes, + !Unaligned + ); + assert_impls!( + f32: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + FromBytes, + IntoBytes, + !Unaligned + ); + assert_impls!( + f64: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + FromBytes, + IntoBytes, + !Unaligned + ); + + assert_impls!( + bool: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + IntoBytes, + Unaligned, + !FromBytes + ); + assert_impls!( + char: 
KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + IntoBytes, + !FromBytes, + !Unaligned + ); + assert_impls!( + str: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + IntoBytes, + Unaligned, + !FromBytes + ); + + assert_impls!( + NonZeroU8: KnownLayout, + Immutable, + TryFromBytes, + IntoBytes, + Unaligned, + !FromZeros, + !FromBytes + ); + assert_impls!( + NonZeroI8: KnownLayout, + Immutable, + TryFromBytes, + IntoBytes, + Unaligned, + !FromZeros, + !FromBytes + ); + assert_impls!( + NonZeroU16: KnownLayout, + Immutable, + TryFromBytes, + IntoBytes, + !FromBytes, + !Unaligned + ); + assert_impls!( + NonZeroI16: KnownLayout, + Immutable, + TryFromBytes, + IntoBytes, + !FromBytes, + !Unaligned + ); + assert_impls!( + NonZeroU32: KnownLayout, + Immutable, + TryFromBytes, + IntoBytes, + !FromBytes, + !Unaligned + ); + assert_impls!( + NonZeroI32: KnownLayout, + Immutable, + TryFromBytes, + IntoBytes, + !FromBytes, + !Unaligned + ); + assert_impls!( + NonZeroU64: KnownLayout, + Immutable, + TryFromBytes, + IntoBytes, + !FromBytes, + !Unaligned + ); + assert_impls!( + NonZeroI64: KnownLayout, + Immutable, + TryFromBytes, + IntoBytes, + !FromBytes, + !Unaligned + ); + assert_impls!( + NonZeroU128: KnownLayout, + Immutable, + TryFromBytes, + IntoBytes, + !FromBytes, + !Unaligned + ); + assert_impls!( + NonZeroI128: KnownLayout, + Immutable, + TryFromBytes, + IntoBytes, + !FromBytes, + !Unaligned + ); + assert_impls!( + NonZeroUsize: KnownLayout, + Immutable, + TryFromBytes, + IntoBytes, + !FromBytes, + !Unaligned + ); + assert_impls!( + NonZeroIsize: KnownLayout, + Immutable, + TryFromBytes, + IntoBytes, + !FromBytes, + !Unaligned + ); + + assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); + assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); + assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned); + 
assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned); + assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned); + assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned); + assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned); + assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned); + assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned); + assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned); + assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned); + assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned); + + // Implements none of the ZC traits. + struct NotZerocopy; + + #[rustfmt::skip] + type FnManyArgs = fn( + NotZerocopy, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, + ) -> (NotZerocopy, NotZerocopy); + + // Allowed, because we're not actually using this type for FFI. 
+ #[allow(improper_ctypes_definitions)] + #[rustfmt::skip] + type ECFnManyArgs = extern "C" fn( + NotZerocopy, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, + ) -> (NotZerocopy, NotZerocopy); + + #[cfg(feature = "alloc")] + assert_impls!(Option>>: KnownLayout, Immutable, TryFromBytes, FromZeros, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!(Option]>>: KnownLayout, !Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!(Option<&'static UnsafeCell>: KnownLayout, Immutable, TryFromBytes, FromZeros, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!(Option<&'static [UnsafeCell]>: KnownLayout, Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!(Option<&'static mut UnsafeCell>: KnownLayout, Immutable, TryFromBytes, FromZeros, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!(Option<&'static mut [UnsafeCell]>: KnownLayout, Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!(Option>>: KnownLayout, TryFromBytes, FromZeros, Immutable, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!(Option]>>: KnownLayout, Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, !FromBytes, !IntoBytes, !Unaligned); + + assert_impls!(PhantomData: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); + assert_impls!(PhantomData>: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); + assert_impls!(PhantomData<[u8]>: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); 
+ + assert_impls!(ManuallyDrop: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); + // This test is important because it allows us to test our hand-rolled + // implementation of ` as TryFromBytes>::is_bit_valid`. + assert_impls!(ManuallyDrop: KnownLayout, Immutable, TryFromBytes, FromZeros, IntoBytes, Unaligned, !FromBytes); + assert_impls!(ManuallyDrop<[u8]>: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); + // This test is important because it allows us to test our hand-rolled + // implementation of ` as TryFromBytes>::is_bit_valid`. + assert_impls!(ManuallyDrop<[bool]>: KnownLayout, Immutable, TryFromBytes, FromZeros, IntoBytes, Unaligned, !FromBytes); + assert_impls!(ManuallyDrop: !Immutable, !TryFromBytes, !KnownLayout, !FromZeros, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!(ManuallyDrop<[NotZerocopy]>: KnownLayout, !Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!(ManuallyDrop>: KnownLayout, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned, !Immutable); + assert_impls!(ManuallyDrop<[UnsafeCell]>: KnownLayout, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned, !Immutable); + assert_impls!(ManuallyDrop<[UnsafeCell]>: KnownLayout, TryFromBytes, FromZeros, IntoBytes, Unaligned, !Immutable, !FromBytes); + + assert_impls!(MaybeUninit: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, Unaligned, !IntoBytes); + assert_impls!(MaybeUninit: KnownLayout, TryFromBytes, FromZeros, FromBytes, !Immutable, !IntoBytes, !Unaligned); + assert_impls!(MaybeUninit>: KnownLayout, TryFromBytes, FromZeros, FromBytes, Unaligned, !Immutable, !IntoBytes); + + assert_impls!(Wrapping: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); + // This test is important because it allows us to test our hand-rolled + // implementation of ` as TryFromBytes>::is_bit_valid`. 
+ assert_impls!(Wrapping: KnownLayout, Immutable, TryFromBytes, FromZeros, IntoBytes, Unaligned, !FromBytes); + assert_impls!(Wrapping: KnownLayout, !Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!(Wrapping>: KnownLayout, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned, !Immutable); + + assert_impls!(Unalign: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); + // This test is important because it allows us to test our hand-rolled + // implementation of ` as TryFromBytes>::is_bit_valid`. + assert_impls!(Unalign: KnownLayout, Immutable, TryFromBytes, FromZeros, IntoBytes, Unaligned, !FromBytes); + assert_impls!(Unalign: Unaligned, !Immutable, !KnownLayout, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes); + + assert_impls!( + [u8]: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + FromBytes, + IntoBytes, + Unaligned + ); + assert_impls!( + [bool]: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + IntoBytes, + Unaligned, + !FromBytes + ); + assert_impls!([NotZerocopy]: KnownLayout, !Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!( + [u8; 0]: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + FromBytes, + IntoBytes, + Unaligned, + ); + assert_impls!( + [NotZerocopy; 0]: KnownLayout, + !Immutable, + !TryFromBytes, + !FromZeros, + !FromBytes, + !IntoBytes, + !Unaligned + ); + assert_impls!( + [u8; 1]: KnownLayout, + Immutable, + TryFromBytes, + FromZeros, + FromBytes, + IntoBytes, + Unaligned, + ); + assert_impls!( + [NotZerocopy; 1]: KnownLayout, + !Immutable, + !TryFromBytes, + !FromZeros, + !FromBytes, + !IntoBytes, + !Unaligned + ); + + assert_impls!(*const NotZerocopy: KnownLayout, Immutable, TryFromBytes, FromZeros, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!(*mut NotZerocopy: KnownLayout, Immutable, TryFromBytes, FromZeros, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!(*const [NotZerocopy]: 
KnownLayout, Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!(*mut [NotZerocopy]: KnownLayout, Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!(*const dyn Debug: KnownLayout, Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned); + assert_impls!(*mut dyn Debug: KnownLayout, Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned); + + #[cfg(feature = "simd")] + { + #[allow(unused_macros)] + macro_rules! test_simd_arch_mod { + ($arch:ident, $($typ:ident),*) => { + { + use core::arch::$arch::{$($typ),*}; + use crate::*; + $( assert_impls!($typ: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned); )* + } + }; + } + #[cfg(target_arch = "x86")] + test_simd_arch_mod!(x86, __m128, __m128d, __m128i, __m256, __m256d, __m256i); + + #[cfg(all(feature = "simd-nightly", target_arch = "x86"))] + test_simd_arch_mod!(x86, __m512bh, __m512, __m512d, __m512i); + + #[cfg(target_arch = "x86_64")] + test_simd_arch_mod!(x86_64, __m128, __m128d, __m128i, __m256, __m256d, __m256i); + + #[cfg(all(feature = "simd-nightly", target_arch = "x86_64"))] + test_simd_arch_mod!(x86_64, __m512bh, __m512, __m512d, __m512i); + + #[cfg(target_arch = "wasm32")] + test_simd_arch_mod!(wasm32, v128); + + #[cfg(all(feature = "simd-nightly", target_arch = "powerpc"))] + test_simd_arch_mod!( + powerpc, + vector_bool_long, + vector_double, + vector_signed_long, + vector_unsigned_long + ); + + #[cfg(all(feature = "simd-nightly", target_arch = "powerpc64"))] + test_simd_arch_mod!( + powerpc64, + vector_bool_long, + vector_double, + vector_signed_long, + vector_unsigned_long + ); + #[cfg(all(target_arch = "aarch64", zerocopy_aarch64_simd))] + #[rustfmt::skip] + test_simd_arch_mod!( + aarch64, float32x2_t, float32x4_t, float64x1_t, float64x2_t, int8x8_t, int8x8x2_t, + int8x8x3_t, int8x8x4_t, int8x16_t, int8x16x2_t, int8x16x3_t, int8x16x4_t, int16x4_t, + int16x8_t, 
int32x2_t, int32x4_t, int64x1_t, int64x2_t, poly8x8_t, poly8x8x2_t, poly8x8x3_t, + poly8x8x4_t, poly8x16_t, poly8x16x2_t, poly8x16x3_t, poly8x16x4_t, poly16x4_t, poly16x8_t, + poly64x1_t, poly64x2_t, uint8x8_t, uint8x8x2_t, uint8x8x3_t, uint8x8x4_t, uint8x16_t, + uint8x16x2_t, uint8x16x3_t, uint8x16x4_t, uint16x4_t, uint16x8_t, uint32x2_t, uint32x4_t, + uint64x1_t, uint64x2_t + ); + #[cfg(all(feature = "simd-nightly", target_arch = "arm"))] + #[rustfmt::skip] + test_simd_arch_mod!(arm, int8x4_t, uint8x4_t); + } + } +} diff --git a/src/lib.rs b/src/lib.rs index e6ef934e5b..a8711a31d4 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -286,6 +286,7 @@ mod deprecated; // expect most users to use the re-export of `error`'s items to avoid identifier // stuttering. pub mod error; +mod impls; #[doc(hidden)] pub mod layout; #[doc(hidden)] @@ -4037,888 +4038,6 @@ pub unsafe trait Unaligned { Self: Sized; } -safety_comment! { - /// SAFETY: - /// Per the reference [1], "the unit tuple (`()`) ... is guaranteed as a - /// zero-sized type to have a size of 0 and an alignment of 1." - /// - `Immutable`: `()` self-evidently does not contain any `UnsafeCell`s. - /// - `TryFromBytes` (with no validator), `FromZeros`, `FromBytes`: There is - /// only one possible sequence of 0 bytes, and `()` is inhabited. - /// - `IntoBytes`: Since `()` has size 0, it contains no padding bytes. - /// - `Unaligned`: `()` has alignment 1. - /// - /// [1] https://doc.rust-lang.org/reference/type-layout.html#tuple-layout - unsafe_impl!((): Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); - assert_unaligned!(()); -} - -safety_comment! { - /// SAFETY: - /// - `Immutable`: These types self-evidently do not contain any - /// `UnsafeCell`s. 
- /// - `TryFromBytes` (with no validator), `FromZeros`, `FromBytes`: all bit - /// patterns are valid for numeric types [1] - /// - `IntoBytes`: numeric types have no padding bytes [1] - /// - `Unaligned` (`u8` and `i8` only): The reference [2] specifies the size - /// of `u8` and `i8` as 1 byte. We also know that: - /// - Alignment is >= 1 [3] - /// - Size is an integer multiple of alignment [4] - /// - The only value >= 1 for which 1 is an integer multiple is 1 - /// Therefore, the only possible alignment for `u8` and `i8` is 1. - /// - /// [1] Per https://doc.rust-lang.org/beta/reference/types/numeric.html#bit-validity: - /// - /// For every numeric type, `T`, the bit validity of `T` is equivalent to - /// the bit validity of `[u8; size_of::()]`. An uninitialized byte is - /// not a valid `u8`. - /// - /// TODO(https://github.com/rust-lang/reference/pull/1392): Once this text - /// is available on the Stable docs, cite those instead. - /// - /// [2] https://doc.rust-lang.org/reference/type-layout.html#primitive-data-layout - /// - /// [3] Per https://doc.rust-lang.org/reference/type-layout.html#size-and-alignment: - /// - /// Alignment is measured in bytes, and must be at least 1. - /// - /// [4] Per https://doc.rust-lang.org/reference/type-layout.html#size-and-alignment: - /// - /// The size of a value is always a multiple of its alignment. - /// - /// TODO(#278): Once we've updated the trait docs to refer to `u8`s rather - /// than bits or bytes, update this comment, especially the reference to - /// [1]. 
- unsafe_impl!(u8: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); - unsafe_impl!(i8: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); - assert_unaligned!(u8, i8); - unsafe_impl!(u16: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); - unsafe_impl!(i16: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); - unsafe_impl!(u32: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); - unsafe_impl!(i32: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); - unsafe_impl!(u64: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); - unsafe_impl!(i64: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); - unsafe_impl!(u128: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); - unsafe_impl!(i128: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); - unsafe_impl!(usize: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); - unsafe_impl!(isize: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); - unsafe_impl!(f32: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); - unsafe_impl!(f64: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); -} - -safety_comment! { - /// SAFETY: - /// - `Immutable`: `bool` self-evidently does not contain any `UnsafeCell`s. - /// - `FromZeros`: Valid since "[t]he value false has the bit pattern 0x00" - /// [1]. - /// - `IntoBytes`: Since "the boolean type has a size and alignment of 1 - /// each" and "The value false has the bit pattern 0x00 and the value true - /// has the bit pattern 0x01" [1]. Thus, the only byte of the bool is - /// always initialized. - /// - `Unaligned`: Per the reference [1], "[a]n object with the boolean type - /// has a size and alignment of 1 each." 
- /// - /// [1] https://doc.rust-lang.org/reference/types/boolean.html - unsafe_impl!(bool: Immutable, FromZeros, IntoBytes, Unaligned); - assert_unaligned!(bool); - /// SAFETY: - /// - The safety requirements for `unsafe_impl!` with an `is_bit_valid` - /// closure: - /// - Given `t: *mut bool` and `let r = *mut u8`, `r` refers to an object - /// of the same size as that referred to by `t`. This is true because - /// `bool` and `u8` have the same size (1 byte) [1]. Neither `r` nor `t` - /// contain `UnsafeCell`s because neither `bool` nor `u8` do [4]. - /// - Since the closure takes a `&u8` argument, given a `Maybe<'a, - /// bool>` which satisfies the preconditions of - /// `TryFromBytes::::is_bit_valid`, it must be guaranteed that the - /// memory referenced by that `MaybeValid` always contains a valid `u8`. - /// Since `bool`'s single byte is always initialized, `is_bit_valid`'s - /// precondition requires that the same is true of its argument. Since - /// `u8`'s only bit validity invariant is that its single byte must be - /// initialized, this memory is guaranteed to contain a valid `u8`. - /// - The impl must only return `true` for its argument if the original - /// `Maybe` refers to a valid `bool`. We only return true if - /// the `u8` value is 0 or 1, and both of these are valid values for - /// `bool`. [3] - /// - /// [1] Per https://doc.rust-lang.org/reference/type-layout.html#primitive-data-layout: - /// - /// The size of most primitives is given in this table. - /// - /// | Type | `size_of::() ` | - /// |-----------|----------------------| - /// | `bool` | 1 | - /// | `u8`/`i8` | 1 | - /// - /// [2] Per https://doc.rust-lang.org/reference/type-layout.html#size-and-alignment: - /// - /// The size of a value is always a multiple of its alignment. - /// - /// [3] Per https://doc.rust-lang.org/reference/types/boolean.html: - /// - /// The value false has the bit pattern 0x00 and the value true has the - /// bit pattern 0x01. 
- /// - /// [4] TODO(#429): Justify this claim. - unsafe_impl!(bool: TryFromBytes; |byte: MaybeAligned| *byte.unaligned_as_ref() < 2); -} -safety_comment! { - /// SAFETY: - /// - `Immutable`: `char` self-evidently does not contain any `UnsafeCell`s. - /// - `FromZeros`: Per reference [1], "[a] value of type char is a Unicode - /// scalar value (i.e. a code point that is not a surrogate), represented - /// as a 32-bit unsigned word in the 0x0000 to 0xD7FF or 0xE000 to - /// 0x10FFFF range" which contains 0x0000. - /// - `IntoBytes`: `char` is per reference [1] "represented as a 32-bit - /// unsigned word" (`u32`) which is `IntoBytes`. Note that unlike `u32`, - /// not all bit patterns are valid for `char`. - /// - /// [1] https://doc.rust-lang.org/reference/types/textual.html - unsafe_impl!(char: Immutable, FromZeros, IntoBytes); - /// SAFETY: - /// - The safety requirements for `unsafe_impl!` with an `is_bit_valid` - /// closure: - /// - Given `t: *mut char` and `let r = *mut u32`, `r` refers to an object - /// of the same size as that referred to by `t`. This is true because - /// `char` and `u32` have the same size [1]. Neither `r` nor `t` contain - /// `UnsafeCell`s because neither `char` nor `u32` do [4]. - /// - Since the closure takes a `&u32` argument, given a `Maybe<'a, - /// char>` which satisfies the preconditions of - /// `TryFromBytes::::is_bit_valid`, it must be guaranteed that the - /// memory referenced by that `MaybeValid` always contains a valid - /// `u32`. Since `char`'s bytes are always initialized [2], - /// `is_bit_valid`'s precondition requires that the same is true of its - /// argument. Since `u32`'s only bit validity invariant is that its - /// bytes must be initialized, this memory is guaranteed to contain a - /// valid `u32`. - /// - The impl must only return `true` for its argument if the original - /// `Maybe` refers to a valid `char`. `char::from_u32` - /// guarantees that it returns `None` if its input is not a valid - /// `char`. 
[3] - /// - /// [1] Per https://doc.rust-lang.org/nightly/reference/types/textual.html#layout-and-bit-validity: - /// - /// `char` is guaranteed to have the same size and alignment as `u32` on - /// all platforms. - /// - /// [2] Per https://doc.rust-lang.org/core/primitive.char.html#method.from_u32: - /// - /// Every byte of a `char` is guaranteed to be initialized. - /// - /// [3] Per https://doc.rust-lang.org/core/primitive.char.html#method.from_u32: - /// - /// `from_u32()` will return `None` if the input is not a valid value for - /// a `char`. - /// - /// [4] TODO(#429): Justify this claim. - unsafe_impl!(char: TryFromBytes; |candidate: MaybeAligned| { - let candidate = candidate.read_unaligned(); - char::from_u32(candidate).is_some() - }); -} -safety_comment! { - /// SAFETY: - /// Per the Reference [1], `str` has the same layout as `[u8]`. - /// - `Immutable`: `[u8]` does not contain any `UnsafeCell`s. - /// - `FromZeros`, `IntoBytes`, `Unaligned`: `[u8]` is `FromZeros`, - /// `IntoBytes`, and `Unaligned`. - /// - /// Note that we don't `assert_unaligned!(str)` because `assert_unaligned!` - /// uses `align_of`, which only works for `Sized` types. - /// - /// TODO(#429): - /// - Add quotes from documentation. - /// - Improve safety proof for `FromZeros` and `IntoBytes`; having the same - /// layout as `[u8]` isn't sufficient. - /// - /// [1] https://doc.rust-lang.org/reference/type-layout.html#str-layout - unsafe_impl!(str: Immutable, FromZeros, IntoBytes, Unaligned); - /// SAFETY: - /// - The safety requirements for `unsafe_impl!` with an `is_bit_valid` - /// closure: - /// - Given `t: *mut str` and `let r = *mut [u8]`, `r` refers to an object - /// of the same size as that referred to by `t`. This is true because - /// `str` and `[u8]` have the same representation. [1] Neither `t` nor - /// `r` contain `UnsafeCell`s because `[u8]` doesn't, and both `t` and - /// `r` have that representation. 
- /// - Since the closure takes a `&[u8]` argument, given a `Maybe<'a, - /// str>` which satisfies the preconditions of - /// `TryFromBytes::::is_bit_valid`, it must be guaranteed that the - /// memory referenced by that `MaybeValid` always contains a valid - /// `[u8]`. Since `str`'s bytes are always initialized [1], - /// `is_bit_valid`'s precondition requires that the same is true of its - /// argument. Since `[u8]`'s only bit validity invariant is that its - /// bytes must be initialized, this memory is guaranteed to contain a - /// valid `[u8]`. - /// - The impl must only return `true` for its argument if the original - /// `Maybe` refers to a valid `str`. `str::from_utf8` - /// guarantees that it returns `Err` if its input is not a valid `str`. - /// [2] - /// - /// [1] Per https://doc.rust-lang.org/reference/types/textual.html: - /// - /// A value of type `str` is represented the same was as `[u8]`. - /// - /// [2] Per https://doc.rust-lang.org/core/str/fn.from_utf8.html#errors: - /// - /// Returns `Err` if the slice is not UTF-8. - unsafe_impl!(str: TryFromBytes; |candidate: MaybeAligned<[u8]>| { - let candidate = candidate.unaligned_as_ref(); - core::str::from_utf8(candidate).is_ok() - }); -} - -safety_comment! { - // `NonZeroXxx` is `IntoBytes`, but not `FromZeros` or `FromBytes`. - // - /// SAFETY: - /// - `IntoBytes`: `NonZeroXxx` has the same layout as its associated - /// primitive. Since it is the same size, this guarantees it has no - /// padding - integers have no padding, and there's no room for padding - /// if it can represent all of the same values except 0. - /// - `Unaligned`: `NonZeroU8` and `NonZeroI8` document that - /// `Option` and `Option` both have size 1. [1] [2] - /// This is worded in a way that makes it unclear whether it's meant as a - /// guarantee, but given the purpose of those types, it's virtually - /// unthinkable that that would ever change. 
`Option` cannot be smaller - /// than its contained type, which implies that, and `NonZeroX8` are of - /// size 1 or 0. `NonZeroX8` can represent multiple states, so they cannot - /// be 0 bytes, which means that they must be 1 byte. The only valid - /// alignment for a 1-byte type is 1. - /// - /// TODO(#429): - /// - Add quotes from documentation. - /// - Add safety comment for `Immutable`. How can we prove that `NonZeroXxx` - /// doesn't contain any `UnsafeCell`s? It's obviously true, but it's not - /// clear how we'd prove it short of adding text to the stdlib docs that - /// says so explicitly, which likely wouldn't be accepted. - /// - /// [1] https://doc.rust-lang.org/stable/std/num/struct.NonZeroU8.html - /// [2] https://doc.rust-lang.org/stable/std/num/struct.NonZeroI8.html - /// TODO(https://github.com/rust-lang/rust/pull/104082): Cite documentation - /// that layout is the same as primitive layout. - unsafe_impl!(NonZeroU8: Immutable, IntoBytes, Unaligned); - unsafe_impl!(NonZeroI8: Immutable, IntoBytes, Unaligned); - assert_unaligned!(NonZeroU8, NonZeroI8); - unsafe_impl!(NonZeroU16: Immutable, IntoBytes); - unsafe_impl!(NonZeroI16: Immutable, IntoBytes); - unsafe_impl!(NonZeroU32: Immutable, IntoBytes); - unsafe_impl!(NonZeroI32: Immutable, IntoBytes); - unsafe_impl!(NonZeroU64: Immutable, IntoBytes); - unsafe_impl!(NonZeroI64: Immutable, IntoBytes); - unsafe_impl!(NonZeroU128: Immutable, IntoBytes); - unsafe_impl!(NonZeroI128: Immutable, IntoBytes); - unsafe_impl!(NonZeroUsize: Immutable, IntoBytes); - unsafe_impl!(NonZeroIsize: Immutable, IntoBytes); - /// SAFETY: - /// - The safety requirements for `unsafe_impl!` with an `is_bit_valid` - /// closure: - /// - Given `t: *mut NonZeroXxx` and `let r = *mut xxx`, `r` refers to an - /// object of the same size as that referred to by `t`. This is true - /// because `NonZeroXxx` and `xxx` have the same size. 
[1] Neither `r` - /// nor `t` refer to any `UnsafeCell`s because neither `NonZeroXxx` [2] - /// nor `xxx` do. - /// - Since the closure takes a `&xxx` argument, given a `Maybe<'a, - /// NonZeroXxx>` which satisfies the preconditions of - /// `TryFromBytes::::is_bit_valid`, it must be guaranteed - /// that the memory referenced by that `MabyeValid` always contains a - /// valid `xxx`. Since `NonZeroXxx`'s bytes are always initialized [1], - /// `is_bit_valid`'s precondition requires that the same is true of its - /// argument. Since `xxx`'s only bit validity invariant is that its - /// bytes must be initialized, this memory is guaranteed to contain a - /// valid `xxx`. - /// - The impl must only return `true` for its argument if the original - /// `Maybe` refers to a valid `NonZeroXxx`. The only - /// `xxx` which is not also a valid `NonZeroXxx` is 0. [1] - /// - /// [1] Per https://doc.rust-lang.org/core/num/struct.NonZeroU16.html: - /// - /// `NonZeroU16` is guaranteed to have the same layout and bit validity as - /// `u16` with the exception that `0` is not a valid instance. - /// - /// [2] TODO(#896): Write a safety proof for this before the next stable - /// release. 
- unsafe_impl!(NonZeroU8: TryFromBytes; |n: MaybeAligned| NonZeroU8::new(n.read_unaligned()).is_some()); - unsafe_impl!(NonZeroI8: TryFromBytes; |n: MaybeAligned| NonZeroI8::new(n.read_unaligned()).is_some()); - unsafe_impl!(NonZeroU16: TryFromBytes; |n: MaybeAligned| NonZeroU16::new(n.read_unaligned()).is_some()); - unsafe_impl!(NonZeroI16: TryFromBytes; |n: MaybeAligned| NonZeroI16::new(n.read_unaligned()).is_some()); - unsafe_impl!(NonZeroU32: TryFromBytes; |n: MaybeAligned| NonZeroU32::new(n.read_unaligned()).is_some()); - unsafe_impl!(NonZeroI32: TryFromBytes; |n: MaybeAligned| NonZeroI32::new(n.read_unaligned()).is_some()); - unsafe_impl!(NonZeroU64: TryFromBytes; |n: MaybeAligned| NonZeroU64::new(n.read_unaligned()).is_some()); - unsafe_impl!(NonZeroI64: TryFromBytes; |n: MaybeAligned| NonZeroI64::new(n.read_unaligned()).is_some()); - unsafe_impl!(NonZeroU128: TryFromBytes; |n: MaybeAligned| NonZeroU128::new(n.read_unaligned()).is_some()); - unsafe_impl!(NonZeroI128: TryFromBytes; |n: MaybeAligned| NonZeroI128::new(n.read_unaligned()).is_some()); - unsafe_impl!(NonZeroUsize: TryFromBytes; |n: MaybeAligned| NonZeroUsize::new(n.read_unaligned()).is_some()); - unsafe_impl!(NonZeroIsize: TryFromBytes; |n: MaybeAligned| NonZeroIsize::new(n.read_unaligned()).is_some()); -} -safety_comment! { - /// SAFETY: - /// - `TryFromBytes` (with no validator), `FromZeros`, `FromBytes`, - /// `IntoBytes`: The Rust compiler reuses `0` value to represent `None`, - /// so `size_of::>() == size_of::()`; see - /// `NonZeroXxx` documentation. - /// - `Unaligned`: `NonZeroU8` and `NonZeroI8` document that - /// `Option` and `Option` both have size 1. [1] [2] - /// This is worded in a way that makes it unclear whether it's meant as a - /// guarantee, but given the purpose of those types, it's virtually - /// unthinkable that that would ever change. The only valid alignment for - /// a 1-byte type is 1. - /// - /// TODO(#429): Add quotes from documentation. 
- /// - /// [1] https://doc.rust-lang.org/stable/std/num/struct.NonZeroU8.html - /// [2] https://doc.rust-lang.org/stable/std/num/struct.NonZeroI8.html - /// - /// TODO(https://github.com/rust-lang/rust/pull/104082): Cite documentation - /// for layout guarantees. - unsafe_impl!(Option: TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); - unsafe_impl!(Option: TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); - assert_unaligned!(Option, Option); - unsafe_impl!(Option: TryFromBytes, FromZeros, FromBytes, IntoBytes); - unsafe_impl!(Option: TryFromBytes, FromZeros, FromBytes, IntoBytes); - unsafe_impl!(Option: TryFromBytes, FromZeros, FromBytes, IntoBytes); - unsafe_impl!(Option: TryFromBytes, FromZeros, FromBytes, IntoBytes); - unsafe_impl!(Option: TryFromBytes, FromZeros, FromBytes, IntoBytes); - unsafe_impl!(Option: TryFromBytes, FromZeros, FromBytes, IntoBytes); - unsafe_impl!(Option: TryFromBytes, FromZeros, FromBytes, IntoBytes); - unsafe_impl!(Option: TryFromBytes, FromZeros, FromBytes, IntoBytes); - unsafe_impl!(Option: TryFromBytes, FromZeros, FromBytes, IntoBytes); - unsafe_impl!(Option: TryFromBytes, FromZeros, FromBytes, IntoBytes); -} - -safety_comment! { - /// SAFETY: - /// While it's not fully documented, the consensus is that `Box` does not - /// contain any `UnsafeCell`s for `T: Sized` [1]. - /// - /// [1] https://github.com/rust-lang/unsafe-code-guidelines/issues/492 - /// - /// TODO(#896): Write a more complete safety proof before the next stable - /// release. - #[cfg(feature = "alloc")] - unsafe_impl!( - #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] - T: Sized => Immutable for Box - ); -} - -safety_comment! { - /// SAFETY: - /// The following types can be transmuted from `[0u8; size_of::()]`. [1] - /// - /// [1] Per https://doc.rust-lang.org/nightly/core/option/index.html#representation: - /// - /// Rust guarantees to optimize the following types `T` such that - /// [`Option`] has the same size and alignment as `T`. 
In some of these - /// cases, Rust further guarantees that `transmute::<_, Option>([0u8; - /// size_of::()])` is sound and produces `Option::::None`. These - /// cases are identified by the second column: - /// - /// | `T` | `transmute::<_, Option>([0u8; size_of::()])` sound? | - /// |-----------------------|-----------------------------------------------------------| - /// | [`Box`] | when `U: Sized` | - /// | `&U` | when `U: Sized` | - /// | `&mut U` | when `U: Sized` | - /// | [`ptr::NonNull`] | when `U: Sized` | - /// | `fn`, `extern "C" fn` | always | - /// - /// TODO(#429), TODO(https://github.com/rust-lang/rust/pull/115333): Cite - /// the Stable docs once they're available. - #[cfg(feature = "alloc")] - unsafe_impl!( - #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] - T => TryFromBytes for Option>; - |c: Maybe>>| pointer::is_zeroed(c) - ); - #[cfg(feature = "alloc")] - unsafe_impl!( - #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))] - T => FromZeros for Option> - ); - unsafe_impl!( - T => TryFromBytes for Option<&'_ T>; - |c: Maybe>| pointer::is_zeroed(c) - ); - unsafe_impl!(T => FromZeros for Option<&'_ T>); - unsafe_impl!( - T => TryFromBytes for Option<&'_ mut T>; - |c: Maybe>| pointer::is_zeroed(c) - ); - unsafe_impl!(T => FromZeros for Option<&'_ mut T>); - unsafe_impl!( - T => TryFromBytes for Option>; - |c: Maybe>>| pointer::is_zeroed(c) - ); - unsafe_impl!(T => FromZeros for Option>); - unsafe_impl_for_power_set!(A, B, C, D, E, F, G, H, I, J, K, L -> M => FromZeros for opt_fn!(...)); - unsafe_impl_for_power_set!( - A, B, C, D, E, F, G, H, I, J, K, L -> M => TryFromBytes for opt_fn!(...); - |c: Maybe| pointer::is_zeroed(c) - ); - unsafe_impl_for_power_set!(A, B, C, D, E, F, G, H, I, J, K, L -> M => FromZeros for opt_extern_c_fn!(...)); - unsafe_impl_for_power_set!( - A, B, C, D, E, F, G, H, I, J, K, L -> M => TryFromBytes for opt_extern_c_fn!(...); - |c: Maybe| pointer::is_zeroed(c) - ); -} - -safety_comment! 
{ - /// SAFETY: - /// TODO(#896): Write this safety proof before the next stable release. - unsafe_impl_for_power_set!(A, B, C, D, E, F, G, H, I, J, K, L -> M => Immutable for opt_fn!(...)); - unsafe_impl_for_power_set!(A, B, C, D, E, F, G, H, I, J, K, L -> M => Immutable for opt_extern_c_fn!(...)); -} - -macro_rules! impl_traits_for_atomics { - ($($atomics:ident [$inners:ident]),* $(,)?) => { - $( - impl_for_transparent_wrapper!(TryFromBytes for $atomics [UnsafeCell<$inners>]); - impl_for_transparent_wrapper!(FromZeros for $atomics [UnsafeCell<$inners>]); - impl_for_transparent_wrapper!(FromBytes for $atomics [UnsafeCell<$inners>]); - impl_for_transparent_wrapper!(IntoBytes for $atomics [UnsafeCell<$inners>]); - )* - }; -} - -#[rustfmt::skip] -impl_traits_for_atomics!( - AtomicBool [bool], - AtomicI16 [i16], AtomicI32 [i32], AtomicI8 [i8], AtomicIsize [isize], - AtomicU16 [u16], AtomicU32 [u32], AtomicU8 [u8], AtomicUsize [usize], -); - -safety_comment! { - /// SAFETY: - /// Per [1], `AtomicBool`, `AtomicU8`, and `AtomicI8` have the same size as - /// `bool`, `u8`, and `i8` respectively. Since a type's alignment cannot be - /// smaller than 1 [2], and since its alignment cannot be greater than its - /// size [3], the only possible value for the alignment is 1. Thus, it is - /// sound to implement `Unaligned`. - /// - /// [1] TODO(#896), TODO(https://github.com/rust-lang/rust/pull/121943): - /// Cite docs once they've landed. - /// - /// [2] Per https://doc.rust-lang.org/reference/type-layout.html#size-and-alignment: - /// - /// Alignment is measured in bytes, and must be at least 1. - /// - /// [3] Per https://doc.rust-lang.org/reference/type-layout.html#size-and-alignment: - /// - /// The size of a value is always a multiple of its alignment. - unsafe_impl!(AtomicBool: Unaligned); - unsafe_impl!(AtomicU8: Unaligned); - unsafe_impl!(AtomicI8: Unaligned); - assert_unaligned!(AtomicBool, AtomicU8, AtomicI8); -} - -safety_comment! 
{ - /// SAFETY: - /// Per reference [1]: - /// "For all T, the following are guaranteed: - /// size_of::>() == 0 - /// align_of::>() == 1". - /// This gives: - /// - `Immutable`: `PhantomData` has no fields. - /// - `TryFromBytes` (with no validator), `FromZeros`, `FromBytes`: There is - /// only one possible sequence of 0 bytes, and `PhantomData` is inhabited. - /// - `IntoBytes`: Since `PhantomData` has size 0, it contains no padding - /// bytes. - /// - `Unaligned`: Per the preceding reference, `PhantomData` has alignment - /// 1. - /// - /// [1] https://doc.rust-lang.org/std/marker/struct.PhantomData.html#layout-1 - unsafe_impl!(T: ?Sized => Immutable for PhantomData); - unsafe_impl!(T: ?Sized => TryFromBytes for PhantomData); - unsafe_impl!(T: ?Sized => FromZeros for PhantomData); - unsafe_impl!(T: ?Sized => FromBytes for PhantomData); - unsafe_impl!(T: ?Sized => IntoBytes for PhantomData); - unsafe_impl!(T: ?Sized => Unaligned for PhantomData); - assert_unaligned!(PhantomData<()>, PhantomData, PhantomData); -} - -impl_for_transparent_wrapper!(T: Immutable => Immutable for Wrapping); -impl_for_transparent_wrapper!(T: TryFromBytes => TryFromBytes for Wrapping); -impl_for_transparent_wrapper!(T: FromZeros => FromZeros for Wrapping); -impl_for_transparent_wrapper!(T: FromBytes => FromBytes for Wrapping); -impl_for_transparent_wrapper!(T: IntoBytes => IntoBytes for Wrapping); -impl_for_transparent_wrapper!(T: Unaligned => Unaligned for Wrapping); -assert_unaligned!(Wrapping<()>, Wrapping); - -safety_comment! { - /// SAFETY: - /// `TryFromBytes` (with no validator), `FromZeros`, `FromBytes`: - /// `MaybeUninit` has no restrictions on its contents. 
- unsafe_impl!(T => TryFromBytes for MaybeUninit); - unsafe_impl!(T => FromZeros for MaybeUninit); - unsafe_impl!(T => FromBytes for MaybeUninit); -} - -impl_for_transparent_wrapper!(T: Immutable => Immutable for MaybeUninit); -impl_for_transparent_wrapper!(T: Unaligned => Unaligned for MaybeUninit); -assert_unaligned!(MaybeUninit<()>, MaybeUninit); - -impl_for_transparent_wrapper!(T: ?Sized + Immutable => Immutable for ManuallyDrop); -impl_for_transparent_wrapper!(T: ?Sized + TryFromBytes => TryFromBytes for ManuallyDrop); -impl_for_transparent_wrapper!(T: ?Sized + FromZeros => FromZeros for ManuallyDrop); -impl_for_transparent_wrapper!(T: ?Sized + FromBytes => FromBytes for ManuallyDrop); -impl_for_transparent_wrapper!(T: ?Sized + IntoBytes => IntoBytes for ManuallyDrop); -impl_for_transparent_wrapper!(T: ?Sized + Unaligned => Unaligned for ManuallyDrop); -assert_unaligned!(ManuallyDrop<()>, ManuallyDrop); - -// TODO(#5): Implement `FromZeros` and `FromBytes` when `T: ?Sized`. -impl_for_transparent_wrapper!(T: FromZeros => FromZeros for UnsafeCell); -impl_for_transparent_wrapper!(T: FromBytes => FromBytes for UnsafeCell); -impl_for_transparent_wrapper!(T: ?Sized + IntoBytes => IntoBytes for UnsafeCell); -impl_for_transparent_wrapper!(T: ?Sized + Unaligned => Unaligned for UnsafeCell); -assert_unaligned!(UnsafeCell<()>, UnsafeCell); - -// SAFETY: See safety comment in `is_bit_valid` impl. -// -// TODO(#5): Try to add `T: ?Sized` bound. -unsafe impl TryFromBytes for UnsafeCell { - #[allow(clippy::missing_inline_in_public_items)] - fn only_derive_is_allowed_to_implement_this_trait() - where - Self: Sized, - { - } - - #[inline] - fn is_bit_valid>( - candidate: Maybe<'_, Self, A>, - ) -> bool { - // The only way to implement this function is using an exclusive-aliased - // pointer. 
`UnsafeCell`s cannot be read via shared-aliased pointers - // (other than by using `unsafe` code, which we can't use since we can't - // guarantee how our users are accessing or modifying the `UnsafeCell`). - // - // `is_bit_valid` is documented as panicking or failing to monomorphize - // if called with a shared-aliased pointer on a type containing an - // `UnsafeCell`. In practice, it will always be a monorphization error. - // Since `is_bit_valid` is `#[doc(hidden)]` and only called directly - // from this crate, we only need to worry about our own code incorrectly - // calling `UnsafeCell::is_bit_valid`. The post-monomorphization error - // makes it easier to test that this is truly the case, and also means - // that if we make a mistake, it will cause downstream code to fail to - // compile, which will immediately surface the mistake and give us a - // chance to fix it quickly. - let c = candidate.into_exclusive_or_post_monomorphization_error(); - - // We wrap in `Unalign` here so that we can get a vanilla Rust reference - // below, which in turn allows us to call `UnsafeCell::get_mut`. - // - // SAFETY: - // - `.cast` preserves address. `Unalign` and `MaybeUninit` both have - // the same size as the types they wrap [1]. Thus, this cast will - // preserve the size of the pointer. As a result, the cast will - // address the same bytes as `c`. - // - `.cast` preserves provenance. - // - Since both the source and destination types are wrapped in - // `UnsafeCell`, all bytes of both types are inside of `UnsafeCell`s, - // and so the byte ranges covered by `UnsafeCell`s are identical in - // both types. Since the pointers refer to the same byte ranges, - // the same is true of the pointers' referents as well. - // - // [1] Per https://doc.rust-lang.org/stable/core/mem/union.MaybeUninit.html#layout-1: - // - // MaybeUninit is guaranteed to have the same size, alignment, and - // ABI as T. 
- let c = unsafe { - c.cast_unsized(|c: *mut UnsafeCell| c.cast::>>>()) - }; - // SAFETY: `MaybeUninit` has no validity requirements. - let c = unsafe { c.assume_valid() }; - let c = c.bikeshed_recall_aligned(); - // This is the crucial step at which we use `UnsafeCell::get_mut` to go - // from `UnsafeCell` to `U` (where `U = Unalign>`). - // Now that we've gotten rid of the `UnsafeCell`, we can delegate to - // `T::is_bit_valid`. - let c: &mut Unalign> = c.as_mut().get_mut(); - // This converts from an aligned `Unalign>` pointer to an - // unaligned `MaybeUninit` pointer. - let c: Ptr<'_, MaybeUninit, _> = Ptr::from_mut(c).transparent_wrapper_into_inner(); - let c: Ptr<'_, T, _> = c.transparent_wrapper_into_inner(); - - // SAFETY: The original `candidate` argument has `Initialized` validity. - // None of the subsequent operations modify the memory itself, and so - // that guarantee is still upheld. - let c = unsafe { c.assume_initialized() }; - // Confirm that `Maybe` is a type alias for `Ptr` with the validity - // invariant `Initialized`. Our safety proof depends upon this - // invariant, and it might change at some point. If that happens, we - // want this function to stop compiling. - let _: Ptr<'_, UnsafeCell, (_, _, invariant::Initialized)> = candidate; - - // SAFETY: Since `UnsafeCell` and `T` have the same layout and bit - // validity, `UnsafeCell` is bit-valid exactly when its wrapped `T` - // is. Thus, this is a sound implementation of - // `UnsafeCell::is_bit_valid`. - T::is_bit_valid(c.forget_exclusive()) - } -} - -safety_comment! { - /// SAFETY: - /// Per the reference [1]: - /// - /// An array of `[T; N]` has a size of `size_of::() * N` and the same - /// alignment of `T`. Arrays are laid out so that the zero-based `nth` - /// element of the array is offset from the start of the array by `n * - /// size_of::()` bytes. - /// - /// ... - /// - /// Slices have the same layout as the section of the array they slice. 
- /// - /// In other words, the layout of a `[T]` or `[T; N]` is a sequence of `T`s - /// laid out back-to-back with no bytes in between. Therefore, `[T]` or `[T; - /// N]` are `Immutable`, `TryFromBytes`, `FromZeros`, `FromBytes`, and - /// `IntoBytes` if `T` is (respectively). Furthermore, since an array/slice - /// has "the same alignment of `T`", `[T]` and `[T; N]` are `Unaligned` if - /// `T` is. - /// - /// Note that we don't `assert_unaligned!` for slice types because - /// `assert_unaligned!` uses `align_of`, which only works for `Sized` types. - /// - /// [1] https://doc.rust-lang.org/reference/type-layout.html#array-layout - unsafe_impl!(const N: usize, T: Immutable => Immutable for [T; N]); - unsafe_impl!(const N: usize, T: TryFromBytes => TryFromBytes for [T; N]; |c: Maybe<[T; N]>| { - // Note that this call may panic, but it would still be sound even if it - // did. `is_bit_valid` does not promise that it will not panic (in fact, - // it explicitly warns that it's a possibility), and we have not - // violated any safety invariants that we must fix before returning. - <[T] as TryFromBytes>::is_bit_valid(c.as_slice()) - }); - unsafe_impl!(const N: usize, T: FromZeros => FromZeros for [T; N]); - unsafe_impl!(const N: usize, T: FromBytes => FromBytes for [T; N]); - unsafe_impl!(const N: usize, T: IntoBytes => IntoBytes for [T; N]); - unsafe_impl!(const N: usize, T: Unaligned => Unaligned for [T; N]); - assert_unaligned!([(); 0], [(); 1], [u8; 0], [u8; 1]); - unsafe_impl!(T: Immutable => Immutable for [T]); - unsafe_impl!(T: TryFromBytes => TryFromBytes for [T]; |c: Maybe<[T]>| { - // SAFETY: Per the reference [1]: - // - // An array of `[T; N]` has a size of `size_of::() * N` and the - // same alignment of `T`. Arrays are laid out so that the zero-based - // `nth` element of the array is offset from the start of the array by - // `n * size_of::()` bytes. - // - // ... - // - // Slices have the same layout as the section of the array they slice. 
- // - // In other words, the layout of a `[T] is a sequence of `T`s laid out - // back-to-back with no bytes in between. If all elements in `candidate` - // are `is_bit_valid`, so too is `candidate`. - // - // Note that any of the below calls may panic, but it would still be - // sound even if it did. `is_bit_valid` does not promise that it will - // not panic (in fact, it explicitly warns that it's a possibility), and - // we have not violated any safety invariants that we must fix before - // returning. - c.iter().all(::is_bit_valid) - }); - unsafe_impl!(T: FromZeros => FromZeros for [T]); - unsafe_impl!(T: FromBytes => FromBytes for [T]); - unsafe_impl!(T: IntoBytes => IntoBytes for [T]); - unsafe_impl!(T: Unaligned => Unaligned for [T]); -} -safety_comment! { - /// SAFETY: - /// - `Immutable`: Raw pointers do not contain any `UnsafeCell`s. - /// - `FromZeros`: For thin pointers (note that `T: Sized`), the zero - /// pointer is considered "null". [1] No operations which require - /// provenance are legal on null pointers, so this is not a footgun. - /// - `TryFromBytes`: By the same reasoning as for `FromZeroes`, we can - /// implement `TryFromBytes` for thin pointers provided that - /// [`TryFromByte::is_bit_valid`] only produces `true` for zeroed bytes. - /// - /// NOTE(#170): Implementing `FromBytes` and `IntoBytes` for raw pointers - /// would be sound, but carries provenance footguns. We want to support - /// `FromBytes` and `IntoBytes` for raw pointers eventually, but we are - /// holding off until we can figure out how to address those footguns. - /// - /// [1] TODO(https://github.com/rust-lang/rust/pull/116988): Cite the - /// documentation once this PR lands. 
- unsafe_impl!(T: ?Sized => Immutable for *const T); - unsafe_impl!(T: ?Sized => Immutable for *mut T); - unsafe_impl!(T => TryFromBytes for *const T; |c: Maybe<*const T>| { - pointer::is_zeroed(c) - }); - unsafe_impl!(T => FromZeros for *const T); - unsafe_impl!(T => TryFromBytes for *mut T; |c: Maybe<*const T>| { - pointer::is_zeroed(c) - }); - unsafe_impl!(T => FromZeros for *mut T); -} - -safety_comment! { - /// SAFETY: - /// - /// TODO(#896): Write this safety proof before the next stable release. - unsafe_impl!(T: ?Sized => Immutable for NonNull); -} - -safety_comment! { - /// SAFETY: - /// Reference types do not contain any `UnsafeCell`s. - unsafe_impl!(T: ?Sized => Immutable for &'_ T); - unsafe_impl!(T: ?Sized => Immutable for &'_ mut T); -} - -safety_comment! { - /// SAFETY: - /// `Option` is not `#[non_exhaustive]` [1], which means that the types in - /// its variants cannot change, and no new variants can be added. - /// `Option` does not contain any `UnsafeCell`s outside of `T`. [1] - /// - /// [1] https://doc.rust-lang.org/core/option/enum.Option.html - unsafe_impl!(T: Immutable => Immutable for Option); -} - -// SIMD support -// -// Per the Unsafe Code Guidelines Reference [1]: -// -// Packed SIMD vector types are `repr(simd)` homogeneous tuple-structs -// containing `N` elements of type `T` where `N` is a power-of-two and the -// size and alignment requirements of `T` are equal: -// -// ```rust -// #[repr(simd)] -// struct Vector(T_0, ..., T_(N - 1)); -// ``` -// -// ... -// -// The size of `Vector` is `N * size_of::()` and its alignment is an -// implementation-defined function of `T` and `N` greater than or equal to -// `align_of::()`. -// -// ... 
-// -// Vector elements are laid out in source field order, enabling random access -// to vector elements by reinterpreting the vector as an array: -// -// ```rust -// union U { -// vec: Vector, -// arr: [T; N] -// } -// -// assert_eq!(size_of::>(), size_of::<[T; N]>()); -// assert!(align_of::>() >= align_of::<[T; N]>()); -// -// unsafe { -// let u = U { vec: Vector(t_0, ..., t_(N - 1)) }; -// -// assert_eq!(u.vec.0, u.arr[0]); -// // ... -// assert_eq!(u.vec.(N - 1), u.arr[N - 1]); -// } -// ``` -// -// Given this background, we can observe that: -// - The size and bit pattern requirements of a SIMD type are equivalent to the -// equivalent array type. Thus, for any SIMD type whose primitive `T` is -// `Immutable`, `TryFromBytes`, `FromZeros`, `FromBytes`, or `IntoBytes`, that -// SIMD type is also `Immutable`, `TryFromBytes`, `FromZeros`, `FromBytes`, or -// `IntoBytes` respectively. -// - Since no upper bound is placed on the alignment, no SIMD type can be -// guaranteed to be `Unaligned`. -// -// Also per [1]: -// -// This chapter represents the consensus from issue #38. The statements in -// here are not (yet) "guaranteed" not to change until an RFC ratifies them. -// -// See issue #38 [2]. While this behavior is not technically guaranteed, the -// likelihood that the behavior will change such that SIMD types are no longer -// `TryFromBytes`, `FromZeros`, `FromBytes`, or `IntoBytes` is next to zero, as -// that would defeat the entire purpose of SIMD types. Nonetheless, we put this -// behavior behind the `simd` Cargo feature, which requires consumers to opt -// into this stability hazard. 
-// -// [1] https://rust-lang.github.io/unsafe-code-guidelines/layout/packed-simd-vectors.html -// [2] https://github.com/rust-lang/unsafe-code-guidelines/issues/38 -#[cfg(feature = "simd")] -#[cfg_attr(doc_cfg, doc(cfg(feature = "simd")))] -mod simd { - /// Defines a module which implements `TryFromBytes`, `FromZeros`, - /// `FromBytes`, and `IntoBytes` for a set of types from a module in - /// `core::arch`. - /// - /// `$arch` is both the name of the defined module and the name of the - /// module in `core::arch`, and `$typ` is the list of items from that module - /// to implement `FromZeros`, `FromBytes`, and `IntoBytes` for. - #[allow(unused_macros)] // `allow(unused_macros)` is needed because some - // target/feature combinations don't emit any impls - // and thus don't use this macro. - macro_rules! simd_arch_mod { - (#[cfg $cfg:tt] $arch:ident, $mod:ident, $($typ:ident),*) => { - #[cfg $cfg] - #[cfg_attr(doc_cfg, doc(cfg $cfg))] - mod $mod { - use core::arch::$arch::{$($typ),*}; - - use crate::*; - impl_known_layout!($($typ),*); - safety_comment! { - /// SAFETY: - /// See comment on module definition for justification. 
- $( unsafe_impl!($typ: Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes); )* - } - } - }; - } - - #[rustfmt::skip] - const _: () = { - simd_arch_mod!( - #[cfg(target_arch = "x86")] - x86, x86, __m128, __m128d, __m128i, __m256, __m256d, __m256i - ); - simd_arch_mod!( - #[cfg(all(feature = "simd-nightly", target_arch = "x86"))] - x86, x86_nightly, __m512bh, __m512, __m512d, __m512i - ); - simd_arch_mod!( - #[cfg(target_arch = "x86_64")] - x86_64, x86_64, __m128, __m128d, __m128i, __m256, __m256d, __m256i - ); - simd_arch_mod!( - #[cfg(all(feature = "simd-nightly", target_arch = "x86_64"))] - x86_64, x86_64_nightly, __m512bh, __m512, __m512d, __m512i - ); - simd_arch_mod!( - #[cfg(target_arch = "wasm32")] - wasm32, wasm32, v128 - ); - simd_arch_mod!( - #[cfg(all(feature = "simd-nightly", target_arch = "powerpc"))] - powerpc, powerpc, vector_bool_long, vector_double, vector_signed_long, vector_unsigned_long - ); - simd_arch_mod!( - #[cfg(all(feature = "simd-nightly", target_arch = "powerpc64"))] - powerpc64, powerpc64, vector_bool_long, vector_double, vector_signed_long, vector_unsigned_long - ); - #[cfg(zerocopy_aarch64_simd)] - simd_arch_mod!( - #[cfg(target_arch = "aarch64")] - aarch64, aarch64, float32x2_t, float32x4_t, float64x1_t, float64x2_t, int8x8_t, int8x8x2_t, - int8x8x3_t, int8x8x4_t, int8x16_t, int8x16x2_t, int8x16x3_t, int8x16x4_t, int16x4_t, - int16x8_t, int32x2_t, int32x4_t, int64x1_t, int64x2_t, poly8x8_t, poly8x8x2_t, poly8x8x3_t, - poly8x8x4_t, poly8x16_t, poly8x16x2_t, poly8x16x3_t, poly8x16x4_t, poly16x4_t, poly16x8_t, - poly64x1_t, poly64x2_t, uint8x8_t, uint8x8x2_t, uint8x8x3_t, uint8x8x4_t, uint8x16_t, - uint8x16x2_t, uint8x16x3_t, uint8x16x4_t, uint16x4_t, uint16x8_t, uint32x2_t, uint32x4_t, - uint64x1_t, uint64x2_t - ); - simd_arch_mod!( - #[cfg(all(feature = "simd-nightly", target_arch = "arm"))] - arm, arm, int8x4_t, uint8x4_t - ); - }; -} - /// Safely transmutes a value of one type to a value of another type of the same /// size. 
/// @@ -6736,979 +5855,6 @@ mod tests { assert_impl_all!(Bar: FromZeros, FromBytes, IntoBytes, Unaligned); } - - #[test] - fn test_impls() { - // A type that can supply test cases for testing - // `TryFromBytes::is_bit_valid`. All types passed to `assert_impls!` - // must implement this trait; that macro uses it to generate runtime - // tests for `TryFromBytes` impls. - // - // All `T: FromBytes` types are provided with a blanket impl. Other - // types must implement `TryFromBytesTestable` directly (ie using - // `impl_try_from_bytes_testable!`). - trait TryFromBytesTestable { - fn with_passing_test_cases)>(f: F); - fn with_failing_test_cases(f: F); - } - - impl TryFromBytesTestable for T { - fn with_passing_test_cases)>(f: F) { - // Test with a zeroed value. - f(Self::new_box_zeroed()); - - let ffs = { - let mut t = Self::new_zeroed(); - let ptr: *mut T = &mut t; - // SAFETY: `T: FromBytes` - unsafe { ptr::write_bytes(ptr.cast::(), 0xFF, mem::size_of::()) }; - t - }; - - // Test with a value initialized with 0xFF. - f(Box::new(ffs)); - } - - fn with_failing_test_cases(_f: F) {} - } - - macro_rules! impl_try_from_bytes_testable_for_null_pointer_optimization { - ($($tys:ty),*) => { - $( - impl TryFromBytesTestable for Option<$tys> { - fn with_passing_test_cases)>(f: F) { - // Test with a zeroed value. - f(Box::new(None)); - } - - fn with_failing_test_cases(f: F) { - for pos in 0..mem::size_of::() { - let mut bytes = [0u8; mem::size_of::()]; - bytes[pos] = 0x01; - f(&mut bytes[..]); - } - } - } - )* - }; - } - - // Implements `TryFromBytesTestable`. - macro_rules! impl_try_from_bytes_testable { - // Base case for recursion (when the list of types has run out). - (=> @success $($success_case:expr),* $(, @failure $($failure_case:expr),*)?) => {}; - // Implements for type(s) with no type parameters. - ($ty:ty $(,$tys:ty)* => @success $($success_case:expr),* $(, @failure $($failure_case:expr),*)?) 
=> { - impl TryFromBytesTestable for $ty { - impl_try_from_bytes_testable!( - @methods @success $($success_case),* - $(, @failure $($failure_case),*)? - ); - } - impl_try_from_bytes_testable!($($tys),* => @success $($success_case),* $(, @failure $($failure_case),*)?); - }; - // Implements for multiple types with no type parameters. - ($($($ty:ty),* => @success $($success_case:expr), * $(, @failure $($failure_case:expr),*)?;)*) => { - $( - impl_try_from_bytes_testable!($($ty),* => @success $($success_case),* $(, @failure $($failure_case),*)*); - )* - }; - // Implements only the methods; caller must invoke this from inside - // an impl block. - (@methods @success $($success_case:expr),* $(, @failure $($failure_case:expr),*)?) => { - fn with_passing_test_cases)>(_f: F) { - $( - _f(Box::::from($success_case));//.borrow()); - )* - } - - fn with_failing_test_cases(_f: F) { - $($( - // `unused_qualifications` is spuriously triggered on - // `Option::::None`. - #[allow(unused_qualifications)] - let mut case = $failure_case;//.as_mut_bytes(); - _f(case.as_mut_bytes()); - )*)? - } - }; - } - - impl_try_from_bytes_testable_for_null_pointer_optimization!( - Box>, - &'static UnsafeCell, - &'static mut UnsafeCell, - NonNull>, - fn(), - FnManyArgs, - extern "C" fn(), - ECFnManyArgs - ); - - macro_rules! bx { - ($e:expr) => { - Box::new($e) - }; - } - - // Note that these impls are only for types which are not `FromBytes`. - // `FromBytes` types are covered by a preceding blanket impl. 
- impl_try_from_bytes_testable!( - bool => @success true, false, - @failure 2u8, 3u8, 0xFFu8; - char => @success '\u{0}', '\u{D7FF}', '\u{E000}', '\u{10FFFF}', - @failure 0xD800u32, 0xDFFFu32, 0x110000u32; - str => @success "", "hello", "โค๏ธ๐Ÿงก๐Ÿ’›๐Ÿ’š๐Ÿ’™๐Ÿ’œ", - @failure [0, 159, 146, 150]; - [u8] => @success vec![].into_boxed_slice(), vec![0, 1, 2].into_boxed_slice(); - NonZeroU8, NonZeroI8, NonZeroU16, NonZeroI16, NonZeroU32, - NonZeroI32, NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, - NonZeroUsize, NonZeroIsize - => @success Self::new(1).unwrap(), - // Doing this instead of `0` ensures that we always satisfy - // the size and alignment requirements of `Self` (whereas `0` - // may be any integer type with a different size or alignment - // than some `NonZeroXxx` types). - @failure Option::::None; - [bool; 0] => @success []; - [bool; 1] - => @success [true], [false], - @failure [2u8], [3u8], [0xFFu8]; - [bool] - => @success vec![true, false].into_boxed_slice(), vec![false, true].into_boxed_slice(), - @failure [2u8], [3u8], [0xFFu8], [0u8, 1u8, 2u8]; - Unalign - => @success Unalign::new(false), Unalign::new(true), - @failure 2u8, 0xFFu8; - ManuallyDrop - => @success ManuallyDrop::new(false), ManuallyDrop::new(true), - @failure 2u8, 0xFFu8; - ManuallyDrop<[u8]> - => @success bx!(ManuallyDrop::new([])), bx!(ManuallyDrop::new([0u8])), bx!(ManuallyDrop::new([0u8, 1u8])); - ManuallyDrop<[bool]> - => @success bx!(ManuallyDrop::new([])), bx!(ManuallyDrop::new([false])), bx!(ManuallyDrop::new([false, true])), - @failure [2u8], [3u8], [0xFFu8], [0u8, 1u8, 2u8]; - ManuallyDrop<[UnsafeCell]> - => @success bx!(ManuallyDrop::new([UnsafeCell::new(0)])), bx!(ManuallyDrop::new([UnsafeCell::new(0), UnsafeCell::new(1)])); - ManuallyDrop<[UnsafeCell]> - => @success bx!(ManuallyDrop::new([UnsafeCell::new(false)])), bx!(ManuallyDrop::new([UnsafeCell::new(false), UnsafeCell::new(true)])), - @failure [2u8], [3u8], [0xFFu8], [0u8, 1u8, 2u8]; - Wrapping - => @success 
Wrapping(false), Wrapping(true), - @failure 2u8, 0xFFu8; - *const NotZerocopy - => @success ptr::null::(), - @failure [0x01; mem::size_of::<*const NotZerocopy>()]; - *mut NotZerocopy - => @success ptr::null_mut::(), - @failure [0x01; mem::size_of::<*mut NotZerocopy>()]; - ); - - // Use the trick described in [1] to allow us to call methods - // conditional on certain trait bounds. - // - // In all of these cases, methods return `Option`, where `R` is the - // return type of the method we're conditionally calling. The "real" - // implementations (the ones defined in traits using `&self`) return - // `Some`, and the default implementations (the ones defined as inherent - // methods using `&mut self`) return `None`. - // - // [1] https://github.com/dtolnay/case-studies/blob/master/autoref-specialization/README.md - mod autoref_trick { - use super::*; - - pub(super) struct AutorefWrapper(pub(super) PhantomData); - - pub(super) trait TestIsBitValidShared { - #[allow(clippy::needless_lifetimes)] - fn test_is_bit_valid_shared< - 'ptr, - A: invariant::Aliasing + invariant::AtLeast, - >( - &self, - candidate: Maybe<'ptr, T, A>, - ) -> Option; - } - - impl TestIsBitValidShared for AutorefWrapper { - #[allow(clippy::needless_lifetimes)] - fn test_is_bit_valid_shared< - 'ptr, - A: invariant::Aliasing + invariant::AtLeast, - >( - &self, - candidate: Maybe<'ptr, T, A>, - ) -> Option { - Some(T::is_bit_valid(candidate)) - } - } - - pub(super) trait TestTryFromRef { - #[allow(clippy::needless_lifetimes)] - fn test_try_from_ref<'bytes>( - &self, - bytes: &'bytes [u8], - ) -> Option>; - - #[allow(clippy::needless_lifetimes)] - fn test_try_from_mut<'bytes>( - &self, - bytes: &'bytes mut [u8], - ) -> Option>; - } - - impl TestTryFromRef for AutorefWrapper { - #[allow(clippy::needless_lifetimes)] - fn test_try_from_ref<'bytes>( - &self, - bytes: &'bytes [u8], - ) -> Option> { - Some(T::try_ref_from(bytes).ok()) - } - - #[allow(clippy::needless_lifetimes)] - fn 
test_try_from_mut<'bytes>( - &self, - bytes: &'bytes mut [u8], - ) -> Option> { - Some(T::try_mut_from(bytes).ok()) - } - } - - pub(super) trait TestTryReadFrom { - fn test_try_read_from(&self, bytes: &[u8]) -> Option>; - } - - impl TestTryReadFrom for AutorefWrapper { - fn test_try_read_from(&self, bytes: &[u8]) -> Option> { - Some(T::try_read_from(bytes).ok()) - } - } - - pub(super) trait TestAsBytes { - #[allow(clippy::needless_lifetimes)] - fn test_as_bytes<'slf, 't>(&'slf self, t: &'t T) -> Option<&'t [u8]>; - } - - impl TestAsBytes for AutorefWrapper { - #[allow(clippy::needless_lifetimes)] - fn test_as_bytes<'slf, 't>(&'slf self, t: &'t T) -> Option<&'t [u8]> { - Some(t.as_bytes()) - } - } - } - - use autoref_trick::*; - - // Asserts that `$ty` is one of a list of types which are allowed to not - // provide a "real" implementation for `$fn_name`. Since the - // `autoref_trick` machinery fails silently, this allows us to ensure - // that the "default" impls are only being used for types which we - // expect. - // - // Note that, since this is a runtime test, it is possible to have an - // allowlist which is too restrictive if the function in question is - // never called for a particular type. For example, if `as_bytes` is not - // supported for a particular type, and so `test_as_bytes` returns - // `None`, methods such as `test_try_from_ref` may never be called for - // that type. As a result, it's possible that, for example, adding - // `as_bytes` support for a type would cause other allowlist assertions - // to fail. This means that allowlist assertion failures should not - // automatically be taken as a sign of a bug. - macro_rules! assert_on_allowlist { - ($fn_name:ident($ty:ty) $(: $($tys:ty),*)?) => {{ - use core::any::TypeId; - - let allowlist: &[TypeId] = &[ $($(TypeId::of::<$tys>()),*)? ]; - let allowlist_names: &[&str] = &[ $($(stringify!($tys)),*)? 
]; - - let id = TypeId::of::<$ty>(); - assert!(allowlist.contains(&id), "{} is not on allowlist for {}: {:?}", stringify!($ty), stringify!($fn_name), allowlist_names); - }}; - } - - // Asserts that `$ty` implements any `$trait` and doesn't implement any - // `!$trait`. Note that all `$trait`s must come before any `!$trait`s. - // - // For `T: TryFromBytes`, uses `TryFromBytesTestable` to test success - // and failure cases. - macro_rules! assert_impls { - ($ty:ty: TryFromBytes) => { - // "Default" implementations that match the "real" - // implementations defined in the `autoref_trick` module above. - #[allow(unused, non_local_definitions)] - impl AutorefWrapper<$ty> { - #[allow(clippy::needless_lifetimes)] - fn test_is_bit_valid_shared<'ptr, A: invariant::Aliasing + invariant::AtLeast>( - &mut self, - candidate: Maybe<'ptr, $ty, A>, - ) -> Option { - assert_on_allowlist!( - test_is_bit_valid_shared($ty): - ManuallyDrop>, - ManuallyDrop<[UnsafeCell]>, - ManuallyDrop<[UnsafeCell]>, - MaybeUninit, - MaybeUninit>, - Wrapping> - ); - - None - } - - #[allow(clippy::needless_lifetimes)] - fn test_try_from_ref<'bytes>(&mut self, _bytes: &'bytes [u8]) -> Option> { - assert_on_allowlist!( - test_try_from_ref($ty): - ManuallyDrop<[UnsafeCell]> - ); - - None - } - - #[allow(clippy::needless_lifetimes)] - fn test_try_from_mut<'bytes>(&mut self, _bytes: &'bytes mut [u8]) -> Option> { - assert_on_allowlist!( - test_try_from_mut($ty): - ManuallyDrop<[UnsafeCell]> - ); - - None - } - - fn test_try_read_from(&mut self, _bytes: &[u8]) -> Option> { - assert_on_allowlist!( - test_try_read_from($ty): - str, - ManuallyDrop<[u8]>, - ManuallyDrop<[bool]>, - ManuallyDrop<[UnsafeCell]>, - [u8], - [bool] - ); - - None - } - - fn test_as_bytes(&mut self, _t: &$ty) -> Option<&[u8]> { - assert_on_allowlist!( - test_as_bytes($ty): - Option<&'static UnsafeCell>, - Option<&'static mut UnsafeCell>, - Option>>, - Option>>, - Option, - Option, - Option, - Option, - MaybeUninit, - MaybeUninit, - 
MaybeUninit>, - ManuallyDrop>, - ManuallyDrop<[UnsafeCell]>, - ManuallyDrop<[UnsafeCell]>, - Wrapping>, - *const NotZerocopy, - *mut NotZerocopy - ); - - None - } - } - - <$ty as TryFromBytesTestable>::with_passing_test_cases(|mut val| { - // TODO(#494): These tests only get exercised for types - // which are `IntoBytes`. Once we implement #494, we should - // be able to support non-`IntoBytes` types by zeroing - // padding. - - // We define `w` and `ww` since, in the case of the inherent - // methods, Rust thinks they're both borrowed mutably at the - // same time (given how we use them below). If we just - // defined a single `w` and used it for multiple operations, - // this would conflict. - // - // We `#[allow(unused_mut]` for the cases where the "real" - // impls are used, which take `&self`. - #[allow(unused_mut)] - let (mut w, mut ww) = (AutorefWrapper::<$ty>(PhantomData), AutorefWrapper::<$ty>(PhantomData)); - - let c = Ptr::from_ref(&*val); - let c = c.forget_aligned(); - // SAFETY: TODO(#899): This is unsound. `$ty` is not - // necessarily `IntoBytes`, but that's the corner we've - // backed ourselves into by using `Ptr::from_ref`. - let c = unsafe { c.assume_initialized() }; - let res = w.test_is_bit_valid_shared(c); - if let Some(res) = res { - assert!(res, "{}::is_bit_valid({:?}) (shared `Ptr`): got false, expected true", stringify!($ty), val); - } - - let c = Ptr::from_mut(&mut *val); - let c = c.forget_aligned(); - // SAFETY: TODO(#899): This is unsound. `$ty` is not - // necessarily `IntoBytes`, but that's the corner we've - // backed ourselves into by using `Ptr::from_ref`. - let c = unsafe { c.assume_initialized() }; - let res = <$ty as TryFromBytes>::is_bit_valid(c); - assert!(res, "{}::is_bit_valid({:?}) (exclusive `Ptr`): got false, expected true", stringify!($ty), val); - - // `bytes` is `Some(val.as_bytes())` if `$ty: IntoBytes + - // Immutable` and `None` otherwise. 
- let bytes = w.test_as_bytes(&*val); - - // The inner closure returns - // `Some($ty::try_ref_from(bytes))` if `$ty: Immutable` and - // `None` otherwise. - let res = bytes.and_then(|bytes| ww.test_try_from_ref(bytes)); - if let Some(res) = res { - assert!(res.is_some(), "{}::try_ref_from({:?}): got `None`, expected `Some`", stringify!($ty), val); - } - - if let Some(bytes) = bytes { - // We need to get a mutable byte slice, and so we clone - // into a `Vec`. However, we also need these bytes to - // satisfy `$ty`'s alignment requirement, which isn't - // guaranteed for `Vec`. In order to get around - // this, we create a `Vec` which is twice as long as we - // need. There is guaranteed to be an aligned byte range - // of size `size_of_val(val)` within that range. - let val = &*val; - let size = mem::size_of_val(val); - let align = mem::align_of_val(val); - - let mut vec = bytes.to_vec(); - vec.extend(bytes); - let slc = vec.as_slice(); - let offset = slc.as_ptr().align_offset(align); - let bytes_mut = &mut vec.as_mut_slice()[offset..offset+size]; - bytes_mut.copy_from_slice(bytes); - - let res = ww.test_try_from_mut(bytes_mut); - if let Some(res) = res { - assert!(res.is_some(), "{}::try_mut_from({:?}): got `None`, expected `Some`", stringify!($ty), val); - } - } - - let res = bytes.and_then(|bytes| ww.test_try_read_from(bytes)); - if let Some(res) = res { - assert!(res.is_some(), "{}::try_read_from({:?}): got `None`, expected `Some`", stringify!($ty), val); - } - }); - #[allow(clippy::as_conversions)] - <$ty as TryFromBytesTestable>::with_failing_test_cases(|c| { - #[allow(unused_mut)] // For cases where the "real" impls are used, which take `&self`. - let mut w = AutorefWrapper::<$ty>(PhantomData); - - // This is `Some($ty::try_ref_from(c))` if `$ty: Immutable` and - // `None` otherwise. 
- let res = w.test_try_from_ref(c); - if let Some(res) = res { - assert!(res.is_none(), "{}::try_ref_from({:?}): got Some, expected None", stringify!($ty), c); - } - - let res = w.test_try_from_mut(c); - if let Some(res) = res { - assert!(res.is_none(), "{}::try_mut_from({:?}): got Some, expected None", stringify!($ty), c); - } - - let res = w.test_try_read_from(c); - if let Some(res) = res { - assert!(res.is_none(), "{}::try_read_from({:?}): got Some, expected None", stringify!($ty), c); - } - }); - - #[allow(dead_code)] - const _: () = { static_assertions::assert_impl_all!($ty: TryFromBytes); }; - }; - ($ty:ty: $trait:ident) => { - #[allow(dead_code)] - const _: () = { static_assertions::assert_impl_all!($ty: $trait); }; - }; - ($ty:ty: !$trait:ident) => { - #[allow(dead_code)] - const _: () = { static_assertions::assert_not_impl_any!($ty: $trait); }; - }; - ($ty:ty: $($trait:ident),* $(,)? $(!$negative_trait:ident),*) => { - $( - assert_impls!($ty: $trait); - )* - - $( - assert_impls!($ty: !$negative_trait); - )* - }; - } - - // NOTE: The negative impl assertions here are not necessarily - // prescriptive. They merely serve as change detectors to make sure - // we're aware of what trait impls are getting added with a given - // change. Of course, some impls would be invalid (e.g., `bool: - // FromBytes`), and so this change detection is very important. 
- - assert_impls!( - (): KnownLayout, - Immutable, - TryFromBytes, - FromZeros, - FromBytes, - IntoBytes, - Unaligned - ); - assert_impls!( - u8: KnownLayout, - Immutable, - TryFromBytes, - FromZeros, - FromBytes, - IntoBytes, - Unaligned - ); - assert_impls!( - i8: KnownLayout, - Immutable, - TryFromBytes, - FromZeros, - FromBytes, - IntoBytes, - Unaligned - ); - assert_impls!( - u16: KnownLayout, - Immutable, - TryFromBytes, - FromZeros, - FromBytes, - IntoBytes, - !Unaligned - ); - assert_impls!( - i16: KnownLayout, - Immutable, - TryFromBytes, - FromZeros, - FromBytes, - IntoBytes, - !Unaligned - ); - assert_impls!( - u32: KnownLayout, - Immutable, - TryFromBytes, - FromZeros, - FromBytes, - IntoBytes, - !Unaligned - ); - assert_impls!( - i32: KnownLayout, - Immutable, - TryFromBytes, - FromZeros, - FromBytes, - IntoBytes, - !Unaligned - ); - assert_impls!( - u64: KnownLayout, - Immutable, - TryFromBytes, - FromZeros, - FromBytes, - IntoBytes, - !Unaligned - ); - assert_impls!( - i64: KnownLayout, - Immutable, - TryFromBytes, - FromZeros, - FromBytes, - IntoBytes, - !Unaligned - ); - assert_impls!( - u128: KnownLayout, - Immutable, - TryFromBytes, - FromZeros, - FromBytes, - IntoBytes, - !Unaligned - ); - assert_impls!( - i128: KnownLayout, - Immutable, - TryFromBytes, - FromZeros, - FromBytes, - IntoBytes, - !Unaligned - ); - assert_impls!( - usize: KnownLayout, - Immutable, - TryFromBytes, - FromZeros, - FromBytes, - IntoBytes, - !Unaligned - ); - assert_impls!( - isize: KnownLayout, - Immutable, - TryFromBytes, - FromZeros, - FromBytes, - IntoBytes, - !Unaligned - ); - assert_impls!( - f32: KnownLayout, - Immutable, - TryFromBytes, - FromZeros, - FromBytes, - IntoBytes, - !Unaligned - ); - assert_impls!( - f64: KnownLayout, - Immutable, - TryFromBytes, - FromZeros, - FromBytes, - IntoBytes, - !Unaligned - ); - - assert_impls!( - bool: KnownLayout, - Immutable, - TryFromBytes, - FromZeros, - IntoBytes, - Unaligned, - !FromBytes - ); - assert_impls!( - char: 
KnownLayout, - Immutable, - TryFromBytes, - FromZeros, - IntoBytes, - !FromBytes, - !Unaligned - ); - assert_impls!( - str: KnownLayout, - Immutable, - TryFromBytes, - FromZeros, - IntoBytes, - Unaligned, - !FromBytes - ); - - assert_impls!( - NonZeroU8: KnownLayout, - Immutable, - TryFromBytes, - IntoBytes, - Unaligned, - !FromZeros, - !FromBytes - ); - assert_impls!( - NonZeroI8: KnownLayout, - Immutable, - TryFromBytes, - IntoBytes, - Unaligned, - !FromZeros, - !FromBytes - ); - assert_impls!( - NonZeroU16: KnownLayout, - Immutable, - TryFromBytes, - IntoBytes, - !FromBytes, - !Unaligned - ); - assert_impls!( - NonZeroI16: KnownLayout, - Immutable, - TryFromBytes, - IntoBytes, - !FromBytes, - !Unaligned - ); - assert_impls!( - NonZeroU32: KnownLayout, - Immutable, - TryFromBytes, - IntoBytes, - !FromBytes, - !Unaligned - ); - assert_impls!( - NonZeroI32: KnownLayout, - Immutable, - TryFromBytes, - IntoBytes, - !FromBytes, - !Unaligned - ); - assert_impls!( - NonZeroU64: KnownLayout, - Immutable, - TryFromBytes, - IntoBytes, - !FromBytes, - !Unaligned - ); - assert_impls!( - NonZeroI64: KnownLayout, - Immutable, - TryFromBytes, - IntoBytes, - !FromBytes, - !Unaligned - ); - assert_impls!( - NonZeroU128: KnownLayout, - Immutable, - TryFromBytes, - IntoBytes, - !FromBytes, - !Unaligned - ); - assert_impls!( - NonZeroI128: KnownLayout, - Immutable, - TryFromBytes, - IntoBytes, - !FromBytes, - !Unaligned - ); - assert_impls!( - NonZeroUsize: KnownLayout, - Immutable, - TryFromBytes, - IntoBytes, - !FromBytes, - !Unaligned - ); - assert_impls!( - NonZeroIsize: KnownLayout, - Immutable, - TryFromBytes, - IntoBytes, - !FromBytes, - !Unaligned - ); - - assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); - assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); - assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned); - 
assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned); - assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned); - assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned); - assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned); - assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned); - assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned); - assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned); - assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned); - assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned); - - // Implements none of the ZC traits. - struct NotZerocopy; - - #[rustfmt::skip] - type FnManyArgs = fn( - NotZerocopy, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, - ) -> (NotZerocopy, NotZerocopy); - - // Allowed, because we're not actually using this type for FFI. 
- #[allow(improper_ctypes_definitions)] - #[rustfmt::skip] - type ECFnManyArgs = extern "C" fn( - NotZerocopy, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, - ) -> (NotZerocopy, NotZerocopy); - - #[cfg(feature = "alloc")] - assert_impls!(Option>>: KnownLayout, Immutable, TryFromBytes, FromZeros, !FromBytes, !IntoBytes, !Unaligned); - assert_impls!(Option]>>: KnownLayout, !Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned); - assert_impls!(Option<&'static UnsafeCell>: KnownLayout, Immutable, TryFromBytes, FromZeros, !FromBytes, !IntoBytes, !Unaligned); - assert_impls!(Option<&'static [UnsafeCell]>: KnownLayout, Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned); - assert_impls!(Option<&'static mut UnsafeCell>: KnownLayout, Immutable, TryFromBytes, FromZeros, !FromBytes, !IntoBytes, !Unaligned); - assert_impls!(Option<&'static mut [UnsafeCell]>: KnownLayout, Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned); - assert_impls!(Option>>: KnownLayout, TryFromBytes, FromZeros, Immutable, !FromBytes, !IntoBytes, !Unaligned); - assert_impls!(Option]>>: KnownLayout, Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned); - assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, !FromBytes, !IntoBytes, !Unaligned); - assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, !FromBytes, !IntoBytes, !Unaligned); - assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, !FromBytes, !IntoBytes, !Unaligned); - assert_impls!(Option: KnownLayout, Immutable, TryFromBytes, FromZeros, !FromBytes, !IntoBytes, !Unaligned); - - assert_impls!(PhantomData: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); - assert_impls!(PhantomData>: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); - assert_impls!(PhantomData<[u8]>: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); 
- - assert_impls!(ManuallyDrop: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); - // This test is important because it allows us to test our hand-rolled - // implementation of ` as TryFromBytes>::is_bit_valid`. - assert_impls!(ManuallyDrop: KnownLayout, Immutable, TryFromBytes, FromZeros, IntoBytes, Unaligned, !FromBytes); - assert_impls!(ManuallyDrop<[u8]>: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); - // This test is important because it allows us to test our hand-rolled - // implementation of ` as TryFromBytes>::is_bit_valid`. - assert_impls!(ManuallyDrop<[bool]>: KnownLayout, Immutable, TryFromBytes, FromZeros, IntoBytes, Unaligned, !FromBytes); - assert_impls!(ManuallyDrop: !Immutable, !TryFromBytes, !KnownLayout, !FromZeros, !FromBytes, !IntoBytes, !Unaligned); - assert_impls!(ManuallyDrop<[NotZerocopy]>: KnownLayout, !Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned); - assert_impls!(ManuallyDrop>: KnownLayout, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned, !Immutable); - assert_impls!(ManuallyDrop<[UnsafeCell]>: KnownLayout, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned, !Immutable); - assert_impls!(ManuallyDrop<[UnsafeCell]>: KnownLayout, TryFromBytes, FromZeros, IntoBytes, Unaligned, !Immutable, !FromBytes); - - assert_impls!(MaybeUninit: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, Unaligned, !IntoBytes); - assert_impls!(MaybeUninit: KnownLayout, TryFromBytes, FromZeros, FromBytes, !Immutable, !IntoBytes, !Unaligned); - assert_impls!(MaybeUninit>: KnownLayout, TryFromBytes, FromZeros, FromBytes, Unaligned, !Immutable, !IntoBytes); - - assert_impls!(Wrapping: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); - // This test is important because it allows us to test our hand-rolled - // implementation of ` as TryFromBytes>::is_bit_valid`. 
- assert_impls!(Wrapping: KnownLayout, Immutable, TryFromBytes, FromZeros, IntoBytes, Unaligned, !FromBytes); - assert_impls!(Wrapping: KnownLayout, !Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned); - assert_impls!(Wrapping>: KnownLayout, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned, !Immutable); - - assert_impls!(Unalign: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, Unaligned); - // This test is important because it allows us to test our hand-rolled - // implementation of ` as TryFromBytes>::is_bit_valid`. - assert_impls!(Unalign: KnownLayout, Immutable, TryFromBytes, FromZeros, IntoBytes, Unaligned, !FromBytes); - assert_impls!(Unalign: Unaligned, !Immutable, !KnownLayout, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes); - - assert_impls!( - [u8]: KnownLayout, - Immutable, - TryFromBytes, - FromZeros, - FromBytes, - IntoBytes, - Unaligned - ); - assert_impls!( - [bool]: KnownLayout, - Immutable, - TryFromBytes, - FromZeros, - IntoBytes, - Unaligned, - !FromBytes - ); - assert_impls!([NotZerocopy]: KnownLayout, !Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned); - assert_impls!( - [u8; 0]: KnownLayout, - Immutable, - TryFromBytes, - FromZeros, - FromBytes, - IntoBytes, - Unaligned, - ); - assert_impls!( - [NotZerocopy; 0]: KnownLayout, - !Immutable, - !TryFromBytes, - !FromZeros, - !FromBytes, - !IntoBytes, - !Unaligned - ); - assert_impls!( - [u8; 1]: KnownLayout, - Immutable, - TryFromBytes, - FromZeros, - FromBytes, - IntoBytes, - Unaligned, - ); - assert_impls!( - [NotZerocopy; 1]: KnownLayout, - !Immutable, - !TryFromBytes, - !FromZeros, - !FromBytes, - !IntoBytes, - !Unaligned - ); - - assert_impls!(*const NotZerocopy: KnownLayout, Immutable, TryFromBytes, FromZeros, !FromBytes, !IntoBytes, !Unaligned); - assert_impls!(*mut NotZerocopy: KnownLayout, Immutable, TryFromBytes, FromZeros, !FromBytes, !IntoBytes, !Unaligned); - assert_impls!(*const [NotZerocopy]: 
KnownLayout, Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned); - assert_impls!(*mut [NotZerocopy]: KnownLayout, Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned); - assert_impls!(*const dyn Debug: KnownLayout, Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned); - assert_impls!(*mut dyn Debug: KnownLayout, Immutable, !TryFromBytes, !FromZeros, !FromBytes, !IntoBytes, !Unaligned); - - #[cfg(feature = "simd")] - { - #[allow(unused_macros)] - macro_rules! test_simd_arch_mod { - ($arch:ident, $($typ:ident),*) => { - { - use core::arch::$arch::{$($typ),*}; - use crate::*; - $( assert_impls!($typ: KnownLayout, Immutable, TryFromBytes, FromZeros, FromBytes, IntoBytes, !Unaligned); )* - } - }; - } - #[cfg(target_arch = "x86")] - test_simd_arch_mod!(x86, __m128, __m128d, __m128i, __m256, __m256d, __m256i); - - #[cfg(all(feature = "simd-nightly", target_arch = "x86"))] - test_simd_arch_mod!(x86, __m512bh, __m512, __m512d, __m512i); - - #[cfg(target_arch = "x86_64")] - test_simd_arch_mod!(x86_64, __m128, __m128d, __m128i, __m256, __m256d, __m256i); - - #[cfg(all(feature = "simd-nightly", target_arch = "x86_64"))] - test_simd_arch_mod!(x86_64, __m512bh, __m512, __m512d, __m512i); - - #[cfg(target_arch = "wasm32")] - test_simd_arch_mod!(wasm32, v128); - - #[cfg(all(feature = "simd-nightly", target_arch = "powerpc"))] - test_simd_arch_mod!( - powerpc, - vector_bool_long, - vector_double, - vector_signed_long, - vector_unsigned_long - ); - - #[cfg(all(feature = "simd-nightly", target_arch = "powerpc64"))] - test_simd_arch_mod!( - powerpc64, - vector_bool_long, - vector_double, - vector_signed_long, - vector_unsigned_long - ); - #[cfg(all(target_arch = "aarch64", zerocopy_aarch64_simd))] - #[rustfmt::skip] - test_simd_arch_mod!( - aarch64, float32x2_t, float32x4_t, float64x1_t, float64x2_t, int8x8_t, int8x8x2_t, - int8x8x3_t, int8x8x4_t, int8x16_t, int8x16x2_t, int8x16x3_t, int8x16x4_t, int16x4_t, - int16x8_t, 
int32x2_t, int32x4_t, int64x1_t, int64x2_t, poly8x8_t, poly8x8x2_t, poly8x8x3_t, - poly8x8x4_t, poly8x16_t, poly8x16x2_t, poly8x16x3_t, poly8x16x4_t, poly16x4_t, poly16x8_t, - poly64x1_t, poly64x2_t, uint8x8_t, uint8x8x2_t, uint8x8x3_t, uint8x8x4_t, uint8x16_t, - uint8x16x2_t, uint8x16x3_t, uint8x16x4_t, uint16x4_t, uint16x8_t, uint32x2_t, uint32x4_t, - uint64x1_t, uint64x2_t - ); - #[cfg(all(feature = "simd-nightly", target_arch = "arm"))] - #[rustfmt::skip] - test_simd_arch_mod!(arm, int8x4_t, uint8x4_t); - } - } } #[cfg(kani)]