// zerocopy/src/lib.rs

1// Copyright 2018 The Fuchsia Authors
2//
3// Licensed under the 2-Clause BSD License <LICENSE-BSD or
4// https://opensource.org/license/bsd-2-clause>, Apache License, Version 2.0
5// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
6// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
7// This file may not be copied, modified, or distributed except according to
8// those terms.
9
10// After updating the following doc comment, make sure to run the following
11// command to update `README.md` based on its contents:
12//
13//   cargo -q run --manifest-path tools/Cargo.toml -p generate-readme > README.md
14
15//! ***<span style="font-size: 140%">Fast, safe, <span
16//! style="color:red;">compile error</span>. Pick two.</span>***
17//!
18//! Zerocopy makes zero-cost memory manipulation effortless. We write `unsafe`
19//! so you don't have to.
20//!
21//! *For an overview of what's changed from zerocopy 0.7, check out our [release
22//! notes][release-notes], which include a step-by-step upgrading guide.*
23//!
24//! *Have questions? Need more out of zerocopy? Submit a [customer request
25//! issue][customer-request-issue] or ask the maintainers on
26//! [GitHub][github-q-a] or [Discord][discord]!*
27//!
28//! [customer-request-issue]: https://github.com/google/zerocopy/issues/new/choose
29//! [release-notes]: https://github.com/google/zerocopy/discussions/1680
30//! [github-q-a]: https://github.com/google/zerocopy/discussions/categories/q-a
31//! [discord]: https://discord.gg/MAvWH2R6zk
32//!
33//! # Overview
34//!
35//! ##### Conversion Traits
36//!
37//! Zerocopy provides four derivable traits for zero-cost conversions:
38//! - [`TryFromBytes`] indicates that a type may safely be converted from
39//!   certain byte sequences (conditional on runtime checks)
40//! - [`FromZeros`] indicates that a sequence of zero bytes represents a valid
41//!   instance of a type
42//! - [`FromBytes`] indicates that a type may safely be converted from an
43//!   arbitrary byte sequence
44//! - [`IntoBytes`] indicates that a type may safely be converted *to* a byte
45//!   sequence
46//!
47//! These traits support sized types, slices, and [slice DSTs][slice-dsts].
48//!
49//! [slice-dsts]: KnownLayout#dynamically-sized-types
50//!
51//! ##### Marker Traits
52//!
53//! Zerocopy provides three derivable marker traits that do not provide any
54//! functionality themselves, but are required to call certain methods provided
55//! by the conversion traits:
56//! - [`KnownLayout`] indicates that zerocopy can reason about certain layout
57//!   qualities of a type
58//! - [`Immutable`] indicates that a type is free from interior mutability,
59//!   except by ownership or an exclusive (`&mut`) borrow
60//! - [`Unaligned`] indicates that a type's alignment requirement is 1
61//!
62//! You should generally derive these marker traits whenever possible.
63//!
64//! ##### Conversion Macros
65//!
66//! Zerocopy provides six macros for safe casting between types:
67//!
68//! - ([`try_`][try_transmute])[`transmute`] (conditionally) converts a value of
69//!   one type to a value of another type of the same size
70//! - ([`try_`][try_transmute_mut])[`transmute_mut`] (conditionally) converts a
71//!   mutable reference of one type to a mutable reference of another type of
72//!   the same size
73//! - ([`try_`][try_transmute_ref])[`transmute_ref`] (conditionally) converts a
74//!   mutable or immutable reference of one type to an immutable reference of
75//!   another type of the same size
76//!
77//! These macros perform *compile-time* size and alignment checks, meaning that
78//! unconditional casts have zero cost at runtime. Conditional casts do not need
79//! to validate size or alignment runtime, but do need to validate contents.
80//!
81//! These macros cannot be used in generic contexts. For generic conversions,
82//! use the methods defined by the [conversion traits](#conversion-traits).
83//!
84//! ##### Byteorder-Aware Numerics
85//!
86//! Zerocopy provides byte-order aware integer types that support these
87//! conversions; see the [`byteorder`] module. These types are especially useful
88//! for network parsing.
89//!
90//! # Cargo Features
91//!
92//! - **`alloc`**
93//!   By default, `zerocopy` is `no_std`. When the `alloc` feature is enabled,
94//!   the `alloc` crate is added as a dependency, and some allocation-related
95//!   functionality is added.
96//!
97//! - **`std`**
98//!   By default, `zerocopy` is `no_std`. When the `std` feature is enabled, the
//!   `std` crate is added as a dependency (i.e., `no_std` is disabled), and
100//!   support for some `std` types is added. `std` implies `alloc`.
101//!
102//! - **`derive`**
103//!   Provides derives for the core marker traits via the `zerocopy-derive`
104//!   crate. These derives are re-exported from `zerocopy`, so it is not
105//!   necessary to depend on `zerocopy-derive` directly.
106//!
107//!   However, you may experience better compile times if you instead directly
108//!   depend on both `zerocopy` and `zerocopy-derive` in your `Cargo.toml`,
109//!   since doing so will allow Rust to compile these crates in parallel. To do
110//!   so, do *not* enable the `derive` feature, and list both dependencies in
//!   your `Cargo.toml` with the same leading non-zero version number; e.g.:
112//!
113//!   ```toml
114//!   [dependencies]
115//!   zerocopy = "0.X"
116//!   zerocopy-derive = "0.X"
117//!   ```
118//!
119//!   To avoid the risk of [duplicate import errors][duplicate-import-errors] if
120//!   one of your dependencies enables zerocopy's `derive` feature, import
121//!   derives as `use zerocopy_derive::*` rather than by name (e.g., `use
122//!   zerocopy_derive::FromBytes`).
123//!
124//! - **`simd`**
125//!   When the `simd` feature is enabled, `FromZeros`, `FromBytes`, and
126//!   `IntoBytes` impls are emitted for all stable SIMD types which exist on the
127//!   target platform. Note that the layout of SIMD types is not yet stabilized,
128//!   so these impls may be removed in the future if layout changes make them
129//!   invalid. For more information, see the Unsafe Code Guidelines Reference
130//!   page on the [layout of packed SIMD vectors][simd-layout].
131//!
132//! - **`simd-nightly`**
133//!   Enables the `simd` feature and adds support for SIMD types which are only
134//!   available on nightly. Since these types are unstable, support for any type
135//!   may be removed at any point in the future.
136//!
137//! - **`float-nightly`**
138//!   Adds support for the unstable `f16` and `f128` types. These types are
139//!   not yet fully implemented and may not be supported on all platforms.
140//!
141//! [duplicate-import-errors]: https://github.com/google/zerocopy/issues/1587
142//! [simd-layout]: https://rust-lang.github.io/unsafe-code-guidelines/layout/packed-simd-vectors.html
143//!
144//! # Security Ethos
145//!
146//! Zerocopy is expressly designed for use in security-critical contexts. We
//! strive to ensure that zerocopy code is sound under Rust's current
148//! memory model, and *any future memory model*. We ensure this by:
149//! - **...not 'guessing' about Rust's semantics.**
150//!   We annotate `unsafe` code with a precise rationale for its soundness that
151//!   cites a relevant section of Rust's official documentation. When Rust's
152//!   documented semantics are unclear, we work with the Rust Operational
153//!   Semantics Team to clarify Rust's documentation.
154//! - **...rigorously testing our implementation.**
155//!   We run tests using [Miri], ensuring that zerocopy is sound across a wide
156//!   array of supported target platforms of varying endianness and pointer
157//!   width, and across both current and experimental memory models of Rust.
158//! - **...formally proving the correctness of our implementation.**
159//!   We apply formal verification tools like [Kani][kani] to prove zerocopy's
160//!   correctness.
161//!
162//! For more information, see our full [soundness policy].
163//!
164//! [Miri]: https://github.com/rust-lang/miri
165//! [Kani]: https://github.com/model-checking/kani
166//! [soundness policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#soundness
167//!
168//! # Relationship to Project Safe Transmute
169//!
170//! [Project Safe Transmute] is an official initiative of the Rust Project to
171//! develop language-level support for safer transmutation. The Project consults
172//! with crates like zerocopy to identify aspects of safer transmutation that
173//! would benefit from compiler support, and has developed an [experimental,
174//! compiler-supported analysis][mcp-transmutability] which determines whether,
175//! for a given type, any value of that type may be soundly transmuted into
176//! another type. Once this functionality is sufficiently mature, zerocopy
177//! intends to replace its internal transmutability analysis (implemented by our
178//! custom derives) with the compiler-supported one. This change will likely be
179//! an implementation detail that is invisible to zerocopy's users.
180//!
181//! Project Safe Transmute will not replace the need for most of zerocopy's
182//! higher-level abstractions. The experimental compiler analysis is a tool for
183//! checking the soundness of `unsafe` code, not a tool to avoid writing
184//! `unsafe` code altogether. For the foreseeable future, crates like zerocopy
185//! will still be required in order to provide higher-level abstractions on top
186//! of the building block provided by Project Safe Transmute.
187//!
188//! [Project Safe Transmute]: https://rust-lang.github.io/rfcs/2835-project-safe-transmute.html
189//! [mcp-transmutability]: https://github.com/rust-lang/compiler-team/issues/411
190//!
191//! # MSRV
192//!
193//! See our [MSRV policy].
194//!
195//! [MSRV policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#msrv
196//!
197//! # Changelog
198//!
199//! Zerocopy uses [GitHub Releases].
200//!
201//! [GitHub Releases]: https://github.com/google/zerocopy/releases
202//!
203//! # Thanks
204//!
205//! Zerocopy is maintained by engineers at Google with help from [many wonderful
206//! contributors][contributors]. Thank you to everyone who has lent a hand in
207//! making Rust a little more secure!
208//!
209//! [contributors]: https://github.com/google/zerocopy/graphs/contributors
210
211// Sometimes we want to use lints which were added after our MSRV.
212// `unknown_lints` is `warn` by default and we deny warnings in CI, so without
213// this attribute, any unknown lint would cause a CI failure when testing with
214// our MSRV.
215#![allow(unknown_lints, non_local_definitions, unreachable_patterns)]
216#![deny(renamed_and_removed_lints)]
217#![deny(
218    anonymous_parameters,
219    deprecated_in_future,
220    late_bound_lifetime_arguments,
221    missing_copy_implementations,
222    missing_debug_implementations,
223    missing_docs,
224    path_statements,
225    patterns_in_fns_without_body,
226    rust_2018_idioms,
227    trivial_numeric_casts,
228    unreachable_pub,
229    unsafe_op_in_unsafe_fn,
230    unused_extern_crates,
231    // We intentionally choose not to deny `unused_qualifications`. When items
232    // are added to the prelude (e.g., `core::mem::size_of`), this has the
233    // consequence of making some uses trigger this lint on the latest toolchain
234    // (e.g., `mem::size_of`), but fixing it (e.g. by replacing with `size_of`)
235    // does not work on older toolchains.
236    //
237    // We tested a more complicated fix in #1413, but ultimately decided that,
238    // since this lint is just a minor style lint, the complexity isn't worth it
239    // - it's fine to occasionally have unused qualifications slip through,
240    // especially since these do not affect our user-facing API in any way.
241    variant_size_differences
242)]
243#![cfg_attr(
244    __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS,
245    deny(fuzzy_provenance_casts, lossy_provenance_casts)
246)]
247#![deny(
248    clippy::all,
249    clippy::alloc_instead_of_core,
250    clippy::arithmetic_side_effects,
251    clippy::as_underscore,
252    clippy::assertions_on_result_states,
253    clippy::as_conversions,
254    clippy::correctness,
255    clippy::dbg_macro,
256    clippy::decimal_literal_representation,
257    clippy::double_must_use,
258    clippy::get_unwrap,
259    clippy::indexing_slicing,
260    clippy::missing_inline_in_public_items,
261    clippy::missing_safety_doc,
262    clippy::multiple_unsafe_ops_per_block,
263    clippy::must_use_candidate,
264    clippy::must_use_unit,
265    clippy::obfuscated_if_else,
266    clippy::perf,
267    clippy::print_stdout,
268    clippy::return_self_not_must_use,
269    clippy::std_instead_of_core,
270    clippy::style,
271    clippy::suspicious,
272    clippy::todo,
273    clippy::undocumented_unsafe_blocks,
274    clippy::unimplemented,
275    clippy::unnested_or_patterns,
276    clippy::unwrap_used,
277    clippy::use_debug
278)]
279// `clippy::incompatible_msrv` (implied by `clippy::suspicious`): This sometimes
280// has false positives, and we test on our MSRV in CI, so it doesn't help us
281// anyway.
282#![allow(clippy::needless_lifetimes, clippy::type_complexity, clippy::incompatible_msrv)]
283#![deny(
284    rustdoc::bare_urls,
285    rustdoc::broken_intra_doc_links,
286    rustdoc::invalid_codeblock_attributes,
287    rustdoc::invalid_html_tags,
288    rustdoc::invalid_rust_codeblocks,
289    rustdoc::missing_crate_level_docs,
290    rustdoc::private_intra_doc_links
291)]
292// In test code, it makes sense to weight more heavily towards concise, readable
293// code over correct or debuggable code.
294#![cfg_attr(any(test, kani), allow(
295    // In tests, you get line numbers and have access to source code, so panic
296    // messages are less important. You also often unwrap a lot, which would
297    // make expect'ing instead very verbose.
298    clippy::unwrap_used,
299    // In tests, there's no harm to "panic risks" - the worst that can happen is
300    // that your test will fail, and you'll fix it. By contrast, panic risks in
301    // production code introduce the possibly of code panicking unexpectedly "in
302    // the field".
303    clippy::arithmetic_side_effects,
304    clippy::indexing_slicing,
305))]
306#![cfg_attr(not(any(test, kani, feature = "std")), no_std)]
307#![cfg_attr(
308    all(feature = "simd-nightly", target_arch = "arm"),
309    feature(stdarch_arm_neon_intrinsics)
310)]
311#![cfg_attr(
312    all(feature = "simd-nightly", any(target_arch = "powerpc", target_arch = "powerpc64")),
313    feature(stdarch_powerpc)
314)]
315#![cfg_attr(feature = "float-nightly", feature(f16, f128))]
316#![cfg_attr(doc_cfg, feature(doc_cfg))]
317#![cfg_attr(__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS, feature(coverage_attribute))]
318#![cfg_attr(
319    any(__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS, miri),
320    feature(layout_for_ptr)
321)]
322#![cfg_attr(all(test, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS), feature(test))]
323
324// This is a hack to allow zerocopy-derive derives to work in this crate. They
325// assume that zerocopy is linked as an extern crate, so they access items from
326// it as `zerocopy::Xxx`. This makes that still work.
327#[cfg(any(feature = "derive", test))]
328extern crate self as zerocopy;
329
330#[cfg(all(test, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS))]
331extern crate test;
332
333#[doc(hidden)]
334#[macro_use]
335pub mod util;
336
337pub mod byte_slice;
338pub mod byteorder;
339mod deprecated;
340
341#[doc(hidden)]
342pub mod doctests;
343
344// This module is `pub` so that zerocopy's error types and error handling
345// documentation is grouped together in a cohesive module. In practice, we
346// expect most users to use the re-export of `error`'s items to avoid identifier
347// stuttering.
348pub mod error;
349mod impls;
350#[doc(hidden)]
351pub mod layout;
352mod macros;
353#[doc(hidden)]
354pub mod pointer;
355mod r#ref;
356mod split_at;
357// FIXME(#252): If we make this pub, come up with a better name.
358mod wrappers;
359
360use core::{
361    cell::{Cell, UnsafeCell},
362    cmp::Ordering,
363    fmt::{self, Debug, Display, Formatter},
364    hash::Hasher,
365    marker::PhantomData,
366    mem::{self, ManuallyDrop, MaybeUninit as CoreMaybeUninit},
367    num::{
368        NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize, NonZeroU128,
369        NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize, Wrapping,
370    },
371    ops::{Deref, DerefMut},
372    ptr::{self, NonNull},
373    slice,
374};
375#[cfg(feature = "std")]
376use std::io;
377
378use crate::pointer::invariant::{self, BecauseExclusive};
379#[doc(hidden)]
380pub use crate::pointer::PtrInner;
381pub use crate::{
382    byte_slice::*,
383    byteorder::*,
384    error::*,
385    r#ref::*,
386    split_at::{Split, SplitAt},
387    wrappers::*,
388};
389
390#[cfg(any(feature = "alloc", test, kani))]
391extern crate alloc;
392#[cfg(any(feature = "alloc", test))]
393use alloc::{boxed::Box, vec::Vec};
394#[cfg(any(feature = "alloc", test))]
395use core::alloc::Layout;
396
397use util::MetadataOf;
398
399// Used by `KnownLayout`.
400#[doc(hidden)]
401pub use crate::layout::*;
402// Used by `TryFromBytes::is_bit_valid`.
403#[doc(hidden)]
404pub use crate::pointer::{invariant::BecauseImmutable, Maybe, Ptr};
405// For each trait polyfill, as soon as the corresponding feature is stable, the
406// polyfill import will be unused because method/function resolution will prefer
407// the inherent method/function over a trait method/function. Thus, we suppress
408// the `unused_imports` warning.
409//
410// See the documentation on `util::polyfills` for more information.
411#[allow(unused_imports)]
412use crate::util::polyfills::{self, NonNullExt as _, NumExt as _};
413
// If tests are being built on a nightly toolchain but the internal
// nightly-features cfg has not been passed via RUSTFLAGS, nightly-only tests
// will be silently skipped. This block surfaces that situation as a build-time
// warning: referencing a locally-`#[deprecated]` item, with the `deprecated`
// lint explicitly set to `warn`, makes the compiler print the attached
// message during compilation.
#[rustversion::nightly]
#[cfg(all(test, not(__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS)))]
const _: () = {
    // Carrier for the warning text; marked deprecated so that the use below
    // trips the `deprecated` lint and emits this message.
    #[deprecated = "some tests may be skipped due to missing RUSTFLAGS=\"--cfg __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS\""]
    const _WARNING: () = ();
    // Re-enable the lint locally (in case it is allowed elsewhere) and
    // trigger it by evaluating `_WARNING` as the block's value.
    #[warn(deprecated)]
    _WARNING
};
422
423// These exist so that code which was written against the old names will get
424// less confusing error messages when they upgrade to a more recent version of
425// zerocopy. On our MSRV toolchain, the error messages read, for example:
426//
427//   error[E0603]: trait `FromZeroes` is private
428//       --> examples/deprecated.rs:1:15
429//        |
430//   1    | use zerocopy::FromZeroes;
431//        |               ^^^^^^^^^^ private trait
432//        |
433//   note: the trait `FromZeroes` is defined here
434//       --> /Users/josh/workspace/zerocopy/src/lib.rs:1845:5
435//        |
436//   1845 | use FromZeros as FromZeroes;
437//        |     ^^^^^^^^^^^^^^^^^^^^^^^
438//
439// The "note" provides enough context to make it easy to figure out how to fix
440// the error.
441/// Implements [`KnownLayout`].
442///
443/// This derive analyzes various aspects of a type's layout that are needed for
444/// some of zerocopy's APIs. It can be applied to structs, enums, and unions;
445/// e.g.:
446///
447/// ```
448/// # use zerocopy_derive::KnownLayout;
449/// #[derive(KnownLayout)]
450/// struct MyStruct {
451/// # /*
452///     ...
453/// # */
454/// }
455///
456/// #[derive(KnownLayout)]
457/// enum MyEnum {
458/// #   V00,
459/// # /*
460///     ...
461/// # */
462/// }
463///
464/// #[derive(KnownLayout)]
465/// union MyUnion {
466/// #   variant: u8,
467/// # /*
468///     ...
469/// # */
470/// }
471/// ```
472///
473/// # Limitations
474///
475/// This derive cannot currently be applied to unsized structs without an
476/// explicit `repr` attribute.
477///
478/// Some invocations of this derive run afoul of a [known bug] in Rust's type
479/// privacy checker. For example, this code:
480///
481/// ```compile_fail,E0446
482/// use zerocopy::*;
483/// # use zerocopy_derive::*;
484///
485/// #[derive(KnownLayout)]
486/// #[repr(C)]
487/// pub struct PublicType {
488///     leading: Foo,
489///     trailing: Bar,
490/// }
491///
492/// #[derive(KnownLayout)]
493/// struct Foo;
494///
495/// #[derive(KnownLayout)]
496/// struct Bar;
497/// ```
498///
499/// ...results in a compilation error:
500///
501/// ```text
502/// error[E0446]: private type `Bar` in public interface
503///  --> examples/bug.rs:3:10
504///    |
505/// 3  | #[derive(KnownLayout)]
506///    |          ^^^^^^^^^^^ can't leak private type
507/// ...
508/// 14 | struct Bar;
509///    | ---------- `Bar` declared as private
510///    |
511///    = note: this error originates in the derive macro `KnownLayout` (in Nightly builds, run with -Z macro-backtrace for more info)
512/// ```
513///
514/// This issue arises when `#[derive(KnownLayout)]` is applied to `repr(C)`
515/// structs whose trailing field type is less public than the enclosing struct.
516///
517/// To work around this, mark the trailing field type `pub` and annotate it with
518/// `#[doc(hidden)]`; e.g.:
519///
520/// ```no_run
521/// use zerocopy::*;
522/// # use zerocopy_derive::*;
523///
524/// #[derive(KnownLayout)]
525/// #[repr(C)]
526/// pub struct PublicType {
527///     leading: Foo,
528///     trailing: Bar,
529/// }
530///
531/// #[derive(KnownLayout)]
532/// struct Foo;
533///
534/// #[doc(hidden)]
535/// #[derive(KnownLayout)]
536/// pub struct Bar; // <- `Bar` is now also `pub`
537/// ```
538///
539/// [known bug]: https://github.com/rust-lang/rust/issues/45713
540#[cfg(any(feature = "derive", test))]
541#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
542pub use zerocopy_derive::KnownLayout;
543#[allow(unused)]
544use {FromZeros as FromZeroes, IntoBytes as AsBytes, Ref as LayoutVerified};
545
546/// Indicates that zerocopy can reason about certain aspects of a type's layout.
547///
548/// This trait is required by many of zerocopy's APIs. It supports sized types,
549/// slices, and [slice DSTs](#dynamically-sized-types).
550///
551/// # Implementation
552///
553/// **Do not implement this trait yourself!** Instead, use
554/// [`#[derive(KnownLayout)]`][derive]; e.g.:
555///
556/// ```
557/// # use zerocopy_derive::KnownLayout;
558/// #[derive(KnownLayout)]
559/// struct MyStruct {
560/// # /*
561///     ...
562/// # */
563/// }
564///
565/// #[derive(KnownLayout)]
566/// enum MyEnum {
567/// # /*
568///     ...
569/// # */
570/// }
571///
572/// #[derive(KnownLayout)]
573/// union MyUnion {
574/// #   variant: u8,
575/// # /*
576///     ...
577/// # */
578/// }
579/// ```
580///
581/// This derive performs a sophisticated analysis to deduce the layout
582/// characteristics of types. You **must** implement this trait via the derive.
583///
584/// # Dynamically-sized types
585///
586/// `KnownLayout` supports slice-based dynamically sized types ("slice DSTs").
587///
588/// A slice DST is a type whose trailing field is either a slice or another
589/// slice DST, rather than a type with fixed size. For example:
590///
591/// ```
592/// #[repr(C)]
593/// struct PacketHeader {
594/// # /*
595///     ...
596/// # */
597/// }
598///
599/// #[repr(C)]
600/// struct Packet {
601///     header: PacketHeader,
602///     body: [u8],
603/// }
604/// ```
605///
606/// It can be useful to think of slice DSTs as a generalization of slices - in
607/// other words, a normal slice is just the special case of a slice DST with
608/// zero leading fields. In particular:
609/// - Like slices, slice DSTs can have different lengths at runtime
610/// - Like slices, slice DSTs cannot be passed by-value, but only by reference
611///   or via other indirection such as `Box`
612/// - Like slices, a reference (or `Box`, or other pointer type) to a slice DST
613///   encodes the number of elements in the trailing slice field
614///
615/// ## Slice DST layout
616///
617/// Just like other composite Rust types, the layout of a slice DST is not
618/// well-defined unless it is specified using an explicit `#[repr(...)]`
619/// attribute such as `#[repr(C)]`. [Other representations are
620/// supported][reprs], but in this section, we'll use `#[repr(C)]` as our
621/// example.
622///
623/// A `#[repr(C)]` slice DST is laid out [just like sized `#[repr(C)]`
624/// types][repr-c-structs], but the presence of a variable-length field
625/// introduces the possibility of *dynamic padding*. In particular, it may be
626/// necessary to add trailing padding *after* the trailing slice field in order
627/// to satisfy the outer type's alignment, and the amount of padding required
628/// may be a function of the length of the trailing slice field. This is just a
629/// natural consequence of the normal `#[repr(C)]` rules applied to slice DSTs,
630/// but it can result in surprising behavior. For example, consider the
631/// following type:
632///
633/// ```
634/// #[repr(C)]
635/// struct Foo {
636///     a: u32,
637///     b: u8,
638///     z: [u16],
639/// }
640/// ```
641///
642/// Assuming that `u32` has alignment 4 (this is not true on all platforms),
643/// then `Foo` has alignment 4 as well. Here is the smallest possible value for
644/// `Foo`:
645///
646/// ```text
647/// byte offset | 01234567
648///       field | aaaab---
649///                    ><
650/// ```
651///
652/// In this value, `z` has length 0. Abiding by `#[repr(C)]`, the lowest offset
653/// that we can place `z` at is 5, but since `z` has alignment 2, we need to
654/// round up to offset 6. This means that there is one byte of padding between
655/// `b` and `z`, then 0 bytes of `z` itself (denoted `><` in this diagram), and
656/// then two bytes of padding after `z` in order to satisfy the overall
657/// alignment of `Foo`. The size of this instance is 8 bytes.
658///
659/// What about if `z` has length 1?
660///
661/// ```text
662/// byte offset | 01234567
663///       field | aaaab-zz
664/// ```
665///
666/// In this instance, `z` has length 1, and thus takes up 2 bytes. That means
667/// that we no longer need padding after `z` in order to satisfy `Foo`'s
668/// alignment. We've now seen two different values of `Foo` with two different
669/// lengths of `z`, but they both have the same size - 8 bytes.
670///
671/// What about if `z` has length 2?
672///
673/// ```text
674/// byte offset | 012345678901
675///       field | aaaab-zzzz--
676/// ```
677///
678/// Now `z` has length 2, and thus takes up 4 bytes. This brings our un-padded
679/// size to 10, and so we now need another 2 bytes of padding after `z` to
680/// satisfy `Foo`'s alignment.
681///
682/// Again, all of this is just a logical consequence of the `#[repr(C)]` rules
683/// applied to slice DSTs, but it can be surprising that the amount of trailing
684/// padding becomes a function of the trailing slice field's length, and thus
685/// can only be computed at runtime.
686///
687/// [reprs]: https://doc.rust-lang.org/reference/type-layout.html#representations
688/// [repr-c-structs]: https://doc.rust-lang.org/reference/type-layout.html#reprc-structs
689///
690/// ## What is a valid size?
691///
692/// There are two places in zerocopy's API that we refer to "a valid size" of a
693/// type. In normal casts or conversions, where the source is a byte slice, we
694/// need to know whether the source byte slice is a valid size of the
695/// destination type. In prefix or suffix casts, we need to know whether *there
696/// exists* a valid size of the destination type which fits in the source byte
697/// slice and, if so, what the largest such size is.
698///
699/// As outlined above, a slice DST's size is defined by the number of elements
700/// in its trailing slice field. However, there is not necessarily a 1-to-1
701/// mapping between trailing slice field length and overall size. As we saw in
702/// the previous section with the type `Foo`, instances with both 0 and 1
703/// elements in the trailing `z` field result in a `Foo` whose size is 8 bytes.
704///
705/// When we say "x is a valid size of `T`", we mean one of two things:
706/// - If `T: Sized`, then we mean that `x == size_of::<T>()`
707/// - If `T` is a slice DST, then we mean that there exists a `len` such that the instance of
708///   `T` with `len` trailing slice elements has size `x`
709///
710/// When we say "largest possible size of `T` that fits in a byte slice", we
711/// mean one of two things:
712/// - If `T: Sized`, then we mean `size_of::<T>()` if the byte slice is at least
713///   `size_of::<T>()` bytes long
714/// - If `T` is a slice DST, then we mean to consider all values, `len`, such
715///   that the instance of `T` with `len` trailing slice elements fits in the
716///   byte slice, and to choose the largest such `len`, if any
717///
718///
719/// # Safety
720///
721/// This trait does not convey any safety guarantees to code outside this crate.
722///
723/// You must not rely on the `#[doc(hidden)]` internals of `KnownLayout`. Future
724/// releases of zerocopy may make backwards-breaking changes to these items,
725/// including changes that only affect soundness, which may cause code which
726/// uses those items to silently become unsound.
727///
728#[cfg_attr(feature = "derive", doc = "[derive]: zerocopy_derive::KnownLayout")]
729#[cfg_attr(
730    not(feature = "derive"),
731    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.KnownLayout.html"),
732)]
733#[cfg_attr(
734    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
735    diagnostic::on_unimplemented(note = "Consider adding `#[derive(KnownLayout)]` to `{Self}`")
736)]
737pub unsafe trait KnownLayout {
738    // The `Self: Sized` bound makes it so that `KnownLayout` can still be
739    // object safe. It's not currently object safe thanks to `const LAYOUT`, and
740    // it likely won't be in the future, but there's no reason not to be
741    // forwards-compatible with object safety.
742    #[doc(hidden)]
743    fn only_derive_is_allowed_to_implement_this_trait()
744    where
745        Self: Sized;
746
747    /// The type of metadata stored in a pointer to `Self`.
748    ///
749    /// This is `()` for sized types and `usize` for slice DSTs.
750    type PointerMetadata: PointerMetadata;
751
752    /// A maybe-uninitialized analog of `Self`
753    ///
754    /// # Safety
755    ///
756    /// `Self::LAYOUT` and `Self::MaybeUninit::LAYOUT` are identical.
757    /// `Self::MaybeUninit` admits uninitialized bytes in all positions.
758    #[doc(hidden)]
759    type MaybeUninit: ?Sized + KnownLayout<PointerMetadata = Self::PointerMetadata>;
760
761    /// The layout of `Self`.
762    ///
763    /// # Safety
764    ///
765    /// Callers may assume that `LAYOUT` accurately reflects the layout of
766    /// `Self`. In particular:
767    /// - `LAYOUT.align` is equal to `Self`'s alignment
768    /// - If `Self: Sized`, then `LAYOUT.size_info == SizeInfo::Sized { size }`
769    ///   where `size == size_of::<Self>()`
770    /// - If `Self` is a slice DST, then `LAYOUT.size_info ==
771    ///   SizeInfo::SliceDst(slice_layout)` where:
772    ///   - The size, `size`, of an instance of `Self` with `elems` trailing
773    ///     slice elements is equal to `slice_layout.offset +
774    ///     slice_layout.elem_size * elems` rounded up to the nearest multiple
775    ///     of `LAYOUT.align`
776    ///   - For such an instance, any bytes in the range `[slice_layout.offset +
777    ///     slice_layout.elem_size * elems, size)` are padding and must not be
778    ///     assumed to be initialized
779    #[doc(hidden)]
780    const LAYOUT: DstLayout;
781
    /// Constructs a raw `NonNull<Self>` from a data pointer and pointer
    /// metadata.
    ///
    /// # Safety
    ///
    /// The returned pointer has the same address and provenance as `bytes`. If
    /// `Self` is a slice DST, the returned pointer's referent has `meta`
    /// elements in its trailing slice.
    #[doc(hidden)]
    fn raw_from_ptr_len(bytes: NonNull<u8>, meta: Self::PointerMetadata) -> NonNull<Self>;
787
    /// Extracts the metadata from a pointer to `Self`.
    ///
    /// For sized types the returned metadata is `()`; for slice DSTs it is the
    /// number of elements in the trailing slice.
    ///
    /// # Safety
    ///
    /// `pointer_to_metadata` always returns the correct metadata stored in
    /// `ptr`.
    #[doc(hidden)]
    fn pointer_to_metadata(ptr: *mut Self) -> Self::PointerMetadata;
796
797    /// Computes the length of the byte range addressed by `ptr`.
798    ///
799    /// Returns `None` if the resulting length would not fit in an `usize`.
800    ///
801    /// # Safety
802    ///
803    /// Callers may assume that `size_of_val_raw` always returns the correct
804    /// size.
805    ///
806    /// Callers may assume that, if `ptr` addresses a byte range whose length
807    /// fits in an `usize`, this will return `Some`.
808    #[doc(hidden)]
809    #[must_use]
810    #[inline(always)]
811    fn size_of_val_raw(ptr: NonNull<Self>) -> Option<usize> {
812        let meta = Self::pointer_to_metadata(ptr.as_ptr());
813        // SAFETY: `size_for_metadata` promises to only return `None` if the
814        // resulting size would not fit in a `usize`.
815        Self::size_for_metadata(meta)
816    }
817
818    #[doc(hidden)]
819    #[must_use]
820    #[inline(always)]
821    fn raw_dangling() -> NonNull<Self> {
822        let meta = Self::PointerMetadata::from_elem_count(0);
823        Self::raw_from_ptr_len(NonNull::dangling(), meta)
824    }
825
    /// Computes the size of an object of type `Self` with the given pointer
    /// metadata.
    ///
    /// # Safety
    ///
    /// `size_for_metadata` promises to return `None` if and only if the
    /// resulting size would not fit in a `usize`. Note that the returned size
    /// could exceed the actual maximum valid size of an allocated object,
    /// `isize::MAX`.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::KnownLayout;
    ///
    /// assert_eq!(u8::size_for_metadata(()), Some(1));
    /// assert_eq!(u16::size_for_metadata(()), Some(2));
    /// assert_eq!(<[u8]>::size_for_metadata(42), Some(42));
    /// assert_eq!(<[u16]>::size_for_metadata(42), Some(84));
    ///
    /// // This size exceeds the maximum valid object size (`isize::MAX`):
    /// assert_eq!(<[u8]>::size_for_metadata(usize::MAX), Some(usize::MAX));
    ///
    /// // This size, if computed, would exceed `usize::MAX`:
    /// assert_eq!(<[u16]>::size_for_metadata(usize::MAX), None);
    /// ```
    #[inline(always)]
    fn size_for_metadata(meta: Self::PointerMetadata) -> Option<usize> {
        // Delegates to `PointerMetadata::size_for_metadata`, which promises to
        // return `None` only if the computed size would overflow a `usize`.
        meta.size_for_metadata(Self::LAYOUT)
    }
856}
857
858/// Efficiently produces the [`TrailingSliceLayout`] of `T`.
859#[inline(always)]
860pub(crate) fn trailing_slice_layout<T>() -> TrailingSliceLayout
861where
862    T: ?Sized + KnownLayout<PointerMetadata = usize>,
863{
864    trait LayoutFacts {
865        const SIZE_INFO: TrailingSliceLayout;
866    }
867
868    impl<T: ?Sized> LayoutFacts for T
869    where
870        T: KnownLayout<PointerMetadata = usize>,
871    {
872        const SIZE_INFO: TrailingSliceLayout = match T::LAYOUT.size_info {
873            crate::SizeInfo::Sized { .. } => const_panic!("unreachable"),
874            crate::SizeInfo::SliceDst(info) => info,
875        };
876    }
877
878    T::SIZE_INFO
879}
880
/// The metadata associated with a [`KnownLayout`] type.
///
/// Currently implemented only for `()` (the metadata of sized types) and
/// `usize` (the trailing-slice element count of slice DSTs).
#[doc(hidden)]
pub trait PointerMetadata: Copy + Eq + Debug {
    /// Constructs a `Self` from an element count.
    ///
    /// If `Self = ()`, this returns `()`. If `Self = usize`, this returns
    /// `elems`. No other types are currently supported.
    fn from_elem_count(elems: usize) -> Self;

    /// Computes the size of the object with the given layout and pointer
    /// metadata.
    ///
    /// # Panics
    ///
    /// If `Self = ()`, `layout` must describe a sized type. If `Self = usize`,
    /// `layout` must describe a slice DST. Otherwise, `size_for_metadata` may
    /// panic.
    ///
    /// # Safety
    ///
    /// `size_for_metadata` promises to only return `None` if the resulting size
    /// would not fit in a `usize`.
    fn size_for_metadata(self, layout: DstLayout) -> Option<usize>;
}
905
906impl PointerMetadata for () {
907    #[inline]
908    #[allow(clippy::unused_unit)]
909    fn from_elem_count(_elems: usize) -> () {}
910
911    #[inline]
912    fn size_for_metadata(self, layout: DstLayout) -> Option<usize> {
913        match layout.size_info {
914            SizeInfo::Sized { size } => Some(size),
915            // NOTE: This branch is unreachable, but we return `None` rather
916            // than `unreachable!()` to avoid generating panic paths.
917            SizeInfo::SliceDst(_) => None,
918        }
919    }
920}
921
922impl PointerMetadata for usize {
923    #[inline]
924    fn from_elem_count(elems: usize) -> usize {
925        elems
926    }
927
928    #[inline]
929    fn size_for_metadata(self, layout: DstLayout) -> Option<usize> {
930        match layout.size_info {
931            SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }) => {
932                let slice_len = elem_size.checked_mul(self)?;
933                let without_padding = offset.checked_add(slice_len)?;
934                without_padding.checked_add(util::padding_needed_for(without_padding, layout.align))
935            }
936            // NOTE: This branch is unreachable, but we return `None` rather
937            // than `unreachable!()` to avoid generating panic paths.
938            SizeInfo::Sized { .. } => None,
939        }
940    }
941}
942
// SAFETY: Delegates safety to `DstLayout::for_slice`.
unsafe impl<T> KnownLayout for [T] {
    #[allow(clippy::missing_inline_in_public_items, dead_code)]
    #[cfg_attr(
        all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS),
        coverage(off)
    )]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized,
    {
    }

    type PointerMetadata = usize;

    // SAFETY: `CoreMaybeUninit<T>::LAYOUT` and `T::LAYOUT` are identical
    // because `CoreMaybeUninit<T>` has the same size and alignment as `T` [1].
    // Consequently, `[CoreMaybeUninit<T>]::LAYOUT` and `[T]::LAYOUT` are
    // identical, because they both lack a fixed-sized prefix and because they
    // inherit the alignments of their inner element type (which are identical)
    // [2][3].
    //
    // `[CoreMaybeUninit<T>]` admits uninitialized bytes at all positions
    // because `CoreMaybeUninit<T>` admits uninitialized bytes at all positions
    // and because the inner elements of `[CoreMaybeUninit<T>]` are laid out
    // back-to-back [2][3].
    //
    // [1] Per https://doc.rust-lang.org/1.81.0/std/mem/union.MaybeUninit.html#layout-1:
    //
    //   `MaybeUninit<T>` is guaranteed to have the same size, alignment, and ABI as
    //   `T`
    //
    // [2] Per https://doc.rust-lang.org/1.82.0/reference/type-layout.html#slice-layout:
    //
    //   Slices have the same layout as the section of the array they slice.
    //
    // [3] Per https://doc.rust-lang.org/1.82.0/reference/type-layout.html#array-layout:
    //
    //   An array of `[T; N]` has a size of `size_of::<T>() * N` and the same
    //   alignment of `T`. Arrays are laid out so that the zero-based `nth`
    //   element of the array is offset from the start of the array by `n *
    //   size_of::<T>()` bytes.
    type MaybeUninit = [CoreMaybeUninit<T>];

    const LAYOUT: DstLayout = DstLayout::for_slice::<T>();

    // SAFETY: `.cast` preserves address and provenance. The returned pointer
    // refers to an object with `elems` elements by construction.
    #[inline(always)]
    fn raw_from_ptr_len(data: NonNull<u8>, elems: usize) -> NonNull<Self> {
        // FIXME(#67): Remove this allow. See NonNullExt for more details.
        #[allow(unstable_name_collisions)]
        NonNull::slice_from_raw_parts(data.cast::<T>(), elems)
    }

    #[inline(always)]
    fn pointer_to_metadata(ptr: *mut [T]) -> usize {
        #[allow(clippy::as_conversions)]
        let slc = ptr as *const [()];

        // SAFETY:
        // - `()` has alignment 1, so `slc` is trivially aligned.
        // - `slc` was derived from a non-null pointer.
        // - The size is 0 regardless of the length, so it is sound to
        //   materialize a reference regardless of location.
        // - By invariant, `ptr` has valid provenance.
        let slc = unsafe { &*slc };

        // This is correct because the preceding `as` cast preserves the number
        // of slice elements. [1]
        //
        // [1] Per https://doc.rust-lang.org/reference/expressions/operator-expr.html#pointer-to-pointer-cast:
        //
        //   For slice types like `[T]` and `[U]`, the raw pointer types `*const
        //   [T]`, `*mut [T]`, `*const [U]`, and `*mut [U]` encode the number of
        //   elements in this slice. Casts between these raw pointer types
        //   preserve the number of elements. ... The same holds for `str` and
        //   any compound type whose unsized tail is a slice type, such as
        //   struct `Foo(i32, [u8])` or `(u64, Foo)`.
        slc.len()
    }
}
1025
// `KnownLayout` impls for primitive and `NonZero*` types, all of which are
// sized with statically-known layouts.
#[rustfmt::skip]
impl_known_layout!(
    (),
    u8, i8, u16, i16, u32, i32, u64, i64, u128, i128, usize, isize, f32, f64,
    bool, char,
    NonZeroU8, NonZeroI8, NonZeroU16, NonZeroI16, NonZeroU32, NonZeroI32,
    NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroUsize, NonZeroIsize
);
// `KnownLayout` impls for the nightly-only `f16`/`f128` primitives.
#[rustfmt::skip]
#[cfg(feature = "float-nightly")]
impl_known_layout!(
    #[cfg_attr(doc_cfg, doc(cfg(feature = "float-nightly")))]
    f16,
    #[cfg_attr(doc_cfg, doc(cfg(feature = "float-nightly")))]
    f128
);
// `KnownLayout` impls for generic wrapper, pointer, and reference types.
#[rustfmt::skip]
impl_known_layout!(
    T         => Option<T>,
    T: ?Sized => PhantomData<T>,
    T         => Wrapping<T>,
    T         => CoreMaybeUninit<T>,
    T: ?Sized => *const T,
    T: ?Sized => *mut T,
    T: ?Sized => &'_ T,
    T: ?Sized => &'_ mut T,
);
// `KnownLayout` impl for arrays of any element type and length.
impl_known_layout!(const N: usize, T => [T; N]);
1054
// SAFETY: `str` has the same representation as `[u8]`. `ManuallyDrop<T>` [1],
// `UnsafeCell<T>` [2], and `Cell<T>` [3] have the same representation as `T`.
//
// [1] Per https://doc.rust-lang.org/1.85.0/std/mem/struct.ManuallyDrop.html:
//
//   `ManuallyDrop<T>` is guaranteed to have the same layout and bit validity as
//   `T`
//
// [2] Per https://doc.rust-lang.org/1.85.0/core/cell/struct.UnsafeCell.html#memory-layout:
//
//   `UnsafeCell<T>` has the same in-memory representation as its inner type
//   `T`.
//
// [3] Per https://doc.rust-lang.org/1.85.0/core/cell/struct.Cell.html#memory-layout:
//
//   `Cell<T>` has the same in-memory representation as `T`.
//
// The anonymous `const _: () = unsafe { ... }` scopes the single `unsafe`
// block required by all four `unsafe_impl_known_layout!` invocations.
#[allow(clippy::multiple_unsafe_ops_per_block)]
const _: () = unsafe {
    unsafe_impl_known_layout!(
        #[repr([u8])]
        str
    );
    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] ManuallyDrop<T>);
    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] UnsafeCell<T>);
    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] Cell<T>);
};
1081
// SAFETY:
// - By consequence of the invariant on `T::MaybeUninit` that `T::LAYOUT` and
//   `T::MaybeUninit::LAYOUT` are equal, `T` and `T::MaybeUninit` have the same:
//   - Fixed prefix size
//   - Alignment
//   - (For DSTs) trailing slice element size
// - By consequence of the above, referents `T::MaybeUninit` and `T` require
//   the same kind of pointer metadata, and thus it is valid to perform an `as`
//   cast from `*mut T` to `*mut T::MaybeUninit`, and this operation preserves
//   referent size (ie, `size_of_val_raw`).
const _: () = unsafe {
    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T::MaybeUninit)] MaybeUninit<T>)
};
1095
// FIXME(#196, #2856): Eventually, we'll want to support enums variants and
// union fields being treated uniformly since they behave similarly to each
// other in terms of projecting validity – specifically, for a type `T` with
// validity `V`, if `T` is a struct type, then its fields straightforwardly also
// have validity `V`. By contrast, if `T` is an enum or union type, then
// validity is not straightforwardly recursive in this way.
/// Sentinel `VARIANT_ID` used in [`HasField`] impls when `Self` is a struct
/// type (enum variants use their non-negative numerical index instead).
#[doc(hidden)]
pub const STRUCT_VARIANT_ID: i128 = -1;
/// Sentinel `VARIANT_ID` used in [`HasField`] impls when `Self` is a union
/// type.
#[doc(hidden)]
pub const UNION_VARIANT_ID: i128 = -2;
1106
/// Projects a given field from `Self`.
///
/// All implementations of `HasField` for a particular field `f` in `Self`
/// should use the same `Field` type; this ensures that `Field` is inferable
/// given an explicit `VARIANT_ID` and `FIELD_ID`.
///
/// # Safety
///
/// A field `f` is `HasField` for `Self` if and only if:
///
/// - If `Self` is a struct or union type, then `VARIANT_ID` is
///   `STRUCT_VARIANT_ID` or `UNION_VARIANT_ID` respectively; otherwise, if
///   `Self` is an enum type, `VARIANT_ID` is the numerical index of the enum
///   variant in which `f` appears.
/// - If `f` has name `n`, `FIELD_ID` is `zerocopy::ident_id!(n)`; otherwise,
///   if `f` is at index `i`, `FIELD_ID` is `zerocopy::ident_id!(i)`.
/// - `Field` is a type with the same visibility as `f`.
/// - `Type` has the same type as `f`.
///
/// The caller must **not** assume that a pointer's referent being aligned
/// implies that calling `project` on that pointer will result in a pointer to
/// an aligned referent. For example, `HasField` may be implemented for
/// `#[repr(packed)]` structs.
///
/// The implementation of `project` must satisfy its safety post-condition.
#[doc(hidden)]
pub unsafe trait HasField<Field, const VARIANT_ID: i128, const FIELD_ID: i128> {
    // Only zerocopy's derive is expected to implement this method, which
    // prevents hand-written impls of this `unsafe` trait.
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;

    /// The type of the field.
    type Type: ?Sized;

    /// Projects from `slf` to the field.
    ///
    /// Users should generally not call `project` directly, and instead should
    /// use high-level APIs like [`PtrInner::project`] or [`Ptr::project`].
    ///
    /// # Safety
    ///
    /// The returned pointer refers to a non-strict subset of the bytes of
    /// `slf`'s referent, and has the same provenance as `slf`.
    #[must_use]
    fn project(slf: PtrInner<'_, Self>) -> *mut Self::Type;
}
1153
1154/// Analyzes whether a type is [`FromZeros`].
1155///
1156/// This derive analyzes, at compile time, whether the annotated type satisfies
1157/// the [safety conditions] of `FromZeros` and implements `FromZeros` and its
1158/// supertraits if it is sound to do so. This derive can be applied to structs,
1159/// enums, and unions; e.g.:
1160///
1161/// ```
1162/// # use zerocopy_derive::{FromZeros, Immutable};
1163/// #[derive(FromZeros)]
1164/// struct MyStruct {
1165/// # /*
1166///     ...
1167/// # */
1168/// }
1169///
1170/// #[derive(FromZeros)]
1171/// #[repr(u8)]
1172/// enum MyEnum {
1173/// #   Variant0,
1174/// # /*
1175///     ...
1176/// # */
1177/// }
1178///
1179/// #[derive(FromZeros, Immutable)]
1180/// union MyUnion {
1181/// #   variant: u8,
1182/// # /*
1183///     ...
1184/// # */
1185/// }
1186/// ```
1187///
1188/// [safety conditions]: trait@FromZeros#safety
1189///
1190/// # Analysis
1191///
1192/// *This section describes, roughly, the analysis performed by this derive to
1193/// determine whether it is sound to implement `FromZeros` for a given type.
1194/// Unless you are modifying the implementation of this derive, or attempting to
1195/// manually implement `FromZeros` for a type yourself, you don't need to read
1196/// this section.*
1197///
1198/// If a type has the following properties, then this derive can implement
1199/// `FromZeros` for that type:
1200///
1201/// - If the type is a struct, all of its fields must be `FromZeros`.
1202/// - If the type is an enum:
1203///   - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`,
1204///     `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
///   - It must have a variant with a discriminant/tag of `0`, and its fields
///     must be `FromZeros`. See [the reference] for a description of how
///     discriminant values are specified.
1208///   - The fields of that variant must be `FromZeros`.
1209///
1210/// This analysis is subject to change. Unsafe code may *only* rely on the
1211/// documented [safety conditions] of `FromZeros`, and must *not* rely on the
1212/// implementation details of this derive.
1213///
1214/// [the reference]: https://doc.rust-lang.org/reference/items/enumerations.html#custom-discriminant-values-for-fieldless-enumerations
1215///
1216/// ## Why isn't an explicit representation required for structs?
1217///
1218/// Neither this derive, nor the [safety conditions] of `FromZeros`, requires
1219/// that structs are marked with `#[repr(C)]`.
1220///
/// Per the [Rust reference][reference],
1222///
1223/// > The representation of a type can change the padding between fields, but
1224/// > does not change the layout of the fields themselves.
1225///
1226/// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations
1227///
1228/// Since the layout of structs only consists of padding bytes and field bytes,
1229/// a struct is soundly `FromZeros` if:
1230/// 1. its padding is soundly `FromZeros`, and
1231/// 2. its fields are soundly `FromZeros`.
1232///
1233/// The answer to the first question is always yes: padding bytes do not have
1234/// any validity constraints. A [discussion] of this question in the Unsafe Code
1235/// Guidelines Working Group concluded that it would be virtually unimaginable
1236/// for future versions of rustc to add validity constraints to padding bytes.
1237///
1238/// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174
1239///
1240/// Whether a struct is soundly `FromZeros` therefore solely depends on whether
1241/// its fields are `FromZeros`.
1242// FIXME(#146): Document why we don't require an enum to have an explicit `repr`
1243// attribute.
1244#[cfg(any(feature = "derive", test))]
1245#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
1246pub use zerocopy_derive::FromZeros;
1247/// Analyzes whether a type is [`Immutable`].
1248///
1249/// This derive analyzes, at compile time, whether the annotated type satisfies
1250/// the [safety conditions] of `Immutable` and implements `Immutable` if it is
1251/// sound to do so. This derive can be applied to structs, enums, and unions;
1252/// e.g.:
1253///
1254/// ```
1255/// # use zerocopy_derive::Immutable;
1256/// #[derive(Immutable)]
1257/// struct MyStruct {
1258/// # /*
1259///     ...
1260/// # */
1261/// }
1262///
1263/// #[derive(Immutable)]
1264/// enum MyEnum {
1265/// #   Variant0,
1266/// # /*
1267///     ...
1268/// # */
1269/// }
1270///
1271/// #[derive(Immutable)]
1272/// union MyUnion {
1273/// #   variant: u8,
1274/// # /*
1275///     ...
1276/// # */
1277/// }
1278/// ```
1279///
1280/// # Analysis
1281///
1282/// *This section describes, roughly, the analysis performed by this derive to
1283/// determine whether it is sound to implement `Immutable` for a given type.
1284/// Unless you are modifying the implementation of this derive, you don't need
1285/// to read this section.*
1286///
1287/// If a type has the following properties, then this derive can implement
1288/// `Immutable` for that type:
1289///
1290/// - All fields must be `Immutable`.
1291///
1292/// This analysis is subject to change. Unsafe code may *only* rely on the
1293/// documented [safety conditions] of `Immutable`, and must *not* rely on the
1294/// implementation details of this derive.
1295///
1296/// [safety conditions]: trait@Immutable#safety
1297#[cfg(any(feature = "derive", test))]
1298#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
1299pub use zerocopy_derive::Immutable;
1300
1301/// Types which are free from interior mutability.
1302///
1303/// `T: Immutable` indicates that `T` does not permit interior mutation, except
1304/// by ownership or an exclusive (`&mut`) borrow.
1305///
1306/// # Implementation
1307///
1308/// **Do not implement this trait yourself!** Instead, use
1309/// [`#[derive(Immutable)]`][derive] (requires the `derive` Cargo feature);
1310/// e.g.:
1311///
1312/// ```
1313/// # use zerocopy_derive::Immutable;
1314/// #[derive(Immutable)]
1315/// struct MyStruct {
1316/// # /*
1317///     ...
1318/// # */
1319/// }
1320///
1321/// #[derive(Immutable)]
1322/// enum MyEnum {
1323/// # /*
1324///     ...
1325/// # */
1326/// }
1327///
1328/// #[derive(Immutable)]
1329/// union MyUnion {
1330/// #   variant: u8,
1331/// # /*
1332///     ...
1333/// # */
1334/// }
1335/// ```
1336///
1337/// This derive performs a sophisticated, compile-time safety analysis to
1338/// determine whether a type is `Immutable`.
1339///
1340/// # Safety
1341///
1342/// Unsafe code outside of this crate must not make any assumptions about `T`
1343/// based on `T: Immutable`. We reserve the right to relax the requirements for
1344/// `Immutable` in the future, and if unsafe code outside of this crate makes
1345/// assumptions based on `T: Immutable`, future relaxations may cause that code
1346/// to become unsound.
1347///
1348// # Safety (Internal)
1349//
1350// If `T: Immutable`, unsafe code *inside of this crate* may assume that, given
1351// `t: &T`, `t` does not contain any [`UnsafeCell`]s at any byte location
1352// within the byte range addressed by `t`. This includes ranges of length 0
// (e.g., `UnsafeCell<()>` and `[UnsafeCell<u8>; 0]`). If a type implements
// `Immutable` in violation of this assumption, it may cause this crate to
// exhibit [undefined behavior].
1356//
1357// [`UnsafeCell`]: core::cell::UnsafeCell
1358// [undefined behavior]: https://raphlinus.github.io/programming/rust/2018/08/17/undefined-behavior.html
1359#[cfg_attr(
1360    feature = "derive",
1361    doc = "[derive]: zerocopy_derive::Immutable",
1362    doc = "[derive-analysis]: zerocopy_derive::Immutable#analysis"
1363)]
1364#[cfg_attr(
1365    not(feature = "derive"),
1366    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Immutable.html"),
1367    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Immutable.html#analysis"),
1368)]
1369#[cfg_attr(
1370    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
1371    diagnostic::on_unimplemented(note = "Consider adding `#[derive(Immutable)]` to `{Self}`")
1372)]
pub unsafe trait Immutable {
    // The `Self: Sized` bound makes it so that `Immutable` is still object
    // safe.
    //
    // Note that this is the trait's only item: `Immutable` is a pure marker
    // trait, and this uncallable method exists solely to prevent hand-written
    // impls.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;
}
1381
1382/// Implements [`TryFromBytes`].
1383///
1384/// This derive synthesizes the runtime checks required to check whether a
1385/// sequence of initialized bytes corresponds to a valid instance of a type.
1386/// This derive can be applied to structs, enums, and unions; e.g.:
1387///
1388/// ```
1389/// # use zerocopy_derive::{TryFromBytes, Immutable};
1390/// #[derive(TryFromBytes)]
1391/// struct MyStruct {
1392/// # /*
1393///     ...
1394/// # */
1395/// }
1396///
1397/// #[derive(TryFromBytes)]
1398/// #[repr(u8)]
1399/// enum MyEnum {
1400/// #   V00,
1401/// # /*
1402///     ...
1403/// # */
1404/// }
1405///
1406/// #[derive(TryFromBytes, Immutable)]
1407/// union MyUnion {
1408/// #   variant: u8,
1409/// # /*
1410///     ...
1411/// # */
1412/// }
1413/// ```
1414///
1415/// # Portability
1416///
1417/// To ensure consistent endianness for enums with multi-byte representations,
1418/// explicitly specify and convert each discriminant using `.to_le()` or
1419/// `.to_be()`; e.g.:
1420///
1421/// ```
1422/// # use zerocopy_derive::TryFromBytes;
1423/// // `DataStoreVersion` is encoded in little-endian.
1424/// #[derive(TryFromBytes)]
1425/// #[repr(u32)]
1426/// pub enum DataStoreVersion {
1427///     /// Version 1 of the data store.
1428///     V1 = 9u32.to_le(),
1429///
1430///     /// Version 2 of the data store.
1431///     V2 = 10u32.to_le(),
1432/// }
1433/// ```
1434///
1435/// [safety conditions]: trait@TryFromBytes#safety
1436#[cfg(any(feature = "derive", test))]
1437#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
1438pub use zerocopy_derive::TryFromBytes;
1439
1440/// Types for which some bit patterns are valid.
1441///
1442/// A memory region of the appropriate length which contains initialized bytes
1443/// can be viewed as a `TryFromBytes` type so long as the runtime value of those
1444/// bytes corresponds to a [*valid instance*] of that type. For example,
1445/// [`bool`] is `TryFromBytes`, so zerocopy can transmute a [`u8`] into a
1446/// [`bool`] so long as it first checks that the value of the [`u8`] is `0` or
1447/// `1`.
1448///
1449/// # Implementation
1450///
1451/// **Do not implement this trait yourself!** Instead, use
1452/// [`#[derive(TryFromBytes)]`][derive]; e.g.:
1453///
1454/// ```
1455/// # use zerocopy_derive::{TryFromBytes, Immutable};
1456/// #[derive(TryFromBytes)]
1457/// struct MyStruct {
1458/// # /*
1459///     ...
1460/// # */
1461/// }
1462///
1463/// #[derive(TryFromBytes)]
1464/// #[repr(u8)]
1465/// enum MyEnum {
1466/// #   V00,
1467/// # /*
1468///     ...
1469/// # */
1470/// }
1471///
1472/// #[derive(TryFromBytes, Immutable)]
1473/// union MyUnion {
1474/// #   variant: u8,
1475/// # /*
1476///     ...
1477/// # */
1478/// }
1479/// ```
1480///
1481/// This derive ensures that the runtime check of whether bytes correspond to a
1482/// valid instance is sound. You **must** implement this trait via the derive.
1483///
1484/// # What is a "valid instance"?
1485///
1486/// In Rust, each type has *bit validity*, which refers to the set of bit
1487/// patterns which may appear in an instance of that type. It is impossible for
1488/// safe Rust code to produce values which violate bit validity (ie, values
1489/// outside of the "valid" set of bit patterns). If `unsafe` code produces an
1490/// invalid value, this is considered [undefined behavior].
1491///
1492/// Rust's bit validity rules are currently being decided, which means that some
1493/// types have three classes of bit patterns: those which are definitely valid,
1494/// and whose validity is documented in the language; those which may or may not
1495/// be considered valid at some point in the future; and those which are
1496/// definitely invalid.
1497///
1498/// Zerocopy takes a conservative approach, and only considers a bit pattern to
1499/// be valid if its validity is a documented guarantee provided by the
1500/// language.
1501///
1502/// For most use cases, Rust's current guarantees align with programmers'
1503/// intuitions about what ought to be valid. As a result, zerocopy's
1504/// conservatism should not affect most users.
1505///
1506/// If you are negatively affected by lack of support for a particular type,
1507/// we encourage you to let us know by [filing an issue][github-repo].
1508///
1509/// # `TryFromBytes` is not symmetrical with [`IntoBytes`]
1510///
1511/// There are some types which implement both `TryFromBytes` and [`IntoBytes`],
1512/// but for which `TryFromBytes` is not guaranteed to accept all byte sequences
1513/// produced by `IntoBytes`. In other words, for some `T: TryFromBytes +
1514/// IntoBytes`, there exist values of `t: T` such that
1515/// `TryFromBytes::try_ref_from_bytes(t.as_bytes()) == None`. Code should not
1516/// generally assume that values produced by `IntoBytes` will necessarily be
1517/// accepted as valid by `TryFromBytes`.
1518///
1519/// # Safety
1520///
1521/// On its own, `T: TryFromBytes` does not make any guarantees about the layout
1522/// or representation of `T`. It merely provides the ability to perform a
1523/// validity check at runtime via methods like [`try_ref_from_bytes`].
1524///
1525/// You must not rely on the `#[doc(hidden)]` internals of `TryFromBytes`.
1526/// Future releases of zerocopy may make backwards-breaking changes to these
1527/// items, including changes that only affect soundness, which may cause code
1528/// which uses those items to silently become unsound.
1529///
1530/// [undefined behavior]: https://raphlinus.github.io/programming/rust/2018/08/17/undefined-behavior.html
1531/// [github-repo]: https://github.com/google/zerocopy
1532/// [`try_ref_from_bytes`]: TryFromBytes::try_ref_from_bytes
1533/// [*valid instance*]: #what-is-a-valid-instance
1534#[cfg_attr(feature = "derive", doc = "[derive]: zerocopy_derive::TryFromBytes")]
1535#[cfg_attr(
1536    not(feature = "derive"),
1537    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.TryFromBytes.html"),
1538)]
1539#[cfg_attr(
1540    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
1541    diagnostic::on_unimplemented(note = "Consider adding `#[derive(TryFromBytes)]` to `{Self}`")
1542)]
1543pub unsafe trait TryFromBytes {
1544    // The `Self: Sized` bound makes it so that `TryFromBytes` is still object
1545    // safe.
    // Only zerocopy's derive is expected to provide an implementation of this
    // method, which prevents hand-written impls of this `unsafe` trait.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;
1550
    /// Does a given memory range contain a valid instance of `Self`?
    ///
    /// # Safety
    ///
    /// Unsafe code may assume that, if `is_bit_valid(candidate)` returns true,
    /// `*candidate` contains a valid `Self`.
    ///
    /// # Panics
    ///
    /// `is_bit_valid` may panic. Callers are responsible for ensuring that any
    /// `unsafe` code remains sound even in the face of `is_bit_valid`
    /// panicking. (We support user-defined validation routines; so long as
    /// these routines are not required to be `unsafe`, there is no way to
    /// ensure that these do not generate panics.)
    ///
    /// Besides user-defined validation routines panicking, `is_bit_valid` will
    /// either panic or fail to compile if called on a pointer with [`Shared`]
    /// aliasing when `Self: !Immutable`.
    ///
    /// [`UnsafeCell`]: core::cell::UnsafeCell
    /// [`Shared`]: invariant::Shared
    #[doc(hidden)]
    // NOTE(review): `candidate` is presumably a pointer to a not-yet-validated
    // instance of `Self` — confirm against the `Maybe` alias's definition.
    fn is_bit_valid<A: invariant::Reference>(candidate: Maybe<'_, Self, A>) -> bool;
1574
1575    /// Attempts to interpret the given `source` as a `&Self`.
1576    ///
1577    /// If the bytes of `source` are a valid instance of `Self`, this method
1578    /// returns a reference to those bytes interpreted as a `Self`. If the
1579    /// length of `source` is not a [valid size of `Self`][valid-size], or if
1580    /// `source` is not appropriately aligned, or if `source` is not a valid
1581    /// instance of `Self`, this returns `Err`. If [`Self:
1582    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
1583    /// error][ConvertError::from].
1584    ///
1585    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
1586    ///
1587    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
1588    /// [self-unaligned]: Unaligned
1589    /// [slice-dst]: KnownLayout#dynamically-sized-types
1590    ///
1591    /// # Compile-Time Assertions
1592    ///
1593    /// This method cannot yet be used on unsized types whose dynamically-sized
1594    /// component is zero-sized. Attempting to use this method on such types
1595    /// results in a compile-time assertion error; e.g.:
1596    ///
1597    /// ```compile_fail,E0080
1598    /// use zerocopy::*;
1599    /// # use zerocopy_derive::*;
1600    ///
1601    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
1602    /// #[repr(C)]
1603    /// struct ZSTy {
1604    ///     leading_sized: u16,
1605    ///     trailing_dst: [()],
1606    /// }
1607    ///
1608    /// let _ = ZSTy::try_ref_from_bytes(0u16.as_bytes()); // âš  Compile Error!
1609    /// ```
1610    ///
1611    /// # Examples
1612    ///
1613    /// ```
1614    /// use zerocopy::TryFromBytes;
1615    /// # use zerocopy_derive::*;
1616    ///
1617    /// // The only valid value of this type is the byte `0xC0`
1618    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1619    /// #[repr(u8)]
1620    /// enum C0 { xC0 = 0xC0 }
1621    ///
1622    /// // The only valid value of this type is the byte sequence `0xC0C0`.
1623    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1624    /// #[repr(C)]
1625    /// struct C0C0(C0, C0);
1626    ///
1627    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1628    /// #[repr(C)]
1629    /// struct Packet {
1630    ///     magic_number: C0C0,
1631    ///     mug_size: u8,
1632    ///     temperature: u8,
1633    ///     marshmallows: [[u8; 2]],
1634    /// }
1635    ///
1636    /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..];
1637    ///
1638    /// let packet = Packet::try_ref_from_bytes(bytes).unwrap();
1639    ///
1640    /// assert_eq!(packet.mug_size, 240);
1641    /// assert_eq!(packet.temperature, 77);
1642    /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
1643    ///
1644    /// // These bytes are not valid instance of `Packet`.
1645    /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..];
1646    /// assert!(Packet::try_ref_from_bytes(bytes).is_err());
1647    /// ```
1648    #[must_use = "has no side effects"]
1649    #[inline]
1650    fn try_ref_from_bytes(source: &[u8]) -> Result<&Self, TryCastError<&[u8], Self>>
1651    where
1652        Self: KnownLayout + Immutable,
1653    {
1654        static_assert_dst_is_not_zst!(Self);
1655        match Ptr::from_ref(source).try_cast_into_no_leftover::<Self, BecauseImmutable>(None) {
1656            Ok(source) => {
1657                // This call may panic. If that happens, it doesn't cause any soundness
1658                // issues, as we have not generated any invalid state which we need to
1659                // fix before returning.
1660                //
1661                // Note that one panic or post-monomorphization error condition is
1662                // calling `try_into_valid` (and thus `is_bit_valid`) with a shared
1663                // pointer when `Self: !Immutable`. Since `Self: Immutable`, this panic
1664                // condition will not happen.
1665                match source.try_into_valid() {
1666                    Ok(valid) => Ok(valid.as_ref()),
1667                    Err(e) => {
1668                        Err(e.map_src(|src| src.as_bytes::<BecauseImmutable>().as_ref()).into())
1669                    }
1670                }
1671            }
1672            Err(e) => Err(e.map_src(Ptr::as_ref).into()),
1673        }
1674    }
1675
    /// Attempts to interpret the prefix of the given `source` as a `&Self`.
    ///
    /// This method computes the [largest possible size of `Self`][valid-size]
    /// that can fit in the leading bytes of `source`. If that prefix is a valid
    /// instance of `Self`, this method returns a reference to those bytes
    /// interpreted as `Self`, and a reference to the remaining bytes. If there
    /// are insufficient bytes, or if `source` is not appropriately aligned, or
    /// if those bytes are not a valid instance of `Self`, this returns `Err`.
    /// If [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
    /// alignment error][ConvertError::from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. Attempting to use this method on such types
    /// results in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct ZSTy {
    ///     leading_sized: u16,
    ///     trailing_dst: [()],
    /// }
    ///
    /// let _ = ZSTy::try_ref_from_prefix(0u16.as_bytes()); // ⚠ Compile Error!
    /// ```
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::TryFromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// // The only valid value of this type is the byte `0xC0`
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(u8)]
    /// enum C0 { xC0 = 0xC0 }
    ///
    /// // The only valid value of this type is the bytes `0xC0C0`.
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct C0C0(C0, C0);
    ///
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct Packet {
    ///     magic_number: C0C0,
    ///     mug_size: u8,
    ///     temperature: u8,
    ///     marshmallows: [[u8; 2]],
    /// }
    ///
    /// // These are more bytes than are needed to encode a `Packet`.
    /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
    ///
    /// let (packet, suffix) = Packet::try_ref_from_prefix(bytes).unwrap();
    ///
    /// assert_eq!(packet.mug_size, 240);
    /// assert_eq!(packet.temperature, 77);
    /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
    /// assert_eq!(suffix, &[6u8][..]);
    ///
    /// // These bytes are not a valid instance of `Packet`.
    /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
    /// assert!(Packet::try_ref_from_prefix(bytes).is_err());
    /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn try_ref_from_prefix(source: &[u8]) -> Result<(&Self, &[u8]), TryCastError<&[u8], Self>>
    where
        Self: KnownLayout + Immutable,
    {
        static_assert_dst_is_not_zst!(Self);
        // Delegate to the shared prefix/suffix helper, which performs the
        // layout cast and then the bit-validity check.
        try_ref_from_prefix_suffix(source, CastType::Prefix, None)
    }
1761
1762    /// Attempts to interpret the suffix of the given `source` as a `&Self`.
1763    ///
1764    /// This method computes the [largest possible size of `Self`][valid-size]
1765    /// that can fit in the trailing bytes of `source`. If that suffix is a
1766    /// valid instance of `Self`, this method returns a reference to those bytes
1767    /// interpreted as `Self`, and a reference to the preceding bytes. If there
1768    /// are insufficient bytes, or if the suffix of `source` would not be
1769    /// appropriately aligned, or if the suffix is not a valid instance of
1770    /// `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned], you
1771    /// can [infallibly discard the alignment error][ConvertError::from].
1772    ///
1773    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
1774    ///
1775    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
1776    /// [self-unaligned]: Unaligned
1777    /// [slice-dst]: KnownLayout#dynamically-sized-types
1778    ///
1779    /// # Compile-Time Assertions
1780    ///
1781    /// This method cannot yet be used on unsized types whose dynamically-sized
1782    /// component is zero-sized. Attempting to use this method on such types
1783    /// results in a compile-time assertion error; e.g.:
1784    ///
1785    /// ```compile_fail,E0080
1786    /// use zerocopy::*;
1787    /// # use zerocopy_derive::*;
1788    ///
1789    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
1790    /// #[repr(C)]
1791    /// struct ZSTy {
1792    ///     leading_sized: u16,
1793    ///     trailing_dst: [()],
1794    /// }
1795    ///
1796    /// let _ = ZSTy::try_ref_from_suffix(0u16.as_bytes()); // âš  Compile Error!
1797    /// ```
1798    ///
1799    /// # Examples
1800    ///
1801    /// ```
1802    /// use zerocopy::TryFromBytes;
1803    /// # use zerocopy_derive::*;
1804    ///
1805    /// // The only valid value of this type is the byte `0xC0`
1806    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1807    /// #[repr(u8)]
1808    /// enum C0 { xC0 = 0xC0 }
1809    ///
1810    /// // The only valid value of this type is the bytes `0xC0C0`.
1811    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1812    /// #[repr(C)]
1813    /// struct C0C0(C0, C0);
1814    ///
1815    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
1816    /// #[repr(C)]
1817    /// struct Packet {
1818    ///     magic_number: C0C0,
1819    ///     mug_size: u8,
1820    ///     temperature: u8,
1821    ///     marshmallows: [[u8; 2]],
1822    /// }
1823    ///
1824    /// // These are more bytes than are needed to encode a `Packet`.
1825    /// let bytes = &[0, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
1826    ///
1827    /// let (prefix, packet) = Packet::try_ref_from_suffix(bytes).unwrap();
1828    ///
1829    /// assert_eq!(packet.mug_size, 240);
1830    /// assert_eq!(packet.temperature, 77);
1831    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
1832    /// assert_eq!(prefix, &[0u8][..]);
1833    ///
1834    /// // These bytes are not valid instance of `Packet`.
1835    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0x10][..];
1836    /// assert!(Packet::try_ref_from_suffix(bytes).is_err());
1837    /// ```
1838    #[must_use = "has no side effects"]
1839    #[inline]
1840    fn try_ref_from_suffix(source: &[u8]) -> Result<(&[u8], &Self), TryCastError<&[u8], Self>>
1841    where
1842        Self: KnownLayout + Immutable,
1843    {
1844        static_assert_dst_is_not_zst!(Self);
1845        try_ref_from_prefix_suffix(source, CastType::Suffix, None).map(swap)
1846    }
1847
1848    /// Attempts to interpret the given `source` as a `&mut Self` without
1849    /// copying.
1850    ///
1851    /// If the bytes of `source` are a valid instance of `Self`, this method
1852    /// returns a reference to those bytes interpreted as a `Self`. If the
1853    /// length of `source` is not a [valid size of `Self`][valid-size], or if
1854    /// `source` is not appropriately aligned, or if `source` is not a valid
1855    /// instance of `Self`, this returns `Err`. If [`Self:
1856    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
1857    /// error][ConvertError::from].
1858    ///
1859    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
1860    ///
1861    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
1862    /// [self-unaligned]: Unaligned
1863    /// [slice-dst]: KnownLayout#dynamically-sized-types
1864    ///
1865    /// # Compile-Time Assertions
1866    ///
1867    /// This method cannot yet be used on unsized types whose dynamically-sized
1868    /// component is zero-sized. Attempting to use this method on such types
1869    /// results in a compile-time assertion error; e.g.:
1870    ///
1871    /// ```compile_fail,E0080
1872    /// use zerocopy::*;
1873    /// # use zerocopy_derive::*;
1874    ///
1875    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
1876    /// #[repr(C, packed)]
1877    /// struct ZSTy {
1878    ///     leading_sized: [u8; 2],
1879    ///     trailing_dst: [()],
1880    /// }
1881    ///
1882    /// let mut source = [85, 85];
1883    /// let _ = ZSTy::try_mut_from_bytes(&mut source[..]); // âš  Compile Error!
1884    /// ```
1885    ///
1886    /// # Examples
1887    ///
1888    /// ```
1889    /// use zerocopy::TryFromBytes;
1890    /// # use zerocopy_derive::*;
1891    ///
1892    /// // The only valid value of this type is the byte `0xC0`
1893    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
1894    /// #[repr(u8)]
1895    /// enum C0 { xC0 = 0xC0 }
1896    ///
1897    /// // The only valid value of this type is the bytes `0xC0C0`.
1898    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
1899    /// #[repr(C)]
1900    /// struct C0C0(C0, C0);
1901    ///
1902    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
1903    /// #[repr(C, packed)]
1904    /// struct Packet {
1905    ///     magic_number: C0C0,
1906    ///     mug_size: u8,
1907    ///     temperature: u8,
1908    ///     marshmallows: [[u8; 2]],
1909    /// }
1910    ///
1911    /// let bytes = &mut [0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5][..];
1912    ///
1913    /// let packet = Packet::try_mut_from_bytes(bytes).unwrap();
1914    ///
1915    /// assert_eq!(packet.mug_size, 240);
1916    /// assert_eq!(packet.temperature, 77);
1917    /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
1918    ///
1919    /// packet.temperature = 111;
1920    ///
1921    /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 0, 1, 2, 3, 4, 5]);
1922    ///
1923    /// // These bytes are not valid instance of `Packet`.
1924    /// let bytes = &mut [0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
1925    /// assert!(Packet::try_mut_from_bytes(bytes).is_err());
1926    /// ```
1927    #[must_use = "has no side effects"]
1928    #[inline]
1929    fn try_mut_from_bytes(bytes: &mut [u8]) -> Result<&mut Self, TryCastError<&mut [u8], Self>>
1930    where
1931        Self: KnownLayout + IntoBytes,
1932    {
1933        static_assert_dst_is_not_zst!(Self);
1934        match Ptr::from_mut(bytes).try_cast_into_no_leftover::<Self, BecauseExclusive>(None) {
1935            Ok(source) => {
1936                // This call may panic. If that happens, it doesn't cause any soundness
1937                // issues, as we have not generated any invalid state which we need to
1938                // fix before returning.
1939                //
1940                // Note that one panic or post-monomorphization error condition is
1941                // calling `try_into_valid` (and thus `is_bit_valid`) with a shared
1942                // pointer when `Self: !Immutable`. Since `Self: Immutable`, this panic
1943                // condition will not happen.
1944                match source.try_into_valid() {
1945                    Ok(source) => Ok(source.as_mut()),
1946                    Err(e) => {
1947                        Err(e.map_src(|src| src.as_bytes::<BecauseExclusive>().as_mut()).into())
1948                    }
1949                }
1950            }
1951            Err(e) => Err(e.map_src(Ptr::as_mut).into()),
1952        }
1953    }
1954
    /// Attempts to interpret the prefix of the given `source` as a `&mut
    /// Self`.
    ///
    /// This method computes the [largest possible size of `Self`][valid-size]
    /// that can fit in the leading bytes of `source`. If that prefix is a valid
    /// instance of `Self`, this method returns a reference to those bytes
    /// interpreted as `Self`, and a reference to the remaining bytes. If there
    /// are insufficient bytes, or if `source` is not appropriately aligned, or
    /// if the bytes are not a valid instance of `Self`, this returns `Err`. If
    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
    /// alignment error][ConvertError::from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. Attempting to use this method on such types
    /// results in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let mut source = [85, 85];
    /// let _ = ZSTy::try_mut_from_prefix(&mut source[..]); // ⚠ Compile Error!
    /// ```
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::TryFromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// // The only valid value of this type is the byte `0xC0`
    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
    /// #[repr(u8)]
    /// enum C0 { xC0 = 0xC0 }
    ///
    /// // The only valid value of this type is the bytes `0xC0C0`.
    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
    /// #[repr(C)]
    /// struct C0C0(C0, C0);
    ///
    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct Packet {
    ///     magic_number: C0C0,
    ///     mug_size: u8,
    ///     temperature: u8,
    ///     marshmallows: [[u8; 2]],
    /// }
    ///
    /// // These are more bytes than are needed to encode a `Packet`.
    /// let bytes = &mut [0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
    ///
    /// let (packet, suffix) = Packet::try_mut_from_prefix(bytes).unwrap();
    ///
    /// assert_eq!(packet.mug_size, 240);
    /// assert_eq!(packet.temperature, 77);
    /// assert_eq!(packet.marshmallows, [[0, 1], [2, 3], [4, 5]]);
    /// assert_eq!(suffix, &[6u8][..]);
    ///
    /// packet.temperature = 111;
    /// suffix[0] = 222;
    ///
    /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 0, 1, 2, 3, 4, 5, 222]);
    ///
    /// // These bytes are not a valid instance of `Packet`.
    /// let bytes = &mut [0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
    /// assert!(Packet::try_mut_from_prefix(bytes).is_err());
    /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn try_mut_from_prefix(
        source: &mut [u8],
    ) -> Result<(&mut Self, &mut [u8]), TryCastError<&mut [u8], Self>>
    where
        Self: KnownLayout + IntoBytes,
    {
        static_assert_dst_is_not_zst!(Self);
        // Delegate to the shared prefix/suffix helper, which performs the
        // layout cast and then the bit-validity check.
        try_mut_from_prefix_suffix(source, CastType::Prefix, None)
    }
2049
2050    /// Attempts to interpret the suffix of the given `source` as a `&mut
2051    /// Self`.
2052    ///
2053    /// This method computes the [largest possible size of `Self`][valid-size]
2054    /// that can fit in the trailing bytes of `source`. If that suffix is a
2055    /// valid instance of `Self`, this method returns a reference to those bytes
2056    /// interpreted as `Self`, and a reference to the preceding bytes. If there
2057    /// are insufficient bytes, or if the suffix of `source` would not be
2058    /// appropriately aligned, or if the suffix is not a valid instance of
2059    /// `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned], you
2060    /// can [infallibly discard the alignment error][ConvertError::from].
2061    ///
2062    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
2063    ///
2064    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
2065    /// [self-unaligned]: Unaligned
2066    /// [slice-dst]: KnownLayout#dynamically-sized-types
2067    ///
2068    /// # Compile-Time Assertions
2069    ///
2070    /// This method cannot yet be used on unsized types whose dynamically-sized
2071    /// component is zero-sized. Attempting to use this method on such types
2072    /// results in a compile-time assertion error; e.g.:
2073    ///
2074    /// ```compile_fail,E0080
2075    /// use zerocopy::*;
2076    /// # use zerocopy_derive::*;
2077    ///
2078    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2079    /// #[repr(C, packed)]
2080    /// struct ZSTy {
2081    ///     leading_sized: u16,
2082    ///     trailing_dst: [()],
2083    /// }
2084    ///
2085    /// let mut source = [85, 85];
2086    /// let _ = ZSTy::try_mut_from_suffix(&mut source[..]); // âš  Compile Error!
2087    /// ```
2088    ///
2089    /// # Examples
2090    ///
2091    /// ```
2092    /// use zerocopy::TryFromBytes;
2093    /// # use zerocopy_derive::*;
2094    ///
2095    /// // The only valid value of this type is the byte `0xC0`
2096    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2097    /// #[repr(u8)]
2098    /// enum C0 { xC0 = 0xC0 }
2099    ///
2100    /// // The only valid value of this type is the bytes `0xC0C0`.
2101    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2102    /// #[repr(C)]
2103    /// struct C0C0(C0, C0);
2104    ///
2105    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2106    /// #[repr(C, packed)]
2107    /// struct Packet {
2108    ///     magic_number: C0C0,
2109    ///     mug_size: u8,
2110    ///     temperature: u8,
2111    ///     marshmallows: [[u8; 2]],
2112    /// }
2113    ///
2114    /// // These are more bytes than are needed to encode a `Packet`.
2115    /// let bytes = &mut [0, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2116    ///
2117    /// let (prefix, packet) = Packet::try_mut_from_suffix(bytes).unwrap();
2118    ///
2119    /// assert_eq!(packet.mug_size, 240);
2120    /// assert_eq!(packet.temperature, 77);
2121    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2122    /// assert_eq!(prefix, &[0u8][..]);
2123    ///
2124    /// prefix[0] = 111;
2125    /// packet.temperature = 222;
2126    ///
2127    /// assert_eq!(bytes, [111, 0xC0, 0xC0, 240, 222, 2, 3, 4, 5, 6, 7]);
2128    ///
2129    /// // These bytes are not valid instance of `Packet`.
2130    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0x10][..];
2131    /// assert!(Packet::try_mut_from_suffix(bytes).is_err());
2132    /// ```
2133    #[must_use = "has no side effects"]
2134    #[inline]
2135    fn try_mut_from_suffix(
2136        source: &mut [u8],
2137    ) -> Result<(&mut [u8], &mut Self), TryCastError<&mut [u8], Self>>
2138    where
2139        Self: KnownLayout + IntoBytes,
2140    {
2141        static_assert_dst_is_not_zst!(Self);
2142        try_mut_from_prefix_suffix(source, CastType::Suffix, None).map(swap)
2143    }
2144
2145    /// Attempts to interpret the given `source` as a `&Self` with a DST length
2146    /// equal to `count`.
2147    ///
2148    /// This method attempts to return a reference to `source` interpreted as a
2149    /// `Self` with `count` trailing elements. If the length of `source` is not
2150    /// equal to the size of `Self` with `count` elements, if `source` is not
2151    /// appropriately aligned, or if `source` does not contain a valid instance
2152    /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2153    /// you can [infallibly discard the alignment error][ConvertError::from].
2154    ///
2155    /// [self-unaligned]: Unaligned
2156    /// [slice-dst]: KnownLayout#dynamically-sized-types
2157    ///
2158    /// # Examples
2159    ///
2160    /// ```
2161    /// # #![allow(non_camel_case_types)] // For C0::xC0
2162    /// use zerocopy::TryFromBytes;
2163    /// # use zerocopy_derive::*;
2164    ///
2165    /// // The only valid value of this type is the byte `0xC0`
2166    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2167    /// #[repr(u8)]
2168    /// enum C0 { xC0 = 0xC0 }
2169    ///
2170    /// // The only valid value of this type is the bytes `0xC0C0`.
2171    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2172    /// #[repr(C)]
2173    /// struct C0C0(C0, C0);
2174    ///
2175    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2176    /// #[repr(C)]
2177    /// struct Packet {
2178    ///     magic_number: C0C0,
2179    ///     mug_size: u8,
2180    ///     temperature: u8,
2181    ///     marshmallows: [[u8; 2]],
2182    /// }
2183    ///
2184    /// let bytes = &[0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2185    ///
2186    /// let packet = Packet::try_ref_from_bytes_with_elems(bytes, 3).unwrap();
2187    ///
2188    /// assert_eq!(packet.mug_size, 240);
2189    /// assert_eq!(packet.temperature, 77);
2190    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2191    ///
2192    /// // These bytes are not valid instance of `Packet`.
2193    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0xC0][..];
2194    /// assert!(Packet::try_ref_from_bytes_with_elems(bytes, 3).is_err());
2195    /// ```
2196    ///
2197    /// Since an explicit `count` is provided, this method supports types with
2198    /// zero-sized trailing slice elements. Methods such as [`try_ref_from_bytes`]
2199    /// which do not take an explicit count do not support such types.
2200    ///
2201    /// ```
2202    /// use core::num::NonZeroU16;
2203    /// use zerocopy::*;
2204    /// # use zerocopy_derive::*;
2205    ///
2206    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
2207    /// #[repr(C)]
2208    /// struct ZSTy {
2209    ///     leading_sized: NonZeroU16,
2210    ///     trailing_dst: [()],
2211    /// }
2212    ///
2213    /// let src = 0xCAFEu16.as_bytes();
2214    /// let zsty = ZSTy::try_ref_from_bytes_with_elems(src, 42).unwrap();
2215    /// assert_eq!(zsty.trailing_dst.len(), 42);
2216    /// ```
2217    ///
2218    /// [`try_ref_from_bytes`]: TryFromBytes::try_ref_from_bytes
2219    #[must_use = "has no side effects"]
2220    #[inline]
2221    fn try_ref_from_bytes_with_elems(
2222        source: &[u8],
2223        count: usize,
2224    ) -> Result<&Self, TryCastError<&[u8], Self>>
2225    where
2226        Self: KnownLayout<PointerMetadata = usize> + Immutable,
2227    {
2228        match Ptr::from_ref(source).try_cast_into_no_leftover::<Self, BecauseImmutable>(Some(count))
2229        {
2230            Ok(source) => {
2231                // This call may panic. If that happens, it doesn't cause any soundness
2232                // issues, as we have not generated any invalid state which we need to
2233                // fix before returning.
2234                //
2235                // Note that one panic or post-monomorphization error condition is
2236                // calling `try_into_valid` (and thus `is_bit_valid`) with a shared
2237                // pointer when `Self: !Immutable`. Since `Self: Immutable`, this panic
2238                // condition will not happen.
2239                match source.try_into_valid() {
2240                    Ok(source) => Ok(source.as_ref()),
2241                    Err(e) => {
2242                        Err(e.map_src(|src| src.as_bytes::<BecauseImmutable>().as_ref()).into())
2243                    }
2244                }
2245            }
2246            Err(e) => Err(e.map_src(Ptr::as_ref).into()),
2247        }
2248    }
2249
    /// Attempts to interpret the prefix of the given `source` as a `&Self` with
    /// a DST length equal to `count`.
    ///
    /// This method attempts to return a reference to the prefix of `source`
    /// interpreted as a `Self` with `count` trailing elements, and a reference
    /// to the remaining bytes. If the length of `source` is less than the size
    /// of `Self` with `count` elements, if `source` is not appropriately
    /// aligned, or if the prefix of `source` does not contain a valid instance
    /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
    /// you can [infallibly discard the alignment error][ConvertError::from].
    ///
    /// [self-unaligned]: Unaligned
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Examples
    ///
    /// ```
    /// # #![allow(non_camel_case_types)] // For C0::xC0
    /// use zerocopy::TryFromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// // The only valid value of this type is the byte `0xC0`
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(u8)]
    /// enum C0 { xC0 = 0xC0 }
    ///
    /// // The only valid value of this type is the bytes `0xC0C0`.
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct C0C0(C0, C0);
    ///
    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct Packet {
    ///     magic_number: C0C0,
    ///     mug_size: u8,
    ///     temperature: u8,
    ///     marshmallows: [[u8; 2]],
    /// }
    ///
    /// let bytes = &[0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7, 8][..];
    ///
    /// let (packet, suffix) = Packet::try_ref_from_prefix_with_elems(bytes, 3).unwrap();
    ///
    /// assert_eq!(packet.mug_size, 240);
    /// assert_eq!(packet.temperature, 77);
    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
    /// assert_eq!(suffix, &[8u8][..]);
    ///
    /// // These bytes are not a valid instance of `Packet`.
    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
    /// assert!(Packet::try_ref_from_prefix_with_elems(bytes, 3).is_err());
    /// ```
    ///
    /// Since an explicit `count` is provided, this method supports types with
    /// zero-sized trailing slice elements. Methods such as [`try_ref_from_prefix`]
    /// which do not take an explicit count do not support such types.
    ///
    /// ```
    /// use core::num::NonZeroU16;
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct ZSTy {
    ///     leading_sized: NonZeroU16,
    ///     trailing_dst: [()],
    /// }
    ///
    /// let src = 0xCAFEu16.as_bytes();
    /// let (zsty, _) = ZSTy::try_ref_from_prefix_with_elems(src, 42).unwrap();
    /// assert_eq!(zsty.trailing_dst.len(), 42);
    /// ```
    ///
    /// [`try_ref_from_prefix`]: TryFromBytes::try_ref_from_prefix
    #[must_use = "has no side effects"]
    #[inline]
    fn try_ref_from_prefix_with_elems(
        source: &[u8],
        count: usize,
    ) -> Result<(&Self, &[u8]), TryCastError<&[u8], Self>>
    where
        Self: KnownLayout<PointerMetadata = usize> + Immutable,
    {
        // Delegate to the shared prefix/suffix helper with an explicit element
        // count, which performs the layout cast and then the bit-validity check.
        try_ref_from_prefix_suffix(source, CastType::Prefix, Some(count))
    }
2337
2338    /// Attempts to interpret the suffix of the given `source` as a `&Self` with
2339    /// a DST length equal to `count`.
2340    ///
2341    /// This method attempts to return a reference to the suffix of `source`
2342    /// interpreted as a `Self` with `count` trailing elements, and a reference
2343    /// to the preceding bytes. If the length of `source` is less than the size
2344    /// of `Self` with `count` elements, if the suffix of `source` is not
2345    /// appropriately aligned, or if the suffix of `source` does not contain a
2346    /// valid instance of `Self`, this returns `Err`. If [`Self:
2347    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
2348    /// error][ConvertError::from].
2349    ///
2350    /// [self-unaligned]: Unaligned
2351    /// [slice-dst]: KnownLayout#dynamically-sized-types
2352    ///
2353    /// # Examples
2354    ///
2355    /// ```
2356    /// # #![allow(non_camel_case_types)] // For C0::xC0
2357    /// use zerocopy::TryFromBytes;
2358    /// # use zerocopy_derive::*;
2359    ///
2360    /// // The only valid value of this type is the byte `0xC0`
2361    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2362    /// #[repr(u8)]
2363    /// enum C0 { xC0 = 0xC0 }
2364    ///
2365    /// // The only valid value of this type is the bytes `0xC0C0`.
2366    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2367    /// #[repr(C)]
2368    /// struct C0C0(C0, C0);
2369    ///
2370    /// #[derive(TryFromBytes, KnownLayout, Immutable)]
2371    /// #[repr(C)]
2372    /// struct Packet {
2373    ///     magic_number: C0C0,
2374    ///     mug_size: u8,
2375    ///     temperature: u8,
2376    ///     marshmallows: [[u8; 2]],
2377    /// }
2378    ///
2379    /// let bytes = &[123, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2380    ///
2381    /// let (prefix, packet) = Packet::try_ref_from_suffix_with_elems(bytes, 3).unwrap();
2382    ///
2383    /// assert_eq!(packet.mug_size, 240);
2384    /// assert_eq!(packet.temperature, 77);
2385    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2386    /// assert_eq!(prefix, &[123u8][..]);
2387    ///
    /// // These bytes are not a valid instance of `Packet`.
2389    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2390    /// assert!(Packet::try_ref_from_suffix_with_elems(bytes, 3).is_err());
2391    /// ```
2392    ///
2393    /// Since an explicit `count` is provided, this method supports types with
2394    /// zero-sized trailing slice elements. Methods such as [`try_ref_from_prefix`]
2395    /// which do not take an explicit count do not support such types.
2396    ///
2397    /// ```
2398    /// use core::num::NonZeroU16;
2399    /// use zerocopy::*;
2400    /// # use zerocopy_derive::*;
2401    ///
2402    /// #[derive(TryFromBytes, Immutable, KnownLayout)]
2403    /// #[repr(C)]
2404    /// struct ZSTy {
2405    ///     leading_sized: NonZeroU16,
2406    ///     trailing_dst: [()],
2407    /// }
2408    ///
2409    /// let src = 0xCAFEu16.as_bytes();
2410    /// let (_, zsty) = ZSTy::try_ref_from_suffix_with_elems(src, 42).unwrap();
2411    /// assert_eq!(zsty.trailing_dst.len(), 42);
2412    /// ```
2413    ///
2414    /// [`try_ref_from_prefix`]: TryFromBytes::try_ref_from_prefix
    #[must_use = "has no side effects"]
    #[inline]
    fn try_ref_from_suffix_with_elems(
        source: &[u8],
        count: usize,
    ) -> Result<(&[u8], &Self), TryCastError<&[u8], Self>>
    where
        Self: KnownLayout<PointerMetadata = usize> + Immutable,
    {
        // The helper always yields `(Self, remainder)`; `swap` reorders the
        // pair so that the untouched prefix bytes come first, matching this
        // method's return type.
        try_ref_from_prefix_suffix(source, CastType::Suffix, Some(count)).map(swap)
    }
2426
2427    /// Attempts to interpret the given `source` as a `&mut Self` with a DST
2428    /// length equal to `count`.
2429    ///
2430    /// This method attempts to return a reference to `source` interpreted as a
2431    /// `Self` with `count` trailing elements. If the length of `source` is not
2432    /// equal to the size of `Self` with `count` elements, if `source` is not
2433    /// appropriately aligned, or if `source` does not contain a valid instance
2434    /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2435    /// you can [infallibly discard the alignment error][ConvertError::from].
2436    ///
2437    /// [self-unaligned]: Unaligned
2438    /// [slice-dst]: KnownLayout#dynamically-sized-types
2439    ///
2440    /// # Examples
2441    ///
2442    /// ```
2443    /// # #![allow(non_camel_case_types)] // For C0::xC0
2444    /// use zerocopy::TryFromBytes;
2445    /// # use zerocopy_derive::*;
2446    ///
2447    /// // The only valid value of this type is the byte `0xC0`
2448    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2449    /// #[repr(u8)]
2450    /// enum C0 { xC0 = 0xC0 }
2451    ///
2452    /// // The only valid value of this type is the bytes `0xC0C0`.
2453    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2454    /// #[repr(C)]
2455    /// struct C0C0(C0, C0);
2456    ///
2457    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2458    /// #[repr(C, packed)]
2459    /// struct Packet {
2460    ///     magic_number: C0C0,
2461    ///     mug_size: u8,
2462    ///     temperature: u8,
2463    ///     marshmallows: [[u8; 2]],
2464    /// }
2465    ///
2466    /// let bytes = &mut [0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2467    ///
2468    /// let packet = Packet::try_mut_from_bytes_with_elems(bytes, 3).unwrap();
2469    ///
2470    /// assert_eq!(packet.mug_size, 240);
2471    /// assert_eq!(packet.temperature, 77);
2472    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2473    ///
2474    /// packet.temperature = 111;
2475    ///
2476    /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 2, 3, 4, 5, 6, 7]);
2477    ///
    /// // These bytes are not a valid instance of `Packet`.
2479    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 77, 240, 0xC0, 0xC0][..];
2480    /// assert!(Packet::try_mut_from_bytes_with_elems(bytes, 3).is_err());
2481    /// ```
2482    ///
2483    /// Since an explicit `count` is provided, this method supports types with
2484    /// zero-sized trailing slice elements. Methods such as [`try_mut_from_bytes`]
2485    /// which do not take an explicit count do not support such types.
2486    ///
2487    /// ```
2488    /// use core::num::NonZeroU16;
2489    /// use zerocopy::*;
2490    /// # use zerocopy_derive::*;
2491    ///
2492    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2493    /// #[repr(C, packed)]
2494    /// struct ZSTy {
2495    ///     leading_sized: NonZeroU16,
2496    ///     trailing_dst: [()],
2497    /// }
2498    ///
2499    /// let mut src = 0xCAFEu16;
2500    /// let src = src.as_mut_bytes();
2501    /// let zsty = ZSTy::try_mut_from_bytes_with_elems(src, 42).unwrap();
2502    /// assert_eq!(zsty.trailing_dst.len(), 42);
2503    /// ```
2504    ///
2505    /// [`try_mut_from_bytes`]: TryFromBytes::try_mut_from_bytes
    #[must_use = "has no side effects"]
    #[inline]
    fn try_mut_from_bytes_with_elems(
        source: &mut [u8],
        count: usize,
    ) -> Result<&mut Self, TryCastError<&mut [u8], Self>>
    where
        Self: KnownLayout<PointerMetadata = usize> + IntoBytes,
    {
        // Cast the entire byte slice (no leftover bytes permitted) to a `Self`
        // with `count` trailing slice elements, checking size and alignment.
        match Ptr::from_mut(source).try_cast_into_no_leftover::<Self, BecauseExclusive>(Some(count))
        {
            Ok(source) => {
                // This call may panic. If that happens, it doesn't cause any soundness
                // issues, as we have not generated any invalid state which we need to
                // fix before returning.
                //
                // Note that one panic or post-monomorphization error condition is
                // calling `try_into_valid` (and thus `is_bit_valid`) with a shared
                // pointer when `Self: !Immutable`. Since the pointer produced by
                // `Ptr::from_mut` has exclusive aliasing, this panic condition will
                // not happen.
                match source.try_into_valid() {
                    Ok(source) => Ok(source.as_mut()),
                    Err(e) => {
                        Err(e.map_src(|src| src.as_bytes::<BecauseExclusive>().as_mut()).into())
                    }
                }
            }
            Err(e) => Err(e.map_src(Ptr::as_mut).into()),
        }
    }
2536
2537    /// Attempts to interpret the prefix of the given `source` as a `&mut Self`
2538    /// with a DST length equal to `count`.
2539    ///
2540    /// This method attempts to return a reference to the prefix of `source`
2541    /// interpreted as a `Self` with `count` trailing elements, and a reference
2542    /// to the remaining bytes. If the length of `source` is less than the size
2543    /// of `Self` with `count` elements, if `source` is not appropriately
2544    /// aligned, or if the prefix of `source` does not contain a valid instance
2545    /// of `Self`, this returns `Err`. If [`Self: Unaligned`][self-unaligned],
2546    /// you can [infallibly discard the alignment error][ConvertError::from].
2547    ///
2548    /// [self-unaligned]: Unaligned
2549    /// [slice-dst]: KnownLayout#dynamically-sized-types
2550    ///
2551    /// # Examples
2552    ///
2553    /// ```
2554    /// # #![allow(non_camel_case_types)] // For C0::xC0
2555    /// use zerocopy::TryFromBytes;
2556    /// # use zerocopy_derive::*;
2557    ///
2558    /// // The only valid value of this type is the byte `0xC0`
2559    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2560    /// #[repr(u8)]
2561    /// enum C0 { xC0 = 0xC0 }
2562    ///
2563    /// // The only valid value of this type is the bytes `0xC0C0`.
2564    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2565    /// #[repr(C)]
2566    /// struct C0C0(C0, C0);
2567    ///
2568    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2569    /// #[repr(C, packed)]
2570    /// struct Packet {
2571    ///     magic_number: C0C0,
2572    ///     mug_size: u8,
2573    ///     temperature: u8,
2574    ///     marshmallows: [[u8; 2]],
2575    /// }
2576    ///
2577    /// let bytes = &mut [0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7, 8][..];
2578    ///
2579    /// let (packet, suffix) = Packet::try_mut_from_prefix_with_elems(bytes, 3).unwrap();
2580    ///
2581    /// assert_eq!(packet.mug_size, 240);
2582    /// assert_eq!(packet.temperature, 77);
2583    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2584    /// assert_eq!(suffix, &[8u8][..]);
2585    ///
2586    /// packet.temperature = 111;
2587    /// suffix[0] = 222;
2588    ///
2589    /// assert_eq!(bytes, [0xC0, 0xC0, 240, 111, 2, 3, 4, 5, 6, 7, 222]);
2590    ///
    /// // These bytes are not a valid instance of `Packet`.
2592    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2593    /// assert!(Packet::try_mut_from_prefix_with_elems(bytes, 3).is_err());
2594    /// ```
2595    ///
2596    /// Since an explicit `count` is provided, this method supports types with
2597    /// zero-sized trailing slice elements. Methods such as [`try_mut_from_prefix`]
2598    /// which do not take an explicit count do not support such types.
2599    ///
2600    /// ```
2601    /// use core::num::NonZeroU16;
2602    /// use zerocopy::*;
2603    /// # use zerocopy_derive::*;
2604    ///
2605    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2606    /// #[repr(C, packed)]
2607    /// struct ZSTy {
2608    ///     leading_sized: NonZeroU16,
2609    ///     trailing_dst: [()],
2610    /// }
2611    ///
2612    /// let mut src = 0xCAFEu16;
2613    /// let src = src.as_mut_bytes();
2614    /// let (zsty, _) = ZSTy::try_mut_from_prefix_with_elems(src, 42).unwrap();
2615    /// assert_eq!(zsty.trailing_dst.len(), 42);
2616    /// ```
2617    ///
2618    /// [`try_mut_from_prefix`]: TryFromBytes::try_mut_from_prefix
    #[must_use = "has no side effects"]
    #[inline]
    fn try_mut_from_prefix_with_elems(
        source: &mut [u8],
        count: usize,
    ) -> Result<(&mut Self, &mut [u8]), TryCastError<&mut [u8], Self>>
    where
        Self: KnownLayout<PointerMetadata = usize> + IntoBytes,
    {
        // Delegate to the shared helper: cast the prefix of `source` to a
        // `Self` with `count` trailing slice elements, then validate that the
        // prefix bytes are a bit-valid `Self`.
        try_mut_from_prefix_suffix(source, CastType::Prefix, Some(count))
    }
2630
2631    /// Attempts to interpret the suffix of the given `source` as a `&mut Self`
2632    /// with a DST length equal to `count`.
2633    ///
2634    /// This method attempts to return a reference to the suffix of `source`
2635    /// interpreted as a `Self` with `count` trailing elements, and a reference
2636    /// to the preceding bytes. If the length of `source` is less than the size
2637    /// of `Self` with `count` elements, if the suffix of `source` is not
2638    /// appropriately aligned, or if the suffix of `source` does not contain a
2639    /// valid instance of `Self`, this returns `Err`. If [`Self:
2640    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
2641    /// error][ConvertError::from].
2642    ///
2643    /// [self-unaligned]: Unaligned
2644    /// [slice-dst]: KnownLayout#dynamically-sized-types
2645    ///
2646    /// # Examples
2647    ///
2648    /// ```
2649    /// # #![allow(non_camel_case_types)] // For C0::xC0
2650    /// use zerocopy::TryFromBytes;
2651    /// # use zerocopy_derive::*;
2652    ///
2653    /// // The only valid value of this type is the byte `0xC0`
2654    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2655    /// #[repr(u8)]
2656    /// enum C0 { xC0 = 0xC0 }
2657    ///
2658    /// // The only valid value of this type is the bytes `0xC0C0`.
2659    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2660    /// #[repr(C)]
2661    /// struct C0C0(C0, C0);
2662    ///
2663    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2664    /// #[repr(C, packed)]
2665    /// struct Packet {
2666    ///     magic_number: C0C0,
2667    ///     mug_size: u8,
2668    ///     temperature: u8,
2669    ///     marshmallows: [[u8; 2]],
2670    /// }
2671    ///
2672    /// let bytes = &mut [123, 0xC0, 0xC0, 240, 77, 2, 3, 4, 5, 6, 7][..];
2673    ///
2674    /// let (prefix, packet) = Packet::try_mut_from_suffix_with_elems(bytes, 3).unwrap();
2675    ///
2676    /// assert_eq!(packet.mug_size, 240);
2677    /// assert_eq!(packet.temperature, 77);
2678    /// assert_eq!(packet.marshmallows, [[2, 3], [4, 5], [6, 7]]);
2679    /// assert_eq!(prefix, &[123u8][..]);
2680    ///
2681    /// prefix[0] = 111;
2682    /// packet.temperature = 222;
2683    ///
2684    /// assert_eq!(bytes, [111, 0xC0, 0xC0, 240, 222, 2, 3, 4, 5, 6, 7]);
2685    ///
    /// // These bytes are not a valid instance of `Packet`.
2687    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 77, 240, 0xC0, 0xC0][..];
2688    /// assert!(Packet::try_mut_from_suffix_with_elems(bytes, 3).is_err());
2689    /// ```
2690    ///
2691    /// Since an explicit `count` is provided, this method supports types with
2692    /// zero-sized trailing slice elements. Methods such as [`try_mut_from_prefix`]
2693    /// which do not take an explicit count do not support such types.
2694    ///
2695    /// ```
2696    /// use core::num::NonZeroU16;
2697    /// use zerocopy::*;
2698    /// # use zerocopy_derive::*;
2699    ///
2700    /// #[derive(TryFromBytes, IntoBytes, KnownLayout)]
2701    /// #[repr(C, packed)]
2702    /// struct ZSTy {
2703    ///     leading_sized: NonZeroU16,
2704    ///     trailing_dst: [()],
2705    /// }
2706    ///
2707    /// let mut src = 0xCAFEu16;
2708    /// let src = src.as_mut_bytes();
2709    /// let (_, zsty) = ZSTy::try_mut_from_suffix_with_elems(src, 42).unwrap();
2710    /// assert_eq!(zsty.trailing_dst.len(), 42);
2711    /// ```
2712    ///
2713    /// [`try_mut_from_prefix`]: TryFromBytes::try_mut_from_prefix
    #[must_use = "has no side effects"]
    #[inline]
    fn try_mut_from_suffix_with_elems(
        source: &mut [u8],
        count: usize,
    ) -> Result<(&mut [u8], &mut Self), TryCastError<&mut [u8], Self>>
    where
        Self: KnownLayout<PointerMetadata = usize> + IntoBytes,
    {
        // The helper always yields `(Self, remainder)`; `swap` reorders the
        // pair so that the untouched prefix bytes come first, matching this
        // method's return type.
        try_mut_from_prefix_suffix(source, CastType::Suffix, Some(count)).map(swap)
    }
2725
2726    /// Attempts to read the given `source` as a `Self`.
2727    ///
2728    /// If `source.len() != size_of::<Self>()` or the bytes are not a valid
2729    /// instance of `Self`, this returns `Err`.
2730    ///
2731    /// # Examples
2732    ///
2733    /// ```
2734    /// use zerocopy::TryFromBytes;
2735    /// # use zerocopy_derive::*;
2736    ///
2737    /// // The only valid value of this type is the byte `0xC0`
2738    /// #[derive(TryFromBytes)]
2739    /// #[repr(u8)]
2740    /// enum C0 { xC0 = 0xC0 }
2741    ///
2742    /// // The only valid value of this type is the bytes `0xC0C0`.
2743    /// #[derive(TryFromBytes)]
2744    /// #[repr(C)]
2745    /// struct C0C0(C0, C0);
2746    ///
2747    /// #[derive(TryFromBytes)]
2748    /// #[repr(C)]
2749    /// struct Packet {
2750    ///     magic_number: C0C0,
2751    ///     mug_size: u8,
2752    ///     temperature: u8,
2753    /// }
2754    ///
2755    /// let bytes = &[0xC0, 0xC0, 240, 77][..];
2756    ///
2757    /// let packet = Packet::try_read_from_bytes(bytes).unwrap();
2758    ///
2759    /// assert_eq!(packet.mug_size, 240);
2760    /// assert_eq!(packet.temperature, 77);
2761    ///
    /// // These bytes are not a valid instance of `Packet`.
2763    /// let bytes = &mut [0x10, 0xC0, 240, 77][..];
2764    /// assert!(Packet::try_read_from_bytes(bytes).is_err());
2765    /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn try_read_from_bytes(source: &[u8]) -> Result<Self, TryReadError<&[u8], Self>>
    where
        Self: Sized,
    {
        // First, copy the bytes into an uninitialized `Self`-shaped buffer; a
        // length mismatch surfaces as a size error.
        let candidate = match CoreMaybeUninit::<Self>::read_from_bytes(source) {
            Ok(candidate) => candidate,
            Err(e) => {
                return Err(TryReadError::Size(e.with_dst()));
            }
        };
        // SAFETY: `candidate` was copied from `source: &[u8]`, so all of its
        // bytes are initialized.
        unsafe { try_read_from(source, candidate) }
    }
2782
2783    /// Attempts to read a `Self` from the prefix of the given `source`.
2784    ///
2785    /// This attempts to read a `Self` from the first `size_of::<Self>()` bytes
2786    /// of `source`, returning that `Self` and any remaining bytes. If
2787    /// `source.len() < size_of::<Self>()` or the bytes are not a valid instance
2788    /// of `Self`, it returns `Err`.
2789    ///
2790    /// # Examples
2791    ///
2792    /// ```
2793    /// use zerocopy::TryFromBytes;
2794    /// # use zerocopy_derive::*;
2795    ///
2796    /// // The only valid value of this type is the byte `0xC0`
2797    /// #[derive(TryFromBytes)]
2798    /// #[repr(u8)]
2799    /// enum C0 { xC0 = 0xC0 }
2800    ///
2801    /// // The only valid value of this type is the bytes `0xC0C0`.
2802    /// #[derive(TryFromBytes)]
2803    /// #[repr(C)]
2804    /// struct C0C0(C0, C0);
2805    ///
2806    /// #[derive(TryFromBytes)]
2807    /// #[repr(C)]
2808    /// struct Packet {
2809    ///     magic_number: C0C0,
2810    ///     mug_size: u8,
2811    ///     temperature: u8,
2812    /// }
2813    ///
2814    /// // These are more bytes than are needed to encode a `Packet`.
2815    /// let bytes = &[0xC0, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
2816    ///
2817    /// let (packet, suffix) = Packet::try_read_from_prefix(bytes).unwrap();
2818    ///
2819    /// assert_eq!(packet.mug_size, 240);
2820    /// assert_eq!(packet.temperature, 77);
2821    /// assert_eq!(suffix, &[0u8, 1, 2, 3, 4, 5, 6][..]);
2822    ///
    /// // These bytes are not a valid instance of `Packet`.
2824    /// let bytes = &[0x10, 0xC0, 240, 77, 0, 1, 2, 3, 4, 5, 6][..];
2825    /// assert!(Packet::try_read_from_prefix(bytes).is_err());
2826    /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn try_read_from_prefix(source: &[u8]) -> Result<(Self, &[u8]), TryReadError<&[u8], Self>>
    where
        Self: Sized,
    {
        // Copy the leading `size_of::<Self>()` bytes into an uninitialized
        // buffer, keeping the untouched suffix to hand back to the caller.
        let (candidate, suffix) = match CoreMaybeUninit::<Self>::read_from_prefix(source) {
            Ok(candidate) => candidate,
            Err(e) => {
                return Err(TryReadError::Size(e.with_dst()));
            }
        };
        // SAFETY: `candidate` was copied from `source: &[u8]`, so all of its
        // bytes are initialized.
        unsafe { try_read_from(source, candidate).map(|slf| (slf, suffix)) }
    }
2843
2844    /// Attempts to read a `Self` from the suffix of the given `source`.
2845    ///
2846    /// This attempts to read a `Self` from the last `size_of::<Self>()` bytes
2847    /// of `source`, returning that `Self` and any preceding bytes. If
2848    /// `source.len() < size_of::<Self>()` or the bytes are not a valid instance
2849    /// of `Self`, it returns `Err`.
2850    ///
2851    /// # Examples
2852    ///
2853    /// ```
2854    /// # #![allow(non_camel_case_types)] // For C0::xC0
2855    /// use zerocopy::TryFromBytes;
2856    /// # use zerocopy_derive::*;
2857    ///
2858    /// // The only valid value of this type is the byte `0xC0`
2859    /// #[derive(TryFromBytes)]
2860    /// #[repr(u8)]
2861    /// enum C0 { xC0 = 0xC0 }
2862    ///
2863    /// // The only valid value of this type is the bytes `0xC0C0`.
2864    /// #[derive(TryFromBytes)]
2865    /// #[repr(C)]
2866    /// struct C0C0(C0, C0);
2867    ///
2868    /// #[derive(TryFromBytes)]
2869    /// #[repr(C)]
2870    /// struct Packet {
2871    ///     magic_number: C0C0,
2872    ///     mug_size: u8,
2873    ///     temperature: u8,
2874    /// }
2875    ///
2876    /// // These are more bytes than are needed to encode a `Packet`.
2877    /// let bytes = &[0, 1, 2, 3, 4, 5, 0xC0, 0xC0, 240, 77][..];
2878    ///
2879    /// let (prefix, packet) = Packet::try_read_from_suffix(bytes).unwrap();
2880    ///
2881    /// assert_eq!(packet.mug_size, 240);
2882    /// assert_eq!(packet.temperature, 77);
2883    /// assert_eq!(prefix, &[0u8, 1, 2, 3, 4, 5][..]);
2884    ///
    /// // These bytes are not a valid instance of `Packet`.
2886    /// let bytes = &[0, 1, 2, 3, 4, 5, 0x10, 0xC0, 240, 77][..];
2887    /// assert!(Packet::try_read_from_suffix(bytes).is_err());
2888    /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn try_read_from_suffix(source: &[u8]) -> Result<(&[u8], Self), TryReadError<&[u8], Self>>
    where
        Self: Sized,
    {
        // Copy the trailing `size_of::<Self>()` bytes into an uninitialized
        // buffer, keeping the untouched prefix to hand back to the caller.
        let (prefix, candidate) = match CoreMaybeUninit::<Self>::read_from_suffix(source) {
            Ok(candidate) => candidate,
            Err(e) => {
                return Err(TryReadError::Size(e.with_dst()));
            }
        };
        // SAFETY: `candidate` was copied from `source: &[u8]`, so all of its
        // bytes are initialized.
        unsafe { try_read_from(source, candidate).map(|slf| (prefix, slf)) }
    }
2905}
2906
2907#[inline(always)]
2908fn try_ref_from_prefix_suffix<T: TryFromBytes + KnownLayout + Immutable + ?Sized>(
2909    source: &[u8],
2910    cast_type: CastType,
2911    meta: Option<T::PointerMetadata>,
2912) -> Result<(&T, &[u8]), TryCastError<&[u8], T>> {
2913    match Ptr::from_ref(source).try_cast_into::<T, BecauseImmutable>(cast_type, meta) {
2914        Ok((source, prefix_suffix)) => {
2915            // This call may panic. If that happens, it doesn't cause any soundness
2916            // issues, as we have not generated any invalid state which we need to
2917            // fix before returning.
2918            //
2919            // Note that one panic or post-monomorphization error condition is
2920            // calling `try_into_valid` (and thus `is_bit_valid`) with a shared
2921            // pointer when `Self: !Immutable`. Since `Self: Immutable`, this panic
2922            // condition will not happen.
2923            match source.try_into_valid() {
2924                Ok(valid) => Ok((valid.as_ref(), prefix_suffix.as_ref())),
2925                Err(e) => Err(e.map_src(|src| src.as_bytes::<BecauseImmutable>().as_ref()).into()),
2926            }
2927        }
2928        Err(e) => Err(e.map_src(Ptr::as_ref).into()),
2929    }
2930}
2931
2932#[inline(always)]
2933fn try_mut_from_prefix_suffix<T: IntoBytes + TryFromBytes + KnownLayout + ?Sized>(
2934    candidate: &mut [u8],
2935    cast_type: CastType,
2936    meta: Option<T::PointerMetadata>,
2937) -> Result<(&mut T, &mut [u8]), TryCastError<&mut [u8], T>> {
2938    match Ptr::from_mut(candidate).try_cast_into::<T, BecauseExclusive>(cast_type, meta) {
2939        Ok((candidate, prefix_suffix)) => {
2940            // This call may panic. If that happens, it doesn't cause any soundness
2941            // issues, as we have not generated any invalid state which we need to
2942            // fix before returning.
2943            //
2944            // Note that one panic or post-monomorphization error condition is
2945            // calling `try_into_valid` (and thus `is_bit_valid`) with a shared
2946            // pointer when `Self: !Immutable`. Since `Self: Immutable`, this panic
2947            // condition will not happen.
2948            match candidate.try_into_valid() {
2949                Ok(valid) => Ok((valid.as_mut(), prefix_suffix.as_mut())),
2950                Err(e) => Err(e.map_src(|src| src.as_bytes::<BecauseExclusive>().as_mut()).into()),
2951            }
2952        }
2953        Err(e) => Err(e.map_src(Ptr::as_mut).into()),
2954    }
2955}
2956
/// Reverses the order of a pair's elements.
#[inline(always)]
fn swap<T, U>(pair: (T, U)) -> (U, T) {
    let (first, second) = pair;
    (second, first)
}
2961
/// # Safety
///
/// All bytes of `candidate` must be initialized.
#[inline(always)]
unsafe fn try_read_from<S, T: TryFromBytes>(
    source: S,
    mut candidate: CoreMaybeUninit<T>,
) -> Result<T, TryReadError<S, T>> {
    // We use `from_mut` despite not mutating via `c_ptr` so that we don't need
    // to add a `T: Immutable` bound.
    let c_ptr = Ptr::from_mut(&mut candidate);
    // SAFETY: `c_ptr` has no uninitialized sub-ranges because it derived from
    // `candidate`, which the caller promises is entirely initialized. Since
    // `candidate` is a `MaybeUninit`, it has no validity requirements, and so
    // no values written to an `Initialized` `c_ptr` can violate its validity.
    // Since `c_ptr` has `Exclusive` aliasing, no mutations may happen except
    // via `c_ptr` so long as it is live, so we don't need to worry about the
    // fact that `c_ptr` may have more restricted validity than `candidate`.
    let c_ptr = unsafe { c_ptr.assume_validity::<invariant::Initialized>() };
    let c_ptr = c_ptr.transmute();

    // Since we don't have `T: KnownLayout`, we hack around that by using
    // `Wrapping<T>`, which implements `KnownLayout` even if `T` doesn't.
    //
    // This call may panic. If that happens, it doesn't cause any soundness
    // issues, as we have not generated any invalid state which we need to fix
    // before returning.
    //
    // Note that one panic or post-monomorphization error condition is calling
    // `is_bit_valid` with a shared pointer when the referent type is
    // `!Immutable`. Since `c_ptr` has `Exclusive` aliasing (it was constructed
    // via `Ptr::from_mut` above), this panic condition will not happen.
    if !Wrapping::<T>::is_bit_valid(c_ptr.forget_aligned()) {
        return Err(ValidityError::new(source).into());
    }

    // Compile-time assertion that `Wrapping<T>` and `T` have identical size
    // and bit validity; this is what justifies `assume_init` below after
    // having only validated a `Wrapping<T>`.
    fn _assert_same_size_and_validity<T>()
    where
        Wrapping<T>: pointer::TransmuteFrom<T, invariant::Valid, invariant::Valid>,
        T: pointer::TransmuteFrom<Wrapping<T>, invariant::Valid, invariant::Valid>,
    {
    }

    _assert_same_size_and_validity::<T>();

    // SAFETY: We just validated that `candidate` contains a valid
    // `Wrapping<T>`, which has the same size and bit validity as `T`, as
    // guaranteed by the preceding type assertion.
    Ok(unsafe { candidate.assume_init() })
}
3012
3013/// Types for which a sequence of `0` bytes is a valid instance.
3014///
3015/// Any memory region of the appropriate length which is guaranteed to contain
3016/// only zero bytes can be viewed as any `FromZeros` type with no runtime
3017/// overhead. This is useful whenever memory is known to be in a zeroed state,
/// such as memory returned from some allocation routines.
3019///
3020/// # Warning: Padding bytes
3021///
3022/// Note that, when a value is moved or copied, only the non-padding bytes of
3023/// that value are guaranteed to be preserved. It is unsound to assume that
3024/// values written to padding bytes are preserved after a move or copy. For more
3025/// details, see the [`FromBytes` docs][frombytes-warning-padding-bytes].
3026///
3027/// [frombytes-warning-padding-bytes]: FromBytes#warning-padding-bytes
3028///
3029/// # Implementation
3030///
3031/// **Do not implement this trait yourself!** Instead, use
3032/// [`#[derive(FromZeros)]`][derive]; e.g.:
3033///
3034/// ```
3035/// # use zerocopy_derive::{FromZeros, Immutable};
3036/// #[derive(FromZeros)]
3037/// struct MyStruct {
3038/// # /*
3039///     ...
3040/// # */
3041/// }
3042///
3043/// #[derive(FromZeros)]
3044/// #[repr(u8)]
3045/// enum MyEnum {
3046/// #   Variant0,
3047/// # /*
3048///     ...
3049/// # */
3050/// }
3051///
3052/// #[derive(FromZeros, Immutable)]
3053/// union MyUnion {
3054/// #   variant: u8,
3055/// # /*
3056///     ...
3057/// # */
3058/// }
3059/// ```
3060///
3061/// This derive performs a sophisticated, compile-time safety analysis to
3062/// determine whether a type is `FromZeros`.
3063///
3064/// # Safety
3065///
3066/// *This section describes what is required in order for `T: FromZeros`, and
3067/// what unsafe code may assume of such types. If you don't plan on implementing
3068/// `FromZeros` manually, and you don't plan on writing unsafe code that
3069/// operates on `FromZeros` types, then you don't need to read this section.*
3070///
3071/// If `T: FromZeros`, then unsafe code may assume that it is sound to produce a
3072/// `T` whose bytes are all initialized to zero. If a type is marked as
3073/// `FromZeros` which violates this contract, it may cause undefined behavior.
3074///
3075/// `#[derive(FromZeros)]` only permits [types which satisfy these
3076/// requirements][derive-analysis].
3077///
3078#[cfg_attr(
3079    feature = "derive",
3080    doc = "[derive]: zerocopy_derive::FromZeros",
3081    doc = "[derive-analysis]: zerocopy_derive::FromZeros#analysis"
3082)]
3083#[cfg_attr(
3084    not(feature = "derive"),
3085    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromZeros.html"),
3086    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromZeros.html#analysis"),
3087)]
3088#[cfg_attr(
3089    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
3090    diagnostic::on_unimplemented(note = "Consider adding `#[derive(FromZeros)]` to `{Self}`")
3091)]
3092pub unsafe trait FromZeros: TryFromBytes {
    // The `Self: Sized` bound makes it so that `FromZeros` is still object
    // safe.
    //
    // Per its name, requiring this hidden, derive-provided method is how the
    // crate discourages hand-written `impl`s of this `unsafe` trait.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;
3099
3100    /// Overwrites `self` with zeros.
3101    ///
3102    /// Sets every byte in `self` to 0. While this is similar to doing `*self =
3103    /// Self::new_zeroed()`, it differs in that `zero` does not semantically
3104    /// drop the current value and replace it with a new one — it simply
3105    /// modifies the bytes of the existing value.
3106    ///
3107    /// # Examples
3108    ///
3109    /// ```
3110    /// # use zerocopy::FromZeros;
3111    /// # use zerocopy_derive::*;
3112    /// #
3113    /// #[derive(FromZeros)]
3114    /// #[repr(C)]
3115    /// struct PacketHeader {
3116    ///     src_port: [u8; 2],
3117    ///     dst_port: [u8; 2],
3118    ///     length: [u8; 2],
3119    ///     checksum: [u8; 2],
3120    /// }
3121    ///
3122    /// let mut header = PacketHeader {
3123    ///     src_port: 100u16.to_be_bytes(),
3124    ///     dst_port: 200u16.to_be_bytes(),
3125    ///     length: 300u16.to_be_bytes(),
3126    ///     checksum: 400u16.to_be_bytes(),
3127    /// };
3128    ///
3129    /// header.zero();
3130    ///
3131    /// assert_eq!(header.src_port, [0, 0]);
3132    /// assert_eq!(header.dst_port, [0, 0]);
3133    /// assert_eq!(header.length, [0, 0]);
3134    /// assert_eq!(header.checksum, [0, 0]);
3135    /// ```
3136    #[inline(always)]
3137    fn zero(&mut self) {
3138        let slf: *mut Self = self;
3139        let len = mem::size_of_val(self);
3140        // SAFETY:
3141        // - `self` is guaranteed by the type system to be valid for writes of
3142        //   size `size_of_val(self)`.
3143        // - `u8`'s alignment is 1, and thus `self` is guaranteed to be aligned
3144        //   as required by `u8`.
3145        // - Since `Self: FromZeros`, the all-zeros instance is a valid instance
3146        //   of `Self.`
3147        //
3148        // FIXME(#429): Add references to docs and quotes.
3149        unsafe { ptr::write_bytes(slf.cast::<u8>(), 0, len) };
3150    }
3151
3152    /// Creates an instance of `Self` from zeroed bytes.
3153    ///
3154    /// # Examples
3155    ///
3156    /// ```
3157    /// # use zerocopy::FromZeros;
3158    /// # use zerocopy_derive::*;
3159    /// #
3160    /// #[derive(FromZeros)]
3161    /// #[repr(C)]
3162    /// struct PacketHeader {
3163    ///     src_port: [u8; 2],
3164    ///     dst_port: [u8; 2],
3165    ///     length: [u8; 2],
3166    ///     checksum: [u8; 2],
3167    /// }
3168    ///
3169    /// let header: PacketHeader = FromZeros::new_zeroed();
3170    ///
3171    /// assert_eq!(header.src_port, [0, 0]);
3172    /// assert_eq!(header.dst_port, [0, 0]);
3173    /// assert_eq!(header.length, [0, 0]);
3174    /// assert_eq!(header.checksum, [0, 0]);
3175    /// ```
3176    #[must_use = "has no side effects"]
3177    #[inline(always)]
3178    fn new_zeroed() -> Self
3179    where
3180        Self: Sized,
3181    {
3182        // SAFETY: `FromZeros` says that the all-zeros bit pattern is legal.
3183        unsafe { mem::zeroed() }
3184    }
3185
3186    /// Creates a `Box<Self>` from zeroed bytes.
3187    ///
3188    /// This function is useful for allocating large values on the heap and
3189    /// zero-initializing them, without ever creating a temporary instance of
3190    /// `Self` on the stack. For example, `<[u8; 1048576]>::new_box_zeroed()`
3191    /// will allocate `[u8; 1048576]` directly on the heap; it does not require
3192    /// storing `[u8; 1048576]` in a temporary variable on the stack.
3193    ///
3194    /// On systems that use a heap implementation that supports allocating from
3195    /// pre-zeroed memory, using `new_box_zeroed` (or related functions) may
3196    /// have performance benefits.
3197    ///
3198    /// # Errors
3199    ///
3200    /// Returns an error on allocation failure. Allocation failure is guaranteed
3201    /// never to cause a panic or an abort.
3202    #[must_use = "has no side effects (other than allocation)"]
3203    #[cfg(any(feature = "alloc", test))]
3204    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
3205    #[inline]
3206    fn new_box_zeroed() -> Result<Box<Self>, AllocError>
3207    where
3208        Self: Sized,
3209    {
3210        // If `T` is a ZST, then return a proper boxed instance of it. There is
3211        // no allocation, but `Box` does require a correct dangling pointer.
3212        let layout = Layout::new::<Self>();
3213        if layout.size() == 0 {
3214            // Construct the `Box` from a dangling pointer to avoid calling
3215            // `Self::new_zeroed`. This ensures that stack space is never
3216            // allocated for `Self` even on lower opt-levels where this branch
3217            // might not get optimized out.
3218
3219            // SAFETY: Per [1], when `T` is a ZST, `Box<T>`'s only validity
3220            // requirements are that the pointer is non-null and sufficiently
3221            // aligned. Per [2], `NonNull::dangling` produces a pointer which
3222            // is sufficiently aligned. Since the produced pointer is a
3223            // `NonNull`, it is non-null.
3224            //
3225            // [1] Per https://doc.rust-lang.org/1.81.0/std/boxed/index.html#memory-layout:
3226            //
3227            //   For zero-sized values, the `Box` pointer has to be non-null and sufficiently aligned.
3228            //
3229            // [2] Per https://doc.rust-lang.org/std/ptr/struct.NonNull.html#method.dangling:
3230            //
3231            //   Creates a new `NonNull` that is dangling, but well-aligned.
3232            return Ok(unsafe { Box::from_raw(NonNull::dangling().as_ptr()) });
3233        }
3234
3235        // FIXME(#429): Add a "SAFETY" comment and remove this `allow`.
3236        #[allow(clippy::undocumented_unsafe_blocks)]
3237        let ptr = unsafe { alloc::alloc::alloc_zeroed(layout).cast::<Self>() };
3238        if ptr.is_null() {
3239            return Err(AllocError);
3240        }
3241        // FIXME(#429): Add a "SAFETY" comment and remove this `allow`.
3242        #[allow(clippy::undocumented_unsafe_blocks)]
3243        Ok(unsafe { Box::from_raw(ptr) })
3244    }
3245
    /// Creates a `Box<[Self]>` (a boxed slice) from zeroed bytes.
    ///
    /// This function is useful for allocating large values of `[Self]` on the
    /// heap and zero-initializing them, without ever creating a temporary
    /// instance of `[Self; _]` on the stack. For example,
    /// `u8::new_box_slice_zeroed(1048576)` will allocate the slice directly on
    /// the heap; it does not require storing the slice on the stack.
    ///
    /// On systems that use a heap implementation that supports allocating from
    /// pre-zeroed memory, using `new_box_slice_zeroed` may have performance
    /// benefits.
    ///
    /// If `Self` is a zero-sized type, then this function will return a
    /// `Box<[Self]>` that has the correct `len`. Such a box cannot contain any
    /// actual information, but its `len()` property will report the correct
    /// value.
    ///
    /// # Errors
    ///
    /// Returns an error on allocation failure. Allocation failure is
    /// guaranteed never to cause a panic or an abort.
    #[must_use = "has no side effects (other than allocation)"]
    #[cfg(feature = "alloc")]
    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
    #[inline]
    fn new_box_zeroed_with_elems(count: usize) -> Result<Box<Self>, AllocError>
    where
        Self: KnownLayout<PointerMetadata = usize>,
    {
        // SAFETY: `alloc::alloc::alloc_zeroed` is a valid argument of
        // `new_box`. The referent of the pointer returned by `alloc_zeroed`
        // (and, consequently, the `Box` derived from it) is a valid instance of
        // `Self`, because `Self` is `FromZeros`.
        //
        // NOTE(review): `new_box`'s full contract is defined in `crate::util`;
        // the justification above should be kept in sync with it.
        unsafe { crate::util::new_box(count, alloc::alloc::alloc_zeroed) }
    }
3281
    #[deprecated(since = "0.8.0", note = "renamed to `FromZeros::new_box_zeroed_with_elems`")]
    #[doc(hidden)]
    #[cfg(feature = "alloc")]
    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
    #[must_use = "has no side effects (other than allocation)"]
    #[inline(always)]
    fn new_box_slice_zeroed(len: usize) -> Result<Box<[Self]>, AllocError>
    where
        Self: Sized,
    {
        // Deprecated alias: forward to the renamed method on `[Self]`.
        <[Self]>::new_box_zeroed_with_elems(len)
    }
3294
3295    /// Creates a `Vec<Self>` from zeroed bytes.
3296    ///
3297    /// This function is useful for allocating large values of `Vec`s and
3298    /// zero-initializing them, without ever creating a temporary instance of
3299    /// `[Self; _]` (or many temporary instances of `Self`) on the stack. For
3300    /// example, `u8::new_vec_zeroed(1048576)` will allocate directly on the
3301    /// heap; it does not require storing intermediate values on the stack.
3302    ///
3303    /// On systems that use a heap implementation that supports allocating from
3304    /// pre-zeroed memory, using `new_vec_zeroed` may have performance benefits.
3305    ///
3306    /// If `Self` is a zero-sized type, then this function will return a
3307    /// `Vec<Self>` that has the correct `len`. Such a `Vec` cannot contain any
3308    /// actual information, but its `len()` property will report the correct
3309    /// value.
3310    ///
3311    /// # Errors
3312    ///
3313    /// Returns an error on allocation failure. Allocation failure is
3314    /// guaranteed never to cause a panic or an abort.
3315    #[must_use = "has no side effects (other than allocation)"]
3316    #[cfg(feature = "alloc")]
3317    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
3318    #[inline(always)]
3319    fn new_vec_zeroed(len: usize) -> Result<Vec<Self>, AllocError>
3320    where
3321        Self: Sized,
3322    {
3323        <[Self]>::new_box_zeroed_with_elems(len).map(Into::into)
3324    }
3325
3326    /// Extends a `Vec<Self>` by pushing `additional` new items onto the end of
3327    /// the vector. The new items are initialized with zeros.
3328    #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
3329    #[cfg(feature = "alloc")]
3330    #[cfg_attr(doc_cfg, doc(cfg(all(rust = "1.57.0", feature = "alloc"))))]
3331    #[inline(always)]
3332    fn extend_vec_zeroed(v: &mut Vec<Self>, additional: usize) -> Result<(), AllocError>
3333    where
3334        Self: Sized,
3335    {
3336        // PANICS: We pass `v.len()` for `position`, so the `position > v.len()`
3337        // panic condition is not satisfied.
3338        <Self as FromZeros>::insert_vec_zeroed(v, v.len(), additional)
3339    }
3340
    /// Inserts `additional` new items into `Vec<Self>` at `position`. The new
    /// items are initialized with zeros.
    ///
    /// # Panics
    ///
    /// Panics if `position > v.len()`.
    #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
    #[cfg(feature = "alloc")]
    #[cfg_attr(doc_cfg, doc(cfg(all(rust = "1.57.0", feature = "alloc"))))]
    #[inline]
    fn insert_vec_zeroed(
        v: &mut Vec<Self>,
        position: usize,
        additional: usize,
    ) -> Result<(), AllocError>
    where
        Self: Sized,
    {
        assert!(position <= v.len());
        // We only conditionally compile on versions on which `try_reserve` is
        // stable; the Clippy lint is a false positive.
        v.try_reserve(additional).map_err(|_| AllocError)?;
        // SAFETY: The `try_reserve` call guarantees that these cannot overflow:
        // * `ptr.add(position)`
        // * `position + additional`
        // * `v.len() + additional`
        //
        // `v.len() - position` cannot overflow because we asserted that
        // `position <= v.len()`.
        #[allow(clippy::multiple_unsafe_ops_per_block)]
        unsafe {
            // This is a potentially overlapping copy.
            let ptr = v.as_mut_ptr();
            // Shift the tail `[position, len)` forward by `additional`
            // elements, opening a gap of `additional` elements at `position`.
            #[allow(clippy::arithmetic_side_effects)]
            ptr.add(position).copy_to(ptr.add(position + additional), v.len() - position);
            // Zero-fill the gap. Since `Self: FromZeros`, the all-zeros bytes
            // written here constitute valid instances of `Self`.
            ptr.add(position).write_bytes(0, additional);
            // Every element in `[0, len + additional)` is now initialized, so
            // extending the length is sound.
            #[allow(clippy::arithmetic_side_effects)]
            v.set_len(v.len() + additional);
        }

        Ok(())
    }
3383}
3384
3385/// Analyzes whether a type is [`FromBytes`].
3386///
3387/// This derive analyzes, at compile time, whether the annotated type satisfies
3388/// the [safety conditions] of `FromBytes` and implements `FromBytes` and its
3389/// supertraits if it is sound to do so. This derive can be applied to structs,
3390/// enums, and unions;
3391/// e.g.:
3392///
3393/// ```
3394/// # use zerocopy_derive::{FromBytes, FromZeros, Immutable};
3395/// #[derive(FromBytes)]
3396/// struct MyStruct {
3397/// # /*
3398///     ...
3399/// # */
3400/// }
3401///
3402/// #[derive(FromBytes)]
3403/// #[repr(u8)]
3404/// enum MyEnum {
3405/// #   V00, V01, V02, V03, V04, V05, V06, V07, V08, V09, V0A, V0B, V0C, V0D, V0E,
3406/// #   V0F, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V1A, V1B, V1C, V1D,
3407/// #   V1E, V1F, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V2A, V2B, V2C,
3408/// #   V2D, V2E, V2F, V30, V31, V32, V33, V34, V35, V36, V37, V38, V39, V3A, V3B,
3409/// #   V3C, V3D, V3E, V3F, V40, V41, V42, V43, V44, V45, V46, V47, V48, V49, V4A,
3410/// #   V4B, V4C, V4D, V4E, V4F, V50, V51, V52, V53, V54, V55, V56, V57, V58, V59,
3411/// #   V5A, V5B, V5C, V5D, V5E, V5F, V60, V61, V62, V63, V64, V65, V66, V67, V68,
3412/// #   V69, V6A, V6B, V6C, V6D, V6E, V6F, V70, V71, V72, V73, V74, V75, V76, V77,
3413/// #   V78, V79, V7A, V7B, V7C, V7D, V7E, V7F, V80, V81, V82, V83, V84, V85, V86,
3414/// #   V87, V88, V89, V8A, V8B, V8C, V8D, V8E, V8F, V90, V91, V92, V93, V94, V95,
3415/// #   V96, V97, V98, V99, V9A, V9B, V9C, V9D, V9E, V9F, VA0, VA1, VA2, VA3, VA4,
3416/// #   VA5, VA6, VA7, VA8, VA9, VAA, VAB, VAC, VAD, VAE, VAF, VB0, VB1, VB2, VB3,
3417/// #   VB4, VB5, VB6, VB7, VB8, VB9, VBA, VBB, VBC, VBD, VBE, VBF, VC0, VC1, VC2,
3418/// #   VC3, VC4, VC5, VC6, VC7, VC8, VC9, VCA, VCB, VCC, VCD, VCE, VCF, VD0, VD1,
3419/// #   VD2, VD3, VD4, VD5, VD6, VD7, VD8, VD9, VDA, VDB, VDC, VDD, VDE, VDF, VE0,
3420/// #   VE1, VE2, VE3, VE4, VE5, VE6, VE7, VE8, VE9, VEA, VEB, VEC, VED, VEE, VEF,
3421/// #   VF0, VF1, VF2, VF3, VF4, VF5, VF6, VF7, VF8, VF9, VFA, VFB, VFC, VFD, VFE,
3422/// #   VFF,
3423/// # /*
3424///     ...
3425/// # */
3426/// }
3427///
3428/// #[derive(FromBytes, Immutable)]
3429/// union MyUnion {
3430/// #   variant: u8,
3431/// # /*
3432///     ...
3433/// # */
3434/// }
3435/// ```
3436///
3437/// [safety conditions]: trait@FromBytes#safety
3438///
3439/// # Analysis
3440///
3441/// *This section describes, roughly, the analysis performed by this derive to
3442/// determine whether it is sound to implement `FromBytes` for a given type.
3443/// Unless you are modifying the implementation of this derive, or attempting to
3444/// manually implement `FromBytes` for a type yourself, you don't need to read
3445/// this section.*
3446///
3447/// If a type has the following properties, then this derive can implement
3448/// `FromBytes` for that type:
3449///
3450/// - If the type is a struct, all of its fields must be `FromBytes`.
3451/// - If the type is an enum:
3452///   - It must have a defined representation which is one of `u8`, `u16`, `i8`,
3453///     or `i16`.
3454///   - The maximum number of discriminants must be used (so that every possible
3455///     bit pattern is a valid one).
3456///   - Its fields must be `FromBytes`.
3457///
3458/// This analysis is subject to change. Unsafe code may *only* rely on the
3459/// documented [safety conditions] of `FromBytes`, and must *not* rely on the
3460/// implementation details of this derive.
3461///
3462/// ## Why isn't an explicit representation required for structs?
3463///
3464/// Neither this derive, nor the [safety conditions] of `FromBytes`, requires
3465/// that structs are marked with `#[repr(C)]`.
3466///
/// Per the [Rust reference][reference],
3468///
3469/// > The representation of a type can change the padding between fields, but
3470/// > does not change the layout of the fields themselves.
3471///
3472/// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations
3473///
3474/// Since the layout of structs only consists of padding bytes and field bytes,
3475/// a struct is soundly `FromBytes` if:
3476/// 1. its padding is soundly `FromBytes`, and
3477/// 2. its fields are soundly `FromBytes`.
3478///
3479/// The answer to the first question is always yes: padding bytes do not have
3480/// any validity constraints. A [discussion] of this question in the Unsafe Code
3481/// Guidelines Working Group concluded that it would be virtually unimaginable
3482/// for future versions of rustc to add validity constraints to padding bytes.
3483///
3484/// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174
3485///
3486/// Whether a struct is soundly `FromBytes` therefore solely depends on whether
3487/// its fields are `FromBytes`.
// Re-export the `FromBytes` derive macro so that it is available as
// `zerocopy::FromBytes` whenever the `derive` feature (or a test build) is
// enabled.
#[cfg(any(feature = "derive", test))]
#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
pub use zerocopy_derive::FromBytes;
3491
3492/// Types for which any bit pattern is valid.
3493///
3494/// Any memory region of the appropriate length which contains initialized bytes
3495/// can be viewed as any `FromBytes` type with no runtime overhead. This is
3496/// useful for efficiently parsing bytes as structured data.
3497///
3498/// # Warning: Padding bytes
3499///
3500/// Note that, when a value is moved or copied, only the non-padding bytes of
3501/// that value are guaranteed to be preserved. It is unsound to assume that
3502/// values written to padding bytes are preserved after a move or copy. For
3503/// example, the following is unsound:
3504///
3505/// ```rust,no_run
3506/// use core::mem::{size_of, transmute};
3507/// use zerocopy::FromZeros;
3508/// # use zerocopy_derive::*;
3509///
3510/// // Assume `Foo` is a type with padding bytes.
3511/// #[derive(FromZeros, Default)]
3512/// struct Foo {
3513/// # /*
3514///     ...
3515/// # */
3516/// }
3517///
3518/// let mut foo: Foo = Foo::default();
3519/// FromZeros::zero(&mut foo);
3520/// // UNSOUND: Although `FromZeros::zero` writes zeros to all bytes of `foo`,
3521/// // those writes are not guaranteed to be preserved in padding bytes when
3522/// // `foo` is moved, so this may expose padding bytes as `u8`s.
3523/// let foo_bytes: [u8; size_of::<Foo>()] = unsafe { transmute(foo) };
3524/// ```
3525///
3526/// # Implementation
3527///
3528/// **Do not implement this trait yourself!** Instead, use
3529/// [`#[derive(FromBytes)]`][derive]; e.g.:
3530///
3531/// ```
3532/// # use zerocopy_derive::{FromBytes, Immutable};
3533/// #[derive(FromBytes)]
3534/// struct MyStruct {
3535/// # /*
3536///     ...
3537/// # */
3538/// }
3539///
3540/// #[derive(FromBytes)]
3541/// #[repr(u8)]
3542/// enum MyEnum {
3543/// #   V00, V01, V02, V03, V04, V05, V06, V07, V08, V09, V0A, V0B, V0C, V0D, V0E,
3544/// #   V0F, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V1A, V1B, V1C, V1D,
3545/// #   V1E, V1F, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V2A, V2B, V2C,
3546/// #   V2D, V2E, V2F, V30, V31, V32, V33, V34, V35, V36, V37, V38, V39, V3A, V3B,
3547/// #   V3C, V3D, V3E, V3F, V40, V41, V42, V43, V44, V45, V46, V47, V48, V49, V4A,
3548/// #   V4B, V4C, V4D, V4E, V4F, V50, V51, V52, V53, V54, V55, V56, V57, V58, V59,
3549/// #   V5A, V5B, V5C, V5D, V5E, V5F, V60, V61, V62, V63, V64, V65, V66, V67, V68,
3550/// #   V69, V6A, V6B, V6C, V6D, V6E, V6F, V70, V71, V72, V73, V74, V75, V76, V77,
3551/// #   V78, V79, V7A, V7B, V7C, V7D, V7E, V7F, V80, V81, V82, V83, V84, V85, V86,
3552/// #   V87, V88, V89, V8A, V8B, V8C, V8D, V8E, V8F, V90, V91, V92, V93, V94, V95,
3553/// #   V96, V97, V98, V99, V9A, V9B, V9C, V9D, V9E, V9F, VA0, VA1, VA2, VA3, VA4,
3554/// #   VA5, VA6, VA7, VA8, VA9, VAA, VAB, VAC, VAD, VAE, VAF, VB0, VB1, VB2, VB3,
3555/// #   VB4, VB5, VB6, VB7, VB8, VB9, VBA, VBB, VBC, VBD, VBE, VBF, VC0, VC1, VC2,
3556/// #   VC3, VC4, VC5, VC6, VC7, VC8, VC9, VCA, VCB, VCC, VCD, VCE, VCF, VD0, VD1,
3557/// #   VD2, VD3, VD4, VD5, VD6, VD7, VD8, VD9, VDA, VDB, VDC, VDD, VDE, VDF, VE0,
3558/// #   VE1, VE2, VE3, VE4, VE5, VE6, VE7, VE8, VE9, VEA, VEB, VEC, VED, VEE, VEF,
3559/// #   VF0, VF1, VF2, VF3, VF4, VF5, VF6, VF7, VF8, VF9, VFA, VFB, VFC, VFD, VFE,
3560/// #   VFF,
3561/// # /*
3562///     ...
3563/// # */
3564/// }
3565///
3566/// #[derive(FromBytes, Immutable)]
3567/// union MyUnion {
3568/// #   variant: u8,
3569/// # /*
3570///     ...
3571/// # */
3572/// }
3573/// ```
3574///
3575/// This derive performs a sophisticated, compile-time safety analysis to
3576/// determine whether a type is `FromBytes`.
3577///
3578/// # Safety
3579///
3580/// *This section describes what is required in order for `T: FromBytes`, and
3581/// what unsafe code may assume of such types. If you don't plan on implementing
3582/// `FromBytes` manually, and you don't plan on writing unsafe code that
3583/// operates on `FromBytes` types, then you don't need to read this section.*
3584///
3585/// If `T: FromBytes`, then unsafe code may assume that it is sound to produce a
3586/// `T` whose bytes are initialized to any sequence of valid `u8`s (in other
3587/// words, any byte value which is not uninitialized). If a type is marked as
3588/// `FromBytes` which violates this contract, it may cause undefined behavior.
3589///
3590/// `#[derive(FromBytes)]` only permits [types which satisfy these
3591/// requirements][derive-analysis].
3592///
3593#[cfg_attr(
3594    feature = "derive",
3595    doc = "[derive]: zerocopy_derive::FromBytes",
3596    doc = "[derive-analysis]: zerocopy_derive::FromBytes#analysis"
3597)]
3598#[cfg_attr(
3599    not(feature = "derive"),
3600    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromBytes.html"),
3601    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.FromBytes.html#analysis"),
3602)]
3603#[cfg_attr(
3604    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
3605    diagnostic::on_unimplemented(note = "Consider adding `#[derive(FromBytes)]` to `{Self}`")
3606)]
3607pub unsafe trait FromBytes: FromZeros {
    // The `Self: Sized` bound makes it so that `FromBytes` is still object
    // safe.
    //
    // As the name suggests, this hidden required method exists to discourage
    // manual `impl`s: only `#[derive(FromBytes)]` is expected to provide it.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;
3614
    /// Interprets the given `source` as a `&Self`.
    ///
    /// This method attempts to return a reference to `source` interpreted as a
    /// `Self`. If the length of `source` is not a [valid size of
    /// `Self`][valid-size], or if `source` is not appropriately aligned, this
    /// returns `Err`. If [`Self: Unaligned`][self-unaligned], you can
    /// [infallibly discard the alignment error][size-error-from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. Attempting to use this method on such types
    /// results in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct ZSTy {
    ///     leading_sized: u16,
    ///     trailing_dst: [()],
    /// }
    ///
    /// let _ = ZSTy::ref_from_bytes(0u16.as_bytes()); // ⚠ Compile Error!
    /// ```
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// #[derive(FromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct Packet {
    ///     header: PacketHeader,
    ///     body: [u8],
    /// }
    ///
    /// // These bytes encode a `Packet`.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11][..];
    ///
    /// let packet = Packet::ref_from_bytes(bytes).unwrap();
    ///
    /// assert_eq!(packet.header.src_port, [0, 1]);
    /// assert_eq!(packet.header.dst_port, [2, 3]);
    /// assert_eq!(packet.header.length, [4, 5]);
    /// assert_eq!(packet.header.checksum, [6, 7]);
    /// assert_eq!(packet.body, [8, 9, 10, 11]);
    /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn ref_from_bytes(source: &[u8]) -> Result<&Self, CastError<&[u8], Self>>
    where
        Self: KnownLayout + Immutable,
    {
        static_assert_dst_is_not_zst!(Self);
        // Attempt a "no leftover" cast: every byte of `source` must belong to
        // the resulting `Self`.
        // NOTE(review): `None` presumably means the pointer metadata is
        // inferred from `source`'s length rather than specified by the caller
        // — confirm against `try_cast_into_no_leftover`'s definition.
        match Ptr::from_ref(source).try_cast_into_no_leftover::<_, BecauseImmutable>(None) {
            // Since `Self: FromBytes`, any initialized bytes of a valid size
            // are a bit-valid `Self`, so the cast result's validity can be
            // recalled and the pointer converted to a reference.
            Ok(ptr) => Ok(ptr.recall_validity().as_ref()),
            // On failure, map the error's source pointer back to the original
            // `&[u8]` so the caller gets their input back.
            Err(err) => Err(err.map_src(|src| src.as_ref())),
        }
    }
3695
    /// Interprets the prefix of the given `source` as a `&Self` without
    /// copying.
    ///
    /// This method computes the [largest possible size of `Self`][valid-size]
    /// that can fit in the leading bytes of `source`, then attempts to return
    /// both a reference to those bytes interpreted as a `Self`, and a reference
    /// to the remaining bytes. If there are insufficient bytes, or if `source`
    /// is not appropriately aligned, this returns `Err`. If [`Self:
    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
    /// error][size-error-from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. See [`ref_from_prefix_with_elems`], which does
    /// support such types. Attempting to use this method on such types results
    /// in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct ZSTy {
    ///     leading_sized: u16,
    ///     trailing_dst: [()],
    /// }
    ///
    /// let _ = ZSTy::ref_from_prefix(0u16.as_bytes()); // ⚠ Compile Error!
    /// ```
    ///
    /// [`ref_from_prefix_with_elems`]: FromBytes::ref_from_prefix_with_elems
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// #[derive(FromBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct Packet {
    ///     header: PacketHeader,
    ///     body: [[u8; 2]],
    /// }
    ///
    /// // These are more bytes than are needed to encode a `Packet`.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14][..];
    ///
    /// let (packet, suffix) = Packet::ref_from_prefix(bytes).unwrap();
    ///
    /// assert_eq!(packet.header.src_port, [0, 1]);
    /// assert_eq!(packet.header.dst_port, [2, 3]);
    /// assert_eq!(packet.header.length, [4, 5]);
    /// assert_eq!(packet.header.checksum, [6, 7]);
    /// assert_eq!(packet.body, [[8, 9], [10, 11], [12, 13]]);
    /// assert_eq!(suffix, &[14u8][..]);
    /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn ref_from_prefix(source: &[u8]) -> Result<(&Self, &[u8]), CastError<&[u8], Self>>
    where
        Self: KnownLayout + Immutable,
    {
        static_assert_dst_is_not_zst!(Self);
        // Delegate to the shared prefix/suffix helper; `CastType::Prefix`
        // selects the leading bytes of `source` as the `Self` candidate.
        // NOTE(review): `None` presumably means "use the largest size of
        // `Self` that fits" — confirm against `ref_from_prefix_suffix`'s
        // definition.
        ref_from_prefix_suffix(source, None, CastType::Prefix)
    }
3780
    /// Interprets the suffix of the given bytes as a `&Self`.
    ///
    /// This method computes the [largest possible size of `Self`][valid-size]
    /// that can fit in the trailing bytes of `source`, then attempts to return
    /// both a reference to those bytes interpreted as a `Self`, and a reference
    /// to the preceding bytes. If there are insufficient bytes, or if that
    /// suffix of `source` is not appropriately aligned, this returns `Err`. If
    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
    /// alignment error][size-error-from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. See [`ref_from_suffix_with_elems`], which does
    /// support such types. Attempting to use this method on such types results
    /// in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct ZSTy {
    ///     leading_sized: u16,
    ///     trailing_dst: [()],
    /// }
    ///
    /// let _ = ZSTy::ref_from_suffix(0u16.as_bytes()); // ⚠ Compile Error!
    /// ```
    ///
    /// [`ref_from_suffix_with_elems`]: FromBytes::ref_from_suffix_with_elems
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct PacketTrailer {
    ///     frame_check_sequence: [u8; 4],
    /// }
    ///
    /// // These are more bytes than are needed to encode a `PacketTrailer`.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let (prefix, trailer) = PacketTrailer::ref_from_suffix(bytes).unwrap();
    ///
    /// assert_eq!(prefix, &[0, 1, 2, 3, 4, 5][..]);
    /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
    /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn ref_from_suffix(source: &[u8]) -> Result<(&[u8], &Self), CastError<&[u8], Self>>
    where
        Self: Immutable + KnownLayout,
    {
        static_assert_dst_is_not_zst!(Self);
        // Delegate to the shared helper with `CastType::Suffix`, then `swap`
        // the resulting pair so the untouched prefix comes first, matching
        // this method's `(&[u8], &Self)` return order.
        ref_from_prefix_suffix(source, None, CastType::Suffix).map(swap)
    }
3850
    /// Interprets the given `source` as a `&mut Self`.
    ///
    /// This method attempts to return a reference to `source` interpreted as a
    /// `Self`. If the length of `source` is not a [valid size of
    /// `Self`][valid-size], or if `source` is not appropriately aligned, this
    /// returns `Err`. If [`Self: Unaligned`][self-unaligned], you can
    /// [infallibly discard the alignment error][size-error-from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. See [`mut_from_bytes_with_elems`], which does
    /// support such types. Attempting to use this method on such types results
    /// in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let mut source = [85, 85];
    /// let _ = ZSTy::mut_from_bytes(&mut source[..]); // ⚠ Compile Error!
    /// ```
    ///
    /// [`mut_from_bytes_with_elems`]: FromBytes::mut_from_bytes_with_elems
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// // These bytes encode a `PacketHeader`.
    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7][..];
    ///
    /// let header = PacketHeader::mut_from_bytes(bytes).unwrap();
    ///
    /// assert_eq!(header.src_port, [0, 1]);
    /// assert_eq!(header.dst_port, [2, 3]);
    /// assert_eq!(header.length, [4, 5]);
    /// assert_eq!(header.checksum, [6, 7]);
    ///
    /// header.checksum = [0, 0];
    ///
    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0]);
    /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn mut_from_bytes(source: &mut [u8]) -> Result<&mut Self, CastError<&mut [u8], Self>>
    where
        Self: IntoBytes + KnownLayout,
    {
        static_assert_dst_is_not_zst!(Self);
        // Cast the entire slice to `Self` — no leftover bytes are permitted
        // (`_no_leftover`) — then recall that `Self: FromBytes` makes any
        // initialized byte pattern a valid `Self`.
        match Ptr::from_mut(source).try_cast_into_no_leftover::<_, BecauseExclusive>(None) {
            Ok(ptr) => Ok(ptr.recall_validity::<_, (_, (_, _))>().as_mut()),
            Err(err) => Err(err.map_src(|src| src.as_mut())),
        }
    }
3931
    /// Interprets the prefix of the given `source` as a `&mut Self` without
    /// copying.
    ///
    /// This method computes the [largest possible size of `Self`][valid-size]
    /// that can fit in the leading bytes of `source`, then attempts to return
    /// both a reference to those bytes interpreted as a `Self`, and a reference
    /// to the remaining bytes. If there are insufficient bytes, or if `source`
    /// is not appropriately aligned, this returns `Err`. If [`Self:
    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
    /// error][size-error-from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. See [`mut_from_prefix_with_elems`], which does
    /// support such types. Attempting to use this method on such types results
    /// in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let mut source = [85, 85];
    /// let _ = ZSTy::mut_from_prefix(&mut source[..]); // ⚠ Compile Error!
    /// ```
    ///
    /// [`mut_from_prefix_with_elems`]: FromBytes::mut_from_prefix_with_elems
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// // These are more bytes than are needed to encode a `PacketHeader`.
    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let (header, body) = PacketHeader::mut_from_prefix(bytes).unwrap();
    ///
    /// assert_eq!(header.src_port, [0, 1]);
    /// assert_eq!(header.dst_port, [2, 3]);
    /// assert_eq!(header.length, [4, 5]);
    /// assert_eq!(header.checksum, [6, 7]);
    /// assert_eq!(body, &[8, 9][..]);
    ///
    /// header.checksum = [0, 0];
    /// body.fill(1);
    ///
    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0, 1, 1]);
    /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn mut_from_prefix(
        source: &mut [u8],
    ) -> Result<(&mut Self, &mut [u8]), CastError<&mut [u8], Self>>
    where
        Self: IntoBytes + KnownLayout,
    {
        static_assert_dst_is_not_zst!(Self);
        // Cast the leading bytes of `source` to `Self`; the uninterpreted
        // remainder is returned as the second tuple element.
        mut_from_prefix_suffix(source, None, CastType::Prefix)
    }
4016
    /// Interprets the suffix of the given `source` as a `&mut Self` without
    /// copying.
    ///
    /// This method computes the [largest possible size of `Self`][valid-size]
    /// that can fit in the trailing bytes of `source`, then attempts to return
    /// both a reference to those bytes interpreted as a `Self`, and a reference
    /// to the preceding bytes. If there are insufficient bytes, or if that
    /// suffix of `source` is not appropriately aligned, this returns `Err`. If
    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
    /// alignment error][size-error-from].
    ///
    /// `Self` may be a sized type, a slice, or a [slice DST][slice-dst].
    ///
    /// [valid-size]: crate::KnownLayout#what-is-a-valid-size
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    /// [slice-dst]: KnownLayout#dynamically-sized-types
    ///
    /// # Compile-Time Assertions
    ///
    /// This method cannot yet be used on unsized types whose dynamically-sized
    /// component is zero-sized. See [`mut_from_suffix_with_elems`], which does
    /// support such types. Attempting to use this method on such types results
    /// in a compile-time assertion error; e.g.:
    ///
    /// ```compile_fail,E0080
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, Immutable, IntoBytes, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let mut source = [85, 85];
    /// let _ = ZSTy::mut_from_suffix(&mut source[..]); // ⚠ Compile Error!
    /// ```
    ///
    /// [`mut_from_suffix_with_elems`]: FromBytes::mut_from_suffix_with_elems
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, IntoBytes, KnownLayout, Immutable)]
    /// #[repr(C)]
    /// struct PacketTrailer {
    ///     frame_check_sequence: [u8; 4],
    /// }
    ///
    /// // These are more bytes than are needed to encode a `PacketTrailer`.
    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let (prefix, trailer) = PacketTrailer::mut_from_suffix(bytes).unwrap();
    ///
    /// assert_eq!(prefix, &[0u8, 1, 2, 3, 4, 5][..]);
    /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
    ///
    /// prefix.fill(0);
    /// trailer.frame_check_sequence.fill(1);
    ///
    /// assert_eq!(bytes, [0, 0, 0, 0, 0, 0, 1, 1, 1, 1]);
    /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn mut_from_suffix(
        source: &mut [u8],
    ) -> Result<(&mut [u8], &mut Self), CastError<&mut [u8], Self>>
    where
        Self: IntoBytes + KnownLayout,
    {
        static_assert_dst_is_not_zst!(Self);
        // The helper yields `(Self, prefix)`; `swap` reorders the pair to the
        // documented `(prefix, Self)` return order.
        mut_from_prefix_suffix(source, None, CastType::Suffix).map(swap)
    }
4092
    /// Interprets the given `source` as a `&Self` with a DST length equal to
    /// `count`.
    ///
    /// This method attempts to return a reference to `source` interpreted as a
    /// `Self` with `count` trailing elements. If the length of `source` is not
    /// equal to the size of `Self` with `count` elements, or if `source` is not
    /// appropriately aligned, this returns `Err`. If [`Self:
    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
    /// error][size-error-from].
    ///
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// # #[derive(Debug, PartialEq, Eq)]
    /// #[derive(FromBytes, Immutable)]
    /// #[repr(C)]
    /// struct Pixel {
    ///     r: u8,
    ///     g: u8,
    ///     b: u8,
    ///     a: u8,
    /// }
    ///
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7][..];
    ///
    /// let pixels = <[Pixel]>::ref_from_bytes_with_elems(bytes, 2).unwrap();
    ///
    /// assert_eq!(pixels, &[
    ///     Pixel { r: 0, g: 1, b: 2, a: 3 },
    ///     Pixel { r: 4, g: 5, b: 6, a: 7 },
    /// ]);
    ///
    /// ```
    ///
    /// Since an explicit `count` is provided, this method supports types with
    /// zero-sized trailing slice elements. Methods such as [`ref_from_bytes`]
    /// which do not take an explicit count do not support such types.
    ///
    /// ```
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let src = &[85, 85][..];
    /// let zsty = ZSTy::ref_from_bytes_with_elems(src, 42).unwrap();
    /// assert_eq!(zsty.trailing_dst.len(), 42);
    /// ```
    ///
    /// [`ref_from_bytes`]: FromBytes::ref_from_bytes
    #[must_use = "has no side effects"]
    #[inline]
    fn ref_from_bytes_with_elems(
        source: &[u8],
        count: usize,
    ) -> Result<&Self, CastError<&[u8], Self>>
    where
        Self: KnownLayout<PointerMetadata = usize> + Immutable,
    {
        let source = Ptr::from_ref(source);
        // Cast the entire slice to `Self` with `count` trailing elements; the
        // cast fails rather than leaving leftover bytes (`_no_leftover`).
        let maybe_slf = source.try_cast_into_no_leftover::<_, BecauseImmutable>(Some(count));
        match maybe_slf {
            Ok(slf) => Ok(slf.recall_validity().as_ref()),
            Err(err) => Err(err.map_src(|s| s.as_ref())),
        }
    }
4170
    /// Interprets the prefix of the given `source` as a DST `&Self` with length
    /// equal to `count`.
    ///
    /// This method attempts to return a reference to the prefix of `source`
    /// interpreted as a `Self` with `count` trailing elements, and a reference
    /// to the remaining bytes. If there are insufficient bytes, or if `source`
    /// is not appropriately aligned, this returns `Err`. If [`Self:
    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
    /// error][size-error-from].
    ///
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// # #[derive(Debug, PartialEq, Eq)]
    /// #[derive(FromBytes, Immutable)]
    /// #[repr(C)]
    /// struct Pixel {
    ///     r: u8,
    ///     g: u8,
    ///     b: u8,
    ///     a: u8,
    /// }
    ///
    /// // These are more bytes than are needed to encode two `Pixel`s.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let (pixels, suffix) = <[Pixel]>::ref_from_prefix_with_elems(bytes, 2).unwrap();
    ///
    /// assert_eq!(pixels, &[
    ///     Pixel { r: 0, g: 1, b: 2, a: 3 },
    ///     Pixel { r: 4, g: 5, b: 6, a: 7 },
    /// ]);
    ///
    /// assert_eq!(suffix, &[8, 9]);
    /// ```
    ///
    /// Since an explicit `count` is provided, this method supports types with
    /// zero-sized trailing slice elements. Methods such as [`ref_from_prefix`]
    /// which do not take an explicit count do not support such types.
    ///
    /// ```
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let src = &[85, 85][..];
    /// let (zsty, _) = ZSTy::ref_from_prefix_with_elems(src, 42).unwrap();
    /// assert_eq!(zsty.trailing_dst.len(), 42);
    /// ```
    ///
    /// [`ref_from_prefix`]: FromBytes::ref_from_prefix
    #[must_use = "has no side effects"]
    #[inline]
    fn ref_from_prefix_with_elems(
        source: &[u8],
        count: usize,
    ) -> Result<(&Self, &[u8]), CastError<&[u8], Self>>
    where
        Self: KnownLayout<PointerMetadata = usize> + Immutable,
    {
        // Delegate to the shared prefix/suffix cast helper; the cast portion
        // comes first, the uninterpreted remainder second.
        ref_from_prefix_suffix(source, Some(count), CastType::Prefix)
    }
4245
    /// Interprets the suffix of the given `source` as a DST `&Self` with length
    /// equal to `count`.
    ///
    /// This method attempts to return a reference to the suffix of `source`
    /// interpreted as a `Self` with `count` trailing elements, and a reference
    /// to the preceding bytes. If there are insufficient bytes, or if that
    /// suffix of `source` is not appropriately aligned, this returns `Err`. If
    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
    /// alignment error][size-error-from].
    ///
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// # #[derive(Debug, PartialEq, Eq)]
    /// #[derive(FromBytes, Immutable)]
    /// #[repr(C)]
    /// struct Pixel {
    ///     r: u8,
    ///     g: u8,
    ///     b: u8,
    ///     a: u8,
    /// }
    ///
    /// // These are more bytes than are needed to encode two `Pixel`s.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let (prefix, pixels) = <[Pixel]>::ref_from_suffix_with_elems(bytes, 2).unwrap();
    ///
    /// assert_eq!(prefix, &[0, 1]);
    ///
    /// assert_eq!(pixels, &[
    ///     Pixel { r: 2, g: 3, b: 4, a: 5 },
    ///     Pixel { r: 6, g: 7, b: 8, a: 9 },
    /// ]);
    /// ```
    ///
    /// Since an explicit `count` is provided, this method supports types with
    /// zero-sized trailing slice elements. Methods such as [`ref_from_suffix`]
    /// which do not take an explicit count do not support such types.
    ///
    /// ```
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, Immutable, KnownLayout)]
    /// #[repr(C)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let src = &[85, 85][..];
    /// let (_, zsty) = ZSTy::ref_from_suffix_with_elems(src, 42).unwrap();
    /// assert_eq!(zsty.trailing_dst.len(), 42);
    /// ```
    ///
    /// [`ref_from_suffix`]: FromBytes::ref_from_suffix
    #[must_use = "has no side effects"]
    #[inline]
    fn ref_from_suffix_with_elems(
        source: &[u8],
        count: usize,
    ) -> Result<(&[u8], &Self), CastError<&[u8], Self>>
    where
        Self: KnownLayout<PointerMetadata = usize> + Immutable,
    {
        // The helper yields `(Self, prefix)`; `swap` reorders the pair to the
        // documented `(prefix, Self)` return order.
        ref_from_prefix_suffix(source, Some(count), CastType::Suffix).map(swap)
    }
4320
    /// Interprets the given `source` as a `&mut Self` with a DST length equal
    /// to `count`.
    ///
    /// This method attempts to return a reference to `source` interpreted as a
    /// `Self` with `count` trailing elements. If the length of `source` is not
    /// equal to the size of `Self` with `count` elements, or if `source` is not
    /// appropriately aligned, this returns `Err`. If [`Self:
    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
    /// error][size-error-from].
    ///
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// # #[derive(Debug, PartialEq, Eq)]
    /// #[derive(KnownLayout, FromBytes, IntoBytes, Immutable)]
    /// #[repr(C)]
    /// struct Pixel {
    ///     r: u8,
    ///     g: u8,
    ///     b: u8,
    ///     a: u8,
    /// }
    ///
    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7][..];
    ///
    /// let pixels = <[Pixel]>::mut_from_bytes_with_elems(bytes, 2).unwrap();
    ///
    /// assert_eq!(pixels, &[
    ///     Pixel { r: 0, g: 1, b: 2, a: 3 },
    ///     Pixel { r: 4, g: 5, b: 6, a: 7 },
    /// ]);
    ///
    /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
    ///
    /// assert_eq!(bytes, [0, 1, 2, 3, 0, 0, 0, 0]);
    /// ```
    ///
    /// Since an explicit `count` is provided, this method supports types with
    /// zero-sized trailing slice elements. Methods such as [`mut_from_bytes`]
    /// which do not take an explicit count do not support such types.
    ///
    /// ```
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let src = &mut [85, 85][..];
    /// let zsty = ZSTy::mut_from_bytes_with_elems(src, 42).unwrap();
    /// assert_eq!(zsty.trailing_dst.len(), 42);
    /// ```
    ///
    /// [`mut_from_bytes`]: FromBytes::mut_from_bytes
    #[must_use = "has no side effects"]
    #[inline]
    fn mut_from_bytes_with_elems(
        source: &mut [u8],
        count: usize,
    ) -> Result<&mut Self, CastError<&mut [u8], Self>>
    where
        Self: IntoBytes + KnownLayout<PointerMetadata = usize> + Immutable,
    {
        let source = Ptr::from_mut(source);
        // Cast the entire slice to `Self` with `count` trailing elements; the
        // cast fails rather than leaving leftover bytes (`_no_leftover`).
        let maybe_slf = source.try_cast_into_no_leftover::<_, BecauseImmutable>(Some(count));
        match maybe_slf {
            // NOTE(review): the `BecauseExclusive` turbofish argument
            // presumably selects the exclusive-access proof for recalling
            // validity — confirm against `recall_validity`'s parameters.
            Ok(slf) => Ok(slf.recall_validity::<_, (_, (_, BecauseExclusive))>().as_mut()),
            Err(err) => Err(err.map_src(|s| s.as_mut())),
        }
    }
4401
    /// Interprets the prefix of the given `source` as a `&mut Self` with DST
    /// length equal to `count`.
    ///
    /// This method attempts to return a reference to the prefix of `source`
    /// interpreted as a `Self` with `count` trailing elements, and a reference
    /// to the remaining bytes. If there are insufficient bytes, or if `source`
    /// is not appropriately aligned, this returns `Err`. If [`Self:
    /// Unaligned`][self-unaligned], you can [infallibly discard the alignment
    /// error][size-error-from].
    ///
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// # #[derive(Debug, PartialEq, Eq)]
    /// #[derive(KnownLayout, FromBytes, IntoBytes, Immutable)]
    /// #[repr(C)]
    /// struct Pixel {
    ///     r: u8,
    ///     g: u8,
    ///     b: u8,
    ///     a: u8,
    /// }
    ///
    /// // These are more bytes than are needed to encode two `Pixel`s.
    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let (pixels, suffix) = <[Pixel]>::mut_from_prefix_with_elems(bytes, 2).unwrap();
    ///
    /// assert_eq!(pixels, &[
    ///     Pixel { r: 0, g: 1, b: 2, a: 3 },
    ///     Pixel { r: 4, g: 5, b: 6, a: 7 },
    /// ]);
    ///
    /// assert_eq!(suffix, &[8, 9]);
    ///
    /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
    /// suffix.fill(1);
    ///
    /// assert_eq!(bytes, [0, 1, 2, 3, 0, 0, 0, 0, 1, 1]);
    /// ```
    ///
    /// Since an explicit `count` is provided, this method supports types with
    /// zero-sized trailing slice elements. Methods such as [`mut_from_prefix`]
    /// which do not take an explicit count do not support such types.
    ///
    /// ```
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let src = &mut [85, 85][..];
    /// let (zsty, _) = ZSTy::mut_from_prefix_with_elems(src, 42).unwrap();
    /// assert_eq!(zsty.trailing_dst.len(), 42);
    /// ```
    ///
    /// [`mut_from_prefix`]: FromBytes::mut_from_prefix
    #[must_use = "has no side effects"]
    #[inline]
    fn mut_from_prefix_with_elems(
        source: &mut [u8],
        count: usize,
    ) -> Result<(&mut Self, &mut [u8]), CastError<&mut [u8], Self>>
    where
        Self: IntoBytes + KnownLayout<PointerMetadata = usize>,
    {
        // Delegate to the shared prefix/suffix cast helper; the cast portion
        // comes first, the uninterpreted remainder second.
        mut_from_prefix_suffix(source, Some(count), CastType::Prefix)
    }
4481
    /// Interprets the suffix of the given `source` as a `&mut Self` with DST
    /// length equal to `count`.
    ///
    /// This method attempts to return a reference to the suffix of `source`
    /// interpreted as a `Self` with `count` trailing elements, and a reference
    /// to the preceding bytes. If there are insufficient bytes, or if that
    /// suffix of `source` is not appropriately aligned, this returns `Err`. If
    /// [`Self: Unaligned`][self-unaligned], you can [infallibly discard the
    /// alignment error][size-error-from].
    ///
    /// [self-unaligned]: Unaligned
    /// [size-error-from]: error/struct.SizeError.html#method.from-1
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// # #[derive(Debug, PartialEq, Eq)]
    /// #[derive(FromBytes, IntoBytes, Immutable)]
    /// #[repr(C)]
    /// struct Pixel {
    ///     r: u8,
    ///     g: u8,
    ///     b: u8,
    ///     a: u8,
    /// }
    ///
    /// // These are more bytes than are needed to encode two `Pixel`s.
    /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let (prefix, pixels) = <[Pixel]>::mut_from_suffix_with_elems(bytes, 2).unwrap();
    ///
    /// assert_eq!(prefix, &[0, 1]);
    ///
    /// assert_eq!(pixels, &[
    ///     Pixel { r: 2, g: 3, b: 4, a: 5 },
    ///     Pixel { r: 6, g: 7, b: 8, a: 9 },
    /// ]);
    ///
    /// prefix.fill(9);
    /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
    ///
    /// assert_eq!(bytes, [9, 9, 2, 3, 4, 5, 0, 0, 0, 0]);
    /// ```
    ///
    /// Since an explicit `count` is provided, this method supports types with
    /// zero-sized trailing slice elements. Methods such as [`mut_from_suffix`]
    /// which do not take an explicit count do not support such types.
    ///
    /// ```
    /// use zerocopy::*;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
    /// #[repr(C, packed)]
    /// struct ZSTy {
    ///     leading_sized: [u8; 2],
    ///     trailing_dst: [()],
    /// }
    ///
    /// let src = &mut [85, 85][..];
    /// let (_, zsty) = ZSTy::mut_from_suffix_with_elems(src, 42).unwrap();
    /// assert_eq!(zsty.trailing_dst.len(), 42);
    /// ```
    ///
    /// [`mut_from_suffix`]: FromBytes::mut_from_suffix
    #[must_use = "has no side effects"]
    #[inline]
    fn mut_from_suffix_with_elems(
        source: &mut [u8],
        count: usize,
    ) -> Result<(&mut [u8], &mut Self), CastError<&mut [u8], Self>>
    where
        Self: IntoBytes + KnownLayout<PointerMetadata = usize>,
    {
        // The helper yields `(Self, prefix)`; `swap` reorders the pair to the
        // documented `(prefix, Self)` return order.
        mut_from_prefix_suffix(source, Some(count), CastType::Suffix).map(swap)
    }
4561
    /// Reads a copy of `Self` from the given `source`.
    ///
    /// If `source.len() != size_of::<Self>()`, `read_from_bytes` returns `Err`.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// // These bytes encode a `PacketHeader`.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7][..];
    ///
    /// let header = PacketHeader::read_from_bytes(bytes).unwrap();
    ///
    /// assert_eq!(header.src_port, [0, 1]);
    /// assert_eq!(header.dst_port, [2, 3]);
    /// assert_eq!(header.length, [4, 5]);
    /// assert_eq!(header.checksum, [6, 7]);
    /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn read_from_bytes(source: &[u8]) -> Result<Self, SizeError<&[u8], Self>>
    where
        Self: Sized,
    {
        // Viewing the bytes as `Unalign<Self>` removes `Self`'s alignment
        // requirement, so only a size mismatch can cause failure.
        match Ref::<_, Unalign<Self>>::sized_from(source) {
            Ok(r) => Ok(Ref::read(&r).into_inner()),
            Err(CastError::Size(e)) => Err(e.with_dst()),
            Err(CastError::Alignment(_)) => {
                // SAFETY: `Unalign<Self>` is trivially aligned, so
                // `Ref::sized_from` cannot fail due to unmet alignment
                // requirements.
                unsafe { core::hint::unreachable_unchecked() }
            }
            // The validity error type is uninhabited (`match i {}` compiles
            // only for uninhabited types), so this arm is statically
            // unreachable.
            Err(CastError::Validity(i)) => match i {},
        }
    }
4609
    /// Reads a copy of `Self` from the prefix of the given `source`.
    ///
    /// This attempts to read a `Self` from the first `size_of::<Self>()` bytes
    /// of `source`, returning that `Self` and any remaining bytes. If
    /// `source.len() < size_of::<Self>()`, it returns `Err`.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// // These are more bytes than are needed to encode a `PacketHeader`.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let (header, body) = PacketHeader::read_from_prefix(bytes).unwrap();
    ///
    /// assert_eq!(header.src_port, [0, 1]);
    /// assert_eq!(header.dst_port, [2, 3]);
    /// assert_eq!(header.length, [4, 5]);
    /// assert_eq!(header.checksum, [6, 7]);
    /// assert_eq!(body, [8, 9]);
    /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn read_from_prefix(source: &[u8]) -> Result<(Self, &[u8]), SizeError<&[u8], Self>>
    where
        Self: Sized,
    {
        // Viewing the bytes as `Unalign<Self>` removes `Self`'s alignment
        // requirement, so only a size mismatch can cause failure.
        match Ref::<_, Unalign<Self>>::sized_from_prefix(source) {
            Ok((r, suffix)) => Ok((Ref::read(&r).into_inner(), suffix)),
            Err(CastError::Size(e)) => Err(e.with_dst()),
            Err(CastError::Alignment(_)) => {
                // SAFETY: `Unalign<Self>` is trivially aligned, so
                // `Ref::sized_from_prefix` cannot fail due to unmet alignment
                // requirements.
                unsafe { core::hint::unreachable_unchecked() }
            }
            // The validity error type is uninhabited, so this arm is
            // statically unreachable.
            Err(CastError::Validity(i)) => match i {},
        }
    }
4660
    /// Reads a copy of `Self` from the suffix of the given `source`.
    ///
    /// This attempts to read a `Self` from the last `size_of::<Self>()` bytes
    /// of `source`, returning that `Self` and any preceding bytes. If
    /// `source.len() < size_of::<Self>()`, it returns `Err`.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::FromBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes)]
    /// #[repr(C)]
    /// struct PacketTrailer {
    ///     frame_check_sequence: [u8; 4],
    /// }
    ///
    /// // These are more bytes than are needed to encode a `PacketTrailer`.
    /// let bytes = &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
    ///
    /// let (prefix, trailer) = PacketTrailer::read_from_suffix(bytes).unwrap();
    ///
    /// assert_eq!(prefix, [0, 1, 2, 3, 4, 5]);
    /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
    /// ```
    #[must_use = "has no side effects"]
    #[inline]
    fn read_from_suffix(source: &[u8]) -> Result<(&[u8], Self), SizeError<&[u8], Self>>
    where
        Self: Sized,
    {
        // Viewing the bytes as `Unalign<Self>` removes `Self`'s alignment
        // requirement, so only a size mismatch can cause failure.
        match Ref::<_, Unalign<Self>>::sized_from_suffix(source) {
            Ok((prefix, r)) => Ok((prefix, Ref::read(&r).into_inner())),
            Err(CastError::Size(e)) => Err(e.with_dst()),
            Err(CastError::Alignment(_)) => {
                // SAFETY: `Unalign<Self>` is trivially aligned, so
                // `Ref::sized_from_suffix` cannot fail due to unmet alignment
                // requirements.
                unsafe { core::hint::unreachable_unchecked() }
            }
            // The validity error type is uninhabited, so this arm is
            // statically unreachable.
            Err(CastError::Validity(i)) => match i {},
        }
    }
4705
    /// Reads a copy of `self` from an `io::Read`.
    ///
    /// This is useful for interfacing with operating system byte sinks (files,
    /// sockets, etc.).
    ///
    /// # Examples
    ///
    /// ```no_run
    /// use zerocopy::{byteorder::big_endian::*, FromBytes};
    /// use std::fs::File;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(FromBytes)]
    /// #[repr(C)]
    /// struct BitmapFileHeader {
    ///     signature: [u8; 2],
    ///     size: U32,
    ///     reserved: U64,
    ///     offset: U64,
    /// }
    ///
    /// let mut file = File::open("image.bin").unwrap();
    /// let header = BitmapFileHeader::read_from_io(&mut file).unwrap();
    /// ```
    #[cfg(feature = "std")]
    #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
    #[inline(always)]
    fn read_from_io<R>(mut src: R) -> io::Result<Self>
    where
        Self: Sized,
        R: io::Read,
    {
        // NOTE(#2319, #2320): We do `buf.zero()` separately rather than
        // constructing `let buf = CoreMaybeUninit::zeroed()` because, if `Self`
        // contains padding bytes, then a typed copy of `CoreMaybeUninit<Self>`
        // will not necessarily preserve zeros written to those padding byte
        // locations, and so `buf` could contain uninitialized bytes.
        let mut buf = CoreMaybeUninit::<Self>::uninit();
        buf.zero();

        let ptr = Ptr::from_mut(&mut buf);
        // SAFETY: After `buf.zero()`, `buf` consists entirely of initialized,
        // zeroed bytes. Since `MaybeUninit` has no validity requirements, `ptr`
        // cannot be used to write values which will violate `buf`'s bit
        // validity. Since `ptr` has `Exclusive` aliasing, nothing other than
        // `ptr` may be used to mutate `ptr`'s referent, and so its bit validity
        // cannot be violated even though `buf` may have more permissive bit
        // validity than `ptr`.
        let ptr = unsafe { ptr.assume_validity::<invariant::Initialized>() };
        let ptr = ptr.as_bytes::<BecauseExclusive>();
        // `read_exact` only writes `u8`s through this `&mut [u8]` view, so the
        // all-bytes-initialized invariant established above is preserved even
        // if the read fails partway through (in which case we return `Err`).
        src.read_exact(ptr.as_mut())?;
        // SAFETY: `buf` entirely consists of initialized bytes, and `Self` is
        // `FromBytes`.
        Ok(unsafe { buf.assume_init() })
    }
4761
4762    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_bytes`")]
4763    #[doc(hidden)]
4764    #[must_use = "has no side effects"]
4765    #[inline(always)]
4766    fn ref_from(source: &[u8]) -> Option<&Self>
4767    where
4768        Self: KnownLayout + Immutable,
4769    {
4770        Self::ref_from_bytes(source).ok()
4771    }
4772
4773    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_bytes`")]
4774    #[doc(hidden)]
4775    #[must_use = "has no side effects"]
4776    #[inline(always)]
4777    fn mut_from(source: &mut [u8]) -> Option<&mut Self>
4778    where
4779        Self: KnownLayout + IntoBytes,
4780    {
4781        Self::mut_from_bytes(source).ok()
4782    }
4783
4784    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_prefix_with_elems`")]
4785    #[doc(hidden)]
4786    #[must_use = "has no side effects"]
4787    #[inline(always)]
4788    fn slice_from_prefix(source: &[u8], count: usize) -> Option<(&[Self], &[u8])>
4789    where
4790        Self: Sized + Immutable,
4791    {
4792        <[Self]>::ref_from_prefix_with_elems(source, count).ok()
4793    }
4794
4795    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::ref_from_suffix_with_elems`")]
4796    #[doc(hidden)]
4797    #[must_use = "has no side effects"]
4798    #[inline(always)]
4799    fn slice_from_suffix(source: &[u8], count: usize) -> Option<(&[u8], &[Self])>
4800    where
4801        Self: Sized + Immutable,
4802    {
4803        <[Self]>::ref_from_suffix_with_elems(source, count).ok()
4804    }
4805
4806    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_prefix_with_elems`")]
4807    #[doc(hidden)]
4808    #[must_use = "has no side effects"]
4809    #[inline(always)]
4810    fn mut_slice_from_prefix(source: &mut [u8], count: usize) -> Option<(&mut [Self], &mut [u8])>
4811    where
4812        Self: Sized + IntoBytes,
4813    {
4814        <[Self]>::mut_from_prefix_with_elems(source, count).ok()
4815    }
4816
4817    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::mut_from_suffix_with_elems`")]
4818    #[doc(hidden)]
4819    #[must_use = "has no side effects"]
4820    #[inline(always)]
4821    fn mut_slice_from_suffix(source: &mut [u8], count: usize) -> Option<(&mut [u8], &mut [Self])>
4822    where
4823        Self: Sized + IntoBytes,
4824    {
4825        <[Self]>::mut_from_suffix_with_elems(source, count).ok()
4826    }
4827
4828    #[deprecated(since = "0.8.0", note = "renamed to `FromBytes::read_from_bytes`")]
4829    #[doc(hidden)]
4830    #[must_use = "has no side effects"]
4831    #[inline(always)]
4832    fn read_from(source: &[u8]) -> Option<Self>
4833    where
4834        Self: Sized,
4835    {
4836        Self::read_from_bytes(source).ok()
4837    }
4838}
4839
/// Interprets the given affix of the given bytes as a `&Self`.
///
/// This method computes the largest possible size of `Self` that can fit in the
/// prefix or suffix bytes of `source`, then attempts to return both a reference
/// to those bytes interpreted as a `Self`, and a reference to the excess bytes.
/// If there are insufficient bytes, or if that affix of `source` is not
/// appropriately aligned, this returns `Err`.
#[inline(always)]
fn ref_from_prefix_suffix<T: FromBytes + KnownLayout + Immutable + ?Sized>(
    source: &[u8],
    meta: Option<T::PointerMetadata>,
    cast_type: CastType,
) -> Result<(&T, &[u8]), CastError<&[u8], T>> {
    // On failure, `map_src` converts the error's captured `Ptr` back into the
    // caller's `&[u8]` so the error hands the source bytes back.
    let (slf, prefix_suffix) = Ptr::from_ref(source)
        .try_cast_into::<_, BecauseImmutable>(cast_type, meta)
        .map_err(|err| err.map_src(|s| s.as_ref()))?;
    // `T: FromBytes` means any initialized bytes form a valid `T`, so the
    // initialized-bytes pointer can recall `T`'s bit validity and be turned
    // into an ordinary reference.
    Ok((slf.recall_validity().as_ref(), prefix_suffix.as_ref()))
}
4858
/// Interprets the given affix of the given bytes as a `&mut Self` without
/// copying.
///
/// This method computes the largest possible size of `Self` that can fit in the
/// prefix or suffix bytes of `source`, then attempts to return both a reference
/// to those bytes interpreted as a `Self`, and a reference to the excess bytes.
/// If there are insufficient bytes, or if that affix of `source` is not
/// appropriately aligned, this returns `Err`.
#[inline(always)]
fn mut_from_prefix_suffix<T: FromBytes + IntoBytes + KnownLayout + ?Sized>(
    source: &mut [u8],
    meta: Option<T::PointerMetadata>,
    cast_type: CastType,
) -> Result<(&mut T, &mut [u8]), CastError<&mut [u8], T>> {
    // On failure, `map_src` converts the error's captured `Ptr` back into the
    // caller's `&mut [u8]` so the error returns ownership of the source bytes.
    let (slf, prefix_suffix) = Ptr::from_mut(source)
        .try_cast_into::<_, BecauseExclusive>(cast_type, meta)
        .map_err(|err| err.map_src(|s| s.as_mut()))?;
    // `T: FromBytes` makes any initialized bytes a valid `T`. NOTE(review):
    // the explicit `(_, (_, _))` parameter appears to spell out the proof
    // route for the validity transition where inference cannot pick it on its
    // own — confirm against `recall_validity`'s documentation.
    Ok((slf.recall_validity::<_, (_, (_, _))>().as_mut(), prefix_suffix.as_mut()))
}
4878
4879/// Analyzes whether a type is [`IntoBytes`].
4880///
4881/// This derive analyzes, at compile time, whether the annotated type satisfies
4882/// the [safety conditions] of `IntoBytes` and implements `IntoBytes` if it is
4883/// sound to do so. This derive can be applied to structs and enums (see below
4884/// for union support); e.g.:
4885///
4886/// ```
4887/// # use zerocopy_derive::{IntoBytes};
4888/// #[derive(IntoBytes)]
4889/// #[repr(C)]
4890/// struct MyStruct {
4891/// # /*
4892///     ...
4893/// # */
4894/// }
4895///
4896/// #[derive(IntoBytes)]
4897/// #[repr(u8)]
4898/// enum MyEnum {
4899/// #   Variant,
4900/// # /*
4901///     ...
4902/// # */
4903/// }
4904/// ```
4905///
4906/// [safety conditions]: trait@IntoBytes#safety
4907///
4908/// # Error Messages
4909///
4910/// On Rust toolchains prior to 1.78.0, due to the way that the custom derive
4911/// for `IntoBytes` is implemented, you may get an error like this:
4912///
4913/// ```text
4914/// error[E0277]: the trait bound `(): PaddingFree<Foo, true>` is not satisfied
4915///   --> lib.rs:23:10
4916///    |
4917///  1 | #[derive(IntoBytes)]
4918///    |          ^^^^^^^^^ the trait `PaddingFree<Foo, true>` is not implemented for `()`
4919///    |
4920///    = help: the following implementations were found:
4921///                   <() as PaddingFree<T, false>>
4922/// ```
4923///
4924/// This error indicates that the type being annotated has padding bytes, which
4925/// is illegal for `IntoBytes` types. Consider reducing the alignment of some
4926/// fields by using types in the [`byteorder`] module, wrapping field types in
4927/// [`Unalign`], adding explicit struct fields where those padding bytes would
4928/// be, or using `#[repr(packed)]`. See the Rust Reference's page on [type
4929/// layout] for more information about type layout and padding.
4930///
4931/// [type layout]: https://doc.rust-lang.org/reference/type-layout.html
4932///
4933/// # Unions
4934///
4935/// Currently, union bit validity is [up in the air][union-validity], and so
4936/// zerocopy does not support `#[derive(IntoBytes)]` on unions by default.
4937/// However, implementing `IntoBytes` on a union type is likely sound on all
4938/// existing Rust toolchains - it's just that it may become unsound in the
4939/// future. You can opt-in to `#[derive(IntoBytes)]` support on unions by
4940/// passing the unstable `zerocopy_derive_union_into_bytes` cfg:
4941///
4942/// ```shell
4943/// $ RUSTFLAGS='--cfg zerocopy_derive_union_into_bytes' cargo build
4944/// ```
4945///
4946/// However, it is your responsibility to ensure that this derive is sound on
4947/// the specific versions of the Rust toolchain you are using! We make no
4948/// stability or soundness guarantees regarding this cfg, and may remove it at
4949/// any point.
4950///
4951/// We are actively working with Rust to stabilize the necessary language
4952/// guarantees to support this in a forwards-compatible way, which will enable
4953/// us to remove the cfg gate. As part of this effort, we need to know how much
4954/// demand there is for this feature. If you would like to use `IntoBytes` on
4955/// unions, [please let us know][discussion].
4956///
4957/// [union-validity]: https://github.com/rust-lang/unsafe-code-guidelines/issues/438
4958/// [discussion]: https://github.com/google/zerocopy/discussions/1802
4959///
4960/// # Analysis
4961///
4962/// *This section describes, roughly, the analysis performed by this derive to
4963/// determine whether it is sound to implement `IntoBytes` for a given type.
4964/// Unless you are modifying the implementation of this derive, or attempting to
4965/// manually implement `IntoBytes` for a type yourself, you don't need to read
4966/// this section.*
4967///
4968/// If a type has the following properties, then this derive can implement
4969/// `IntoBytes` for that type:
4970///
4971/// - If the type is a struct, its fields must be [`IntoBytes`]. Additionally:
4972///     - if the type is `repr(transparent)` or `repr(packed)`, it is
4973///       [`IntoBytes`] if its fields are [`IntoBytes`]; else,
4974///     - if the type is `repr(C)` with at most one field, it is [`IntoBytes`]
4975///       if its field is [`IntoBytes`]; else,
4976///     - if the type has no generic parameters, it is [`IntoBytes`] if the type
4977///       is sized and has no padding bytes; else,
4978///     - if the type is `repr(C)`, its fields must be [`Unaligned`].
4979/// - If the type is an enum:
4980///   - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`,
4981///     `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
4982///   - It must have no padding bytes.
4983///   - Its fields must be [`IntoBytes`].
4984///
/// This analysis is subject to change. Unsafe code may *only* rely on the
/// documented [safety conditions] of `IntoBytes`, and must *not* rely on the
/// implementation details of this derive.
4988///
4989/// [Rust Reference]: https://doc.rust-lang.org/reference/type-layout.html
4990#[cfg(any(feature = "derive", test))]
4991#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
4992pub use zerocopy_derive::IntoBytes;
4993
4994/// Types that can be converted to an immutable slice of initialized bytes.
4995///
4996/// Any `IntoBytes` type can be converted to a slice of initialized bytes of the
4997/// same size. This is useful for efficiently serializing structured data as raw
4998/// bytes.
4999///
5000/// # Implementation
5001///
5002/// **Do not implement this trait yourself!** Instead, use
5003/// [`#[derive(IntoBytes)]`][derive]; e.g.:
5004///
5005/// ```
5006/// # use zerocopy_derive::IntoBytes;
5007/// #[derive(IntoBytes)]
5008/// #[repr(C)]
5009/// struct MyStruct {
5010/// # /*
5011///     ...
5012/// # */
5013/// }
5014///
5015/// #[derive(IntoBytes)]
5016/// #[repr(u8)]
5017/// enum MyEnum {
5018/// #   Variant0,
5019/// # /*
5020///     ...
5021/// # */
5022/// }
5023/// ```
5024///
5025/// This derive performs a sophisticated, compile-time safety analysis to
5026/// determine whether a type is `IntoBytes`. See the [derive
5027/// documentation][derive] for guidance on how to interpret error messages
5028/// produced by the derive's analysis.
5029///
5030/// # Safety
5031///
5032/// *This section describes what is required in order for `T: IntoBytes`, and
5033/// what unsafe code may assume of such types. If you don't plan on implementing
5034/// `IntoBytes` manually, and you don't plan on writing unsafe code that
5035/// operates on `IntoBytes` types, then you don't need to read this section.*
5036///
5037/// If `T: IntoBytes`, then unsafe code may assume that it is sound to treat any
5038/// `t: T` as an immutable `[u8]` of length `size_of_val(t)`. If a type is
5039/// marked as `IntoBytes` which violates this contract, it may cause undefined
5040/// behavior.
5041///
5042/// `#[derive(IntoBytes)]` only permits [types which satisfy these
5043/// requirements][derive-analysis].
5044///
5045#[cfg_attr(
5046    feature = "derive",
5047    doc = "[derive]: zerocopy_derive::IntoBytes",
5048    doc = "[derive-analysis]: zerocopy_derive::IntoBytes#analysis"
5049)]
5050#[cfg_attr(
5051    not(feature = "derive"),
5052    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.IntoBytes.html"),
5053    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.IntoBytes.html#analysis"),
5054)]
5055#[cfg_attr(
5056    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
5057    diagnostic::on_unimplemented(note = "Consider adding `#[derive(IntoBytes)]` to `{Self}`")
5058)]
5059pub unsafe trait IntoBytes {
5060    // The `Self: Sized` bound makes it so that this function doesn't prevent
5061    // `IntoBytes` from being object safe. Note that other `IntoBytes` methods
5062    // prevent object safety, but those provide a benefit in exchange for object
5063    // safety. If at some point we remove those methods, change their type
5064    // signatures, or move them out of this trait so that `IntoBytes` is object
5065    // safe again, it's important that this function not prevent object safety.
5066    #[doc(hidden)]
5067    fn only_derive_is_allowed_to_implement_this_trait()
5068    where
5069        Self: Sized;
5070
5071    /// Gets the bytes of this value.
5072    ///
5073    /// # Examples
5074    ///
5075    /// ```
5076    /// use zerocopy::IntoBytes;
5077    /// # use zerocopy_derive::*;
5078    ///
5079    /// #[derive(IntoBytes, Immutable)]
5080    /// #[repr(C)]
5081    /// struct PacketHeader {
5082    ///     src_port: [u8; 2],
5083    ///     dst_port: [u8; 2],
5084    ///     length: [u8; 2],
5085    ///     checksum: [u8; 2],
5086    /// }
5087    ///
5088    /// let header = PacketHeader {
5089    ///     src_port: [0, 1],
5090    ///     dst_port: [2, 3],
5091    ///     length: [4, 5],
5092    ///     checksum: [6, 7],
5093    /// };
5094    ///
5095    /// let bytes = header.as_bytes();
5096    ///
5097    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
5098    /// ```
5099    #[must_use = "has no side effects"]
5100    #[inline(always)]
5101    fn as_bytes(&self) -> &[u8]
5102    where
5103        Self: Immutable,
5104    {
5105        // Note that this method does not have a `Self: Sized` bound;
5106        // `size_of_val` works for unsized values too.
5107        let len = mem::size_of_val(self);
5108        let slf: *const Self = self;
5109
5110        // SAFETY:
5111        // - `slf.cast::<u8>()` is valid for reads for `len * size_of::<u8>()`
5112        //   many bytes because...
5113        //   - `slf` is the same pointer as `self`, and `self` is a reference
5114        //     which points to an object whose size is `len`. Thus...
5115        //     - The entire region of `len` bytes starting at `slf` is contained
5116        //       within a single allocation.
5117        //     - `slf` is non-null.
5118        //   - `slf` is trivially aligned to `align_of::<u8>() == 1`.
5119        // - `Self: IntoBytes` ensures that all of the bytes of `slf` are
5120        //   initialized.
5121        // - Since `slf` is derived from `self`, and `self` is an immutable
5122        //   reference, the only other references to this memory region that
5123        //   could exist are other immutable references, and those don't allow
5124        //   mutation. `Self: Immutable` prohibits types which contain
5125        //   `UnsafeCell`s, which are the only types for which this rule
5126        //   wouldn't be sufficient.
5127        // - The total size of the resulting slice is no larger than
5128        //   `isize::MAX` because no allocation produced by safe code can be
5129        //   larger than `isize::MAX`.
5130        //
5131        // FIXME(#429): Add references to docs and quotes.
5132        unsafe { slice::from_raw_parts(slf.cast::<u8>(), len) }
5133    }
5134
5135    /// Gets the bytes of this value mutably.
5136    ///
5137    /// # Examples
5138    ///
5139    /// ```
5140    /// use zerocopy::IntoBytes;
5141    /// # use zerocopy_derive::*;
5142    ///
5143    /// # #[derive(Eq, PartialEq, Debug)]
5144    /// #[derive(FromBytes, IntoBytes, Immutable)]
5145    /// #[repr(C)]
5146    /// struct PacketHeader {
5147    ///     src_port: [u8; 2],
5148    ///     dst_port: [u8; 2],
5149    ///     length: [u8; 2],
5150    ///     checksum: [u8; 2],
5151    /// }
5152    ///
5153    /// let mut header = PacketHeader {
5154    ///     src_port: [0, 1],
5155    ///     dst_port: [2, 3],
5156    ///     length: [4, 5],
5157    ///     checksum: [6, 7],
5158    /// };
5159    ///
5160    /// let bytes = header.as_mut_bytes();
5161    ///
5162    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
5163    ///
5164    /// bytes.reverse();
5165    ///
5166    /// assert_eq!(header, PacketHeader {
5167    ///     src_port: [7, 6],
5168    ///     dst_port: [5, 4],
5169    ///     length: [3, 2],
5170    ///     checksum: [1, 0],
5171    /// });
5172    /// ```
5173    #[must_use = "has no side effects"]
5174    #[inline(always)]
5175    fn as_mut_bytes(&mut self) -> &mut [u8]
5176    where
5177        Self: FromBytes,
5178    {
5179        // Note that this method does not have a `Self: Sized` bound;
5180        // `size_of_val` works for unsized values too.
5181        let len = mem::size_of_val(self);
5182        let slf: *mut Self = self;
5183
5184        // SAFETY:
5185        // - `slf.cast::<u8>()` is valid for reads and writes for `len *
5186        //   size_of::<u8>()` many bytes because...
5187        //   - `slf` is the same pointer as `self`, and `self` is a reference
5188        //     which points to an object whose size is `len`. Thus...
5189        //     - The entire region of `len` bytes starting at `slf` is contained
5190        //       within a single allocation.
5191        //     - `slf` is non-null.
5192        //   - `slf` is trivially aligned to `align_of::<u8>() == 1`.
5193        // - `Self: IntoBytes` ensures that all of the bytes of `slf` are
5194        //   initialized.
5195        // - `Self: FromBytes` ensures that no write to this memory region
5196        //   could result in it containing an invalid `Self`.
5197        // - Since `slf` is derived from `self`, and `self` is a mutable
5198        //   reference, no other references to this memory region can exist.
5199        // - The total size of the resulting slice is no larger than
5200        //   `isize::MAX` because no allocation produced by safe code can be
5201        //   larger than `isize::MAX`.
5202        //
5203        // FIXME(#429): Add references to docs and quotes.
5204        unsafe { slice::from_raw_parts_mut(slf.cast::<u8>(), len) }
5205    }
5206
5207    /// Writes a copy of `self` to `dst`.
5208    ///
5209    /// If `dst.len() != size_of_val(self)`, `write_to` returns `Err`.
5210    ///
5211    /// # Examples
5212    ///
5213    /// ```
5214    /// use zerocopy::IntoBytes;
5215    /// # use zerocopy_derive::*;
5216    ///
5217    /// #[derive(IntoBytes, Immutable)]
5218    /// #[repr(C)]
5219    /// struct PacketHeader {
5220    ///     src_port: [u8; 2],
5221    ///     dst_port: [u8; 2],
5222    ///     length: [u8; 2],
5223    ///     checksum: [u8; 2],
5224    /// }
5225    ///
5226    /// let header = PacketHeader {
5227    ///     src_port: [0, 1],
5228    ///     dst_port: [2, 3],
5229    ///     length: [4, 5],
5230    ///     checksum: [6, 7],
5231    /// };
5232    ///
5233    /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0];
5234    ///
5235    /// header.write_to(&mut bytes[..]);
5236    ///
5237    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
5238    /// ```
5239    ///
5240    /// If too many or too few target bytes are provided, `write_to` returns
5241    /// `Err` and leaves the target bytes unmodified:
5242    ///
5243    /// ```
5244    /// # use zerocopy::IntoBytes;
5245    /// # let header = u128::MAX;
5246    /// let mut excessive_bytes = &mut [0u8; 128][..];
5247    ///
5248    /// let write_result = header.write_to(excessive_bytes);
5249    ///
5250    /// assert!(write_result.is_err());
5251    /// assert_eq!(excessive_bytes, [0u8; 128]);
5252    /// ```
5253    #[must_use = "callers should check the return value to see if the operation succeeded"]
5254    #[inline]
5255    #[allow(clippy::mut_from_ref)] // False positive: `&self -> &mut [u8]`
5256    fn write_to(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>>
5257    where
5258        Self: Immutable,
5259    {
5260        let src = self.as_bytes();
5261        if dst.len() == src.len() {
5262            // SAFETY: Within this branch of the conditional, we have ensured
5263            // that `dst.len()` is equal to `src.len()`. Neither the size of the
5264            // source nor the size of the destination change between the above
5265            // size check and the invocation of `copy_unchecked`.
5266            unsafe { util::copy_unchecked(src, dst) }
5267            Ok(())
5268        } else {
5269            Err(SizeError::new(self))
5270        }
5271    }
5272
5273    /// Writes a copy of `self` to the prefix of `dst`.
5274    ///
5275    /// `write_to_prefix` writes `self` to the first `size_of_val(self)` bytes
5276    /// of `dst`. If `dst.len() < size_of_val(self)`, it returns `Err`.
5277    ///
5278    /// # Examples
5279    ///
5280    /// ```
5281    /// use zerocopy::IntoBytes;
5282    /// # use zerocopy_derive::*;
5283    ///
5284    /// #[derive(IntoBytes, Immutable)]
5285    /// #[repr(C)]
5286    /// struct PacketHeader {
5287    ///     src_port: [u8; 2],
5288    ///     dst_port: [u8; 2],
5289    ///     length: [u8; 2],
5290    ///     checksum: [u8; 2],
5291    /// }
5292    ///
5293    /// let header = PacketHeader {
5294    ///     src_port: [0, 1],
5295    ///     dst_port: [2, 3],
5296    ///     length: [4, 5],
5297    ///     checksum: [6, 7],
5298    /// };
5299    ///
5300    /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
5301    ///
5302    /// header.write_to_prefix(&mut bytes[..]);
5303    ///
5304    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7, 0, 0]);
5305    /// ```
5306    ///
5307    /// If insufficient target bytes are provided, `write_to_prefix` returns
5308    /// `Err` and leaves the target bytes unmodified:
5309    ///
5310    /// ```
5311    /// # use zerocopy::IntoBytes;
5312    /// # let header = u128::MAX;
5313    /// let mut insufficient_bytes = &mut [0, 0][..];
5314    ///
5315    /// let write_result = header.write_to_suffix(insufficient_bytes);
5316    ///
5317    /// assert!(write_result.is_err());
5318    /// assert_eq!(insufficient_bytes, [0, 0]);
5319    /// ```
5320    #[must_use = "callers should check the return value to see if the operation succeeded"]
5321    #[inline]
5322    #[allow(clippy::mut_from_ref)] // False positive: `&self -> &mut [u8]`
5323    fn write_to_prefix(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>>
5324    where
5325        Self: Immutable,
5326    {
5327        let src = self.as_bytes();
5328        match dst.get_mut(..src.len()) {
5329            Some(dst) => {
5330                // SAFETY: Within this branch of the `match`, we have ensured
5331                // through fallible subslicing that `dst.len()` is equal to
5332                // `src.len()`. Neither the size of the source nor the size of
5333                // the destination change between the above subslicing operation
5334                // and the invocation of `copy_unchecked`.
5335                unsafe { util::copy_unchecked(src, dst) }
5336                Ok(())
5337            }
5338            None => Err(SizeError::new(self)),
5339        }
5340    }
5341
5342    /// Writes a copy of `self` to the suffix of `dst`.
5343    ///
5344    /// `write_to_suffix` writes `self` to the last `size_of_val(self)` bytes of
5345    /// `dst`. If `dst.len() < size_of_val(self)`, it returns `Err`.
5346    ///
5347    /// # Examples
5348    ///
5349    /// ```
5350    /// use zerocopy::IntoBytes;
5351    /// # use zerocopy_derive::*;
5352    ///
5353    /// #[derive(IntoBytes, Immutable)]
5354    /// #[repr(C)]
5355    /// struct PacketHeader {
5356    ///     src_port: [u8; 2],
5357    ///     dst_port: [u8; 2],
5358    ///     length: [u8; 2],
5359    ///     checksum: [u8; 2],
5360    /// }
5361    ///
5362    /// let header = PacketHeader {
5363    ///     src_port: [0, 1],
5364    ///     dst_port: [2, 3],
5365    ///     length: [4, 5],
5366    ///     checksum: [6, 7],
5367    /// };
5368    ///
5369    /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
5370    ///
5371    /// header.write_to_suffix(&mut bytes[..]);
5372    ///
5373    /// assert_eq!(bytes, [0, 0, 0, 1, 2, 3, 4, 5, 6, 7]);
5374    ///
5375    /// let mut insufficient_bytes = &mut [0, 0][..];
5376    ///
5377    /// let write_result = header.write_to_suffix(insufficient_bytes);
5378    ///
5379    /// assert!(write_result.is_err());
5380    /// assert_eq!(insufficient_bytes, [0, 0]);
5381    /// ```
5382    ///
5383    /// If insufficient target bytes are provided, `write_to_suffix` returns
5384    /// `Err` and leaves the target bytes unmodified:
5385    ///
5386    /// ```
5387    /// # use zerocopy::IntoBytes;
5388    /// # let header = u128::MAX;
5389    /// let mut insufficient_bytes = &mut [0, 0][..];
5390    ///
5391    /// let write_result = header.write_to_suffix(insufficient_bytes);
5392    ///
5393    /// assert!(write_result.is_err());
5394    /// assert_eq!(insufficient_bytes, [0, 0]);
5395    /// ```
5396    #[must_use = "callers should check the return value to see if the operation succeeded"]
5397    #[inline]
5398    #[allow(clippy::mut_from_ref)] // False positive: `&self -> &mut [u8]`
5399    fn write_to_suffix(&self, dst: &mut [u8]) -> Result<(), SizeError<&Self, &mut [u8]>>
5400    where
5401        Self: Immutable,
5402    {
5403        let src = self.as_bytes();
5404        let start = if let Some(start) = dst.len().checked_sub(src.len()) {
5405            start
5406        } else {
5407            return Err(SizeError::new(self));
5408        };
5409        let dst = if let Some(dst) = dst.get_mut(start..) {
5410            dst
5411        } else {
5412            // get_mut() should never return None here. We return a `SizeError`
5413            // rather than .unwrap() because in the event the branch is not
5414            // optimized away, returning a value is generally lighter-weight
5415            // than panicking.
5416            return Err(SizeError::new(self));
5417        };
5418        // SAFETY: Through fallible subslicing of `dst`, we have ensured that
5419        // `dst.len()` is equal to `src.len()`. Neither the size of the source
5420        // nor the size of the destination change between the above subslicing
5421        // operation and the invocation of `copy_unchecked`.
5422        unsafe {
5423            util::copy_unchecked(src, dst);
5424        }
5425        Ok(())
5426    }
5427
5428    /// Writes a copy of `self` to an `io::Write`.
5429    ///
5430    /// This is a shorthand for `dst.write_all(self.as_bytes())`, and is useful
5431    /// for interfacing with operating system byte sinks (files, sockets, etc.).
5432    ///
5433    /// # Examples
5434    ///
5435    /// ```no_run
5436    /// use zerocopy::{byteorder::big_endian::U16, FromBytes, IntoBytes};
5437    /// use std::fs::File;
5438    /// # use zerocopy_derive::*;
5439    ///
5440    /// #[derive(FromBytes, IntoBytes, Immutable, KnownLayout)]
5441    /// #[repr(C, packed)]
5442    /// struct GrayscaleImage {
5443    ///     height: U16,
5444    ///     width: U16,
5445    ///     pixels: [U16],
5446    /// }
5447    ///
5448    /// let image = GrayscaleImage::ref_from_bytes(&[0, 0, 0, 0][..]).unwrap();
5449    /// let mut file = File::create("image.bin").unwrap();
5450    /// image.write_to_io(&mut file).unwrap();
5451    /// ```
5452    ///
5453    /// If the write fails, `write_to_io` returns `Err` and a partial write may
5454    /// have occurred; e.g.:
5455    ///
5456    /// ```
5457    /// # use zerocopy::IntoBytes;
5458    ///
5459    /// let src = u128::MAX;
5460    /// let mut dst = [0u8; 2];
5461    ///
5462    /// let write_result = src.write_to_io(&mut dst[..]);
5463    ///
5464    /// assert!(write_result.is_err());
5465    /// assert_eq!(dst, [255, 255]);
5466    /// ```
5467    #[cfg(feature = "std")]
5468    #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
5469    #[inline(always)]
5470    fn write_to_io<W>(&self, mut dst: W) -> io::Result<()>
5471    where
5472        Self: Immutable,
5473        W: io::Write,
5474    {
5475        dst.write_all(self.as_bytes())
5476    }
5477
5478    #[deprecated(since = "0.8.0", note = "`IntoBytes::as_bytes_mut` was renamed to `as_mut_bytes`")]
5479    #[doc(hidden)]
5480    #[inline]
5481    fn as_bytes_mut(&mut self) -> &mut [u8]
5482    where
5483        Self: FromBytes,
5484    {
5485        self.as_mut_bytes()
5486    }
5487}
5488
5489/// Analyzes whether a type is [`Unaligned`].
5490///
5491/// This derive analyzes, at compile time, whether the annotated type satisfies
5492/// the [safety conditions] of `Unaligned` and implements `Unaligned` if it is
5493/// sound to do so. This derive can be applied to structs, enums, and unions;
5494/// e.g.:
5495///
5496/// ```
5497/// # use zerocopy_derive::Unaligned;
5498/// #[derive(Unaligned)]
5499/// #[repr(C)]
5500/// struct MyStruct {
5501/// # /*
5502///     ...
5503/// # */
5504/// }
5505///
5506/// #[derive(Unaligned)]
5507/// #[repr(u8)]
5508/// enum MyEnum {
5509/// #   Variant0,
5510/// # /*
5511///     ...
5512/// # */
5513/// }
5514///
5515/// #[derive(Unaligned)]
5516/// #[repr(packed)]
5517/// union MyUnion {
5518/// #   variant: u8,
5519/// # /*
5520///     ...
5521/// # */
5522/// }
5523/// ```
5524///
5525/// # Analysis
5526///
5527/// *This section describes, roughly, the analysis performed by this derive to
5528/// determine whether it is sound to implement `Unaligned` for a given type.
5529/// Unless you are modifying the implementation of this derive, or attempting to
5530/// manually implement `Unaligned` for a type yourself, you don't need to read
5531/// this section.*
5532///
5533/// If a type has the following properties, then this derive can implement
5534/// `Unaligned` for that type:
5535///
5536/// - If the type is a struct or union:
5537///   - If `repr(align(N))` is provided, `N` must equal 1.
5538///   - If the type is `repr(C)` or `repr(transparent)`, all fields must be
5539///     [`Unaligned`].
5540///   - If the type is not `repr(C)` or `repr(transparent)`, it must be
5541///     `repr(packed)` or `repr(packed(1))`.
5542/// - If the type is an enum:
5543///   - If `repr(align(N))` is provided, `N` must equal 1.
5544///   - It must be a field-less enum (meaning that all variants have no fields).
5545///   - It must be `repr(i8)` or `repr(u8)`.
5546///
5547/// [safety conditions]: trait@Unaligned#safety
5548#[cfg(any(feature = "derive", test))]
5549#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
5550pub use zerocopy_derive::Unaligned;
5551
5552/// Types with no alignment requirement.
5553///
5554/// If `T: Unaligned`, then `align_of::<T>() == 1`.
5555///
5556/// # Implementation
5557///
5558/// **Do not implement this trait yourself!** Instead, use
5559/// [`#[derive(Unaligned)]`][derive]; e.g.:
5560///
5561/// ```
5562/// # use zerocopy_derive::Unaligned;
5563/// #[derive(Unaligned)]
5564/// #[repr(C)]
5565/// struct MyStruct {
5566/// # /*
5567///     ...
5568/// # */
5569/// }
5570///
5571/// #[derive(Unaligned)]
5572/// #[repr(u8)]
5573/// enum MyEnum {
5574/// #   Variant0,
5575/// # /*
5576///     ...
5577/// # */
5578/// }
5579///
5580/// #[derive(Unaligned)]
5581/// #[repr(packed)]
5582/// union MyUnion {
5583/// #   variant: u8,
5584/// # /*
5585///     ...
5586/// # */
5587/// }
5588/// ```
5589///
5590/// This derive performs a sophisticated, compile-time safety analysis to
5591/// determine whether a type is `Unaligned`.
5592///
5593/// # Safety
5594///
5595/// *This section describes what is required in order for `T: Unaligned`, and
5596/// what unsafe code may assume of such types. If you don't plan on implementing
5597/// `Unaligned` manually, and you don't plan on writing unsafe code that
5598/// operates on `Unaligned` types, then you don't need to read this section.*
5599///
5600/// If `T: Unaligned`, then unsafe code may assume that it is sound to produce a
5601/// reference to `T` at any memory location regardless of alignment. If a type
5602/// is marked as `Unaligned` which violates this contract, it may cause
5603/// undefined behavior.
5604///
5605/// `#[derive(Unaligned)]` only permits [types which satisfy these
5606/// requirements][derive-analysis].
5607///
5608#[cfg_attr(
5609    feature = "derive",
5610    doc = "[derive]: zerocopy_derive::Unaligned",
5611    doc = "[derive-analysis]: zerocopy_derive::Unaligned#analysis"
5612)]
5613#[cfg_attr(
5614    not(feature = "derive"),
5615    doc = concat!("[derive]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Unaligned.html"),
5616    doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", env!("CARGO_PKG_VERSION"), "/zerocopy/derive.Unaligned.html#analysis"),
5617)]
5618#[cfg_attr(
5619    not(no_zerocopy_diagnostic_on_unimplemented_1_78_0),
5620    diagnostic::on_unimplemented(note = "Consider adding `#[derive(Unaligned)]` to `{Self}`")
5621)]
pub unsafe trait Unaligned {
    // This hidden method is intended to prevent manual implementations of
    // `Unaligned` outside of this crate's derive: since downstream code cannot
    // see (or name) this method, it cannot provide the required impl.
    //
    // The `Self: Sized` bound makes it so that `Unaligned` is still object
    // safe.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;
}
5630
5631/// Derives optimized [`PartialEq`] and [`Eq`] implementations.
5632///
5633/// This derive can be applied to structs and enums implementing both
5634/// [`Immutable`] and [`IntoBytes`]; e.g.:
5635///
5636/// ```
5637/// # use zerocopy_derive::{ByteEq, Immutable, IntoBytes};
5638/// #[derive(ByteEq, Immutable, IntoBytes)]
5639/// #[repr(C)]
5640/// struct MyStruct {
5641/// # /*
5642///     ...
5643/// # */
5644/// }
5645///
5646/// #[derive(ByteEq, Immutable, IntoBytes)]
5647/// #[repr(u8)]
5648/// enum MyEnum {
5649/// #   Variant,
5650/// # /*
5651///     ...
5652/// # */
5653/// }
5654/// ```
5655///
5656/// The standard library's [`derive(Eq, PartialEq)`][derive@PartialEq] computes
5657/// equality by individually comparing each field. Instead, the implementation
/// of [`PartialEq::eq`] emitted by `derive(ByteEq)` converts the entirety of
5659/// `self` and `other` to byte slices and compares those slices for equality.
5660/// This may have performance advantages.
5661#[cfg(any(feature = "derive", test))]
5662#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
5663pub use zerocopy_derive::ByteEq;
5664/// Derives an optimized [`Hash`] implementation.
5665///
5666/// This derive can be applied to structs and enums implementing both
5667/// [`Immutable`] and [`IntoBytes`]; e.g.:
5668///
5669/// ```
5670/// # use zerocopy_derive::{ByteHash, Immutable, IntoBytes};
5671/// #[derive(ByteHash, Immutable, IntoBytes)]
5672/// #[repr(C)]
5673/// struct MyStruct {
5674/// # /*
5675///     ...
5676/// # */
5677/// }
5678///
5679/// #[derive(ByteHash, Immutable, IntoBytes)]
5680/// #[repr(u8)]
5681/// enum MyEnum {
5682/// #   Variant,
5683/// # /*
5684///     ...
5685/// # */
5686/// }
5687/// ```
5688///
5689/// The standard library's [`derive(Hash)`][derive@Hash] produces hashes by
5690/// individually hashing each field and combining the results. Instead, the
5691/// implementations of [`Hash::hash()`] and [`Hash::hash_slice()`] generated by
/// `derive(ByteHash)` convert the entirety of `self` to a byte slice and hash
5693/// it in a single call to [`Hasher::write()`]. This may have performance
5694/// advantages.
5695///
5696/// [`Hash`]: core::hash::Hash
5697/// [`Hash::hash()`]: core::hash::Hash::hash()
5698/// [`Hash::hash_slice()`]: core::hash::Hash::hash_slice()
5699#[cfg(any(feature = "derive", test))]
5700#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
5701pub use zerocopy_derive::ByteHash;
5702/// Implements [`SplitAt`].
5703///
5704/// This derive can be applied to structs; e.g.:
5705///
5706/// ```
/// # use zerocopy_derive::{KnownLayout, SplitAt};
/// #[derive(SplitAt, KnownLayout)]
/// #[repr(C)]
/// struct MyStruct {
/// # /*
///     ...
/// # */
/// #   trailing: [u8],
/// }
5715/// ```
5716#[cfg(any(feature = "derive", test))]
5717#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
5718pub use zerocopy_derive::SplitAt;
5719
// Deprecated, hidden free-function forms of the `FromZeros` `Vec` helpers,
// retained for backwards compatibility with pre-0.8.0 code (see the
// `#[deprecated]` notes below).
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
#[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
mod alloc_support {
    use super::*;

    /// Extends a `Vec<T>` by pushing `additional` new items onto the end of the
    /// vector. The new items are initialized with zeros.
    ///
    /// # Errors
    ///
    /// Forwards any `AllocError` produced by
    /// `<T as FromZeros>::extend_vec_zeroed`.
    #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
    #[doc(hidden)]
    #[deprecated(since = "0.8.0", note = "moved to `FromZeros`")]
    #[inline(always)]
    pub fn extend_vec_zeroed<T: FromZeros>(
        v: &mut Vec<T>,
        additional: usize,
    ) -> Result<(), AllocError> {
        // Deprecated shim; the real implementation lives on `FromZeros`.
        <T as FromZeros>::extend_vec_zeroed(v, additional)
    }

    /// Inserts `additional` new items into `Vec<T>` at `position`. The new
    /// items are initialized with zeros.
    ///
    /// # Errors
    ///
    /// Forwards any `AllocError` produced by
    /// `<T as FromZeros>::insert_vec_zeroed`.
    ///
    /// # Panics
    ///
    /// Panics if `position > v.len()`.
    #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
    #[doc(hidden)]
    #[deprecated(since = "0.8.0", note = "moved to `FromZeros`")]
    #[inline(always)]
    pub fn insert_vec_zeroed<T: FromZeros>(
        v: &mut Vec<T>,
        position: usize,
        additional: usize,
    ) -> Result<(), AllocError> {
        // Deprecated shim; the real implementation lives on `FromZeros`.
        <T as FromZeros>::insert_vec_zeroed(v, position, additional)
    }
}
5757
5758#[cfg(feature = "alloc")]
5759#[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
5760#[doc(hidden)]
5761pub use alloc_support::*;
5762
5763#[cfg(test)]
5764#[allow(clippy::assertions_on_result_states, clippy::unreadable_literal)]
5765mod tests {
5766    use static_assertions::assert_impl_all;
5767
5768    use super::*;
5769    use crate::util::testutil::*;
5770
    // An unsized type.
    //
    // This is used to test the custom derives of our traits. The `[u8]` type
    // gets a hand-rolled impl, so it doesn't exercise our custom derives.
    //
    // `repr(transparent)` gives `Unsized` the same layout as its single
    // `[u8]` field, which `Unsized::from_mut_slice` relies upon.
    #[derive(Debug, Eq, PartialEq, FromBytes, IntoBytes, Unaligned, Immutable)]
    #[repr(transparent)]
    struct Unsized([u8]);
5778
    impl Unsized {
        /// Reinterprets a `&mut [u8]` as a `&mut Unsized` over the same bytes.
        fn from_mut_slice(slc: &mut [u8]) -> &mut Unsized {
            // SAFETY: This is *probably* sound - since the layouts of `[u8]`
            // and `Unsized` are the same, so are the layouts of `&mut [u8]` and
            // `&mut Unsized`. [1] Even if it turns out that this isn't actually
            // guaranteed by the language spec, we can just change this since
            // it's in test code.
            //
            // [1] https://github.com/rust-lang/unsafe-code-guidelines/issues/375
            unsafe { mem::transmute(slc) }
        }
    }
5791
    #[test]
    fn test_known_layout() {
        // Test that `$ty` and `ManuallyDrop<$ty>` have the expected layout.
        // Test that `PhantomData<$ty>` has the same layout as `()` regardless
        // of `$ty`.
        macro_rules! test {
            ($ty:ty, $expect:expr) => {
                let expect = $expect;
                assert_eq!(<$ty as KnownLayout>::LAYOUT, expect);
                assert_eq!(<ManuallyDrop<$ty> as KnownLayout>::LAYOUT, expect);
                assert_eq!(<PhantomData<$ty> as KnownLayout>::LAYOUT, <() as KnownLayout>::LAYOUT);
            };
        }

        // Shorthand constructor for the expected `DstLayout`: a `None`
        // trailing-slice element size denotes a sized type of size `offset`;
        // `Some(elem_size)` denotes a slice DST.
        let layout =
            |offset, align, trailing_slice_elem_size, statically_shallow_unpadded| DstLayout {
                align: NonZeroUsize::new(align).unwrap(),
                size_info: match trailing_slice_elem_size {
                    None => SizeInfo::Sized { size: offset },
                    Some(elem_size) => {
                        SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size })
                    }
                },
                statically_shallow_unpadded,
            };

        test!((), layout(0, 1, None, false));
        test!(u8, layout(1, 1, None, false));
        // Use `align_of` because `u64` alignment may be smaller than 8 on some
        // platforms.
        test!(u64, layout(8, mem::align_of::<u64>(), None, false));
        test!(AU64, layout(8, 8, None, false));

        // `Option<&()>` is expected to be pointer-sized (it has the same
        // layout as `usize` thanks to the null-pointer niche).
        test!(Option<&'static ()>, usize::LAYOUT);

        test!([()], layout(0, 1, Some(0), true));
        test!([u8], layout(0, 1, Some(1), true));
        test!(str, layout(0, 1, Some(1), true));
    }
5831
    #[cfg(feature = "derive")]
    #[test]
    fn test_known_layout_derive() {
        // In this and other files (`late_compile_pass.rs`,
        // `mid_compile_pass.rs`, and `struct.rs`), we test success and failure
        // modes of `derive(KnownLayout)` for the following combination of
        // properties:
        //
        // +------------+--------------------------------------+-----------+
        // |            |      trailing field properties       |           |
        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
        // |------------+----------+----------------+----------+-----------|
        // |          N |        N |              N |        N |      KL00 |
        // |          N |        N |              N |        Y |      KL01 |
        // |          N |        N |              Y |        N |      KL02 |
        // |          N |        N |              Y |        Y |      KL03 |
        // |          N |        Y |              N |        N |      KL04 |
        // |          N |        Y |              N |        Y |      KL05 |
        // |          N |        Y |              Y |        N |      KL06 |
        // |          N |        Y |              Y |        Y |      KL07 |
        // |          Y |        N |              N |        N |      KL08 |
        // |          Y |        N |              N |        Y |      KL09 |
        // |          Y |        N |              Y |        N |      KL10 |
        // |          Y |        N |              Y |        Y |      KL11 |
        // |          Y |        Y |              N |        N |      KL12 |
        // |          Y |        Y |              N |        Y |      KL13 |
        // |          Y |        Y |              Y |        N |      KL14 |
        // |          Y |        Y |              Y |        Y |      KL15 |
        // +------------+----------+----------------+----------+-----------+

        // A type which does not implement `KnownLayout`, used as a field type
        // in the cases below.
        struct NotKnownLayout<T = ()> {
            _t: T,
        }

        // A helper type with alignment `ALIGN` and size `SIZE`, used to
        // simulate field types of arbitrary layout.
        #[derive(KnownLayout)]
        #[repr(C)]
        struct AlignSize<const ALIGN: usize, const SIZE: usize>
        where
            elain::Align<ALIGN>: elain::Alignment,
        {
            _align: elain::Align<ALIGN>,
            size: [u8; SIZE],
        }

        type AU16 = AlignSize<2, 2>;
        type AU32 = AlignSize<4, 4>;

        // Asserts, at compile time, that its argument's type is `KnownLayout`.
        fn _assert_kl<T: ?Sized + KnownLayout>(_: &T) {}

        // Shorthand constructors for the expected `DstLayout` values.
        let sized_layout = |align, size| DstLayout {
            align: NonZeroUsize::new(align).unwrap(),
            size_info: SizeInfo::Sized { size },
            statically_shallow_unpadded: false,
        };

        let unsized_layout = |align, elem_size, offset, statically_shallow_unpadded| DstLayout {
            align: NonZeroUsize::new(align).unwrap(),
            size_info: SizeInfo::SliceDst(TrailingSliceLayout { offset, elem_size }),
            statically_shallow_unpadded,
        };

        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
        // |          N |        N |              N |        Y |      KL01 |
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        struct KL01(NotKnownLayout<AU32>, NotKnownLayout<AU16>);

        let expected = DstLayout::for_type::<KL01>();

        assert_eq!(<KL01 as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL01 as KnownLayout>::LAYOUT, sized_layout(4, 8));

        // ...with `align(N)`:
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(align(64))]
        struct KL01Align(NotKnownLayout<AU32>, NotKnownLayout<AU16>);

        let expected = DstLayout::for_type::<KL01Align>();

        assert_eq!(<KL01Align as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL01Align as KnownLayout>::LAYOUT, sized_layout(64, 64));

        // ...with `packed`:
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(packed)]
        struct KL01Packed(NotKnownLayout<AU32>, NotKnownLayout<AU16>);

        let expected = DstLayout::for_type::<KL01Packed>();

        assert_eq!(<KL01Packed as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL01Packed as KnownLayout>::LAYOUT, sized_layout(1, 6));

        // ...with `packed(N)`:
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(packed(2))]
        struct KL01PackedN(NotKnownLayout<AU32>, NotKnownLayout<AU16>);

        assert_impl_all!(KL01PackedN: KnownLayout);

        let expected = DstLayout::for_type::<KL01PackedN>();

        assert_eq!(<KL01PackedN as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL01PackedN as KnownLayout>::LAYOUT, sized_layout(2, 6));

        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
        // |          N |        N |              Y |        Y |      KL03 |
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        struct KL03(NotKnownLayout, u8);

        let expected = DstLayout::for_type::<KL03>();

        assert_eq!(<KL03 as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL03 as KnownLayout>::LAYOUT, sized_layout(1, 1));

        // ... with `align(N)`
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(align(64))]
        struct KL03Align(NotKnownLayout<AU32>, u8);

        let expected = DstLayout::for_type::<KL03Align>();

        assert_eq!(<KL03Align as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL03Align as KnownLayout>::LAYOUT, sized_layout(64, 64));

        // ... with `packed`:
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(packed)]
        struct KL03Packed(NotKnownLayout<AU32>, u8);

        let expected = DstLayout::for_type::<KL03Packed>();

        assert_eq!(<KL03Packed as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL03Packed as KnownLayout>::LAYOUT, sized_layout(1, 5));

        // ... with `packed(N)`
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(packed(2))]
        struct KL03PackedN(NotKnownLayout<AU32>, u8);

        assert_impl_all!(KL03PackedN: KnownLayout);

        let expected = DstLayout::for_type::<KL03PackedN>();

        assert_eq!(<KL03PackedN as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL03PackedN as KnownLayout>::LAYOUT, sized_layout(2, 6));

        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
        // |          N |        Y |              N |        Y |      KL05 |
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        struct KL05<T>(u8, T);

        fn _test_kl05<T>(t: T) -> impl KnownLayout {
            KL05(0u8, t)
        }

        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
        // |          N |        Y |              Y |        Y |      KL07 |
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        struct KL07<T: KnownLayout>(u8, T);

        fn _test_kl07<T: KnownLayout>(t: T) -> impl KnownLayout {
            let _ = KL07(0u8, t);
        }

        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
        // |          Y |        N |              Y |        N |      KL10 |
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(C)]
        struct KL10(NotKnownLayout<AU32>, [u8]);

        let expected = DstLayout::new_zst(None)
            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), None)
            .extend(<[u8] as KnownLayout>::LAYOUT, None)
            .pad_to_align();

        assert_eq!(<KL10 as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL10 as KnownLayout>::LAYOUT, unsized_layout(4, 1, 4, false));

        // ...with `align(N)`:
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(C, align(64))]
        struct KL10Align(NotKnownLayout<AU32>, [u8]);

        let repr_align = NonZeroUsize::new(64);

        let expected = DstLayout::new_zst(repr_align)
            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), None)
            .extend(<[u8] as KnownLayout>::LAYOUT, None)
            .pad_to_align();

        assert_eq!(<KL10Align as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL10Align as KnownLayout>::LAYOUT, unsized_layout(64, 1, 4, false));

        // ...with `packed`:
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(C, packed)]
        struct KL10Packed(NotKnownLayout<AU32>, [u8]);

        let repr_packed = NonZeroUsize::new(1);

        let expected = DstLayout::new_zst(None)
            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), repr_packed)
            .extend(<[u8] as KnownLayout>::LAYOUT, repr_packed)
            .pad_to_align();

        assert_eq!(<KL10Packed as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL10Packed as KnownLayout>::LAYOUT, unsized_layout(1, 1, 4, false));

        // ...with `packed(N)`:
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(C, packed(2))]
        struct KL10PackedN(NotKnownLayout<AU32>, [u8]);

        let repr_packed = NonZeroUsize::new(2);

        let expected = DstLayout::new_zst(None)
            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), repr_packed)
            .extend(<[u8] as KnownLayout>::LAYOUT, repr_packed)
            .pad_to_align();

        assert_eq!(<KL10PackedN as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL10PackedN as KnownLayout>::LAYOUT, unsized_layout(2, 1, 4, false));

        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
        // |          Y |        N |              Y |        Y |      KL11 |
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(C)]
        struct KL11(NotKnownLayout<AU64>, u8);

        let expected = DstLayout::new_zst(None)
            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), None)
            .extend(<u8 as KnownLayout>::LAYOUT, None)
            .pad_to_align();

        assert_eq!(<KL11 as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL11 as KnownLayout>::LAYOUT, sized_layout(8, 16));

        // ...with `align(N)`:
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(C, align(64))]
        struct KL11Align(NotKnownLayout<AU64>, u8);

        let repr_align = NonZeroUsize::new(64);

        let expected = DstLayout::new_zst(repr_align)
            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), None)
            .extend(<u8 as KnownLayout>::LAYOUT, None)
            .pad_to_align();

        assert_eq!(<KL11Align as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL11Align as KnownLayout>::LAYOUT, sized_layout(64, 64));

        // ...with `packed`:
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(C, packed)]
        struct KL11Packed(NotKnownLayout<AU64>, u8);

        let repr_packed = NonZeroUsize::new(1);

        let expected = DstLayout::new_zst(None)
            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), repr_packed)
            .extend(<u8 as KnownLayout>::LAYOUT, repr_packed)
            .pad_to_align();

        assert_eq!(<KL11Packed as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL11Packed as KnownLayout>::LAYOUT, sized_layout(1, 9));

        // ...with `packed(N)`:
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(C, packed(2))]
        struct KL11PackedN(NotKnownLayout<AU64>, u8);

        let repr_packed = NonZeroUsize::new(2);

        let expected = DstLayout::new_zst(None)
            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), repr_packed)
            .extend(<u8 as KnownLayout>::LAYOUT, repr_packed)
            .pad_to_align();

        assert_eq!(<KL11PackedN as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL11PackedN as KnownLayout>::LAYOUT, sized_layout(2, 10));

        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
        // |          Y |        Y |              Y |        N |      KL14 |
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(C)]
        struct KL14<T: ?Sized + KnownLayout>(u8, T);

        fn _test_kl14<T: ?Sized + KnownLayout>(kl: &KL14<T>) {
            _assert_kl(kl)
        }

        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
        // |          Y |        Y |              Y |        Y |      KL15 |
        #[allow(dead_code)]
        #[derive(KnownLayout)]
        #[repr(C)]
        struct KL15<T: KnownLayout>(u8, T);

        fn _test_kl15<T: KnownLayout>(t: T) -> impl KnownLayout {
            let _ = KL15(0u8, t);
        }

        // Test a variety of combinations of field types:
        //  - ()
        //  - u8
        //  - AU16
        //  - [()]
        //  - [u8]
        //  - [AU16]

        #[allow(clippy::upper_case_acronyms, dead_code)]
        #[derive(KnownLayout)]
        #[repr(C)]
        struct KLTU<T, U: ?Sized>(T, U);

        assert_eq!(<KLTU<(), ()> as KnownLayout>::LAYOUT, sized_layout(1, 0));

        assert_eq!(<KLTU<(), u8> as KnownLayout>::LAYOUT, sized_layout(1, 1));

        assert_eq!(<KLTU<(), AU16> as KnownLayout>::LAYOUT, sized_layout(2, 2));

        assert_eq!(<KLTU<(), [()]> as KnownLayout>::LAYOUT, unsized_layout(1, 0, 0, false));

        assert_eq!(<KLTU<(), [u8]> as KnownLayout>::LAYOUT, unsized_layout(1, 1, 0, false));

        assert_eq!(<KLTU<(), [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 0, false));

        assert_eq!(<KLTU<u8, ()> as KnownLayout>::LAYOUT, sized_layout(1, 1));

        assert_eq!(<KLTU<u8, u8> as KnownLayout>::LAYOUT, sized_layout(1, 2));

        assert_eq!(<KLTU<u8, AU16> as KnownLayout>::LAYOUT, sized_layout(2, 4));

        assert_eq!(<KLTU<u8, [()]> as KnownLayout>::LAYOUT, unsized_layout(1, 0, 1, false));

        assert_eq!(<KLTU<u8, [u8]> as KnownLayout>::LAYOUT, unsized_layout(1, 1, 1, false));

        assert_eq!(<KLTU<u8, [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2, false));

        assert_eq!(<KLTU<AU16, ()> as KnownLayout>::LAYOUT, sized_layout(2, 2));

        assert_eq!(<KLTU<AU16, u8> as KnownLayout>::LAYOUT, sized_layout(2, 4));

        assert_eq!(<KLTU<AU16, AU16> as KnownLayout>::LAYOUT, sized_layout(2, 4));

        assert_eq!(<KLTU<AU16, [()]> as KnownLayout>::LAYOUT, unsized_layout(2, 0, 2, false));

        assert_eq!(<KLTU<AU16, [u8]> as KnownLayout>::LAYOUT, unsized_layout(2, 1, 2, false));

        assert_eq!(<KLTU<AU16, [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2, false));

        // Test a variety of field counts.

        #[derive(KnownLayout)]
        #[repr(C)]
        struct KLF0;

        assert_eq!(<KLF0 as KnownLayout>::LAYOUT, sized_layout(1, 0));

        #[derive(KnownLayout)]
        #[repr(C)]
        struct KLF1([u8]);

        assert_eq!(<KLF1 as KnownLayout>::LAYOUT, unsized_layout(1, 1, 0, true));

        #[derive(KnownLayout)]
        #[repr(C)]
        struct KLF2(NotKnownLayout<u8>, [u8]);

        assert_eq!(<KLF2 as KnownLayout>::LAYOUT, unsized_layout(1, 1, 1, false));

        #[derive(KnownLayout)]
        #[repr(C)]
        struct KLF3(NotKnownLayout<u8>, NotKnownLayout<AU16>, [u8]);

        assert_eq!(<KLF3 as KnownLayout>::LAYOUT, unsized_layout(2, 1, 4, false));

        #[derive(KnownLayout)]
        #[repr(C)]
        struct KLF4(NotKnownLayout<u8>, NotKnownLayout<AU16>, NotKnownLayout<AU32>, [u8]);

        assert_eq!(<KLF4 as KnownLayout>::LAYOUT, unsized_layout(4, 1, 8, false));
    }
6234
6235    #[test]
6236    fn test_object_safety() {
6237        fn _takes_no_cell(_: &dyn Immutable) {}
6238        fn _takes_unaligned(_: &dyn Unaligned) {}
6239    }
6240
6241    #[test]
6242    fn test_from_zeros_only() {
6243        // Test types that implement `FromZeros` but not `FromBytes`.
6244
6245        assert!(!bool::new_zeroed());
6246        assert_eq!(char::new_zeroed(), '\0');
6247
6248        #[cfg(feature = "alloc")]
6249        {
6250            assert_eq!(bool::new_box_zeroed(), Ok(Box::new(false)));
6251            assert_eq!(char::new_box_zeroed(), Ok(Box::new('\0')));
6252
6253            assert_eq!(
6254                <[bool]>::new_box_zeroed_with_elems(3).unwrap().as_ref(),
6255                [false, false, false]
6256            );
6257            assert_eq!(
6258                <[char]>::new_box_zeroed_with_elems(3).unwrap().as_ref(),
6259                ['\0', '\0', '\0']
6260            );
6261
6262            assert_eq!(bool::new_vec_zeroed(3).unwrap().as_ref(), [false, false, false]);
6263            assert_eq!(char::new_vec_zeroed(3).unwrap().as_ref(), ['\0', '\0', '\0']);
6264        }
6265
6266        let mut string = "hello".to_string();
6267        let s: &mut str = string.as_mut();
6268        assert_eq!(s, "hello");
6269        s.zero();
6270        assert_eq!(s, "\0\0\0\0\0");
6271    }
6272
    #[test]
    fn test_zst_count_preserved() {
        // Test that, when an explicit count is provided for a type with a
        // ZST trailing slice element, that count is preserved. This is
        // important since, for such types, all element counts result in objects
        // of the same size, and so the correct behavior is ambiguous. However,
        // preserving the count as requested by the user is the behavior that we
        // document publicly.

        // FromZeros methods
        #[cfg(feature = "alloc")]
        assert_eq!(<[()]>::new_box_zeroed_with_elems(3).unwrap().len(), 3);
        #[cfg(feature = "alloc")]
        assert_eq!(<()>::new_vec_zeroed(3).unwrap().len(), 3);

        // FromBytes methods
        //
        // Note: empty byte slices suffice here because `[()]`'s elements are
        // zero-sized, so any element count fits in zero bytes.
        assert_eq!(<[()]>::ref_from_bytes_with_elems(&[][..], 3).unwrap().len(), 3);
        assert_eq!(<[()]>::ref_from_prefix_with_elems(&[][..], 3).unwrap().0.len(), 3);
        assert_eq!(<[()]>::ref_from_suffix_with_elems(&[][..], 3).unwrap().1.len(), 3);
        assert_eq!(<[()]>::mut_from_bytes_with_elems(&mut [][..], 3).unwrap().len(), 3);
        assert_eq!(<[()]>::mut_from_prefix_with_elems(&mut [][..], 3).unwrap().0.len(), 3);
        assert_eq!(<[()]>::mut_from_suffix_with_elems(&mut [][..], 3).unwrap().1.len(), 3);
    }
6296
    #[test]
    fn test_read_write() {
        // `VAL_BYTES` is `VAL`'s native-endian byte representation; selecting
        // the conversion via `cfg` keeps the constant correct on any target.
        const VAL: u64 = 0x12345678;
        #[cfg(target_endian = "big")]
        const VAL_BYTES: [u8; 8] = VAL.to_be_bytes();
        #[cfg(target_endian = "little")]
        const VAL_BYTES: [u8; 8] = VAL.to_le_bytes();
        const ZEROS: [u8; 8] = [0u8; 8];

        // Test `FromBytes::{read_from, read_from_prefix, read_from_suffix}`.

        assert_eq!(u64::read_from_bytes(&VAL_BYTES[..]), Ok(VAL));
        // The first 8 bytes are from `VAL_BYTES` and the second 8 bytes are all
        // zeros.
        let bytes_with_prefix: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]);
        // `read_from_prefix` yields `(value, unconsumed_suffix)` while
        // `read_from_suffix` yields `(unconsumed_prefix, value)`.
        assert_eq!(u64::read_from_prefix(&bytes_with_prefix[..]), Ok((VAL, &ZEROS[..])));
        assert_eq!(u64::read_from_suffix(&bytes_with_prefix[..]), Ok((&VAL_BYTES[..], 0)));
        // The first 8 bytes are all zeros and the second 8 bytes are from
        // `VAL_BYTES`
        let bytes_with_suffix: [u8; 16] = transmute!([[0; 8], VAL_BYTES]);
        assert_eq!(u64::read_from_prefix(&bytes_with_suffix[..]), Ok((0, &VAL_BYTES[..])));
        assert_eq!(u64::read_from_suffix(&bytes_with_suffix[..]), Ok((&ZEROS[..], VAL)));

        // Test `IntoBytes::{write_to, write_to_prefix, write_to_suffix}`.

        // `write_to` with an exactly-sized destination.
        let mut bytes = [0u8; 8];
        assert_eq!(VAL.write_to(&mut bytes[..]), Ok(()));
        assert_eq!(bytes, VAL_BYTES);
        // `write_to_prefix`/`write_to_suffix` accept a larger destination and
        // leave the bytes outside the written region untouched (still zero).
        let mut bytes = [0u8; 16];
        assert_eq!(VAL.write_to_prefix(&mut bytes[..]), Ok(()));
        let want: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]);
        assert_eq!(bytes, want);
        let mut bytes = [0u8; 16];
        assert_eq!(VAL.write_to_suffix(&mut bytes[..]), Ok(()));
        let want: [u8; 16] = transmute!([[0; 8], VAL_BYTES]);
        assert_eq!(bytes, want);
    }
6334
    #[test]
    #[cfg(feature = "std")]
    fn test_read_io_with_padding_soundness() {
        // This test is designed to exhibit potential UB in
        // `FromBytes::read_from_io`. (see #2319, #2320).

        // On most platforms (where `align_of::<u16>() == 2`), `WithPadding`
        // will have inter-field padding between `x` and `y`.
        #[derive(FromBytes)]
        #[repr(C)]
        struct WithPadding {
            x: u8,
            y: u16,
        }
        // A `Read` impl whose `read` observes every byte of the buffer it is
        // handed. If `read_from_io` were to pass a buffer overlapping
        // `WithPadding`'s uninitialized padding bytes, the branch below would
        // constitute UB, which Miri can detect.
        struct ReadsInRead;
        impl std::io::Read for ReadsInRead {
            fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
                // This body branches on every byte of `buf`, ensuring that it
                // exhibits UB if any byte of `buf` is uninitialized.
                if buf.iter().all(|&x| x == 0) {
                    Ok(buf.len())
                } else {
                    buf.iter_mut().for_each(|x| *x = 0);
                    Ok(buf.len())
                }
            }
        }
        // Either branch of `read` leaves `buf` all-zeros, so the parsed value
        // must be all-zeros as well.
        assert!(matches!(WithPadding::read_from_io(ReadsInRead), Ok(WithPadding { x: 0, y: 0 })));
    }
6364
6365    #[test]
6366    #[cfg(feature = "std")]
6367    fn test_read_write_io() {
6368        let mut long_buffer = [0, 0, 0, 0];
6369        assert!(matches!(u16::MAX.write_to_io(&mut long_buffer[..]), Ok(())));
6370        assert_eq!(long_buffer, [255, 255, 0, 0]);
6371        assert!(matches!(u16::read_from_io(&long_buffer[..]), Ok(u16::MAX)));
6372
6373        let mut short_buffer = [0, 0];
6374        assert!(u32::MAX.write_to_io(&mut short_buffer[..]).is_err());
6375        assert_eq!(short_buffer, [255, 255]);
6376        assert!(u32::read_from_io(&short_buffer[..]).is_err());
6377    }
6378
    #[test]
    fn test_try_from_bytes_try_read_from() {
        // `bool`'s only valid byte values are 0 and 1, which lets this test
        // exercise both the success paths and the validity-failure paths.
        assert_eq!(<bool as TryFromBytes>::try_read_from_bytes(&[0]), Ok(false));
        assert_eq!(<bool as TryFromBytes>::try_read_from_bytes(&[1]), Ok(true));

        assert_eq!(<bool as TryFromBytes>::try_read_from_prefix(&[0, 2]), Ok((false, &[2][..])));
        assert_eq!(<bool as TryFromBytes>::try_read_from_prefix(&[1, 2]), Ok((true, &[2][..])));

        assert_eq!(<bool as TryFromBytes>::try_read_from_suffix(&[2, 0]), Ok((&[2][..], false)));
        assert_eq!(<bool as TryFromBytes>::try_read_from_suffix(&[2, 1]), Ok((&[2][..], true)));

        // If we don't pass enough bytes, it fails.
        assert!(matches!(
            <u8 as TryFromBytes>::try_read_from_bytes(&[]),
            Err(TryReadError::Size(_))
        ));
        assert!(matches!(
            <u8 as TryFromBytes>::try_read_from_prefix(&[]),
            Err(TryReadError::Size(_))
        ));
        assert!(matches!(
            <u8 as TryFromBytes>::try_read_from_suffix(&[]),
            Err(TryReadError::Size(_))
        ));

        // If we pass too many bytes, it fails. (Only `try_read_from_bytes`
        // requires an exactly-sized buffer; the prefix/suffix variants accept
        // extra bytes by design.)
        assert!(matches!(
            <u8 as TryFromBytes>::try_read_from_bytes(&[0, 0]),
            Err(TryReadError::Size(_))
        ));

        // If we pass an invalid value, it fails.
        assert!(matches!(
            <bool as TryFromBytes>::try_read_from_bytes(&[2]),
            Err(TryReadError::Validity(_))
        ));
        assert!(matches!(
            <bool as TryFromBytes>::try_read_from_prefix(&[2, 0]),
            Err(TryReadError::Validity(_))
        ));
        assert!(matches!(
            <bool as TryFromBytes>::try_read_from_suffix(&[0, 2]),
            Err(TryReadError::Validity(_))
        ));

        // Reading from a misaligned buffer should still succeed. Since `AU64`'s
        // alignment is 8, and since we read from two adjacent addresses one
        // byte apart, it is guaranteed that at least one of them (though
        // possibly both) will be misaligned.
        let bytes: [u8; 9] = [0, 0, 0, 0, 0, 0, 0, 0, 0];
        assert_eq!(<AU64 as TryFromBytes>::try_read_from_bytes(&bytes[..8]), Ok(AU64(0)));
        assert_eq!(<AU64 as TryFromBytes>::try_read_from_bytes(&bytes[1..9]), Ok(AU64(0)));

        assert_eq!(
            <AU64 as TryFromBytes>::try_read_from_prefix(&bytes[..8]),
            Ok((AU64(0), &[][..]))
        );
        assert_eq!(
            <AU64 as TryFromBytes>::try_read_from_prefix(&bytes[1..9]),
            Ok((AU64(0), &[][..]))
        );

        assert_eq!(
            <AU64 as TryFromBytes>::try_read_from_suffix(&bytes[..8]),
            Ok((&[][..], AU64(0)))
        );
        assert_eq!(
            <AU64 as TryFromBytes>::try_read_from_suffix(&bytes[1..9]),
            Ok((&[][..], AU64(0)))
        );
    }
6450
    #[test]
    fn test_ref_from_mut_from_bytes() {
        // Test `FromBytes::{ref_from_bytes, mut_from_bytes}{,_prefix,Suffix}`
        // success cases. Exhaustive coverage for these methods is covered by
        // the `Ref` tests above, which these helper methods defer to.
        //
        // NOTE: the assertions below are order-dependent — each mutation
        // through a returned reference is observed by the assertions that
        // follow it.

        let mut buf =
            Align::<[u8; 16], AU64>::new([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);

        // `buf.t` is 8-aligned, so its second half is 8-aligned and exactly 8
        // bytes long: it parses as a single `AU64`.
        assert_eq!(
            AU64::ref_from_bytes(&buf.t[8..]).unwrap().0.to_ne_bytes(),
            [8, 9, 10, 11, 12, 13, 14, 15]
        );
        // Overwrite bytes 8..16 with 0x01 each through a mutable reference.
        let suffix = AU64::mut_from_bytes(&mut buf.t[8..]).unwrap();
        suffix.0 = 0x0101010101010101;
        // The `[u8; 9]` is a non-half size of the full buffer, which would catch
        // `from_prefix` having the same implementation as `from_suffix` (issues #506, #511).
        assert_eq!(
            <[u8; 9]>::ref_from_suffix(&buf.t[..]).unwrap(),
            (&[0, 1, 2, 3, 4, 5, 6][..], &[7u8, 1, 1, 1, 1, 1, 1, 1, 1])
        );
        // Overwrite bytes 8..16 (the 8-aligned suffix of `buf.t[1..]`) with
        // 0x02 each.
        let (prefix, suffix) = AU64::mut_from_suffix(&mut buf.t[1..]).unwrap();
        assert_eq!(prefix, &mut [1u8, 2, 3, 4, 5, 6, 7][..]);
        suffix.0 = 0x0202020202020202;
        // Set byte 6 (the first byte of the 10-byte suffix) to 42.
        let (prefix, suffix) = <[u8; 10]>::mut_from_suffix(&mut buf.t[..]).unwrap();
        assert_eq!(prefix, &mut [0u8, 1, 2, 3, 4, 5][..]);
        suffix[0] = 42;
        assert_eq!(
            <[u8; 9]>::ref_from_prefix(&buf.t[..]).unwrap(),
            (&[0u8, 1, 2, 3, 4, 5, 42, 7, 2], &[2u8, 2, 2, 2, 2, 2, 2][..])
        );
        // Set byte 1 to 30, then check the buffer's final state in full.
        <[u8; 2]>::mut_from_prefix(&mut buf.t[..]).unwrap().0[1] = 30;
        assert_eq!(buf.t, [0, 30, 2, 3, 4, 5, 42, 7, 2, 2, 2, 2, 2, 2, 2, 2]);
    }
6485
6486    #[test]
6487    fn test_ref_from_mut_from_bytes_error() {
6488        // Test `FromBytes::{ref_from_bytes, mut_from_bytes}{,_prefix,Suffix}`
6489        // error cases.
6490
6491        // Fail because the buffer is too large.
6492        let mut buf = Align::<[u8; 16], AU64>::default();
6493        // `buf.t` should be aligned to 8, so only the length check should fail.
6494        assert!(AU64::ref_from_bytes(&buf.t[..]).is_err());
6495        assert!(AU64::mut_from_bytes(&mut buf.t[..]).is_err());
6496        assert!(<[u8; 8]>::ref_from_bytes(&buf.t[..]).is_err());
6497        assert!(<[u8; 8]>::mut_from_bytes(&mut buf.t[..]).is_err());
6498
6499        // Fail because the buffer is too small.
6500        let mut buf = Align::<[u8; 4], AU64>::default();
6501        assert!(AU64::ref_from_bytes(&buf.t[..]).is_err());
6502        assert!(AU64::mut_from_bytes(&mut buf.t[..]).is_err());
6503        assert!(<[u8; 8]>::ref_from_bytes(&buf.t[..]).is_err());
6504        assert!(<[u8; 8]>::mut_from_bytes(&mut buf.t[..]).is_err());
6505        assert!(AU64::ref_from_prefix(&buf.t[..]).is_err());
6506        assert!(AU64::mut_from_prefix(&mut buf.t[..]).is_err());
6507        assert!(AU64::ref_from_suffix(&buf.t[..]).is_err());
6508        assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_err());
6509        assert!(<[u8; 8]>::ref_from_prefix(&buf.t[..]).is_err());
6510        assert!(<[u8; 8]>::mut_from_prefix(&mut buf.t[..]).is_err());
6511        assert!(<[u8; 8]>::ref_from_suffix(&buf.t[..]).is_err());
6512        assert!(<[u8; 8]>::mut_from_suffix(&mut buf.t[..]).is_err());
6513
6514        // Fail because the alignment is insufficient.
6515        let mut buf = Align::<[u8; 13], AU64>::default();
6516        assert!(AU64::ref_from_bytes(&buf.t[1..]).is_err());
6517        assert!(AU64::mut_from_bytes(&mut buf.t[1..]).is_err());
6518        assert!(AU64::ref_from_bytes(&buf.t[1..]).is_err());
6519        assert!(AU64::mut_from_bytes(&mut buf.t[1..]).is_err());
6520        assert!(AU64::ref_from_prefix(&buf.t[1..]).is_err());
6521        assert!(AU64::mut_from_prefix(&mut buf.t[1..]).is_err());
6522        assert!(AU64::ref_from_suffix(&buf.t[..]).is_err());
6523        assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_err());
6524    }
6525
    #[test]
    fn test_to_methods() {
        /// Run a series of tests by calling `IntoBytes` methods on `t`.
        ///
        /// `bytes` is the expected byte sequence returned from `t.as_bytes()`
        /// before `t` has been modified. `post_mutation` is the expected
        /// sequence returned from `t.as_bytes()` after `t.as_mut_bytes()[0]`
        /// has had its bits flipped (by applying `^= 0xFF`).
        ///
        /// `N` is the size of `t` in bytes.
        fn test<T: FromBytes + IntoBytes + Immutable + Debug + Eq + ?Sized, const N: usize>(
            t: &mut T,
            bytes: &[u8],
            post_mutation: &T,
        ) {
            // Test that we can access the underlying bytes, and that we get the
            // right bytes and the right number of bytes.
            assert_eq!(t.as_bytes(), bytes);

            // Test that changes to the underlying byte slices are reflected in
            // the original object.
            t.as_mut_bytes()[0] ^= 0xFF;
            assert_eq!(t, post_mutation);
            // Undo the mutation so the assertions below see `t`'s original
            // bytes again.
            t.as_mut_bytes()[0] ^= 0xFF;

            // `write_to` rejects slices that are too small or too large.
            assert!(t.write_to(&mut vec![0; N - 1][..]).is_err());
            assert!(t.write_to(&mut vec![0; N + 1][..]).is_err());

            // `write_to` works as expected.
            let mut bytes = [0; N];
            assert_eq!(t.write_to(&mut bytes[..]), Ok(()));
            assert_eq!(bytes, t.as_bytes());

            // `write_to_prefix` rejects slices that are too small.
            assert!(t.write_to_prefix(&mut vec![0; N - 1][..]).is_err());

            // `write_to_prefix` works with exact-sized slices.
            let mut bytes = [0; N];
            assert_eq!(t.write_to_prefix(&mut bytes[..]), Ok(()));
            assert_eq!(bytes, t.as_bytes());

            // `write_to_prefix` works with too-large slices, and any bytes past
            // the prefix aren't modified.
            let mut too_many_bytes = vec![0; N + 1];
            too_many_bytes[N] = 123;
            assert_eq!(t.write_to_prefix(&mut too_many_bytes[..]), Ok(()));
            assert_eq!(&too_many_bytes[..N], t.as_bytes());
            assert_eq!(too_many_bytes[N], 123);

            // `write_to_suffix` rejects slices that are too small.
            assert!(t.write_to_suffix(&mut vec![0; N - 1][..]).is_err());

            // `write_to_suffix` works with exact-sized slices.
            let mut bytes = [0; N];
            assert_eq!(t.write_to_suffix(&mut bytes[..]), Ok(()));
            assert_eq!(bytes, t.as_bytes());

            // `write_to_suffix` works with too-large slices, and any bytes
            // before the suffix aren't modified.
            let mut too_many_bytes = vec![0; N + 1];
            too_many_bytes[0] = 123;
            assert_eq!(t.write_to_suffix(&mut too_many_bytes[..]), Ok(()));
            assert_eq!(&too_many_bytes[1..], t.as_bytes());
            assert_eq!(too_many_bytes[0], 123);
        }

        // Three consecutive 4-byte fields under `repr(C)`: 12 bytes total, as
        // reflected by `N = 12` and the 12-byte expected vectors below.
        #[derive(Debug, Eq, PartialEq, FromBytes, IntoBytes, Immutable)]
        #[repr(C)]
        struct Foo {
            a: u32,
            b: Wrapping<u32>,
            c: Option<NonZeroU32>,
        }

        // Field bytes appear in native endianness.
        let expected_bytes: Vec<u8> = if cfg!(target_endian = "little") {
            vec![1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0]
        } else {
            vec![0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0]
        };
        // Flipping the first serialized byte flips the lowest-addressed byte
        // of `a`; its numerical effect depends on endianness.
        let post_mutation_expected_a =
            if cfg!(target_endian = "little") { 0x00_00_00_FE } else { 0xFF_00_00_01 };
        test::<_, 12>(
            &mut Foo { a: 1, b: Wrapping(2), c: None },
            expected_bytes.as_bytes(),
            &Foo { a: post_mutation_expected_a, b: Wrapping(2), c: None },
        );
        // Also exercise the unsized (`T: ?Sized`) code path.
        test::<_, 3>(
            Unsized::from_mut_slice(&mut [1, 2, 3]),
            &[1, 2, 3],
            Unsized::from_mut_slice(&mut [0xFE, 2, 3]),
        );
    }
6619
6620    #[test]
6621    fn test_array() {
6622        #[derive(FromBytes, IntoBytes, Immutable)]
6623        #[repr(C)]
6624        struct Foo {
6625            a: [u16; 33],
6626        }
6627
6628        let foo = Foo { a: [0xFFFF; 33] };
6629        let expected = [0xFFu8; 66];
6630        assert_eq!(foo.as_bytes(), &expected[..]);
6631    }
6632
6633    #[test]
6634    fn test_new_zeroed() {
6635        assert!(!bool::new_zeroed());
6636        assert_eq!(u64::new_zeroed(), 0);
6637        // This test exists in order to exercise unsafe code, especially when
6638        // running under Miri.
6639        #[allow(clippy::unit_cmp)]
6640        {
6641            assert_eq!(<()>::new_zeroed(), ());
6642        }
6643    }
6644
6645    #[test]
6646    fn test_transparent_packed_generic_struct() {
6647        #[derive(IntoBytes, FromBytes, Unaligned)]
6648        #[repr(transparent)]
6649        #[allow(dead_code)] // We never construct this type
6650        struct Foo<T> {
6651            _t: T,
6652            _phantom: PhantomData<()>,
6653        }
6654
6655        assert_impl_all!(Foo<u32>: FromZeros, FromBytes, IntoBytes);
6656        assert_impl_all!(Foo<u8>: Unaligned);
6657
6658        #[derive(IntoBytes, FromBytes, Unaligned)]
6659        #[repr(C, packed)]
6660        #[allow(dead_code)] // We never construct this type
6661        struct Bar<T, U> {
6662            _t: T,
6663            _u: U,
6664        }
6665
6666        assert_impl_all!(Bar<u8, AU64>: FromZeros, FromBytes, IntoBytes, Unaligned);
6667    }
6668
    // Tests of allocation-dependent APIs: `Box`/`Vec` zeroed constructors and
    // the vector-extension helpers. Only compiled with the `alloc` feature.
    #[cfg(feature = "alloc")]
    mod alloc {
        use super::*;

        // The `cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))`
        // gates below disable these tests on toolchains which (per the cfg
        // name) predate the Rust 1.57 features they rely on.
        #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
        #[test]
        fn test_extend_vec_zeroed() {
            // Test extending when there is an existing allocation.
            let mut v = vec![100u16, 200, 300];
            FromZeros::extend_vec_zeroed(&mut v, 3).unwrap();
            assert_eq!(v.len(), 6);
            assert_eq!(&*v, &[100, 200, 300, 0, 0, 0]);
            drop(v);

            // Test extending when there is no existing allocation.
            let mut v: Vec<u64> = Vec::new();
            FromZeros::extend_vec_zeroed(&mut v, 3).unwrap();
            assert_eq!(v.len(), 3);
            assert_eq!(&*v, &[0, 0, 0]);
            drop(v);
        }

        // Same as `test_extend_vec_zeroed`, but with a zero-sized element
        // type, which exercises the "fake allocation" code paths.
        #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
        #[test]
        fn test_extend_vec_zeroed_zst() {
            // Test extending when there is an existing (fake) allocation.
            let mut v = vec![(), (), ()];
            <()>::extend_vec_zeroed(&mut v, 3).unwrap();
            assert_eq!(v.len(), 6);
            assert_eq!(&*v, &[(), (), (), (), (), ()]);
            drop(v);

            // Test extending when there is no existing (fake) allocation.
            let mut v: Vec<()> = Vec::new();
            <()>::extend_vec_zeroed(&mut v, 3).unwrap();
            assert_eq!(&*v, &[(), (), ()]);
            drop(v);
        }

        // `insert_vec_zeroed` splices zeroed elements at an arbitrary
        // position; cover the start/middle/end positions and the empty-vector
        // case.
        #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
        #[test]
        fn test_insert_vec_zeroed() {
            // Insert at start (no existing allocation).
            let mut v: Vec<u64> = Vec::new();
            u64::insert_vec_zeroed(&mut v, 0, 2).unwrap();
            assert_eq!(v.len(), 2);
            assert_eq!(&*v, &[0, 0]);
            drop(v);

            // Insert at start.
            let mut v = vec![100u64, 200, 300];
            u64::insert_vec_zeroed(&mut v, 0, 2).unwrap();
            assert_eq!(v.len(), 5);
            assert_eq!(&*v, &[0, 0, 100, 200, 300]);
            drop(v);

            // Insert at middle.
            let mut v = vec![100u64, 200, 300];
            u64::insert_vec_zeroed(&mut v, 1, 1).unwrap();
            assert_eq!(v.len(), 4);
            assert_eq!(&*v, &[100, 0, 200, 300]);
            drop(v);

            // Insert at end.
            let mut v = vec![100u64, 200, 300];
            u64::insert_vec_zeroed(&mut v, 3, 1).unwrap();
            assert_eq!(v.len(), 4);
            assert_eq!(&*v, &[100, 200, 300, 0]);
            drop(v);
        }

        // Same as `test_insert_vec_zeroed`, but with a zero-sized element
        // type.
        #[cfg(not(no_zerocopy_panic_in_const_and_vec_try_reserve_1_57_0))]
        #[test]
        fn test_insert_vec_zeroed_zst() {
            // Insert at start (no existing fake allocation).
            let mut v: Vec<()> = Vec::new();
            <()>::insert_vec_zeroed(&mut v, 0, 2).unwrap();
            assert_eq!(v.len(), 2);
            assert_eq!(&*v, &[(), ()]);
            drop(v);

            // Insert at start.
            let mut v = vec![(), (), ()];
            <()>::insert_vec_zeroed(&mut v, 0, 2).unwrap();
            assert_eq!(v.len(), 5);
            assert_eq!(&*v, &[(), (), (), (), ()]);
            drop(v);

            // Insert at middle.
            let mut v = vec![(), (), ()];
            <()>::insert_vec_zeroed(&mut v, 1, 1).unwrap();
            assert_eq!(v.len(), 4);
            assert_eq!(&*v, &[(), (), (), ()]);
            drop(v);

            // Insert at end.
            let mut v = vec![(), (), ()];
            <()>::insert_vec_zeroed(&mut v, 3, 1).unwrap();
            assert_eq!(v.len(), 4);
            assert_eq!(&*v, &[(), (), (), ()]);
            drop(v);
        }

        #[test]
        fn test_new_box_zeroed() {
            assert_eq!(u64::new_box_zeroed(), Ok(Box::new(0)));
        }

        // Exercise the allocation path for a large array type; the result is
        // simply dropped, as we only care that construction completes.
        #[test]
        fn test_new_box_zeroed_array() {
            drop(<[u32; 0x1000]>::new_box_zeroed());
        }

        #[test]
        fn test_new_box_zeroed_zst() {
            // This test exists in order to exercise unsafe code, especially
            // when running under Miri.
            #[allow(clippy::unit_cmp)]
            {
                assert_eq!(<()>::new_box_zeroed(), Ok(Box::new(())));
            }
        }

        // Boxed slices built from an explicit element count are zeroed,
        // correctly sized, and mutable.
        #[test]
        fn test_new_box_zeroed_with_elems() {
            let mut s: Box<[u64]> = <[u64]>::new_box_zeroed_with_elems(3).unwrap();
            assert_eq!(s.len(), 3);
            assert_eq!(&*s, &[0, 0, 0]);
            s[1] = 3;
            assert_eq!(&*s, &[0, 3, 0]);
        }

        #[test]
        fn test_new_box_zeroed_with_elems_empty() {
            let s: Box<[u64]> = <[u64]>::new_box_zeroed_with_elems(0).unwrap();
            assert_eq!(s.len(), 0);
        }

        // A boxed slice of ZSTs still tracks its element count: in-bounds
        // indexing works and out-of-bounds lookups return `None`.
        #[test]
        fn test_new_box_zeroed_with_elems_zst() {
            let mut s: Box<[()]> = <[()]>::new_box_zeroed_with_elems(3).unwrap();
            assert_eq!(s.len(), 3);
            assert!(s.get(10).is_none());
            // This test exists in order to exercise unsafe code, especially
            // when running under Miri.
            #[allow(clippy::unit_cmp)]
            {
                assert_eq!(s[1], ());
            }
            s[2] = ();
        }

        #[test]
        fn test_new_box_zeroed_with_elems_zst_empty() {
            let s: Box<[()]> = <[()]>::new_box_zeroed_with_elems(0).unwrap();
            assert_eq!(s.len(), 0);
        }

        // Requests whose total byte size overflows `usize` or exceeds
        // `isize::MAX` must fail with `AllocError` rather than panicking.
        #[test]
        fn new_box_zeroed_with_elems_errors() {
            assert_eq!(<[u16]>::new_box_zeroed_with_elems(usize::MAX), Err(AllocError));

            // One element more than `isize::MAX` bytes' worth of `u16`s.
            let max = <usize as core::convert::TryFrom<_>>::try_from(isize::MAX).unwrap();
            assert_eq!(
                <[u16]>::new_box_zeroed_with_elems((max / mem::size_of::<u16>()) + 1),
                Err(AllocError)
            );
        }
    }
6838
    // Smoke-test the deprecated pre-0.8 method names (`ref_from`, `mut_from`,
    // `read_from`, `slice_from_{prefix,suffix}`,
    // `mut_slice_from_{prefix,suffix}`), which return `Option` rather than
    // `Result`.
    #[test]
    #[allow(deprecated)]
    fn test_deprecated_from_bytes() {
        let val = 0u32;
        let bytes = val.as_bytes();

        assert!(u32::ref_from(bytes).is_some());
        // mut_from needs mut bytes
        let mut val = 0u32;
        let mut_bytes = val.as_mut_bytes();
        assert!(u32::mut_from(mut_bytes).is_some());

        assert!(u32::read_from(bytes).is_some());

        // With an element count of 0, the parsed slice is empty and all four
        // source bytes are returned as the unconsumed remainder.
        let (slc, rest) = <u32>::slice_from_prefix(bytes, 0).unwrap();
        assert!(slc.is_empty());
        assert_eq!(rest.len(), 4);

        let (rest, slc) = <u32>::slice_from_suffix(bytes, 0).unwrap();
        assert!(slc.is_empty());
        assert_eq!(rest.len(), 4);

        let (slc, rest) = <u32>::mut_slice_from_prefix(mut_bytes, 0).unwrap();
        assert!(slc.is_empty());
        assert_eq!(rest.len(), 4);

        let (rest, slc) = <u32>::mut_slice_from_suffix(mut_bytes, 0).unwrap();
        assert!(slc.is_empty());
        assert_eq!(rest.len(), 4);
    }
6869
6870    #[test]
6871    fn test_try_ref_from_prefix_suffix() {
6872        use crate::util::testutil::Align;
6873        let bytes = &Align::<[u8; 4], u32>::new([0u8; 4]).t[..];
6874        let (r, rest): (&u32, &[u8]) = u32::try_ref_from_prefix(bytes).unwrap();
6875        assert_eq!(*r, 0);
6876        assert_eq!(rest.len(), 0);
6877
6878        let (rest, r): (&[u8], &u32) = u32::try_ref_from_suffix(bytes).unwrap();
6879        assert_eq!(*r, 0);
6880        assert_eq!(rest.len(), 0);
6881    }
6882
6883    #[test]
6884    fn test_raw_dangling() {
6885        use crate::util::AsAddress;
6886        let ptr: NonNull<u32> = u32::raw_dangling();
6887        assert_eq!(AsAddress::addr(ptr), 1);
6888
6889        let ptr: NonNull<[u32]> = <[u32]>::raw_dangling();
6890        assert_eq!(AsAddress::addr(ptr), 1);
6891    }
6892
6893    #[test]
6894    fn test_try_ref_from_prefix_with_elems() {
6895        use crate::util::testutil::Align;
6896        let bytes = &Align::<[u8; 8], u32>::new([0u8; 8]).t[..];
6897        let (r, rest): (&[u32], &[u8]) = <[u32]>::try_ref_from_prefix_with_elems(bytes, 2).unwrap();
6898        assert_eq!(r.len(), 2);
6899        assert_eq!(rest.len(), 0);
6900    }
6901
6902    #[test]
6903    fn test_try_ref_from_suffix_with_elems() {
6904        use crate::util::testutil::Align;
6905        let bytes = &Align::<[u8; 8], u32>::new([0u8; 8]).t[..];
6906        let (rest, r): (&[u8], &[u32]) = <[u32]>::try_ref_from_suffix_with_elems(bytes, 2).unwrap();
6907        assert_eq!(r.len(), 2);
6908        assert_eq!(rest.len(), 0);
6909    }
6910}