
Struct Segment 

Source
#[repr(transparent)]
pub struct Segment<M: AnyFrameMeta + ?Sized> { pub range: Range<Paddr>, pub _marker: PhantomData<M>, }

A contiguous range of homogeneous physical memory frames.

This is a handle to multiple contiguous frames. It is more lightweight than owning an array of frame handles.

Ownership is achieved through the reference-counting mechanism of frames. When constructing a Segment, the frame handles are created and then forgotten, leaving the reference counts incremented. When dropping a Segment, the frame handles are restored and dropped, decrementing the reference counts.

The metadata of all the frames is homogeneous, i.e., of the same type.
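
As a rough intuition, the following self-contained sketch (not this crate's code) shows how a segment's page-aligned physical range maps onto the frames whose reference counts it holds; PAGE_SIZE is assumed to be a 4 KiB base page purely for illustration.

const PAGE_SIZE: usize = 4096; // assumed base page size, for illustration only
type Paddr = usize;

/// Yields the start address of every frame covered by `range`; the segment
/// holds one reference count per such frame.
fn covered_frames(range: core::ops::Range<Paddr>) -> impl Iterator<Item = Paddr> {
    debug_assert!(range.start % PAGE_SIZE == 0 && range.end % PAGE_SIZE == 0);
    (range.start..range.end).step_by(PAGE_SIZE)
}

fn main() {
    // A 4-frame segment starting at physical address 0x1_0000.
    let frames: Vec<Paddr> = covered_frames(0x1_0000..0x1_4000).collect();
    assert_eq!(frames, vec![0x1_0000, 0x1_1000, 0x1_2000, 0x1_3000]);
}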

Fields§

§range: Range<Paddr>

The physical address range of the segment.

§_marker: PhantomData<M>

Marker for the metadata type.

Implementations§

Source§

impl<M: AnyFrameMeta + ?Sized> Segment<M>

Source

pub open spec fn inv_with(&self, owner: &SegmentOwner<M>) -> bool

{
    &&& self.inv()
    &&& owner.perms.len() * PAGE_SIZE() == self.range.end - self.range.start
    &&& forall |i: int| {
        0 <= i < owner.perms.len() as int
            ==> owner.perms[i].addr()
                == meta_addr(
                    frame_to_index((self.range.start + i * PAGE_SIZE()) as usize),
                )
    }

}
Source§

impl<M: AnyFrameMeta> Segment<M>

Source

pub open spec fn start_paddr_spec(&self) -> Paddr

recommends
self.inv(),
{ self.range.start }
Source

pub open spec fn end_paddr_spec(&self) -> Paddr

recommends
self.inv(),
{ self.range.end }
Source

pub open spec fn size_spec(&self) -> usize

recommends
self.inv(),
{ (self.range.end - self.range.start) as usize }
Source

pub open spec fn nrpage_spec(&self) -> usize

recommends
self.inv(),
{ self.size_spec() / PAGE_SIZE() }
Source

pub open spec fn split_spec(self, offset: usize) -> (Self, Self)

recommends
self.inv(),
offset % PAGE_SIZE() == 0,
0 < offset < self.size_spec(),
{
    let at = (self.range.start + offset) as usize;
    let idx = at / PAGE_SIZE();
    (
        Self {
            range: self.range.start..at,
            _marker: core::marker::PhantomData,
        },
        Self {
            range: at..self.range.end,
            _marker: core::marker::PhantomData,
        },
    )
}
Source

pub exec fn start_paddr(&self) -> res : Paddr

requires
self.inv(),

Gets the start physical address of the contiguous frames.

Source

pub exec fn end_paddr(&self) -> res : Paddr

requires
self.inv(),

Gets the end physical address of the contiguous frames.

Source

pub exec fn size(&self) -> res : usize

requires
self.inv(),

Gets the length in bytes of the contiguous frames.

Source

pub open spec fn from_unused_requires( regions: MetaRegionOwners, range: Range<Paddr>, metadata_fn: impl Fn(Paddr) -> (Paddr, M), ) -> bool

{
    &&& regions.inv()
    &&& range.start % PAGE_SIZE() == 0
    &&& range.end % PAGE_SIZE() == 0
    &&& range.start <= range.end < MAX_PADDR()
    &&& forall |paddr_in: Paddr| {
        (range.start <= paddr_in < range.end && paddr_in % PAGE_SIZE() == 0)
            ==> {
                &&& metadata_fn.requires((paddr_in,))

            }
    }
    &&& forall |paddr_in: Paddr, paddr_out: Paddr, m: M| {
        metadata_fn.ensures((paddr_in,), (paddr_out, m))
            ==> {
                &&& paddr_out < MAX_PADDR()
                &&& paddr_out % PAGE_SIZE() == 0
                &&& paddr_in == paddr_out
                &&& regions.slots.contains_key(frame_to_index(paddr_out))
                &&& !regions.dropped_slots.contains_key(frame_to_index(paddr_out))
                &&& regions.slot_owners[frame_to_index(paddr_out)].usage is Unused
                &&& regions.slot_owners[frame_to_index(paddr_out)].in_list.points_to(0)

            }
    }

}
Source

pub open spec fn from_unused_ensures( old_regions: MetaRegionOwners, new_regions: MetaRegionOwners, owner: Option<SegmentOwner<M>>, range: Range<Paddr>, metadata_fn: impl Fn(Paddr) -> (Paddr, M), r: Result<Self, GetFrameError>, ) -> bool

{
    &&& r matches Ok(
        r,
    ) ==> {
        &&& new_regions.inv()
        &&& r.range.start == range.start
        &&& r.range.end == range.end
        &&& owner matches Some(
            owner,
        ) && {
            &&& r.inv_with(&owner)
            &&& forall |i: int| {
                0 <= i < owner.perms.len() as int
                    ==> {
                        &&& owner.perms[i].addr()
                            == meta_addr(
                                frame_to_index_spec(
                                    (range.start + i * PAGE_SIZE()) as usize,
                                ),
                            )

                    }
            }
            &&& new_regions.paddr_range_not_in_region(range)

        }

    }

}
Source

pub open spec fn split_requires(self, owner: SegmentOwner<M>, offset: usize) -> bool

{
    &&& self.inv_with(&owner)
    &&& offset % PAGE_SIZE() == 0
    &&& 0 < offset < self.size()

}
Source

pub open spec fn split_ensures( self, offset: usize, lhs: Self, rhs: Self, ori_owner: SegmentOwner<M>, lhs_owner: SegmentOwner<M>, rhs_owner: SegmentOwner<M>, ) -> bool

{
    &&& lhs.inv_with(&lhs_owner)
    &&& rhs.inv_with(&rhs_owner)
    &&& (lhs, rhs) == self.split_spec(offset)
    &&& ori_owner.perms =~= (lhs_owner.perms + rhs_owner.perms)

}
Source

pub open spec fn into_raw_requires( self, regions: MetaRegionOwners, owner: SegmentOwner<M>, ) -> bool

{
    &&& self.inv_with(&owner)
    &&& regions.inv()
    &&& owner.inv()
    &&& owner.is_disjoint_with_meta_region(&regions)

}
Source

pub open spec fn into_raw_ensures( self, regions: MetaRegionOwners, owner: SegmentOwner<M>, r: Range<Paddr>, ) -> bool

{
    &&& r == self.range
    &&& regions.inv()
    &&& regions.paddr_range_in_dropped_region(self.range)

}
Source

pub open spec fn from_raw_requires(regions: MetaRegionOwners, range: Range<Paddr>) -> bool

{
    &&& regions.inv()
    &&& regions.paddr_range_in_dropped_region(range)
    &&& range.start % PAGE_SIZE() == 0
    &&& range.end % PAGE_SIZE() == 0
    &&& range.start < range.end < MAX_PADDR()

}
Source

pub open spec fn from_raw_ensures( self, old_regions: MetaRegionOwners, new_regions: MetaRegionOwners, owner: SegmentOwner<M>, range: Range<Paddr>, ) -> bool

{
    &&& self.inv_with(&owner)
    &&& self.range == range
    &&& new_regions.inv()
    &&& new_regions.paddr_range_not_in_region(range)

}
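
Read together, the into_raw and from_raw specifications above describe a round trip: into_raw hands back the segment's physical range while the covered frames move into the dropped region, and from_raw reconstructs a segment over such a range. The standalone sketch below shows only this range-level round trip; the Verus-tracked permissions and the MetaRegionOwners bookkeeping are deliberately omitted, and all names are illustrative rather than this crate's API.

type Paddr = usize;

struct RawSegment { range: core::ops::Range<Paddr> }

// Per into_raw_ensures, the returned range equals the segment's range.
fn into_raw(seg: RawSegment) -> core::ops::Range<Paddr> { seg.range }

// Per from_raw_ensures, the reconstructed segment covers exactly `range`.
fn from_raw(range: core::ops::Range<Paddr>) -> RawSegment { RawSegment { range } }

fn main() {
    let seg = RawSegment { range: 0x3_0000..0x3_4000 };
    let raw = into_raw(seg);
    let restored = from_raw(raw.clone());
    assert_eq!(restored.range, raw);
}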
Source

pub open spec fn slice_requires(self, owner: SegmentOwner<M>, range: Range<Paddr>) -> bool

{
    &&& self.inv_with(&owner)
    &&& range.start % PAGE_SIZE() == 0
    &&& range.end % PAGE_SIZE() == 0
    &&& self.range.start + range.start <= self.range.start + range.end <= self.range.end

}
Source

pub open spec fn slice_ensures( self, owner: SegmentOwner<M>, range: Range<Paddr>, res: Self, ) -> bool

{
    &&& res
        .inv_with(
            &SegmentOwner {
                perms: owner
                    .perms
                    .subrange(
                        (range.start / PAGE_SIZE()) as int,
                        (range.end / PAGE_SIZE()) as int,
                    ),
            },
        )

}
Source

pub open spec fn next_requires(self, regions: MetaRegionOwners) -> bool

{
    &&& self.inv()
    &&& regions.inv()
    &&& regions.dropped_slots.contains_key(frame_to_index(self.range.start))
    &&& !regions.slots.contains_key(frame_to_index(self.range.start))

}
Source

pub open spec fn next_ensures( old_self: Self, new_self: Self, old_regions: MetaRegionOwners, new_regions: MetaRegionOwners, res: Option<Frame<M>>, ) -> bool

{
    &&& new_regions.inv()
    &&& new_self.inv()
    &&& match res {
        None => (
            &&& new_self.range.start == old_self.range.end

        ),
        Some(f) => (
            &&& new_self.range.start == old_self.range.start + PAGE_SIZE()
            &&& f.paddr() == old_self.range.start
            &&& forall |i: usize| {
                &&& i != frame_to_index(f.paddr())
                    ==> old_regions.dropped_slots[i] == new_regions.dropped_slots[i]
                &&& i == frame_to_index(f.paddr())
                    ==> !new_regions.dropped_slots.contains_key(i)

            }

        ),
    }

}
Source

pub exec fn from_unused( range: Range<Paddr>, metadata_fn: impl Fn(Paddr) -> (Paddr, M), ) -> r : Result<Self, GetFrameError>

with
Tracked(regions): Tracked<&mut MetaRegionOwners>,
->
owner: Tracked<Option<SegmentOwner<M>>>,
requires
Self::from_unused_requires(*old(regions), range, metadata_fn),
ensures
Self::from_unused_ensures(*old(regions), *regions, owner@, range, metadata_fn, r),

Creates a new Segment from unused frames.

The caller must provide a closure to initialize metadata for all the frames. The closure receives the physical address of the frame and returns the metadata, which is similar to core::array::from_fn.

It returns an error if:

  • any of the frames cannot be created; the error indicates the specific reason.
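
For illustration, the following self-contained sketch mimics the closure contract pinned down by from_unused_requires and from_unused_ensures: the closure receives each frame's physical address and must return that same page-aligned address together with the metadata for the frame. MyMeta and the 4 KiB PAGE_SIZE are assumptions invented for this example, not this crate's API.

const PAGE_SIZE: usize = 4096; // assumed base page size, for illustration only
type Paddr = usize;

#[derive(Debug, PartialEq)]
struct MyMeta { origin: Paddr } // hypothetical metadata type

fn metadata_fn(paddr: Paddr) -> (Paddr, MyMeta) {
    // `paddr_out` must equal `paddr_in`, as required by `from_unused_ensures`.
    (paddr, MyMeta { origin: paddr })
}

fn main() {
    // Over a 2-frame range, the closure is invoked once per frame address.
    for addr in (0x8000usize..0xA000).step_by(PAGE_SIZE) {
        let (out, meta) = metadata_fn(addr);
        assert_eq!(out, addr);
        assert_eq!(meta, MyMeta { origin: addr });
    }
}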
Source

pub exec fn split(self, offset: usize) -> r : (Self, Self)

with
Tracked(owner): Tracked<SegmentOwner<M>>,
Tracked(regions): Tracked<&mut MetaRegionOwners>,
->
frame_perms: (Tracked<SegmentOwner<M>>, Tracked<SegmentOwner<M>>),
requires
Self::split_requires(self, owner, offset),
ensures
Self::split_ensures(self, offset, r.0, r.1, owner, frame_perms.0@, frame_perms.1@),

Splits the frames into two at the given byte offset from the start.

The resulting frames cannot be empty, so the offset can be neither zero nor the length of the frames.

§Panics

The function panics if the offset is out of bounds, at either end, or not base-page-aligned.
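
A self-contained sketch of the splitting arithmetic (cf. split_spec above) and of the offsets accepted without panicking, assuming a 4 KiB base page:

const PAGE_SIZE: usize = 4096; // assumed base page size, for illustration only
type Paddr = usize;

// Splitting at a page-aligned, strictly interior byte offset yields two
// adjacent sub-ranges that together cover the original range exactly.
fn split_range(
    range: core::ops::Range<Paddr>,
    offset: usize,
) -> (core::ops::Range<Paddr>, core::ops::Range<Paddr>) {
    assert!(offset % PAGE_SIZE == 0, "offset must be base-page-aligned");
    assert!(offset > 0 && offset < range.end - range.start, "offset must be strictly interior");
    let at = range.start + offset;
    (range.start..at, at..range.end)
}

fn main() {
    let (lhs, rhs) = split_range(0x1_0000..0x1_4000, 2 * PAGE_SIZE);
    assert_eq!(lhs, 0x1_0000..0x1_2000);
    assert_eq!(rhs, 0x1_2000..0x1_4000);
}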

Source

pub exec fn slice(&self, range: &Range<Paddr>) -> r : Self

with
Tracked(owner): Tracked<&SegmentOwner<M>>,
requires
Self::slice_requires(*self, *owner, *range),
ensures
Self::slice_ensures(*self, *owner, *range, r),

Gets an extra handle to the frames in the byte offset range.

The sliced byte offset range is indexed by the offset from the start of the contiguous frames. The resulting frames hold extra reference counts.

§Panics

The function panics if the byte offset range is out of bounds, or if either end of the byte offset range is not base-page-aligned.
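
A self-contained sketch of how the relative byte offset range maps to an absolute physical range (cf. slice_requires and slice_ensures above), assuming a 4 KiB base page:

const PAGE_SIZE: usize = 4096; // assumed base page size, for illustration only
type Paddr = usize;

// The offset range is relative to the segment's start; both ends must be
// base-page-aligned and stay within the segment.
fn sliced_range(
    segment: core::ops::Range<Paddr>,
    rel: core::ops::Range<usize>,
) -> core::ops::Range<Paddr> {
    assert!(rel.start % PAGE_SIZE == 0 && rel.end % PAGE_SIZE == 0);
    assert!(rel.start <= rel.end && segment.start + rel.end <= segment.end);
    segment.start + rel.start..segment.start + rel.end
}

fn main() {
    // Slicing the middle two frames out of a 4-frame segment.
    let abs = sliced_range(0x1_0000..0x1_4000, PAGE_SIZE..3 * PAGE_SIZE);
    assert_eq!(abs, 0x1_1000..0x1_3000);
}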

Source

pub exec fn next(&mut self) -> res : Option<Frame<M>>

with
Tracked(regions): Tracked<&mut MetaRegionOwners>,
Tracked(owner): Tracked<&mut SegmentOwner<M>>,
requires
Self::next_requires(*old(self), *old(regions)),
old(owner).perms.len() > 0,
ensures
Self::next_ensures(*old(self), *self, *old(regions), *regions, res),

Gets the next frame in the segment (raw), if any.

The segment here must be “non-active”, i.e., there is no extra Verus-tracked permission (SegmentOwner) associated with it; the Segment then becomes a kind of “zombie” container through which we can only iterate over the frames and take each frame out of the regions instead.
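
A self-contained sketch of the cursor behaviour that next_ensures describes, assuming a 4 KiB base page: each call hands out the frame at the current start address and advances the start by one page, and None signals that the range is exhausted.

const PAGE_SIZE: usize = 4096; // assumed base page size, for illustration only
type Paddr = usize;

struct RawSegment { range: core::ops::Range<Paddr> }

impl RawSegment {
    // Mirrors next_ensures: `Some(frame)` returns the old start and advances
    // the start by one page; `None` means the range is exhausted.
    fn next(&mut self) -> Option<Paddr> {
        if self.range.start >= self.range.end {
            return None;
        }
        let frame = self.range.start;
        self.range.start += PAGE_SIZE;
        Some(frame)
    }
}

fn main() {
    let mut seg = RawSegment { range: 0x2_0000..0x2_2000 };
    assert_eq!(seg.next(), Some(0x2_0000));
    assert_eq!(seg.next(), Some(0x2_1000));
    assert_eq!(seg.next(), None);
}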

Trait Implementations§

Source§

impl<M: AnyFrameMeta + ?Sized> Inv for Segment<M>

Source§

open spec fn inv(self) -> bool

{
    &&& self.range.start % PAGE_SIZE() == 0
    &&& self.range.end % PAGE_SIZE() == 0
    &&& self.range.start <= self.range.end < MAX_PADDR()

}

The invariant of a Segment.

Source§

impl<M: AnyFrameMeta + ?Sized> Undroppable for Segment<M>

Source§

open spec fn constructor_requires(self, s: Self::State) -> bool

{ true }
Source§

open spec fn constructor_ensures(self, s0: Self::State, s1: Self::State) -> bool

{ s0 =~= s1 }
Source§

proof fn constructor_spec(self, tracked s: &mut Self::State)

Source§

type State = MetaRegionOwners

Auto Trait Implementations§

§

impl<M> Freeze for Segment<M>

§

impl<M> RefUnwindSafe for Segment<M>
where M: RefUnwindSafe,

§

impl<M> Send for Segment<M>
where M: Send,

§

impl<M> Sync for Segment<M>
where M: Sync,

§

impl<M> Unpin for Segment<M>
where M: Unpin,

§

impl<M> UnwindSafe for Segment<M>
where M: UnwindSafe,

Blanket Implementations§

Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

§

impl<T, VERUS_SPEC__A> FromSpec<T> for VERUS_SPEC__A
where VERUS_SPEC__A: From<T>,

§

fn obeys_from_spec() -> bool

§

fn from_spec(v: T) -> VERUS_SPEC__A

Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

§

impl<T, VERUS_SPEC__A> IntoSpec<T> for VERUS_SPEC__A
where VERUS_SPEC__A: Into<T>,

§

fn obeys_into_spec() -> bool

§

fn into_spec(self) -> T

§

impl<T, U> IntoSpecImpl<U> for T
where U: From<T>,

§

fn obeys_into_spec() -> bool

§

fn into_spec(self) -> U

Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
§

impl<T, VERUS_SPEC__A> TryFromSpec<T> for VERUS_SPEC__A
where VERUS_SPEC__A: TryFrom<T>,

§

fn obeys_try_from_spec() -> bool

§

fn try_from_spec( v: T, ) -> Result<VERUS_SPEC__A, <VERUS_SPEC__A as TryFrom<T>>::Error>

Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
§

impl<T, VERUS_SPEC__A> TryIntoSpec<T> for VERUS_SPEC__A
where VERUS_SPEC__A: TryInto<T>,

§

fn obeys_try_into_spec() -> bool

§

fn try_into_spec(self) -> Result<T, <VERUS_SPEC__A as TryInto<T>>::Error>

§

impl<T, U> TryIntoSpecImpl<U> for T
where U: TryFrom<T>,

§

fn obeys_try_into_spec() -> bool

§

fn try_into_spec(self) -> Result<U, <U as TryFrom<T>>::Error>