// ostd/mm/frame/segment.rs
1// SPDX-License-Identifier: MPL-2.0
2//! A contiguous range of frames.
3use vstd::prelude::*;
4use vstd_extra::seq_extra::seq_tracked_split_at;
5
6use core::{fmt::Debug, ops::Range};
7
8use crate::mm::frame::{has_safe_slot, untyped::AnyUFrameMeta, Frame};
9use crate::mm::page_table::RCClone;
10
11use vstd_extra::cast_ptr::*;
12use vstd_extra::cast_ptr::*;
13use vstd_extra::ownership::*;
14
15use super::meta::mapping::{frame_to_index, frame_to_index_spec, frame_to_meta, meta_addr};
16use super::{AnyFrameMeta, GetFrameError, MetaPerm, MetaSlot};
17use crate::mm::{paddr_to_vaddr, Paddr, PagingLevel, Vaddr};
18use crate::specs::arch::mm::{MAX_NR_PAGES, MAX_PADDR, PAGE_SIZE};
19use crate::specs::mm::frame::meta_owners::*;
20use crate::specs::mm::frame::meta_region_owners::MetaRegionOwners;
21use crate::specs::mm::virt_mem_newer::MemView;
22use vstd_extra::drop_tracking::*;
23
24verus! {
25
/// A contiguous range of homogeneous physical memory frames.
///
/// This is a handle to multiple contiguous frames. It will be more lightweight
/// than owning an array of frame handles.
///
/// The ownership is achieved by the reference counting mechanism of frames.
/// When constructing a [`Segment`], the frame handles are created then
/// forgotten, leaving the reference count. When dropping it, the frame
/// handles are restored and dropped, decrementing the reference count.
///
/// All the metadata of the frames are homogeneous, i.e., they are of the same
/// type.
// `repr(transparent)`: `_marker` is a ZST, so a `Segment` has the same layout
// as its `range` field.
#[repr(transparent)]
pub struct Segment<M: AnyFrameMeta + ?Sized> {
    /// The physical address range of the segment.
    pub range: Range<Paddr>,
    /// Marker for the metadata type.
    pub _marker: core::marker::PhantomData<M>,
}
45
impl<M: AnyFrameMeta + ?Sized> TrackDrop for Segment<M> {
    /// Ghost state threaded through construction and drop: the global
    /// meta-region owners paired with this segment's own permission tokens.
    type State = (MetaRegionOwners, SegmentOwner<M>);

    /// Constructing a [`Segment`] imposes no extra obligation on the state.
    open spec fn constructor_requires(self, s: Self::State) -> bool {
        true
    }

    /// Construction leaves the ghost state unchanged.
    open spec fn constructor_ensures(self, s0: Self::State, s1: Self::State) -> bool {
        s0 =~= s1
    }

    proof fn constructor_spec(self, tracked s: &mut Self::State) {
    }

    /// What must hold for this [`Segment`] to be dropped soundly.
    open spec fn drop_requires(self, s: Self::State) -> bool {
        let (regions, owner) = s;
        &&& self.inv_with(&owner)
        &&& owner.inv()
        &&& regions.inv()
        // Each frame's slot must be accessible with the right properties
        &&& forall|i: int|
            #![trigger owner.perms[i]]
            0 <= i < owner.perms.len() as int ==> {
                let idx = frame_to_index((self.range.start + i * PAGE_SIZE) as usize);
                &&& regions.slot_owners.contains_key(idx)
                // The slot is live: a positive reference count that is not the
                // sentinel marking an unused slot.
                &&& regions.slot_owners[idx].inner_perms.ref_count.value() > 0
                &&& regions.slot_owners[idx].inner_perms.ref_count.value() != super::meta::REF_COUNT_UNUSED
                &&& regions.slot_owners[idx].raw_count == 0
                &&& regions.slot_owners[idx].self_addr == meta_addr(idx)
                // The perm's PointsTo matches the slot
                &&& owner.perms[i].points_to.is_init()
                &&& owner.perms[i].points_to.value().wf(regions.slot_owners[idx])
                // Last-ref condition: when this handle holds the final
                // reference, the slot's storage is still initialized and the
                // frame is not linked into any list.
                &&& regions.slot_owners[idx].inner_perms.ref_count.value() == 1 ==> {
                    &&& regions.slot_owners[idx].inner_perms.storage.is_init()
                    &&& regions.slot_owners[idx].inner_perms.in_list.value() == 0
                }
            }
        // Distinct slot indices for different frames
        &&& forall|i: int, j: int|
            #![trigger owner.perms[i], owner.perms[j]]
            0 <= i < j < owner.perms.len() as int ==>
                frame_to_index((self.range.start + i * PAGE_SIZE) as usize)
                    != frame_to_index((self.range.start + j * PAGE_SIZE) as usize)
    }

    /// Dropping guarantees nothing beyond the trait's baseline contract.
    open spec fn drop_ensures(self, s0: Self::State, s1: Self::State) -> bool {
        true
    }

    proof fn drop_tracked(self, tracked s: &mut Self::State) {
    }
}
99
/// A [`SegmentOwner<M>`] holds the permission tokens for all frames in the
/// [`Segment<M>`] for verification purposes.
pub tracked struct SegmentOwner<M: AnyFrameMeta + ?Sized> {
    /// The permissions for all frames in the segment, which must be well-formed
    /// and valid. `perms[i]` corresponds to the `i`-th frame from the start of
    /// the segment's physical range (see [`Segment::inv_with`]).
    pub perms: Seq<MetaPerm<M>>,
}
106
impl<M: AnyFrameMeta + ?Sized> Inv for Segment<M> {
    /// The invariant of a [`Segment`]:
    ///
    /// - the physical addresses of the frames are aligned and within bounds.
    /// - the range is well-formed, i.e., the start is less than or equal to the end.
    open spec fn inv(self) -> bool {
        &&& self.range.start % PAGE_SIZE == 0
        &&& self.range.end % PAGE_SIZE == 0
        // Note: the range may be empty (start == end), but must end below MAX_PADDR.
        &&& self.range.start <= self.range.end < MAX_PADDR
    }
}
118
impl<M: AnyFrameMeta + ?Sized> Inv for SegmentOwner<M> {
    /// The invariant of a [`SegmentOwner`]:
    ///
    /// - the permissions are well-formed and valid;
    /// - the physical addresses of the permissions are aligned and within bounds.
    open spec fn inv(self) -> bool {
        &&& forall|i: int|
            #![trigger self.perms[i]]
            0 <= i < self.perms.len() as int ==> {
                &&& self.perms[i].addr() % PAGE_SIZE == 0
                &&& self.perms[i].addr() < MAX_PADDR
                // Each permission is self-consistent and points to an
                // initialized meta slot.
                &&& self.perms[i].wf(&self.perms[i].inner_perms)
                &&& self.perms[i].is_init()
            }
    }
}
135
impl<M: AnyFrameMeta + ?Sized> Segment<M> {
    /// The invariant of a [`Segment`] with a specific owner, such that besides [`Self::inv`]:
    ///
    /// - the number of permissions in the owner matches the number of frames in the segment;
    /// - the physical address of each permission matches the corresponding frame in the segment.
    ///
    /// Interested readers are encouraged to see [`frame_to_index`] and [`meta_addr`] for how
    /// we convert between physical addresses and meta region indices.
    pub open spec fn inv_with(&self, owner: &SegmentOwner<M>) -> bool {
        &&& self.inv()
        // One permission per page: perms.len() * PAGE_SIZE equals the byte
        // length of the segment.
        &&& owner.perms.len() * PAGE_SIZE == self.range.end - self.range.start
        &&& forall|i: int|
            #![trigger owner.perms[i]]
            0 <= i < owner.perms.len() as int ==> owner.perms[i].addr() == meta_addr(
                frame_to_index((self.range.start + i * PAGE_SIZE) as usize),
            )
    }

    /// Whether a [`MemView`] covers the segment through the kernel direct mapping.
    ///
    /// This predicate only describes the virtual-to-physical relation and the
    /// presence of initialized backing frame contents.
    pub open spec fn kernel_mem_view_covers(&self, view: &MemView) -> bool {
        &&& self.inv()
        &&& view.mappings.finite()
        &&& view.mappings_are_disjoint()
        // Every virtual address in the direct mapping of the segment
        // translates to an initialized byte of some tracked memory object.
        &&& forall|vaddr: Vaddr|
            #![trigger view.addr_transl(vaddr)]
            paddr_to_vaddr(self.range.start) <= vaddr < paddr_to_vaddr(self.range.start)
                + self.range.end - self.range.start ==> {
                &&& view.addr_transl(vaddr) is Some
                &&& view.memory.contains_key(view.addr_transl(vaddr).unwrap().0)
                &&& view.memory[view.addr_transl(vaddr).unwrap().0].inv()
                &&& view.memory[view.addr_transl(vaddr).unwrap().0].contents[view.addr_transl(
                    vaddr,
                ).unwrap().1 as int] is Init
            }
        // Conversely, every physical address of the segment is reachable via
        // `paddr_to_vaddr`, lands inside its memory object, and its offset
        // within the object is consistent with the translation.
        &&& forall|paddr: Paddr|
            #![trigger paddr_to_vaddr(paddr)]
            self.range.start <= paddr < self.range.end ==> {
                let vaddr = paddr_to_vaddr(paddr);
                &&& view.addr_transl(vaddr) is Some
                &&& view.addr_transl(vaddr).unwrap().0 <= paddr
                &&& paddr < view.addr_transl(vaddr).unwrap().0 + view.memory[view.addr_transl(
                    vaddr,
                ).unwrap().0].size@
                &&& view.addr_transl(vaddr).unwrap().1 == paddr - view.addr_transl(vaddr).unwrap().0
                &&& view.memory.contains_key(view.addr_transl(vaddr).unwrap().0)
                &&& view.memory[view.addr_transl(vaddr).unwrap().0].inv()
                &&& view.memory[view.addr_transl(vaddr).unwrap().0].contents[view.addr_transl(
                    vaddr,
                ).unwrap().1 as int] is Init
            }
    }
}
191
impl<M: AnyFrameMeta + ?Sized> SegmentOwner<M> {
    /// Produces a kernel direct-mapping memory view for the segment.
    ///
    /// This is a proof bridge from segment ownership to the VM I/O memory-view
    /// model. It should eventually be justified by a real frame-content owner
    /// instead of metadata permissions alone.
    ///
    /// NOTE(review): marked `external_body`, so the postcondition is trusted
    /// (axiomatic) rather than verified.
    #[verifier::external_body]
    pub proof fn produce_kernel_mem_view(tracked &self, segment: Segment<M>) -> (tracked view:
        MemView)
        requires
            self.inv(),
            segment.inv_with(self),
        ensures
            segment.kernel_mem_view_covers(&view),
    {
        arbitrary()
    }

    /// Borrows a kernel direct-mapping memory view for the segment.
    ///
    /// This is the read-side counterpart of [`Self::produce_kernel_mem_view`],
    /// used when the VM I/O owner only needs a shared read view.
    ///
    /// NOTE(review): also `external_body`; the ensured coverage is trusted.
    #[verifier::external_body]
    pub proof fn borrow_kernel_mem_view<'a>(tracked &'a self, segment: Segment<M>) -> (tracked view:
        &'a MemView)
        requires
            self.inv(),
            segment.inv_with(self),
        ensures
            segment.kernel_mem_view_covers(view),
    {
        arbitrary()
    }
}
226
/// A contiguous range of homogeneous untyped physical memory frames that have any metadata.
///
/// In other words, the metadata of the frames are of the same type, and they
/// are untyped, but the type of metadata is not known at compile time. A
/// [`USegment`] as a parameter accepts any untyped segments.
///
/// The usage of this frame will not be changed while this object is alive.
pub type USegment = Segment<dyn AnyUFrameMeta>;
235
236#[verus_verify]
237impl<M: AnyFrameMeta + Repr<MetaSlotStorage> + OwnerOf> Segment<M> {
    /// Returns the starting physical address of the contiguous frames.
    ///
    /// Spec-mode counterpart of [`Self::start_paddr`].
    #[verifier::inline]
    pub open spec fn start_paddr_spec(&self) -> Paddr
        recommends
            self.inv(),
    {
        self.range.start
    }
246
    /// Returns the ending physical address of the contiguous frames.
    ///
    /// Spec-mode counterpart of [`Self::end_paddr`]; the end is exclusive.
    #[verifier::inline]
    pub open spec fn end_paddr_spec(&self) -> Paddr
        recommends
            self.inv(),
    {
        self.range.end
    }
255
    /// Returns the length in bytes of the contiguous frames.
    ///
    /// Under [`Self::inv`], `start <= end`, so the subtraction cannot
    /// underflow.
    #[verifier::inline]
    pub open spec fn size_spec(&self) -> usize
        recommends
            self.inv(),
    {
        (self.range.end - self.range.start) as usize
    }
264
    /// Returns the number of pages of the contiguous frames.
    ///
    /// Under [`Self::inv`], both range ends are page-aligned, so the integer
    /// division is exact.
    #[verifier::inline]
    pub open spec fn nrpage_spec(&self) -> usize
        recommends
            self.inv(),
    {
        self.size_spec() / PAGE_SIZE
    }
273
274    /// Splits the contiguous frames into two at the given byte offset from the start in spec mode.
275    pub open spec fn split_spec(self, offset: usize) -> (Self, Self)
276        recommends
277            self.inv(),
278            offset % PAGE_SIZE == 0,
279            0 < offset < self.size_spec(),
280    {
281        let at = (self.range.start + offset) as usize;
282        let idx = at / PAGE_SIZE;
283        (
284            Self { range: self.range.start..at, _marker: core::marker::PhantomData },
285            Self { range: at..self.range.end, _marker: core::marker::PhantomData },
286        )
287    }
288
    /// Gets the start physical address of the contiguous frames.
    ///
    /// Exec-mode twin of [`Self::start_paddr_spec`]; `when_used_as_spec`
    /// makes spec-mode callers use the spec version transparently.
    #[inline(always)]
    #[verifier::when_used_as_spec(start_paddr_spec)]
    pub fn start_paddr(&self) -> (res: Paddr)
        requires
            self.inv(),
        returns
            self.start_paddr_spec(),
    {
        self.range.start
    }
300
    /// Gets the end physical address of the contiguous frames.
    ///
    /// Exec-mode twin of [`Self::end_paddr_spec`]; the end is exclusive.
    #[inline(always)]
    #[verifier::when_used_as_spec(end_paddr_spec)]
    pub fn end_paddr(&self) -> (res: Paddr)
        requires
            self.inv(),
        returns
            self.end_paddr_spec(),
    {
        self.range.end
    }
312
    /// Gets the length in bytes of the contiguous frames.
    ///
    /// The `self.inv()` precondition guarantees `start <= end`, so the
    /// subtraction cannot underflow.
    #[inline(always)]
    #[verifier::when_used_as_spec(size_spec)]
    pub fn size(&self) -> (res: usize)
        requires
            self.inv(),
        returns
            self.size_spec(),
    {
        self.range.end - self.range.start
    }
324
    /// The wrapper for the precondition for [`Self::from_unused`]:
    ///
    /// - the metadata function must be well-formed and valid for all frames in the range;
    /// - the metadata function must ensure that the frames can be created and owned by the segment.
    /// - for any frame created via the closure `metadata_fn`, the corresponding slot in `regions`
    ///   must be unused and not dropped in the owner ([`MetaRegionOwners`]).
    pub open spec fn from_unused_requires(
        regions: MetaRegionOwners,
        range: Range<Paddr>,
        metadata_fn: impl Fn(Paddr) -> (Paddr, M),
    ) -> bool {
        &&& regions.inv()
        // The range is page-aligned and within physical-memory bounds.
        &&& range.start % PAGE_SIZE == 0
        &&& range.end % PAGE_SIZE == 0
        &&& range.start <= range.end < MAX_PADDR
        // The closure may be called on every page-aligned address in range.
        &&& forall|paddr_in: Paddr|
            (range.start <= paddr_in < range.end && paddr_in % PAGE_SIZE == 0) ==> {
                &&& metadata_fn.requires((paddr_in,))
            }
        // Whatever the closure returns: it echoes back a valid, aligned
        // address whose meta slot exists, is unused, and is not in any list.
        &&& forall|paddr_in: Paddr, paddr_out: Paddr, m: M|
            metadata_fn.ensures((paddr_in,), (paddr_out, m)) ==> {
                &&& paddr_out < MAX_PADDR
                &&& paddr_out % PAGE_SIZE == 0
                &&& paddr_in == paddr_out
                &&& regions.slots.contains_key(frame_to_index(paddr_out))
                &&& regions.slot_owners[frame_to_index(paddr_out)].usage is Unused
                &&& regions.slot_owners[frame_to_index(paddr_out)].inner_perms.in_list.points_to(0)
            }
    }
354
    /// The wrapper for the postcondition for [`Self::from_unused`]:
    ///
    /// - if the result is `Ok`, then the returned segment must satisfy the invariant with the owner;
    /// - the returned segment must have the same physical address range as the input;
    /// - the returned owner must be `Some` if the result is `Ok`, and the owner must satisfy the invariant;
    ///
    /// Note that nothing is guaranteed on the `Err` path.
    pub open spec fn from_unused_ensures(
        old_regions: MetaRegionOwners,
        new_regions: MetaRegionOwners,
        owner: Option<SegmentOwner<M>>,
        range: Range<Paddr>,
        metadata_fn: impl Fn(Paddr) -> (Paddr, M),
        r: Result<Self, GetFrameError>,
    ) -> bool {
        &&& r matches Ok(r) ==> {
            &&& new_regions.inv()
            &&& r.range.start == range.start
            &&& r.range.end == range.end
            &&& owner matches Some(owner) && {
                &&& r.inv_with(&owner)
                // Each permission addresses the meta slot of its frame.
                &&& forall|i: int|
                    #![trigger owner.perms[i]]
                    0 <= i < owner.perms.len() as int ==> {
                        &&& owner.perms[i].addr() == meta_addr(
                            frame_to_index_spec((range.start + i * PAGE_SIZE) as usize),
                        )
                    }
                // The slots of the created frames have been taken out of
                // `regions` (their permissions now live in `owner`).
                &&& forall|paddr: Paddr|
                    #![trigger frame_to_index_spec(paddr)]
                    (range.start <= paddr < range.end && paddr % PAGE_SIZE == 0)
                        ==> !new_regions.slots.contains_key(frame_to_index_spec(paddr))
            }
        }
    }
388
    /// The wrapper for the precondition for [`Self::split`]:
    ///
    /// - the segment must satisfy the invariant with the owner ([`Self::inv_with`])
    /// - the offset must be aligned and within bounds.
    pub open spec fn split_requires(self, owner: SegmentOwner<M>, offset: usize) -> bool {
        &&& self.inv_with(&owner)
        &&& offset % PAGE_SIZE == 0
        // Strict bounds: neither resulting half may be empty.
        &&& 0 < offset < self.size()
    }
398
    /// The wrapper for the postcondition for [`Self::split`]:
    ///
    /// - the resulting segments must satisfy the invariant with the corresponding owners;
    /// - the resulting segments must be the same as the result of [`Self::split_spec`];
    /// - the permissions in the original owner must be split into the resulting owners.
    pub open spec fn split_ensures(
        self,
        offset: usize,
        lhs: Self,
        rhs: Self,
        ori_owner: SegmentOwner<M>,
        lhs_owner: SegmentOwner<M>,
        rhs_owner: SegmentOwner<M>,
    ) -> bool {
        &&& lhs.inv_with(&lhs_owner)
        &&& rhs.inv_with(&rhs_owner)
        &&& (lhs, rhs) == self.split_spec(offset)
        // No permission is lost or duplicated: concatenating the halves'
        // permissions recovers the original sequence.
        &&& ori_owner.perms =~= (lhs_owner.perms + rhs_owner.perms)
    }
418
    /// The wrapper for the precondition for [`Self::into_raw`]:
    ///
    /// - the segment must satisfy the invariant with the owner;
    /// - the meta region in `regions` must satisfy the invariant.
    pub open spec fn into_raw_requires(
        self,
        regions: MetaRegionOwners,
        owner: SegmentOwner<M>,
    ) -> bool {
        &&& self.inv_with(&owner)
        &&& regions.inv()
        &&& owner.inv()
    }
432
    /// The wrapper for the postcondition for [`Self::into_raw`]:
    ///
    /// - the returned physical address range must be the same as the segment's range;
    /// - the meta region is unchanged.
    pub open spec fn into_raw_ensures(
        self,
        old_regions: MetaRegionOwners,
        regions: MetaRegionOwners,
        r: Range<Paddr>,
    ) -> bool {
        &&& r == self.range
        &&& regions.inv()
        // Forgetting the handle must not mutate the global meta region.
        &&& regions =~= old_regions
    }
447
    /// The wrapper for the precondition for [`Self::from_raw`]:
    ///
    /// - the range must be a forgotten [`Segment`] that matches the type `M`;
    /// - the caller must provide the owner with valid permissions;
    /// - the range must be aligned and within bounds.
    ///
    /// NOTE(review): `owner` is not constrained by the body below; the
    /// permissions are presumably validated elsewhere — confirm against
    /// [`Self::from_raw`]'s callers. Also note the range must be non-empty
    /// (`start < end`), unlike [`Self::inv`] which allows `start == end`.
    pub open spec fn from_raw_requires(
        regions: MetaRegionOwners,
        range: Range<Paddr>,
        owner: SegmentOwner<M>,
    ) -> bool {
        &&& regions.inv()
        &&& range.start % PAGE_SIZE == 0
        &&& range.end % PAGE_SIZE == 0
        &&& range.start < range.end < MAX_PADDR
    }
463
    /// The wrapper for the postcondition for [`Self::from_raw`]:
    ///
    /// - the returned segment must have the same physical address range as the input;
    /// - the meta region is unchanged.
    pub open spec fn from_raw_ensures(
        self,
        old_regions: MetaRegionOwners,
        new_regions: MetaRegionOwners,
        owner: SegmentOwner<M>,
        range: Range<Paddr>,
    ) -> bool {
        &&& self.range == range
        &&& new_regions.inv()
        // Restoring the handle must not mutate the global meta region.
        &&& new_regions =~= old_regions
    }
479
    /// The wrapper for the precondition for slicing a segment with a given range:
    ///
    /// - the segment must satisfy the invariant with the owner ([`Self::inv_with`])
    /// - the slicing range must be aligned and within bounds of the segment.
    ///
    /// Note: `range` here is a byte-offset range relative to the segment's
    /// start, not an absolute physical range.
    pub open spec fn slice_requires(self, owner: SegmentOwner<M>, range: Range<Paddr>) -> bool {
        &&& self.inv_with(&owner)
        &&& range.start % PAGE_SIZE == 0
        &&& range.end % PAGE_SIZE == 0
        &&& self.range.start + range.start <= self.range.start + range.end <= self.range.end
    }
490
    /// The wrapper for the postcondition for slicing a segment with a given range:
    ///
    /// - the resulting slice must satisfy the invariant with the owner;
    /// - the resulting slice must have the same physical address range as the slicing range.
    ///
    /// See also [`vstd::seq::Seq::subrange`].
    pub open spec fn slice_ensures(
        self,
        owner: SegmentOwner<M>,
        range: Range<Paddr>,
        res: Self,
    ) -> bool {
        // The slice is governed by exactly the sub-sequence of permissions
        // covering its pages (offsets converted to page indices).
        &&& res.inv_with(
            &SegmentOwner {
                perms: owner.perms.subrange(
                    (range.start / PAGE_SIZE) as int,
                    (range.end / PAGE_SIZE) as int,
                ),
            },
        )
    }
512
    /// Checks if the current segment can be iterated to get the next frame:
    ///
    /// - the segment and meta regions must satisfy their respective invariants;
    /// - the frame's slot must not be in `regions.slots` (the owner holds the permission);
    /// - the frame's raw_count must be 1 (it was forgotten once).
    pub open spec fn next_requires(
        self,
        regions: MetaRegionOwners,
        owner: SegmentOwner<M>,
    ) -> bool {
        &&& self.inv()
        &&& regions.inv()
        // There is at least one frame left to yield.
        &&& owner.perms.len() > 0
        &&& !regions.slots.contains_key(frame_to_index(self.range.start))
        &&& regions.slot_owners.contains_key(frame_to_index(self.range.start))
        &&& regions.slot_owners[frame_to_index(self.range.start)].raw_count == 1
        &&& regions.slot_owners[frame_to_index(self.range.start)].self_addr == frame_to_meta(
            self.range.start,
        )
        // The first permission token is the one for the frame at the head of
        // the range and agrees with that frame's slot owner.
        &&& owner.perms[0].points_to.is_init()
        &&& owner.perms[0].points_to.addr() == frame_to_meta(self.range.start)
        &&& owner.perms[0].points_to.value().wf(
            regions.slot_owners[frame_to_index(self.range.start)],
        )
    }
538
    /// The wrapper for the postcondition for iterating to the next frame:
    ///
    /// - if the result is [`None`], then the segment has been exhausted;
    /// - if the result is [`Some`], then the segment is advanced by one frame and
    ///   the returned frame is the next frame, with its slot restored to `regions`.
    pub open spec fn next_ensures(
        old_self: Self,
        new_self: Self,
        old_regions: MetaRegionOwners,
        new_regions: MetaRegionOwners,
        res: Option<Frame<M>>,
    ) -> bool {
        &&& new_regions.inv()
        &&& new_self.inv()
        &&& match res {
            // Exhausted: the remaining range is empty.
            None => { &&& new_self.range.start == old_self.range.end },
            Some(f) => {
                // Advanced by exactly one page; the yielded frame is the old
                // head, and its slot is back in `regions` with raw_count 0.
                &&& new_self.range.start == old_self.range.start + PAGE_SIZE
                &&& f.paddr() == old_self.range.start
                &&& new_regions.slots.contains_key(frame_to_index(f.paddr()))
                &&& new_regions.slot_owners[frame_to_index(f.paddr())].raw_count == 0
            },
        }
    }
563
    /// Creates a new [`Segment`] from unused frames.
    ///
    /// The caller must provide a closure to initialize metadata for all the frames.
    /// The closure receives the physical address of the frame and returns the
    /// metadata, which is similar to [`core::array::from_fn`].
    ///
    /// It returns an error if:
    ///  - any of the frames cannot be created with a specific reason.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// See [`Self::from_unused_requires`].
    /// ## Postconditions
    /// See [`Self::from_unused_ensures`].
    #[verus_spec(r =>
        with
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
                -> owner: Tracked<Option<SegmentOwner<M>>>,
        requires
            Self::from_unused_requires(*old(regions), range, metadata_fn),
        ensures
            Self::from_unused_ensures(*old(regions), *final(regions), owner@, range, metadata_fn, r),
    )]
    pub fn from_unused(range: Range<Paddr>, metadata_fn: impl Fn(Paddr) -> (Paddr, M)) -> (res:
        Result<Self, GetFrameError>) {
        // Ghost accumulators: the eventual owner, the addresses of the frames
        // created so far, and their permission tokens (in creation order).
        proof_decl! {
            let tracked mut owner: Option<SegmentOwner<M>> = None;
            let tracked mut addrs = Seq::<usize>::tracked_empty();
            let tracked mut perms = Seq::<MetaPerm<M>>::tracked_empty();
        }
        // Construct a segment early to recycle previously forgotten frames if
        // the subsequent operations fail in the middle.
        let mut segment = Self {
            range: range.start..range.start,
            _marker: core::marker::PhantomData,
        };

        let mut i = 0;
        let addr_len = (range.end - range.start) / PAGE_SIZE;

        // Loop invariant (summarized): `addrs`/`perms` track the `i` frames
        // created so far, the closure contract still holds for the remaining
        // addresses, and `segment` covers exactly the frames created so far.
        #[verus_spec(
            invariant
                i <= addr_len,
                i as int == addrs.len(),
                i as int == perms.len(),
                range.start % PAGE_SIZE == 0,
                range.end % PAGE_SIZE == 0,
                range.start <= range.start + i * PAGE_SIZE <= range.end,
                range.end == range.start + addr_len * PAGE_SIZE,
                addr_len == (range.end - range.start) / PAGE_SIZE as int,
                i <= addr_len,
                forall|paddr_in: Paddr|
                    (range.start + i * PAGE_SIZE <= paddr_in < range.end && paddr_in % PAGE_SIZE == 0) ==> {
                        &&& metadata_fn.requires((paddr_in,))
                    },
                forall|paddr_in: Paddr, paddr_out: Paddr, m: M|
                    range.start + i * PAGE_SIZE <= paddr_in < range.end
                        && paddr_in % PAGE_SIZE == 0
                        && metadata_fn.ensures((paddr_in,), (paddr_out, m)) ==> {
                        &&& paddr_out < MAX_PADDR
                        &&& paddr_out % PAGE_SIZE == 0
                        &&& paddr_in == paddr_out
                        &&& regions.slots.contains_key(frame_to_index(paddr_out))
                        &&& regions.slot_owners[frame_to_index(paddr_out)].usage is Unused
                        &&& regions.slot_owners[frame_to_index(paddr_out)].inner_perms.in_list.points_to(0)
                    },
                forall|j: int|
                    0 <= j < addrs.len() as int ==> {
                        &&& !regions.slots.contains_key(frame_to_index_spec(addrs[j]))
                        &&& addrs[j] % PAGE_SIZE == 0
                        &&& addrs[j] < MAX_PADDR
                        &&& addrs[j] == range.start + (j as u64) * PAGE_SIZE
                    },
                forall|j: int|
                    #![trigger perms[j]]
                    0 <= j < perms.len() as int ==> {
                        &&& perms[j].addr() == frame_to_meta(addrs[j])
                        &&& perms[j].wf(&perms[j].inner_perms)
                        &&& perms[j].is_init()
                    },
                regions.inv(),
                segment.range.start == range.start,
                segment.range.end == range.start + i * PAGE_SIZE,
            decreases addr_len - i,
        )]
        while i < addr_len {
            let paddr_in = range.start + i * PAGE_SIZE;
            let (paddr, meta) = metadata_fn(paddr_in);

            // Claim the unused slot as a typed frame; on failure, bail out.
            // The early-constructed `segment` (spanning the frames created so
            // far) is dropped here, recycling the forgotten frames.
            let (frame, Tracked(frame_perm)) = match #[verus_spec(with Tracked(regions))]
            MetaSlot::get_from_unused(paddr, meta, false) {
                Ok(res) => {
                    let (ptr, Tracked(frame_perm)) = res;
                    (Frame { ptr, _marker: core::marker::PhantomData::<M> }, Tracked(frame_perm))
                },
                Err(e) => {
                    // `owner` is still `None` on the error path.
                    return {
                        proof_with!(|= Tracked(owner));
                        Err(e)
                    };
                },
            };

            proof {
                perms.tracked_push(frame_perm);
            }

            // Forget the frame handle, leaving only its reference count; the
            // segment now logically owns this frame.
            let _ = ManuallyDrop::new(frame, Tracked(regions));
            segment.range.end = paddr + PAGE_SIZE;
            proof {
                addrs.tracked_push(paddr);
            }

            i += 1;
        }

        proof {
            broadcast use vstd_extra::seq_extra::lemma_seq_to_set_map_contains;

            owner = Some(SegmentOwner { perms });

            // Every aligned address in `range` was visited by the loop, so its
            // slot permission has been moved out of `regions` into `owner`.
            assert forall|addr: usize|
                #![trigger frame_to_index_spec(addr)]
                range.start <= addr < range.end && addr % PAGE_SIZE == 0 implies {
                &&& !regions.slots.contains_key(frame_to_index_spec(addr))
            } by {
                if !addrs.contains(addr) {
                    // Derive a contradiction: the address's page index is in
                    // bounds, and `addrs[j]` is exactly `addr` by the invariant.
                    let j = (addr - range.start) / PAGE_SIZE as int;
                    assert(addrs[j as int] == addr);
                }
            }
        }

        proof_with!(|= Tracked(owner));
        Ok(segment)
    }
700
    /// Splits the frames into two at the given byte offset from the start.
    ///
    /// The resulting frames cannot be empty. So the offset can be neither
    /// zero nor the length of the frames.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// See [`Self::split_requires`].
    ///
    /// ## Postconditions
    /// See [`Self::split_ensures`].
    #[verus_spec(r =>
        with
            Tracked(owner): Tracked<SegmentOwner<M>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
                -> frame_perms: (Tracked<SegmentOwner<M>>, Tracked<SegmentOwner<M>>),
        requires
            Self::split_requires(self, owner, offset),
        ensures
            Self::split_ensures(self, offset, r.0, r.1, owner, frame_perms.0@, frame_perms.1@),
    )]
    pub fn split(self, offset: usize) -> (Self, Self) {
        let at = self.range.start + offset;
        // Page index of the split point, used to partition the permissions.
        let idx = offset / PAGE_SIZE;
        let res = (
            Self { range: self.range.start..at, _marker: core::marker::PhantomData },
            Self { range: at..self.range.end, _marker: core::marker::PhantomData },
        );

        // Forget `self` so its drop does not decrement the frames' reference
        // counts — the two new segments take over the ownership.
        let _ = core::mem::ManuallyDrop::new(self);

        // Split the permission sequence at `idx`: `perms` keeps [0, idx),
        // `rest` receives [idx, len).
        let tracked mut perms = owner.perms;
        let tracked rest = seq_tracked_split_at(&mut perms, idx as int);
        let tracked frame_perms1 = SegmentOwner { perms };
        let tracked frame_perms2 = SegmentOwner { perms: rest };

        proof_with!(|= (Tracked(frame_perms1), Tracked(frame_perms2)));
        res
    }
740
    /// Forgets the [`Segment`] and gets a raw range of physical addresses.
    ///
    /// The segment's permissions are returned to the caller via `frame_perms`.
    /// The caller is responsible for holding onto the permissions and providing
    /// them back when restoring the segment with [`Self::from_raw`].
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// See [`Self::into_raw_requires`].
    /// ## Postconditions
    /// See [`Self::into_raw_ensures`]; additionally the returned permissions
    /// equal the input `owner` unchanged.
    #[verus_spec(r =>
        with
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(owner): Tracked<SegmentOwner<M>>,
                -> frame_perms: Tracked<SegmentOwner<M>>,
        requires
            Self::into_raw_requires(self, *old(regions), owner),
        ensures
            Self::into_raw_ensures(self, *old(regions), *final(regions), r),
            frame_perms@ == owner,
    )]
    pub(crate) fn into_raw(self) -> Range<Paddr> {
        let range = self.range.clone();
        // Forget `self` so the frames' reference counts stay elevated; the
        // raw range plus the returned permissions stand in for the handle.
        let _ = core::mem::ManuallyDrop::new(self);

        proof_with!(|= Tracked(owner));
        range
    }
764
    /// Restores the [`Segment`] from the raw physical address range.
    ///
    /// # Verified Properties
    /// ## Preconditions
    /// See [`Self::from_raw_requires`].
    ///
    /// ## Postconditions
    /// See [`Self::from_raw_ensures`].
    ///
    /// # Safety
    ///
    /// The range must be a forgotten [`Segment`] that matches the type `M`.
    /// The caller must provide the permissions that were returned by [`Self::into_raw`].
    #[verus_spec(r =>
        with
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(owner): Tracked<SegmentOwner<M>>,
        requires
            Self::from_raw_requires(*old(regions), range, owner),
        ensures
            Self::from_raw_ensures(r, *old(regions), *final(regions), owner, range),
    )]
    pub(crate) unsafe fn from_raw(range: Range<Paddr>) -> Self {
        // No runtime work is needed: the reference counts were left elevated
        // by `into_raw`, so re-wrapping the range re-adopts that ownership.
        Self { range, _marker: core::marker::PhantomData }
    }
790
    /// Gets an extra handle to the frames in the byte offset range.
    ///
    /// The sliced byte offset range is indexed by the offset from the start of
    /// the contiguous frames. The resulting frames hold extra reference counts.
    ///
    /// # Verified Properties
    ///
    /// ## Preconditions
    /// See [`Self::slice_requires`].
    ///
    /// ## Postconditions
    /// See [`Self::slice_ensures`].
    #[verus_spec(r =>
        with
            Tracked(owner): Tracked<&SegmentOwner<M>>,
        requires
            Self::slice_requires(*self, *owner, *range),
        ensures
            Self::slice_ensures(*self, *owner, *range, r),
    )]
    pub fn slice(&self, range: &Range<Paddr>) -> Self {
        // Translate the byte offsets into absolute physical addresses.
        let start = self.range.start + range.start;
        let end = self.range.start + range.end;

        // Walk each frame of the sub-range. The per-frame refcount increment
        // is currently commented out (see below); the loop is kept with its
        // invariants over the address arithmetic.
        // NOTE(review): without `inc_frame_ref_count`, the returned handle
        // does not add reference counts at runtime — confirm the verified
        // port accounts for this elsewhere before relying on independent
        // ownership of the slice.
        let mut i = 0;
        let addr_len = (end - start) / PAGE_SIZE;
        while i < addr_len
            invariant
                start % PAGE_SIZE == 0,
                end % PAGE_SIZE == 0,
                start + i * PAGE_SIZE <= end,
                i <= addr_len,
                addr_len == (end - start) / PAGE_SIZE as int,
            decreases addr_len - i,
        {
            let paddr = start + i * PAGE_SIZE;
            // SAFETY: We already have reference counts for the frames since
            // for each frame there would be a forgotten handle when creating
            // the `Segment` object.
            // unsafe { inc_frame_ref_count(paddr); }
            i += 1;
        }

        Self { range: start..end, _marker: core::marker::PhantomData }
    }
838
839    /// Gets the next frame in the segment (raw), if any.
840    ///
841    /// Since the segments here must be "non-active" where
842    /// there is no extra Verus-tracked permission [`SegmentOwner`]
843    /// associated with it; [`Segment`] becomes a kind of "zombie"
844    /// container through which we can only iterate the frames and
845    /// get the frame out of the `regions` instead.
846    ///
847    /// # Note
848    ///
849    /// We chose to make `next` the member function of [`Segment`] rather than a trait method
850    /// because the current Verus standard library has limited support for [`core::iter::Iterator`]
851    /// and associated types, and we want to avoid the complexity of defining a custom iterator trait
852    /// and implementing it for `Segment`.
853    ///
854    /// # Verified Properties
855    /// ## Preconditions
856    /// See [`Self::next_requires`].
857    ///
858    /// ## Postconditions
859    /// See [`Self::next_ensures`].
860    #[verus_spec(res =>
861        with
862            Tracked(regions): Tracked<&mut MetaRegionOwners>,
863            Tracked(owner): Tracked<&mut SegmentOwner<M>>
864        requires
865            Self::next_requires(*old(self), *old(regions), *old(owner)),
866        ensures
867            Self::next_ensures(*old(self), *final(self), *old(regions), *final(regions), res),
868    )]
869    pub fn next(&mut self) -> Option<Frame<M>> {
870        if self.range.start < self.range.end {
871            let tracked perm = owner.perms.tracked_remove(0);
872
873            proof_decl! {
874                let tracked from_raw_debt: crate::specs::mm::frame::frame_specs::BorrowDebt;
875            }
876
877            let frame = unsafe {
878                #[verus_spec(with Tracked(regions), Tracked(&perm) => Tracked(from_raw_debt))]
879                Frame::<M>::from_raw(self.range.start)
880            };
881
882            proof {
883                // raw_count was 1 (segment was forgotten via into_raw), so discharge trivially.
884                from_raw_debt.discharge_bookkeeping();
885            }
886
887            self.range.start = self.range.start + PAGE_SIZE;
888            Some(frame)
889        } else {
890            None
891        }
892    }
893}
894
impl<M: AnyFrameMeta + ?Sized> Drop for Segment<M> {
    /// Verified drop: iterates over each frame in the segment, decrements its
    /// reference count, and if it was the last reference, tears down the metadata.
    ///
    /// The tracked state `s` carries the global metadata-region ownership and
    /// the segment's per-frame permissions; both are returned (updated) so the
    /// caller's ghost state stays consistent after the drop.
    fn drop(self, Tracked(s): Tracked<(MetaRegionOwners, SegmentOwner<M>)>) -> (res: Tracked<(MetaRegionOwners, SegmentOwner<M>)>)
    {
        let tracked (mut regions, mut owner) = s;
        // `n` is the total frame count; `k` counts frames processed so far;
        // `paddr` is the physical address of the current frame.
        let ghost n = owner.perms.len();
        let mut paddr = self.range.start;

        let ghost mut k: int = 0;

        // Pre-drop snapshots, used by the invariants to relate the evolving
        // `regions`/`owner` back to their initial values.
        let ghost old_regions = regions;
        let ghost old_owner = owner;

        // Helper spec: the frame index for the j-th frame in the segment
        let ghost frame_idx = |j: int| -> usize { frame_to_index((self.range.start + j * PAGE_SIZE) as usize) };

        loop
            invariant
                regions.inv(),
                self.inv(),
                self.range.start <= paddr <= self.range.end,
                paddr == (self.range.start + k * PAGE_SIZE) as usize,
                paddr % PAGE_SIZE == 0,
                paddr < MAX_PADDR,
                0 <= k <= n,
                n == (self.range.end - self.range.start) as int / PAGE_SIZE as int,
                owner.perms.len() == n - k,
                paddr < self.range.end <==> k < n,
                // Remaining owner.perms are the suffix of the original
                forall|j: int|
                    #![trigger owner.perms[j]]
                    0 <= j < (n - k) ==>
                        owner.perms[j] == old_owner.perms[j + k],
                // Unprocessed frames' slot_owners are present and unchanged
                forall|j: int| #![trigger old_owner.perms[j]]
                    k <= j < n ==> {
                        &&& regions.slot_owners.contains_key(frame_idx(j))
                        &&& regions.slot_owners[frame_idx(j)] == old_regions.slot_owners[frame_idx(j)]
                    },
                // Slots not belonging to any frame in the segment are unchanged
                forall|si: usize| #![trigger regions.slot_owners[si]]
                    (forall|j: int| #![trigger old_owner.perms[j]]
                        0 <= j < n ==> si != frame_idx(j))
                    ==> regions.slot_owners[si] == old_regions.slot_owners[si],
                // slots map: processed frames' PointsTo re-inserted, rest unchanged
                forall|si: usize| #![trigger regions.slots[si]]
                    (forall|j: int| #![trigger old_owner.perms[j]]
                        0 <= j < k ==> si != frame_idx(j))
                    ==> regions.slots.contains_key(si) == old_regions.slots.contains_key(si),
                // Distinct indices (from drop_requires, propagated)
                forall|i: int, j: int|
                    #![trigger old_owner.perms[i], old_owner.perms[j]]
                    0 <= i < j < n ==> frame_idx(i) != frame_idx(j),
                // Original drop_requires still holds (old_regions/old_owner are immutable ghosts)
                self.drop_requires((old_regions, old_owner)),
            decreases n - k,
        {
            if paddr >= self.range.end {
                break;
            }

            // Pop the permission for the current frame and detach the
            // PointsTo for its metadata slot.
            let ghost cur_idx = frame_idx(k);
            let tracked perm = owner.perms.tracked_remove(0);

            // perm == old_owner.perms[k] (from shift invariant)
            let tracked slot_perm = perm.points_to;

            proof {
                assert(self.drop_requires((old_regions, old_owner)));
                let ghost idx_k = frame_to_index((self.range.start + k * PAGE_SIZE) as usize);
                assert(idx_k == cur_idx);
            }

            // Temporarily remove the slot's ownership from the global map so
            // it can be mutated; it is re-inserted below.
            let ghost pre_slot_own = regions.slot_owners[cur_idx];
            let tracked mut slot_own = regions.slot_owners.tracked_remove(cur_idx);

            let meta_vaddr = frame_to_meta(paddr);
            let ptr = vstd::simple_pptr::PPtr::<super::MetaSlot>::from_addr(meta_vaddr);

            proof {
                // Establish that `slot_perm` really covers the `MetaSlot` at
                // `ptr` so the `borrow` below is justified.
                assert(slot_perm.is_init());
                assert(slot_perm.value().wf(slot_own));
                assert(slot_perm.value().ref_count.id() == slot_own.inner_perms.ref_count.id());
                assert(slot_own.inner_perms.ref_count.value() > 0);
                assert(meta_addr(frame_to_index(paddr)) == frame_to_meta(paddr)) by {
                    assert(frame_to_index(paddr) == paddr / PAGE_SIZE);
                    assert(meta_addr(frame_to_index(paddr)) == (crate::specs::arch::kspace::FRAME_METADATA_RANGE.start + frame_to_index(paddr) * super::meta::mapping::META_SLOT_SIZE) as usize);
                }
                assert(slot_perm.pptr() == ptr);
            }

            // Decrement this frame's reference count; `fetch_sub` returns the
            // previous value, so `1` means this was the last reference.
            let slot = ptr.borrow(Tracked(&slot_perm));
            let last_ref_cnt = slot.ref_count.fetch_sub(Tracked(&mut slot_own.inner_perms.ref_count), 1);

            if last_ref_cnt == 1 {
                super::acquire_fence();

                proof {
                    // Establish the safety condition for tearing down the
                    // metadata: the count is zero and the storage is intact.
                    assert(slot_own.inner_perms.ref_count.value() == 0u64);
                    assert(slot_own.raw_count == 0);
                    assert(slot_own.inner_perms.storage.is_init());
                    assert(slot_own.inner_perms.in_list.value() == 0u64);
                    assert(slot_own.inv());
                    assert(MetaSlot::drop_last_in_place_safety_cond(slot_own));
                }

                #[verus_spec(with Tracked(&mut slot_own))]
                slot.drop_last_in_place();
            }

            proof {
                // Re-insert the (possibly torn-down) slot ownership and the
                // PointsTo into the global maps, then re-establish the region
                // invariant `regions.inv()` piece by piece.
                let ghost mid_regions = regions;

                regions.slot_owners.tracked_insert(cur_idx, slot_own);

                assert(regions.slot_owners.dom() =~= mid_regions.slot_owners.dom().insert(cur_idx));
                assert(slot_own.inv());

                regions.slots.tracked_insert(cur_idx, slot_perm);
                assert(regions.slots.dom().finite());

                assert forall|i: usize| i < super::meta::mapping::max_meta_slots() <==>
                    #[trigger] regions.slot_owners.contains_key(i) by { }

                assert forall|i: usize| #[trigger] regions.slots.contains_key(i) implies i < super::meta::mapping::max_meta_slots() by {
                    if i == cur_idx {
                        assert(regions.slot_owners.contains_key(cur_idx));
                    }
                }

                assert forall|i: usize| #[trigger] regions.slots.contains_key(i) implies ({
                    &&& regions.slot_owners.contains_key(i)
                    &&& regions.slot_owners[i].inv()
                    &&& regions.slots[i].is_init()
                    &&& regions.slots[i].addr() == meta_addr(i)
                    &&& regions.slots[i].value().wf(regions.slot_owners[i])
                    &&& regions.slot_owners[i].self_addr == regions.slots[i].addr()
                }) by {
                    if i == cur_idx {
                        assert(regions.slots[i].is_init());
                        assert(regions.slots[i].addr() == meta_addr(i));
                        assert(regions.slots[i].value().wf(regions.slot_owners[i]));
                        assert(regions.slot_owners[i].self_addr == regions.slots[i].addr());
                    }
                }

                assert forall|i: usize| #[trigger] regions.slot_owners.contains_key(i) implies regions.slot_owners[i].inv() by {
                    if i == cur_idx {
                        assert(slot_own.inv());
                    }
                }
            }

            paddr = paddr + PAGE_SIZE;

            proof {
                // Advance the ghost counter and re-establish the invariants
                // that quantify over the now-smaller unprocessed suffix.
                k = k + 1;

                assert forall|j: int| #![trigger old_owner.perms[j]]
                    k <= j < n implies
                        regions.slot_owners[frame_idx(j)] == old_regions.slot_owners[frame_idx(j)]
                by {
                    let ghost _a = old_owner.perms[j];
                    let ghost _b = old_owner.perms[(k - 1) as int];
                    assert(frame_idx(j) != frame_idx(k - 1));
                };

                assert forall|si: usize| #![trigger regions.slot_owners[si]]
                    (forall|j: int| #![trigger old_owner.perms[j]]
                        0 <= j < n ==> si != frame_idx(j))
                    implies regions.slot_owners[si] == old_regions.slot_owners[si]
                by {
                    let ghost _trigger = old_owner.perms[(k - 1) as int];
                    assert(si != frame_idx(k - 1));
                };
            }
        }

        Tracked((regions, owner))
    }
}
1077
impl<M: AnyFrameMeta + Repr<MetaSlotStorage> + OwnerOf> RCClone for Segment<M> {
    /// Precondition for [`RCClone::clone`]: the segment and region ownership
    /// are well-formed, and every page-aligned address in the range has a
    /// metadata slot with a positive reference count that can be incremented
    /// without reaching `REF_COUNT_MAX` or triggering the panic condition of
    /// [`MetaSlot::inc_ref_count_panic_cond`].
    open spec fn clone_requires(self, perm: MetaRegionOwners) -> bool {
        &&& self.inv()
        &&& perm.inv()
        &&& forall|pa: Paddr|
            #![trigger frame_to_index(pa)]
            (self.range.start <= pa < self.range.end && pa % PAGE_SIZE == 0) ==> {
                let idx = frame_to_index(pa);
                &&& perm.slots.contains_key(idx)
                &&& has_safe_slot(pa)
                &&& perm.slot_owners[idx].inner_perms.ref_count.value() > 0
                &&& perm.slot_owners[idx].inner_perms.ref_count.value() + 1
                    < super::meta::REF_COUNT_MAX
                &&& !MetaSlot::inc_ref_count_panic_cond(
                    perm.slot_owners[idx].inner_perms.ref_count)
            }
    }

    /// Postcondition for [`RCClone::clone`]: the clone covers the same
    /// physical range, and both the clone and the region ownership remain
    /// well-formed.
    open spec fn clone_ensures(self, old_perm: MetaRegionOwners, new_perm: MetaRegionOwners, res: Self) -> bool {
        &&& res.range == self.range
        &&& res.inv()
        &&& new_perm.inv()
    }

    /// Clones the segment by incrementing the reference count of every frame
    /// it covers, then returning a new handle over the same range.
    fn clone(&self, Tracked(perm): Tracked<&mut MetaRegionOwners>) -> (res: Self)
    {
        let mut paddr = self.range.start;

        let ghost old_perm = *perm;
        // Walk the range one page at a time, bumping each frame's refcount.
        loop
            invariant
                perm.inv(),
                self.inv(),
                perm.slots =~= old_perm.slots,
                perm.slot_owners.dom() =~= old_perm.slot_owners.dom(),
                self.range.start <= paddr <= self.range.end,
                paddr % PAGE_SIZE == 0,
                paddr < MAX_PADDR,
                // The clone precondition still holds for all unprocessed frames.
                forall|pa: Paddr|
                    #![trigger frame_to_index(pa)]
                    (paddr <= pa < self.range.end && pa % PAGE_SIZE == 0) ==> {
                        let idx = frame_to_index(pa);
                        &&& perm.slots.contains_key(idx)
                        &&& has_safe_slot(pa)
                        &&& perm.slot_owners[idx].inner_perms.ref_count.value() > 0
                        &&& perm.slot_owners[idx].inner_perms.ref_count.value() + 1
                            < super::meta::REF_COUNT_MAX
                        &&& !MetaSlot::inc_ref_count_panic_cond(
                            perm.slot_owners[idx].inner_perms.ref_count)
                    },
            decreases self.range.end - paddr,
        {
            if paddr >= self.range.end {
                break;
            }

            proof {
                // The next address stays within bounds, so the increment
                // below satisfies its precondition.
                assert(paddr + PAGE_SIZE <= self.range.end);
                assert(paddr + PAGE_SIZE <= MAX_PADDR);
            }

            #[verus_spec(with Tracked(perm))]
            crate::mm::frame::inc_frame_ref_count(paddr);

            paddr = paddr + PAGE_SIZE;
        }

        Self {
            range: self.range.start..self.range.end,
            _marker: core::marker::PhantomData,
        }
    }
}
1151
1152} // verus!