ostd/mm/frame/
segment.rs

1// SPDX-License-Identifier: MPL-2.0
2//! A contiguous range of frames.
3use vstd::prelude::*;
4use vstd_extra::seq_extra::{seq_tracked_map_values, seq_tracked_new, seq_tracked_subrange};
5
6use core::{fmt::Debug, mem::ManuallyDrop, ops::Range};
7
8use crate::mm::frame::{inc_frame_ref_count, untyped::AnyUFrameMeta, Frame};
9
10use vstd_extra::ownership::*;
11
12use super::meta::mapping::{frame_to_index, frame_to_index_spec, meta_addr, meta_to_frame_spec};
13use super::{AnyFrameMeta, GetFrameError, MetaPerm, MetaSlot};
14use crate::mm::{Paddr, PagingLevel, Vaddr};
15use crate::specs::arch::mm::{MAX_NR_PAGES, MAX_PADDR, PAGE_SIZE};
16use crate::specs::mm::frame::meta_region_owners::MetaRegionOwners;
17use vstd_extra::undroppable::*;
18
19verus! {
20
/// A contiguous range of homogeneous physical memory frames.
///
/// This is a handle to multiple contiguous frames. It will be more lightweight
/// than owning an array of frame handles.
///
/// The ownership is achieved by the reference counting mechanism of frames.
/// When constructing a [`Segment`], the frame handles are created then
/// forgotten, leaving the reference count. When dropping it, the frame
/// handles are restored and dropped, decrementing the reference count.
///
/// All the metadata of the frames are homogeneous, i.e., they are of the same
/// type.
#[repr(transparent)]
#[rustc_has_incoherent_inherent_impls]
pub struct Segment<M: AnyFrameMeta + ?Sized> {
    /// The physical address range of the segment.
    // Invariant (see `Inv::inv`): both ends are page-aligned and
    // `start <= end < MAX_PADDR()`.
    pub range: Range<Paddr>,
    /// Marker for the metadata type.
    pub _marker: core::marker::PhantomData<M>,
}
41
// TODO: treat `manually_drop` as equivalent to `into_raw`
impl<M: AnyFrameMeta + ?Sized> Undroppable for Segment<M> {
    // Ghost state threaded through construction: the owners of the global
    // frame-metadata region.
    type State = MetaRegionOwners;

    /// No precondition is imposed on constructing a `Segment`.
    open spec fn constructor_requires(self, s: Self::State) -> bool {
        true
    }

    /// Constructing a `Segment` leaves the meta-region state unchanged.
    open spec fn constructor_ensures(self, s0: Self::State, s1: Self::State) -> bool {
        s0 =~= s1
    }

    // NOTE(review): this proof obligation is currently admitted (trusted
    // without proof).
    proof fn constructor_spec(self, tracked s: &mut Self::State) {
        admit()
    }
}
58
/// A [`SegmentOwner<M>`] holds the permission tokens for all frames in the
/// [`Segment<M>`] for verification purposes.
pub tracked struct SegmentOwner<M: AnyFrameMeta + ?Sized> {
    // One metadata-slot permission per frame; `perms[i]` is the permission
    // for the i-th frame of the segment (see `Segment::inv_with`).
    pub perms: Seq<MetaPerm<M>>,
}
64
impl<M: AnyFrameMeta + ?Sized> Inv for Segment<M> {
    /// The invariant of a [`Segment`]: both range ends are page-aligned, the
    /// range is non-decreasing, and it lies below the maximum physical
    /// address.
    open spec fn inv(self) -> bool {
        &&& self.range.start % PAGE_SIZE() == 0
        &&& self.range.end % PAGE_SIZE() == 0
        &&& self.range.start <= self.range.end < MAX_PADDR()
    }
}
73
impl<M: AnyFrameMeta + ?Sized> Inv for SegmentOwner<M> {
    /// The invariant of a [`SegmentOwner`]: every held permission is for a
    /// page-aligned, in-bounds address, is well-formed, and is initialized.
    open spec fn inv(self) -> bool {
        &&& forall|i: int|
            #![trigger self.perms[i]]
            0 <= i < self.perms.len() as int ==> {
                &&& self.perms[i].addr() % PAGE_SIZE() == 0
                &&& self.perms[i].addr() < MAX_PADDR()
                &&& self.perms[i].wf()
                &&& self.perms[i].is_init()
            }
    }
}
87
impl<M: AnyFrameMeta + ?Sized> Segment<M> {
    /// The invariant tying a [`Segment`] to its [`SegmentOwner`]: the owner
    /// holds exactly one permission per page of the range, and the i-th
    /// permission is for the metadata slot of the i-th frame.
    pub open spec fn inv_with(&self, owner: &SegmentOwner<M>) -> bool {
        &&& self.inv()
        &&& owner.perms.len() * PAGE_SIZE() == self.range.end - self.range.start
        &&& forall|i: int|
            #![trigger owner.perms[i]]
            0 <= i < owner.perms.len() as int ==> owner.perms[i].addr() == meta_addr(
                frame_to_index((self.range.start + i * PAGE_SIZE()) as usize),
            )
    }
}
99
impl<M: AnyFrameMeta + ?Sized> SegmentOwner<M> {
    /// Holds when none of the frames this owner has permissions for appear in
    /// `region.dropped_slots`, i.e. the owner's frames are disjoint from the
    /// region's dropped (forgotten) slots.
    pub open spec fn is_disjoint_with_meta_region(&self, region: &MetaRegionOwners) -> bool {
        forall|i: int|
            #![trigger self.perms[i]]
            0 <= i < self.perms.len() as int ==> {
                &&& !region.dropped_slots.contains_key(frame_to_index_spec(self.perms[i].addr()))
            }
    }
}
109
110// impl<M: AnyFrameMeta + ?Sized> Debug for Segment<M> {
111//     fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
112//         write!(f, "Segment({:#x}..{:#x})", self.range.start, self.range.end)
113//     }
114// }
/// A contiguous range of homogeneous untyped physical memory frames that have any metadata.
///
/// In other words, the metadata of the frames are of the same type, and they
/// are untyped, but the type of metadata is not known at compile time. An
/// [`USegment`] as a parameter accepts any untyped segments.
///
/// The usage of this frame will not be changed while this object is alive.
pub type USegment = Segment<dyn AnyUFrameMeta>;
123
124// impl<M: AnyFrameMeta + ?Sized> Drop for Segment<M> {
125//     fn drop(&mut self) {
126//         for paddr in self.range.clone().step_by(PAGE_SIZE()()) {
127//             // SAFETY: for each frame there would be a forgotten handle
128//             // when creating the `Segment` object.
129//             drop(unsafe { Frame::<M>::from_raw(paddr) });
130//         }
131//     }
132// }
133// impl<M: AnyFrameMeta + ?Sized> Clone for Segment<M> {
134//     fn clone(&self) -> (res: Self)
135//         ensures
136//             res == *self,
137//     {
138//         let mut i = 0;
139//         let addr_len = (self.range.end - self.range.start) / PAGE_SIZE();
140//         while i < addr_len
141//             decreases addr_len - i,
142//         {
143//             let paddr = self.range.start + i * PAGE_SIZE();
144//             // SAFETY: for each frame there would be a forgotten handle
145//             // when creating the `Segment` object, so we already have
146//             // reference counts for the frames.
147//             unsafe {
148//                 inc_frame_ref_count(paddr);
149//             }
150//         }
151//         Self { range: self.range.clone(), _marker: core::marker::PhantomData }
152//     }
153// }
154#[verus_verify]
155impl<M: AnyFrameMeta> Segment<M> {
    /// Spec-level view of [`Self::start_paddr`]: the start physical address.
    #[rustc_allow_incoherent_impl]
    #[verifier::inline]
    pub open spec fn start_paddr_spec(&self) -> Paddr
        recommends
            self.inv(),
    {
        self.range.start
    }
164
    /// Spec-level view of [`Self::end_paddr`]: the end physical address.
    #[rustc_allow_incoherent_impl]
    #[verifier::inline]
    pub open spec fn end_paddr_spec(&self) -> Paddr
        recommends
            self.inv(),
    {
        self.range.end
    }
173
    /// Spec-level view of [`Self::size`]: the segment length in bytes.
    #[rustc_allow_incoherent_impl]
    #[verifier::inline]
    pub open spec fn size_spec(&self) -> usize
        recommends
            self.inv(),
    {
        (self.range.end - self.range.start) as usize
    }
182
    /// Spec-level number of pages in the segment (`size / PAGE_SIZE`).
    #[rustc_allow_incoherent_impl]
    #[verifier::inline]
    pub open spec fn nrpage_spec(&self) -> usize
        recommends
            self.inv(),
    {
        self.size_spec() / PAGE_SIZE()
    }
191
192    #[rustc_allow_incoherent_impl]
193    pub open spec fn split_spec(self, offset: usize) -> (Self, Self)
194        recommends
195            self.inv(),
196            offset % PAGE_SIZE() == 0,
197            0 < offset < self.size_spec(),
198    {
199        let at = (self.range.start + offset) as usize;
200        let idx = at / PAGE_SIZE();
201        (
202            Self { range: self.range.start..at, _marker: core::marker::PhantomData },
203            Self { range: at..self.range.end, _marker: core::marker::PhantomData },
204        )
205    }
206
    /// Gets the start physical address of the contiguous frames.
    #[rustc_allow_incoherent_impl]
    #[inline(always)]
    #[verifier::when_used_as_spec(start_paddr_spec)]
    pub fn start_paddr(&self) -> (res: Paddr)
        requires
            self.inv(),
        returns
            self.start_paddr_spec(),
    {
        self.range.start
    }
219
    /// Gets the end physical address of the contiguous frames.
    #[rustc_allow_incoherent_impl]
    #[inline(always)]
    #[verifier::when_used_as_spec(end_paddr_spec)]
    pub fn end_paddr(&self) -> (res: Paddr)
        requires
            self.inv(),
        returns
            self.end_paddr_spec(),
    {
        self.range.end
    }
232
    /// Gets the length in bytes of the contiguous frames.
    // Cannot underflow: `inv()` guarantees `start <= end`.
    #[rustc_allow_incoherent_impl]
    #[inline(always)]
    #[verifier::when_used_as_spec(size_spec)]
    pub fn size(&self) -> (res: usize)
        requires
            self.inv(),
        returns
            self.size_spec(),
    {
        self.range.end - self.range.start
    }
245
    /// Precondition of [`Self::from_unused`]: the meta region is well-formed,
    /// `range` is page-aligned and in bounds, `metadata_fn` may be called on
    /// every page address in `range`, and every address it can return is an
    /// unused, not-yet-dropped slot.
    #[rustc_allow_incoherent_impl]
    pub open spec fn from_unused_requires(
        regions: MetaRegionOwners,
        range: Range<Paddr>,
        metadata_fn: impl Fn(Paddr) -> (Paddr, M),
    ) -> bool {
        &&& regions.inv()
        &&& range.start % PAGE_SIZE() == 0
        &&& range.end % PAGE_SIZE() == 0
        &&& range.start <= range.end < MAX_PADDR()
        // `metadata_fn` is callable on every page-aligned address in `range`.
        &&& forall|paddr_in: Paddr|
            (range.start <= paddr_in < range.end && paddr_in % PAGE_SIZE() == 0) ==> {
                &&& metadata_fn.requires((paddr_in,))
            }
        // Any address `metadata_fn` returns equals its input and refers to a
        // present, not-dropped, unused slot whose `in_list` count is zero.
        &&& forall|paddr_in: Paddr, paddr_out: Paddr, m: M|
            metadata_fn.ensures((paddr_in,), (paddr_out, m)) ==> {
                &&& paddr_out < MAX_PADDR()
                &&& paddr_out % PAGE_SIZE() == 0
                &&& paddr_in == paddr_out
                &&& regions.slots.contains_key(frame_to_index(paddr_out))
                &&& !regions.dropped_slots.contains_key(frame_to_index(paddr_out))
                &&& regions.slot_owners[frame_to_index(paddr_out)].usage is Unused
                &&& regions.slot_owners[frame_to_index(paddr_out)].in_list.points_to(0)
            }
    }
271
    /// Postcondition of [`Self::from_unused`]: on success, the returned
    /// segment covers exactly `range`, an owner with per-frame permissions is
    /// produced, and the frames are no longer in the meta region's maps.
    /// (Nothing is guaranteed on the `Err` path.)
    #[rustc_allow_incoherent_impl]
    pub open spec fn from_unused_ensures(
        old_regions: MetaRegionOwners,
        new_regions: MetaRegionOwners,
        owner: Option<SegmentOwner<M>>,
        range: Range<Paddr>,
        metadata_fn: impl Fn(Paddr) -> (Paddr, M),
        r: Result<Self, GetFrameError>,
    ) -> bool {
        &&& r matches Ok(r) ==> {
            &&& new_regions.inv()
            &&& r.range.start == range.start
            &&& r.range.end == range.end
            &&& owner matches Some(owner) && {
                &&& r.inv_with(&owner)
                // The i-th permission is for the metadata slot of the i-th
                // page of `range`.
                &&& forall|i: int|
                    #![trigger owner.perms[i]]
                    0 <= i < owner.perms.len() as int ==> {
                        &&& owner.perms[i].addr() == meta_addr(
                            frame_to_index_spec((range.start + i * PAGE_SIZE()) as usize),
                        )
                    }
                &&& new_regions.paddr_range_not_in_region(range)
            }
        }
    }
298
    /// Precondition of [`Self::split`]: the segment/owner pair is consistent
    /// and `offset` is page-aligned and strictly inside the segment (so
    /// neither resulting half is empty).
    #[rustc_allow_incoherent_impl]
    pub open spec fn split_requires(self, owner: SegmentOwner<M>, offset: usize) -> bool {
        &&& self.inv_with(&owner)
        &&& offset % PAGE_SIZE() == 0
        &&& 0 < offset < self.size()
    }
305
    /// Postcondition of [`Self::split`]: both halves match [`Self::split_spec`],
    /// each is consistent with its owner, and the original owner's permission
    /// sequence is exactly the concatenation of the two halves' permissions.
    #[rustc_allow_incoherent_impl]
    pub open spec fn split_ensures(
        self,
        offset: usize,
        lhs: Self,
        rhs: Self,
        ori_owner: SegmentOwner<M>,
        lhs_owner: SegmentOwner<M>,
        rhs_owner: SegmentOwner<M>,
    ) -> bool {
        &&& lhs.inv_with(&lhs_owner)
        &&& rhs.inv_with(&rhs_owner)
        &&& (lhs, rhs) == self.split_spec(offset)
        &&& ori_owner.perms =~= (lhs_owner.perms + rhs_owner.perms)
    }
321
    /// Precondition of [`Self::into_raw`]: the segment/owner pair and both
    /// invariants hold, and none of the owner's frames are already in the
    /// region's dropped slots (they will be moved there).
    #[rustc_allow_incoherent_impl]
    pub open spec fn into_raw_requires(
        self,
        regions: MetaRegionOwners,
        owner: SegmentOwner<M>,
    ) -> bool {
        &&& self.inv_with(&owner)
        &&& regions.inv()
        &&& owner.inv()
        &&& owner.is_disjoint_with_meta_region(&regions)
    }
333
    /// Postcondition of [`Self::into_raw`]: the returned raw range is the
    /// segment's range, the region invariant is preserved, and the whole
    /// range is now recorded in the region's dropped slots.
    #[rustc_allow_incoherent_impl]
    pub open spec fn into_raw_ensures(
        self,
        regions: MetaRegionOwners,
        owner: SegmentOwner<M>,
        r: Range<Paddr>,
    ) -> bool {
        &&& r == self.range
        &&& regions.inv()
        &&& regions.paddr_range_in_dropped_region(self.range)
    }
345
    /// Precondition of [`Self::from_raw`]: the region is well-formed, the
    /// whole `range` lies in its dropped slots (i.e. it came from a forgotten
    /// segment), and `range` is page-aligned, non-empty, and in bounds.
    #[rustc_allow_incoherent_impl]
    pub open spec fn from_raw_requires(regions: MetaRegionOwners, range: Range<Paddr>) -> bool {
        &&& regions.inv()
        &&& regions.paddr_range_in_dropped_region(range)
        &&& range.start % PAGE_SIZE() == 0
        &&& range.end % PAGE_SIZE() == 0
        &&& range.start < range.end < MAX_PADDR()
    }
354
    /// Postcondition of [`Self::from_raw`]: the restored segment covers
    /// `range`, is consistent with the regained owner, and the frames have
    /// been removed from the region's maps (cannot be restored twice).
    #[rustc_allow_incoherent_impl]
    pub open spec fn from_raw_ensures(
        self,
        old_regions: MetaRegionOwners,
        new_regions: MetaRegionOwners,
        owner: SegmentOwner<M>,
        range: Range<Paddr>,
    ) -> bool {
        &&& self.inv_with(&owner)
        &&& self.range == range
        &&& new_regions.inv()
        &&& new_regions.paddr_range_not_in_region(range)
    }
368
    /// Precondition of [`Self::slice`]: `range` is a page-aligned byte-offset
    /// range (relative to the segment start) that stays within the segment.
    #[rustc_allow_incoherent_impl]
    pub open spec fn slice_requires(self, owner: SegmentOwner<M>, range: Range<Paddr>) -> bool {
        &&& self.inv_with(&owner)
        &&& range.start % PAGE_SIZE() == 0
        &&& range.end % PAGE_SIZE() == 0
        &&& self.range.start + range.start <= self.range.start + range.end <= self.range.end
    }
376
    /// Postcondition of [`Self::slice`]: the resulting segment is consistent
    /// with the corresponding page-indexed subrange of the owner's
    /// permissions (`range` is in byte offsets, so indices are divided by
    /// the page size).
    #[rustc_allow_incoherent_impl]
    pub open spec fn slice_ensures(
        self,
        owner: SegmentOwner<M>,
        range: Range<Paddr>,
        res: Self,
    ) -> bool {
        &&& res.inv_with(
            &SegmentOwner {
                perms: owner.perms.subrange(
                    (range.start / PAGE_SIZE()) as int,
                    (range.end / PAGE_SIZE()) as int,
                ),
            },
        )
    }
393
    /// Precondition of [`Self::next`]: the segment's first frame is a dropped
    /// (forgotten) slot that is not simultaneously a live slot, so its
    /// permission can be taken back out of `regions.dropped_slots`.
    #[rustc_allow_incoherent_impl]
    pub open spec fn next_requires(self, regions: MetaRegionOwners) -> bool {
        &&& self.inv()
        &&& regions.inv()
        &&& regions.dropped_slots.contains_key(frame_to_index(self.range.start))
        &&& !regions.slots.contains_key(frame_to_index(self.range.start))
    }
401
    /// Postcondition of [`Self::next`]: `None` means iteration is finished
    /// (the start has reached the end); `Some(f)` means the start advanced by
    /// one page, `f` is the frame at the old start, and `dropped_slots` is
    /// unchanged except that `f`'s entry is removed.
    #[rustc_allow_incoherent_impl]
    pub open spec fn next_ensures(
        old_self: Self,
        new_self: Self,
        old_regions: MetaRegionOwners,
        new_regions: MetaRegionOwners,
        res: Option<Frame<M>>,
    ) -> bool {
        &&& new_regions.inv()
        &&& new_self.inv()
        &&& match res {
            None => { &&& new_self.range.start == old_self.range.end },
            Some(f) => {
                &&& new_self.range.start == old_self.range.start + PAGE_SIZE()
                &&& f.paddr() == old_self.range.start
                // Frame-wise: all dropped-slot entries other than `f`'s are
                // preserved; `f`'s entry is gone.
                &&& forall|i: usize|
                    #![trigger new_regions.dropped_slots[i], old_regions.dropped_slots[i]]
                    {
                        &&& i != frame_to_index(f.paddr()) ==> old_regions.dropped_slots[i]
                            == new_regions.dropped_slots[i]
                        &&& i == frame_to_index(f.paddr())
                            ==> !new_regions.dropped_slots.contains_key(i)
                    }
            },
        }
    }
428
    /// Creates a new [`Segment`] from unused frames.
    ///
    /// The caller must provide a closure to initialize metadata for all the frames.
    /// The closure receives the physical address of the frame and returns the
    /// metadata, which is similar to [`core::array::from_fn`].
    ///
    /// It returns an error if:
    ///  - any of the frames cannot be created with a specific reason.
    ///
    #[rustc_allow_incoherent_impl]
    #[verus_spec(r =>
        with
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
                -> owner: Tracked<Option<SegmentOwner<M>>>,
        requires
            Self::from_unused_requires(*old(regions), range, metadata_fn),
        ensures
            Self::from_unused_ensures(*old(regions), *regions, owner@, range, metadata_fn, r),
    )]
    pub fn from_unused(range: Range<Paddr>, metadata_fn: impl Fn(Paddr) -> (Paddr, M)) -> (res:
        Result<Self, GetFrameError>) {
        proof_decl! {
            // Ghost result: permissions for the frames created so far.
            let tracked mut owner: Option<SegmentOwner<M>> = None;
            // Ghost log of the page addresses created so far, in order.
            let tracked mut addrs = Seq::<usize>::tracked_empty();
        }
        // Construct a segment early to recycle previously forgotten frames if
        // the subsequent operations fails in the middle.
        let mut segment = Self {
            range: range.start..range.start,
            _marker: core::marker::PhantomData,
        };

        let mut i = 0;
        let addr_len = (range.end - range.start) / PAGE_SIZE();

        #[verus_spec(
            invariant
                i <= addr_len,
                i as int == addrs.len(),
                range.start % PAGE_SIZE() == 0,
                range.end % PAGE_SIZE() == 0,
                range.start <= range.start + i * PAGE_SIZE() <= range.end,
                range.end == range.start + addr_len * PAGE_SIZE(),
                addr_len == (range.end - range.start) / PAGE_SIZE() as int,
                i <= addr_len,
                forall|paddr_in: Paddr|
                    (range.start <= paddr_in < range.end && paddr_in % PAGE_SIZE() == 0) ==> {
                        &&& metadata_fn.requires((paddr_in,))
                    },
                forall|paddr_in: Paddr, paddr_out: Paddr, m: M|
                    metadata_fn.ensures((paddr_in,), (paddr_out, m)) ==> {
                        &&& paddr_out < MAX_PADDR()
                        &&& paddr_out % PAGE_SIZE() == 0
                        &&& paddr_in == paddr_out
                        &&& regions.slots.contains_key(frame_to_index(paddr_out))
                        &&& !regions.dropped_slots.contains_key(frame_to_index(paddr_out))
                        &&& regions.slot_owners[frame_to_index(paddr_out)].usage is Unused
                        &&& regions.slot_owners[frame_to_index(paddr_out)].in_list.points_to(0)
                    },
                forall|j: int|
                    0 <= j < addrs.len() as int ==> {
                        &&& regions.slots.contains_key(frame_to_index_spec(addrs[j]))
                        &&& !regions.dropped_slots.contains_key(frame_to_index_spec(addrs[j]))
                        &&& addrs[j] % PAGE_SIZE() == 0
                        &&& addrs[j] == range.start + (j as u64) * PAGE_SIZE()
                    },
                forall|paddr: Paddr| #[trigger]
                    old(regions).slots.contains_key(frame_to_index(paddr))
                        ==> regions.slots.contains_key(frame_to_index(paddr)),
                regions.inv(),
                segment.range.start == range.start,
                segment.range.end == range.start + i * PAGE_SIZE(),
            decreases addr_len - i,
        )]
        while i < addr_len {
            let paddr = range.start + i * PAGE_SIZE();
            let (paddr, meta) = metadata_fn(paddr);

            let frame = match #[verus_spec(with Tracked(regions))]
            Frame::<M>::from_unused(paddr, meta) {
                Ok(f) => f,
                Err(e) => {
                    // Bail out; `owner` is still `None` on this path.
                    return {
                        proof_with!(|= Tracked(owner));
                        Err(e)
                    };
                },
            };

            proof {
                // NOTE(review): both facts below are currently admitted.
                assert(forall|paddr_in: Paddr, paddr_out: Paddr, m: M|
                    metadata_fn.ensures((paddr_in,), (paddr_out, m)) ==> {
                        &&& regions.slot_owners[frame_to_index(paddr_out)].usage is Unused
                        &&& regions.slot_owners[frame_to_index(paddr_out)].in_list.points_to(0)
                    }) by {
                    admit();
                }
                assert(frame.constructor_requires(*old(regions))) by { admit() };
            }

            // Forget the frame handle, leaving only the reference count.
            let _ = NeverDrop::new(frame, Tracked(regions));
            segment.range.end = paddr + PAGE_SIZE();
            proof {
                addrs.tracked_push(paddr);
                // NOTE(review): loop-invariant re-establishment is admitted.
                admit();
            }

            i += 1;
        }

        proof {
            broadcast use vstd_extra::map_extra::lemma_map_remove_keys_finite;
            broadcast use vstd_extra::seq_extra::lemma_seq_to_set_map_contains;
            broadcast use vstd::map::group_map_axioms;
            broadcast use vstd::map_lib::group_map_extra;

            // Build the owner's permission sequence from the per-slot
            // permissions recorded in `regions.slots`.
            let tracked owner_seq = seq_tracked_map_values(
                addrs,
                |addr: usize|
                    {
                        let perm = regions.slots[frame_to_index(addr)];
                        MetaPerm {
                            addr: meta_addr(frame_to_index(addr)),
                            points_to: perm,
                            _T: core::marker::PhantomData,
                        }
                    },
            );
            owner = Some(SegmentOwner { perms: owner_seq });

            // Transfer the permissions out of the global map: the owner now
            // holds them exclusively.
            let index = addrs.map_values(|addr: Paddr| frame_to_index(addr)).to_set();
            regions.slots.tracked_remove_keys(index);

            assert forall|addr: usize|
                #![trigger frame_to_index_spec(addr)]
                range.start <= addr < range.end && addr % PAGE_SIZE() == 0 implies {
                &&& !regions.slots.contains_key(frame_to_index_spec(addr))
                &&& !regions.dropped_slots.contains_key(frame_to_index_spec(addr))
            } by {
                // proof by contradiction
                assert(addrs.contains(addr)) by {
                    if !addrs.contains(addr) {
                        let j = (addr - range.start) / PAGE_SIZE() as int;
                        assert(0 <= j < addrs.len() as usize) by {
                            assert(addr >= range.start);
                            assert(addr < range.end);
                            assert(addr % PAGE_SIZE() == 0);
                        };
                        assert(addrs[j as int] == addr);
                    }
                }
            }
        }

        proof_with!(|= Tracked(owner));
        Ok(segment)
    }
586
    /// Splits the frames into two at the given byte offset from the start.
    ///
    /// The resulting frames cannot be empty. So the offset cannot be neither
    /// zero nor the length of the frames.
    ///
    /// # Panics
    ///
    /// The function panics if the offset is out of bounds, at either ends, or
    /// not base-page-aligned.
    #[rustc_allow_incoherent_impl]
    #[verus_spec(r =>
        with
            Tracked(owner): Tracked<SegmentOwner<M>>,
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
                -> frame_perms: (Tracked<SegmentOwner<M>>, Tracked<SegmentOwner<M>>),
        requires
            Self::split_requires(self, owner, offset),
        ensures
            Self::split_ensures(self, offset, r.0, r.1, owner, frame_perms.0@, frame_perms.1@),
    )]
    pub fn split(self, offset: usize) -> (Self, Self) {
        let at = self.range.start + offset;
        // Page index of the cut point within the owner's permission sequence.
        let idx = offset / PAGE_SIZE();
        let res = (
            Self { range: self.range.start..at, _marker: core::marker::PhantomData },
            Self { range: at..self.range.end, _marker: core::marker::PhantomData },
        );

        // Consume `self` without running a destructor; the two halves take
        // over the reference counts.
        let _ = NeverDrop::new(self, Tracked(regions));

        // Partition the owner's permissions at the cut index.
        let tracked frame_perms1 = SegmentOwner {
            perms: seq_tracked_subrange(owner.perms, 0, idx as int),
        };
        let tracked frame_perms2 = SegmentOwner {
            perms: seq_tracked_subrange(owner.perms, idx as int, owner.perms.len() as int),
        };

        proof {
            // perms == perms[..idx] + perms[idx..]
            owner.perms.lemma_split_at(idx as int);
        }

        proof_with!(|= (Tracked(frame_perms1), Tracked(frame_perms2)));
        res
    }
631
    /// Forgets the [`Segment`] and gets a raw range of physical addresses.
    // NOTE: forgotten frames will be released from `owner`'s permission and
    //       later added back to `regions.dropped_slots`, and `owner` is moved.
    #[rustc_allow_incoherent_impl]
    #[verus_spec(r =>
        with
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(owner): Tracked<SegmentOwner<M>>,
        requires
            Self::into_raw_requires(self, *old(regions), owner),
        ensures
            Self::into_raw_ensures(self, *regions, owner, r),
    )]
    pub(crate) fn into_raw(self) -> Range<Paddr> {
        proof {
            broadcast use vstd_extra::seq_extra::lemma_seq_to_set_map_contains;
            broadcast use crate::mm::frame::meta::mapping::lemma_paddr_to_meta_biinjective;
            // Adjust the permission by transferring all
            // PointsTo perms from `owner` into `regions.dropped_slots`.

            let new_keys = owner.perms.map_values(|v: MetaPerm<M>| meta_to_frame_spec(v.addr()));
            let new_dropped_slots = Map::new(
                |i: usize| new_keys.contains(i),
                |k: usize| { owner.perms[k as int].points_to },
            );

            // `union_prefer_right`: existing dropped entries outside the
            // segment are kept as-is.
            regions.dropped_slots.tracked_union_prefer_right(new_dropped_slots);

            // Every page of the segment is now recorded as dropped.
            assert forall|paddr: Paddr|
                self.range.start <= paddr < self.range.end && paddr % PAGE_SIZE() == 0 implies {
                &&& regions.dropped_slots.contains_key(#[trigger] frame_to_index_spec(paddr))
            } by {
                assert(new_keys.to_set().contains(frame_to_index_spec(paddr))) by {
                    let j = (paddr - self.range.start) / PAGE_SIZE() as int;
                    assert(0 <= j < owner.perms.len() as int);
                    assert(owner.perms[j].addr() == meta_addr(frame_to_index_spec(paddr)));
                }
            }
        }

        let range = self.range.clone();
        // Forget `self` without running a destructor; the reference counts
        // stay behind for a later `from_raw`.
        let _ = NeverDrop::new(self, Tracked(regions));
        range
    }
676
    /// Restores the [`Segment`] from the raw physical address range.
    ///
    /// # Safety
    ///
    /// The range must be a forgotten [`Segment`] that matches the type `M`.
    /// It could be manually forgotten by [`core::mem::forget`],
    /// [`ManuallyDrop`], or [`Self::into_raw`].
    #[rustc_allow_incoherent_impl]
    #[verus_spec(r =>
        with
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
                -> frame_perms: Tracked<SegmentOwner<M>>,
        requires
            Self::from_raw_requires(*old(regions), range),
        ensures
            Self::from_raw_ensures(r, *old(regions), *regions, frame_perms@, range),
    )]
    pub(crate) unsafe fn from_raw(range: Range<Paddr>) -> Self {
        proof_decl! {
            let tracked mut frame_perms: SegmentOwner<M>;
        }
        // We create new permissions from stealing the `PointsTo` perms
        // from `regions.dropped_slots` so we must ensure that all frames
        // cannot be restored again.
        proof {
            broadcast use vstd::arithmetic::div_mod::group_div_basics;

            // Rebuild one `MetaPerm` per page, taking the permission for each
            // page's metadata slot from `regions.dropped_slots`.
            let len = (range.end - range.start) / PAGE_SIZE() as int;
            let tracked owner_seq = seq_tracked_new(
                len as nat,
                |i: int|
                    {
                        let paddr = (range.start + (i as u64) * PAGE_SIZE() as int) as Paddr;
                        MetaPerm {
                            addr: meta_addr(frame_to_index(paddr)),
                            points_to: regions.dropped_slots[frame_to_index(paddr)],
                            _T: core::marker::PhantomData,
                        }
                    },
            );
            frame_perms = SegmentOwner { perms: owner_seq };

            // Then remove the frames from `regions.dropped_slots`.
            let keys = Set::new(
                |paddr: Paddr| { range.start <= paddr < range.end && paddr % PAGE_SIZE() == 0 },
            );
            let keys_to_remove = keys.map(|paddr: Paddr| frame_to_index(paddr));

            regions.dropped_slots.tracked_remove_keys(keys_to_remove);

            // After removal, no page of `range` is still a dropped slot.
            assert forall|paddr: Paddr|
                #![trigger frame_to_index(paddr)]
                range.start <= paddr < range.end && paddr % PAGE_SIZE() == 0 implies {
                &&& !regions.dropped_slots.contains_key(#[trigger] frame_to_index(paddr))
            } by {
                assert(keys.contains(paddr));

                if regions.dropped_slots.contains_key(frame_to_index(paddr)) {
                    assert(!keys_to_remove.contains(frame_to_index(paddr)));
                }
            }
        }

        proof_with!(|= Tracked(frame_perms));
        Self { range, _marker: core::marker::PhantomData }
    }
743
    /// Gets an extra handle to the frames in the byte offset range.
    ///
    /// The sliced byte offset range in indexed by the offset from the start of
    /// the contiguous frames. The resulting frames holds extra reference counts.
    ///
    /// # Panics
    ///
    /// The function panics if the byte offset range is out of bounds, or if
    /// any of the ends of the byte offset range is not base-page aligned.
    #[rustc_allow_incoherent_impl]
    #[verus_spec(r =>
        with
            Tracked(owner): Tracked<&SegmentOwner<M>>,
        requires
            Self::slice_requires(*self, *owner, *range),
        ensures
            Self::slice_ensures(*self, *owner, *range, r),
    )]
    pub fn slice(&self, range: &Range<Paddr>) -> Self {
        // `range` holds byte offsets; translate to absolute addresses.
        let start = self.range.start + range.start;
        let end = self.range.start + range.end;

        let mut i = 0;
        let addr_len = (end - start) / PAGE_SIZE();
        // NOTE(review): this loop currently performs no runtime work — the
        // ref-count increment below is commented out pending verification.
        while i < addr_len
            invariant
                start % PAGE_SIZE() == 0,
                end % PAGE_SIZE() == 0,
                start + i * PAGE_SIZE() <= end,
                i <= addr_len,
                addr_len == (end - start) / PAGE_SIZE() as int,
            decreases addr_len - i,
        {
            let paddr = start + i * PAGE_SIZE();
            // SAFETY: We already have reference counts for the frames since
            // for each frame there would be a forgotten handle when creating
            // the `Segment` object.
            // unsafe { inc_frame_ref_count(paddr); }
            i += 1;
        }

        Self { range: start..end, _marker: core::marker::PhantomData }
    }
787
    /// Gets the next frame in the segment (raw), if any.
    ///
    /// Since the segments here must be "non-active" where
    /// there is no extra Verus-tracked permission [`SegmentOwner`]
    /// associated with it; [`Segment`] becomes a kind of "zombie"
    /// container through which we can only iterate the frames and
    /// get the frame out of the `regions` instead.
    #[rustc_allow_incoherent_impl]
    #[verus_spec(res =>
        with
            Tracked(regions): Tracked<&mut MetaRegionOwners>,
            Tracked(owner): Tracked<&mut SegmentOwner<M>>
        requires
            Self::next_requires(*old(self), *old(regions)),
            old(owner).perms.len() > 0
        ensures
            Self::next_ensures(*old(self), *self, *old(regions), *regions, res),
    )]
    pub fn next(&mut self) -> Option<Frame<M>> {
        if self.range.start < self.range.end {
            // Take the permission for the first remaining frame.
            let tracked perm = owner.perms.tracked_remove(0);
            // SAFETY: each frame in the range would be a handle forgotten
            // when creating the `Segment` object.
            let frame = unsafe {
                #[verus_spec(with Tracked(regions), Tracked(&perm))]
                Frame::<M>::from_raw(self.range.start)
            };

            // Advance the cursor past the frame just yielded.
            self.range.start = self.range.start + PAGE_SIZE();
            Some(frame)
        } else {
            None
        }
    }
822}
823
824// impl<M: AnyFrameMeta + ?Sized> From<Frame<M>> for Segment<M> {
825//     fn from(frame: Frame<M>) -> Self {
826//         let pa = frame.start_paddr();
827//         let _ = ManuallyDrop::new(frame);
828//         Self { range: pa..pa + PAGE_SIZE(), _marker: core::marker::PhantomData }
829//     }
830// }
831/*
832Don't worry about `dyn` stuff for now
833impl<M: AnyFrameMeta> From<Segment<M>> for Segment<dyn AnyFrameMeta> {
834    fn from(seg: Segment<M>) -> Self {
835        let seg = ManuallyDrop::new(seg);
836        Self {
837            range: seg.range.clone(),
838            _marker: core::marker::PhantomData,
839        }
840    }
841}
842
843
844impl<M: AnyFrameMeta> TryFrom<Segment<dyn AnyFrameMeta>> for Segment<M> {
845    type Error = Segment<dyn AnyFrameMeta>;
846
847    fn try_from(seg: Segment<dyn AnyFrameMeta>) -> core::result::Result<Self, Self::Error> {
848        // SAFETY: for each page there would be a forgotten handle
849        // when creating the `Segment` object.
850        let first_frame = unsafe { Frame::<dyn AnyFrameMeta>::from_raw(seg.range.start) };
851        let first_frame = ManuallyDrop::new(first_frame);
852        if !(first_frame.dyn_meta() as &dyn core::any::Any).is::<M>() {
853            return Err(seg);
854        }
855        // Since segments are homogeneous, we can safely assume that the rest
856        // of the frames are of the same type. We just debug-check here.
857        #[cfg(debug_assertions)]
858        {
859            for paddr in seg.range.clone().step_by(PAGE_SIZE()) {
860                let frame = unsafe { Frame::<dyn AnyFrameMeta>::from_raw(paddr) };
861                let frame = ManuallyDrop::new(frame);
862                debug_assert!((frame.dyn_meta() as &dyn core::any::Any).is::<M>());
863            }
864        }
865        // SAFETY: The metadata is coerceable and the struct is transmutable.
866        Ok(unsafe { core::mem::transmute::<Segment<dyn AnyFrameMeta>, Segment<M>>(seg) })
867    }
868}
869
870impl<M: AnyUFrameMeta> From<Segment<M>> for USegment {
871    fn from(seg: Segment<M>) -> Self {
872        // SAFETY: The metadata is coerceable and the struct is transmutable.
873        unsafe { core::mem::transmute(seg) }
874    }
875}
876
877impl TryFrom<Segment<dyn AnyFrameMeta>> for USegment {
878    type Error = Segment<dyn AnyFrameMeta>;
879
880    /// Try converting a [`Segment<dyn AnyFrameMeta>`] into [`USegment`].
881    ///
882    /// If the usage of the page is not the same as the expected usage, it will
883    /// return the dynamic page itself as is.
884    fn try_from(seg: Segment<dyn AnyFrameMeta>) -> core::result::Result<Self, Self::Error> {
885        // SAFETY: for each page there would be a forgotten handle
886        // when creating the `Segment` object.
887        let first_frame = unsafe { Frame::<dyn AnyFrameMeta>::from_raw(seg.range.start) };
888        let first_frame = ManuallyDrop::new(first_frame);
889        if !first_frame.dyn_meta().is_untyped() {
890            return Err(seg);
891        }
892        // Since segments are homogeneous, we can safely assume that the rest
893        // of the frames are of the same type. We just debug-check here.
894        #[cfg(debug_assertions)]
895        {
896            for paddr in seg.range.clone().step_by(PAGE_SIZE()) {
897                let frame = unsafe { Frame::<dyn AnyFrameMeta>::from_raw(paddr) };
898                let frame = ManuallyDrop::new(frame);
899                debug_assert!(frame.dyn_meta().is_untyped());
900            }
901        }
902        // SAFETY: The metadata is coerceable and the struct is transmutable.
903        Ok(unsafe { core::mem::transmute::<Segment<dyn AnyFrameMeta>, USegment>(seg) })
904    }
905}
906*/
907} // verus!