pub struct VirtPtr {
pub vaddr: Vaddr,
pub range: Ghost<Range<Vaddr>>,
}

Expand description
This module implements a model of virtual memory that can be used to reason about the behavior of code that interacts with virtual memory. It interfaces with the [vm_space] module for mapping and unmapping frames.

Concrete representation of a pointer.
Fields§
§vaddr: Vaddr§range: Ghost<Range<Vaddr>>Implementations§
Source§impl VirtPtr
impl VirtPtr
Sourcepub open spec fn new_spec(vaddr: Vaddr, len: usize) -> Self
pub open spec fn new_spec(vaddr: Vaddr, len: usize) -> Self
{
Self {
vaddr,
range: Ghost(Range {
start: vaddr,
end: (vaddr + len) as usize,
}),
}
}Sourcepub open spec fn is_defined(self) -> bool
pub open spec fn is_defined(self) -> bool
{
&&& self.vaddr != 0
&&& self.range@.start <= self.vaddr <= self.range@.end
}Sourcepub open spec fn is_valid(self) -> bool
pub open spec fn is_valid(self) -> bool
{
&&& self.is_defined()
&&& self.vaddr < self.range@.end
}Sourcepub exec fn read(self, Tracked(mem): Tracked<&MemView>) -> u8
pub exec fn read(self, Tracked(mem): Tracked<&MemView>) -> u8
requires
mem.addr_transl(self.vaddr) is Some,mem
.memory[mem.addr_transl(self.vaddr).unwrap().0]
.contents[mem.addr_transl(self.vaddr).unwrap().1 as int] is Init,self.is_valid(),Sourcepub exec fn write(self, Tracked(mem): Tracked<&mut MemView>, x: u8)
pub exec fn write(self, Tracked(mem): Tracked<&mut MemView>, x: u8)
requires
old(mem).addr_transl(self.vaddr) is Some,self.is_valid(),ensures*mem == old(mem).write(self.vaddr, x),Sourcepub open spec fn add_spec(self, n: usize) -> Self
pub open spec fn add_spec(self, n: usize) -> Self
{
VirtPtr {
vaddr: (self.vaddr + n) as usize,
range: self.range,
}
}Sourcepub exec fn add(&mut self, n: usize)
pub exec fn add(&mut self, n: usize)
requires
0 <= old(self).vaddr + n < usize::MAX,ensures*self == old(self).add_spec(n),Sourcepub open spec fn read_offset_spec(self, mem: MemView, n: usize) -> u8
pub open spec fn read_offset_spec(self, mem: MemView, n: usize) -> u8
{ mem.read((self.vaddr + n) as usize).value() }Sourcepub exec fn read_offset(&self, Tracked(mem): Tracked<&MemView>, n: usize) -> u8
pub exec fn read_offset(&self, Tracked(mem): Tracked<&MemView>, n: usize) -> u8
requires
0 < self.vaddr + n < usize::MAX,self.range@.start <= self.vaddr + n < self.range@.end,mem.addr_transl((self.vaddr + n) as usize) is Some,mem
.memory[mem.addr_transl((self.vaddr + n) as usize).unwrap().0]
.contents[mem.addr_transl((self.vaddr + n) as usize).unwrap().1 as int] is Init,
Unlike `add`, this just creates a temporary pointer value and reads through it, leaving `self` unchanged.
When `self.vaddr == self.range@.start`, this acts like array index notation.
Sourcepub open spec fn write_offset_spec(self, mem: MemView, n: usize, x: u8) -> MemView
pub open spec fn write_offset_spec(self, mem: MemView, n: usize, x: u8) -> MemView
{ mem.write((self.vaddr + n) as usize, x) }Sourcepub exec fn write_offset(
&self,
verus_tmp_mem: Tracked<&mut MemView>,
n: usize,
x: u8,
)
pub exec fn write_offset( &self, verus_tmp_mem: Tracked<&mut MemView>, n: usize, x: u8, )
requires
self.inv(),self.range@.start <= self.vaddr + n < self.range@.end,old(mem).addr_transl((self.vaddr + n) as usize) is Some,Sourcepub open spec fn copy_offset_spec(src: Self, dst: Self, mem: MemView, n: usize) -> MemView
pub open spec fn copy_offset_spec(src: Self, dst: Self, mem: MemView, n: usize) -> MemView
{
let x = src.read_offset_spec(mem, n);
dst.write_offset_spec(mem, n, x)
}Sourcepub exec fn copy_offset(
src: &Self,
dst: &Self,
verus_tmp_mem: Tracked<&mut MemView>,
n: usize,
)
pub exec fn copy_offset( src: &Self, dst: &Self, verus_tmp_mem: Tracked<&mut MemView>, n: usize, )
requires
src.inv(),dst.inv(),src.range@.start <= src.vaddr + n < src.range@.end,dst.range@.start <= dst.vaddr + n < dst.range@.end,old(mem).addr_transl((src.vaddr + n) as usize) is Some,old(mem).addr_transl((dst.vaddr + n) as usize) is Some,old(mem).memory.contains_key(old(mem).addr_transl((src.vaddr + n) as usize).unwrap().0),old(mem)
.memory[old(mem).addr_transl((src.vaddr + n) as usize).unwrap().0]
.contents[old(mem).addr_transl((src.vaddr + n) as usize).unwrap().1 as int] is Init,ensures*mem == Self::copy_offset_spec(*src, *dst, *old(mem), n),Sourcepub open spec fn memcpy_spec(src: Self, dst: Self, mem: MemView, n: usize) -> MemView
pub open spec fn memcpy_spec(src: Self, dst: Self, mem: MemView, n: usize) -> MemView
{
if n == 0 {
mem
} else {
let mem = Self::copy_offset_spec(src, dst, mem, (n - 1) as usize);
Self::memcpy_spec(src, dst, mem, (n - 1) as usize)
}
}Sourcepub exec fn copy_nonoverlapping(
src: &Self,
dst: &Self,
verus_tmp_mem: Tracked<&mut MemView>,
n: usize,
)
pub exec fn copy_nonoverlapping( src: &Self, dst: &Self, verus_tmp_mem: Tracked<&mut MemView>, n: usize, )
requires
src.inv(),dst.inv(),src.range@.start <= src.vaddr,src.vaddr + n <= src.range@.end,dst.range@.start <= dst.vaddr,dst.vaddr + n < dst.range@.end,src.range@.end <= dst.range@.start || dst.range@.end <= src.range@.start,forall |i: usize| {
src.vaddr <= i < src.vaddr + n
==> {
&&& #[trigger] old(mem).addr_transl(i) is Some
&&& old(mem).memory.contains_key(old(mem).addr_transl(i).unwrap().0)
&&& old(mem)
.memory[old(mem).addr_transl(i).unwrap().0]
.contents[old(mem).addr_transl(i).unwrap().1 as int] is Init
}
},forall |i: usize| {
dst.vaddr <= i < dst.vaddr + n
==> {
&&& old(mem).addr_transl(i) is Some
}
},
ensures
    *mem == Self::memcpy_spec(*src, *dst, *old(mem), n),
Copies n bytes from src to dst in the given memory view.
The source and destination must not overlap.
copy_nonoverlapping is semantically equivalent to C’s memcpy,
but with the source and destination arguments swapped.
Sourcepub exec fn from_vaddr(vaddr: usize, len: usize) -> (r: Self)
pub exec fn from_vaddr(vaddr: usize, len: usize) -> (r: Self)
requires
vaddr != 0,0 < len <= usize::MAX - vaddr,ensuresr.is_valid(),r.range@.start == vaddr,r.range@.end == (vaddr + len) as usize,Sourcepub exec fn split(self, n: usize) -> r : (Self, Self)
pub exec fn split(self, n: usize) -> (r: (Self, Self))
requires
self.is_valid(),0 <= n <= self.range@.end - self.range@.start,self.vaddr == self.range@.start,ensures r.0.range@.start == self.range@.start,r.0.range@.end == self.range@.start + n,r.0.vaddr == self.range@.start,r.1.range@.start == self.range@.start + n,r.1.range@.end == self.range@.end,r.1.vaddr == self.range@.start + n,Executable helper to split the VirtPtr struct. This updates the ghost ranges to match a MemView::split operation.
Trait Implementations§
Auto Trait Implementations§
impl Freeze for VirtPtr
impl RefUnwindSafe for VirtPtr
impl Send for VirtPtr
impl Sync for VirtPtr
impl Unpin for VirtPtr
impl UnwindSafe for VirtPtr
Blanket Implementations§
Source§impl<T> BorrowMut<T> for Twhere
T: ?Sized,
impl<T> BorrowMut<T> for Twhere
T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more