// Abstract view of a virtual address: a page-internal offset plus one
// page-table index per paging level.
pub struct AbstractVaddr {
// The low 12 bits of the address (the offset into a 4096-byte page).
pub offset: int,
// index[i] is the 9-bit index into level i of the page table
// (i = 0 is the lowest level, i = NR_LEVELS - 1 the root).
pub index: Map<int, int>,
}Expand description
An abstract representation of a virtual address as a sequence of indices, representing the
values of the bit-fields that index into each level of the page table.
offset is the lowest 12 bits (the offset into a 4096 byte page)
index[0] is the next 9 bits (index into the 512 entries of the first level page table)
and so on up to index[NR_LEVELS-1].
Fields: `offset: int`, `index: Map<int, int>`
Implementations:
Source§impl AbstractVaddr
impl AbstractVaddr
Sourcepub open spec fn from_vaddr(va: Vaddr) -> Self
// Decompose a concrete virtual address into its abstract components.
pub open spec fn from_vaddr(va: Vaddr) -> Self
{
AbstractVaddr {
// Low 12 bits: va mod PAGE_SIZE.
offset: (va % PAGE_SIZE) as int,
// index[i] = (va >> (12 + 9*i)) mod NR_ENTRIES for 0 <= i < NR_LEVELS;
// the shift is expressed as division by pow2(12 + 9*i).
index: Map::new(
|i: int| 0 <= i < NR_LEVELS,
|i: int| ((va / pow2((12 + 9 * i) as nat) as usize) % NR_ENTRIES) as int,
),
}
}Extract the AbstractVaddr components from a concrete virtual address.
- offset = lowest 12 bits
- index[i] = bits (12 + 9i) to (12 + 9(i+1) - 1) for each level
Sourcepub proof fn from_vaddr_wf(va: Vaddr)
pub proof fn from_vaddr_wf(va: Vaddr)
ensures AbstractVaddr::from_vaddr(va).inv()
Source: pub open spec fn to_vaddr(self) -> Vaddr
// Inverse of from_vaddr: offset plus the weighted sum of all level indices.
pub open spec fn to_vaddr(self) -> Vaddr
{ (self.offset + self.to_vaddr_indices(0)) as Vaddr }Reconstruct the concrete virtual address from the AbstractVaddr components. va = offset + sum(index[i] * 2^(12 + 9*i)) for i in 0..NR_LEVELS
Sourcepub open spec fn to_vaddr_indices(self, start: int) -> int
// Recursive helper: sum of index[i] * 2^(12 + 9*i) for i in start..NR_LEVELS.
pub open spec fn to_vaddr_indices(self, start: int) -> int
{
// Base case: no levels left to accumulate.
if start >= NR_LEVELS {
0
} else {
// Weight this level's index by its bit position, then recurse upward.
self.index[start] * pow2((12 + 9 * start) as nat) as int
+ self.to_vaddr_indices(start + 1)
}
}Helper: sum of index[i] * 2^(12 + 9*i) for i in start..NR_LEVELS
Sourcepub open spec fn reflect(self, va: Vaddr) -> bool
// reflect(self, va) holds exactly when self equals from_vaddr(va).
pub open spec fn reflect(self, va: Vaddr) -> bool
{ self == Self::from_vaddr(va) }reflect(self, va) holds when self is the abstract representation of va.
Sourcepub broadcast proof fn reflect_prop(self, va: Vaddr)
pub broadcast proof fn reflect_prop(self, va: Vaddr)
requires self.inv(), self.reflect(va)
ensures #[trigger] self.to_vaddr() == va, #[trigger] Self::from_vaddr(va) == self
If self reflects va, then self.to_vaddr() == va and self == from_vaddr(va). The first ensures requires proving the round-trip property: from_vaddr(va).to_vaddr() == va.
Sourcepub proof fn from_vaddr_to_vaddr_roundtrip(va: Vaddr)
pub proof fn from_vaddr_to_vaddr_roundtrip(va: Vaddr)
Self::from_vaddr(va).to_vaddr() == va,Round-trip property: extracting and reconstructing a VA gives back the original.
Sourcepub broadcast proof fn reflect_from_vaddr(va: Vaddr)
pub broadcast proof fn reflect_from_vaddr(va: Vaddr)
#[trigger] Self::from_vaddr(va).reflect(va),#[trigger] Self::from_vaddr(va).inv(),from_vaddr(va) reflects va (by definition of reflect).
Sourcepub broadcast proof fn reflect_to_vaddr(self)
pub broadcast proof fn reflect_to_vaddr(self)
requires self.inv()
ensures #[trigger] self.reflect(self.to_vaddr())
If self.inv(), then self reflects self.to_vaddr().
Sourcepub proof fn to_vaddr_from_vaddr_roundtrip(abs: Self)
pub proof fn to_vaddr_from_vaddr_roundtrip(abs: Self)
abs.inv(),ensuresSelf::from_vaddr(abs.to_vaddr()) == abs,Inverse round-trip: to_vaddr then from_vaddr gives back the original AbstractVaddr.
Sourcepub broadcast proof fn reflect_eq(self, other: Self, va: Vaddr)
pub broadcast proof fn reflect_eq(self, other: Self, va: Vaddr)
self.reflect(va),other.reflect(va),ensures(self == other),If two AbstractVaddrs reflect the same va, they are equal.
Sourcepub open spec fn align_down(self, level: int) -> Self
// Align down to `level`: zero the page offset and the indices of all
// levels below `level` (index[0] .. index[level - 2]), keeping the rest.
pub open spec fn align_down(self, level: int) -> Self
{
if level == 1 {
// Base case: aligning to level 1 only clears the in-page offset.
AbstractVaddr {
offset: 0,
index: self.index,
}
} else {
// Recurse to clear the lower levels, then zero index[level - 2] too.
let tmp = self.align_down(level - 1);
AbstractVaddr {
index: tmp.index.insert(level - 2, 0),
..tmp
}
}
}Sourcepub proof fn align_down_inv(self, level: int)
pub proof fn align_down_inv(self, level: int)
1 <= level < NR_LEVELS,self.inv(),ensuresself.align_down(level).inv(),forall |i: int| {
level <= i < NR_LEVELS
==> #[trigger] self.index[i - 1] == self.align_down(level).index[i - 1]
},Sourcepub proof fn align_down_to_vaddr_eq_if_upper_indices_eq(self, other: Self, level: int)
pub proof fn align_down_to_vaddr_eq_if_upper_indices_eq(self, other: Self, level: int)
1 <= level <= NR_LEVELS,self.inv(),other.inv(),forall |i: int| level - 1 <= i < NR_LEVELS ==> self.index[i] == other.index[i],ensuresself.align_down(level).to_vaddr() == other.align_down(level).to_vaddr(),If two AbstractVaddrs share the same indices at levels >= level-1 (i.e., index[level-1] and above),
then aligning them down to level gives the same to_vaddr() result.
This is because align_down(level) zeroes offset and indices 0 through level-2,
so only indices level-1 and above affect the to_vaddr() result.
Sourcepub proof fn align_down_concrete(self, level: int)
pub proof fn align_down_concrete(self, level: int)
1 <= level <= NR_LEVELS,ensuresself
.align_down(level)
.reflect(
nat_align_down(self.to_vaddr() as nat, page_size(level as PagingLevel) as nat)
as Vaddr,
),Sourcepub open spec fn align_up(self, level: int) -> Self
// Align up to `level`: align down to one level lower, then advance the
// index at `level` by one via next_index (which handles carry/wrap).
pub open spec fn align_up(self, level: int) -> Self
{
let lower_aligned = self.align_down(level - 1);
lower_aligned.next_index(level)
}Sourcepub proof fn align_up_concrete(self, level: int)
pub proof fn align_up_concrete(self, level: int)
1 <= level <= NR_LEVELS,ensuresself
.align_up(level)
.reflect(
nat_align_up(self.to_vaddr() as nat, page_size(level as PagingLevel) as nat)
as Vaddr,
),Sourcepub proof fn align_diff(self, level: int)
pub proof fn align_diff(self, level: int)
1 <= level <= NR_LEVELS,ensuresnat_align_up(self.to_vaddr() as nat, page_size(level as PagingLevel) as nat)
== nat_align_down(self.to_vaddr() as nat, page_size(level as PagingLevel) as nat)
+ page_size(level as PagingLevel),nat_align_up(self.to_vaddr() as nat, page_size(level as PagingLevel) as nat) < usize::MAX,Sourcepub open spec fn next_index(self, level: int) -> Self
// Increment the index at `level` by one, propagating carries upward:
// when index[level - 1] reaches NR_ENTRIES it wraps to 0 and the next
// higher level is incremented; at the root level it wraps with no carry.
pub open spec fn next_index(self, level: int) -> Self
{
let index = self.index[level - 1];
let next_index = index + 1;
if next_index == NR_ENTRIES && level < NR_LEVELS {
// Overflow at a non-root level: wrap this index to 0 and carry up.
let next_va = Self {
offset: self.offset,
index: self.index.insert(level - 1, 0),
};
next_va.next_index(level + 1)
} else if next_index == NR_ENTRIES && level == NR_LEVELS {
// Overflow at the root: wrap to 0 with no level left to carry into.
Self {
offset: self.offset,
index: self.index.insert(level - 1, 0),
}
} else {
// No overflow: store the incremented index.
Self {
offset: self.offset,
index: self.index.insert(level - 1, next_index),
}
}
}Sourcepub open spec fn wrapped(self, start_level: int, level: int) -> bool
// Characterizes the carry behavior of next_index(start_level) as observed
// at `level`: the index at this level ends up 0 iff it was saturated
// (NR_ENTRIES - 1), in which case the condition holds recursively above.
pub open spec fn wrapped(self, start_level: int, level: int) -> bool
{
// If this level wrapped to 0, it must have been at its maximum value,
// and the wrap condition continues at the next level up (if any).
&&& self.next_index(start_level).index[level - 1] == 0
==> {
&&& self.index[level - 1] + 1 == NR_ENTRIES
&&& if level < NR_LEVELS {
self.wrapped(start_level, level + 1)
} else {
true
}
}
// If this level did not wrap, its original index was below the maximum.
&&& self.next_index(start_level).index[level - 1] != 0
==> self.index[level - 1] + 1 < NR_ENTRIES
}Sourcepub proof fn use_wrapped(self, start_level: int, level: int)
pub proof fn use_wrapped(self, start_level: int, level: int)
1 <= start_level <= level < NR_LEVELS,self.wrapped(start_level, level),self.next_index(start_level).index[level - 1] == 0,ensuresself.index[level - 1] + 1 == NR_ENTRIES,Sourcepub proof fn wrapped_unwrap(self, start_level: int, level: int)
pub proof fn wrapped_unwrap(self, start_level: int, level: int)
1 <= start_level <= level < NR_LEVELS,self.wrapped(start_level, level),self.next_index(start_level).index[level - 1] == 0,ensuresself.wrapped(start_level, level + 1),Sourcepub proof fn next_index_wrap_condition(self, level: int)
pub proof fn next_index_wrap_condition(self, level: int)
self.inv(),1 <= level <= NR_LEVELS,ensuresself.wrapped(level, level),Sourcepub open spec fn compute_vaddr(self) -> Vaddr
// Alternative computation of the concrete address using page_size weights;
// proven equal to to_vaddr() by to_vaddr_is_compute_vaddr.
pub open spec fn compute_vaddr(self) -> Vaddr
{ self.rec_compute_vaddr(0) }Computes the concrete vaddr from the abstract representation. This matches the structure: index[NR_LEVELS-1] * 2^39 + index[NR_LEVELS-2] * 2^30 + … + index[0] * 2^12 + offset
Sourcepub open spec fn rec_compute_vaddr(self, i: int) -> Vaddr
// Recursive helper: offset + sum of index[j] * page_size(j + 1) for j in i..NR_LEVELS.
pub open spec fn rec_compute_vaddr(self, i: int) -> Vaddr
{
if i >= NR_LEVELS {
// All levels consumed; only the in-page offset remains.
self.offset as Vaddr
} else {
// page_size(i + 1) is the weight of level i's index in the address.
let shift = page_size((i + 1) as PagingLevel);
(self.index[i] * shift + self.rec_compute_vaddr(i + 1)) as Vaddr
}
}Helper for computing vaddr recursively from level i upward.
Sourcepub open spec fn to_path(self, level: int) -> TreePath<NR_ENTRIES>
// Build the tree path covering levels from the root down to index[level].
pub open spec fn to_path(self, level: int) -> TreePath<NR_ENTRIES>
0 <= level < NR_LEVELS,{ TreePath(self.rec_to_path(NR_LEVELS - 1, level)) }Extracts a TreePath from this abstract vaddr, from the root down to the given level. The path has length (NR_LEVELS - level), containing indices for paging levels NR_LEVELS..level+1.
- level=0: full path of length NR_LEVELS with indices for all levels
- level=3: path of length 1 with just the root index
Path index mapping:
- path.index(0) = self.index[NR_LEVELS - 1] (root level)
- path.index(i) = self.index[NR_LEVELS - 1 - i]
- path.index(NR_LEVELS - level - 1) = self.index[level] (last entry)
Sourcepub open spec fn rec_to_path(self, abstract_level: int, bottom_level: int) -> Seq<usize>
// Builds the sequence of indices for levels bottom_level..=abstract_level.
pub open spec fn rec_to_path(self, abstract_level: int, bottom_level: int) -> Seq<usize>
{
if abstract_level < bottom_level {
// Empty range: no levels to emit.
seq![]
} else if abstract_level == bottom_level {
seq![self.index[abstract_level] as usize]
} else {
// NOTE(review): as written, this appends index[abstract_level] AFTER the
// recursive result, yielding [index[bottom_level], ..., index[abstract_level]]
// (lowest level first). The surrounding docs and to_path_index instead claim
// the root index comes first (path.index(0) == index[NR_LEVELS - 1]) —
// confirm against the verified source; the extraction may have garbled
// this definition.
self.rec_to_path(abstract_level - 1, bottom_level)
.push(self.index[abstract_level] as usize)
}
}Builds the path sequence from abstract_level down to bottom_level (both inclusive). abstract_level and bottom_level refer to the index keys in self.index (0 = lowest level, NR_LEVELS-1 = root). Returns indices in order from highest level (first in seq) to lowest level (last in seq).
Sourcepub proof fn to_path_vaddr(self, level: int)
pub proof fn to_path_vaddr(self, level: int)
self.inv(),0 <= level < NR_LEVELS,ensuresvaddr(self.to_path(level)) == self.align_down(level + 1).compute_vaddr(),The vaddr of the path from this abstract vaddr equals the aligned vaddr at that level.
Sourcepub proof fn to_vaddr_is_compute_vaddr(self)
pub proof fn to_vaddr_is_compute_vaddr(self)
self.inv(),ensuresself.to_vaddr() == self.compute_vaddr(),The concrete to_vaddr() equals the computed vaddr.
Sourcepub proof fn index_increment_adds_page_size(self, level: int)
pub proof fn index_increment_adds_page_size(self, level: int)
self.inv(),1 <= level <= NR_LEVELS,self.index[level - 1] + 1 < NR_ENTRIES,ensures(Self {
index: self.index.insert(level - 1, self.index[level - 1] + 1),
..self
})
.to_vaddr() == self.to_vaddr() + page_size(level as PagingLevel),Incrementing index[level-1] by 1 increases to_vaddr() by page_size(level).
Sourcepub proof fn to_path_len(self, level: int)
pub proof fn to_path_len(self, level: int)
0 <= level < NR_LEVELS,ensuresself.to_path(level).len() == NR_LEVELS - level,Path extracted from abstract vaddr has correct length.
Sourcepub proof fn to_path_inv(self, level: int)
pub proof fn to_path_inv(self, level: int)
self.inv(),0 <= level < NR_LEVELS,ensuresself.to_path(level).inv(),Path extracted from abstract vaddr has valid indices.
Source§impl AbstractVaddr
Connection between TreePath’s vaddr and AbstractVaddr
impl AbstractVaddr
Connection between TreePath’s vaddr and AbstractVaddr
Sourcepub proof fn path_matches_vaddr(self, path: TreePath<NR_ENTRIES>)
pub proof fn path_matches_vaddr(self, path: TreePath<NR_ENTRIES>)
self.inv(),path.inv(),path.len() <= NR_LEVELS,forall |i: int| 0 <= i < path.len() ==> path.index(i) == self.index[NR_LEVELS - 1 - i],ensuresvaddr(path)
== self.align_down((NR_LEVELS - path.len() + 1) as int).compute_vaddr()
- self.align_down((NR_LEVELS - path.len() + 1) as int).offset,If a TreePath matches this abstract vaddr’s indices at all levels covered by the path, then vaddr(path) equals the aligned compute_vaddr at the corresponding level.
Sourcepub proof fn to_path_index(self, level: int, i: int)
pub proof fn to_path_index(self, level: int, i: int)
self.inv(),0 <= level < NR_LEVELS,0 <= i < NR_LEVELS - level,ensuresself.to_path(level).index(i) == self.index[NR_LEVELS - 1 - i],The path index at position i corresponds to the abstract vaddr index at level (NR_LEVELS - 1 - i). This is the key mapping between TreePath ordering and AbstractVaddr index ordering.
Sourcepub proof fn to_path_vaddr_concrete(self, level: int)
pub proof fn to_path_vaddr_concrete(self, level: int)
self.inv(),0 <= level < NR_LEVELS,ensuresvaddr(self.to_path(level))
== nat_align_down(
self.to_vaddr() as nat,
page_size((level + 1) as PagingLevel) as nat,
) as usize,Direct connection: vaddr(to_path(level)) equals the aligned concrete vaddr. This combines to_path_vaddr with to_vaddr_is_compute_vaddr.
Sourcepub proof fn vaddr_range_from_path(self, level: int)
pub proof fn vaddr_range_from_path(self, level: int)
self.inv(),0 <= level < NR_LEVELS,ensuresvaddr(self.to_path(level)) <= self.to_vaddr()
< vaddr(self.to_path(level)) + page_size((level + 1) as PagingLevel),Key property: if we have a path that matches cur_va’s indices, then vaddr(path) + page_size(level) bounds the range containing cur_va.