// ostd/specs/arch/x86_64/mm.rs
1use vstd::prelude::*;
2
3use vstd_extra::prelude::*;
4
5use core::ops::Range;
6
7use super::*;
8use crate::mm::{page_prop::CachePolicy, Paddr, Vaddr};
9
10verus! {
11
/// Page size (4 KiB), the smallest mapping granule of the x86-64 MMU.
pub const PAGE_SIZE: usize = 4096;

/// The maximum number of entries in a page table node
/// (512 eight-byte entries fill one 4 KiB page).
pub const NR_ENTRIES: usize = 512;

/// The maximum level of a page table node.
/// Four levels (PML4 → PDPT → PD → PT) cover the 48-bit virtual address space.
pub const NR_LEVELS: usize = 4;

/// Parameterized maximum physical address.
// NOTE(review): 0x8000_0000 (2 GiB) is a model parameter for verification,
// not a hardware limit — confirm it matches the memory bound assumed elsewhere.
pub const MAX_PADDR: usize = 0x8000_0000;

// Number of physical page frames below `MAX_PADDR`.
pub const MAX_NR_PAGES: u64 = (MAX_PADDR / PAGE_SIZE) as u64;

/// The maximum virtual address of user space (non inclusive).
// The highest user page (just below the 47-bit canonical boundary) is
// excluded by subtracting `PAGE_SIZE`.
pub const MAX_USERSPACE_VADDR: Vaddr = 0x0000_8000_0000_0000_usize - PAGE_SIZE;

/// The kernel address space.
/// These are the higher-half canonical addresses defined by most 48-bit-wide
/// architectures.
// NOTE(review): the range ends at ..ffff_0000 rather than ..ffff_ffff,
// presumably reserving the topmost pages — confirm against the kernel layout.
pub const KERNEL_VADDR_RANGE: Range<Vaddr> = 0xffff_8000_0000_0000_usize..0xffff_ffff_ffff_0000_usize;
33
/// Uninterpreted spec-level view of the physical address of the currently
/// active root page table (on x86-64, presumably the value held in CR3 —
/// the executable counterpart is a stub, so this cannot be confirmed here).
pub uninterp spec fn current_page_table_paddr_spec() -> Paddr;
35
/// Activates the given level 4 page table.
/// The cache policy of the root page table node is controlled by `root_pt_cache`.
///
/// # Safety
///
/// Changing the level 4 page table is unsafe, because it's possible to violate memory safety by
/// changing the page mapping.
// `external_body` hides the body from Verus; the runtime implementation
// (the actual page-table switch) is supplied outside this spec file.
#[verifier::external_body]
pub unsafe fn activate_page_table(root_paddr: Paddr, root_pt_cache: CachePolicy) {
    // Verification stub — intentionally unimplemented here.
    unimplemented!()
}
47
/// Returns the physical address of the currently active root page table.
///
/// `when_used_as_spec` lets specifications treat calls to this function as
/// the uninterpreted `current_page_table_paddr_spec`, and the `returns`
/// clause ties the executable result to that same spec value.
#[verifier::external_body]
#[verifier::when_used_as_spec(current_page_table_paddr_spec)]
#[verus_spec(
    returns
        current_page_table_paddr_spec(),
)]
pub fn current_page_table_paddr() -> Paddr {
    // Verification stub — the real read of the page-table register is
    // provided outside this spec file.
    unimplemented!()
}
57
58} // verus!
59verus! {
60
61/// Flush any TLB entry that contains the map of the given virtual address.
62///
63/// This flush performs regardless of the global-page bit. So it can flush both global
64/// and non-global entries.
65#[verifier::external_body]
66pub fn tlb_flush_addr(vaddr: Vaddr) {
67 // tlb::flush(VirtAddr::new(vaddr as u64));
68 unimplemented!()
69}
70
71/// Flush any TLB entry that intersects with the given address range.
72#[verifier::external_body]
73pub fn tlb_flush_addr_range(range: &Range<Vaddr>) {
74 for vaddr in range.clone().step_by(PAGE_SIZE) {
75 tlb_flush_addr(vaddr);
76 }
77}
78
/// Flush all TLB entries except for the global-page entries.
// Verification stub: the intended implementation (kept for reference below)
// would perform a full non-global TLB flush.
#[verifier::external_body]
pub fn tlb_flush_all_excluding_global() {
    // tlb::flush_all();
    unimplemented!()
}
85
/// Flush all TLB entries, including global-page entries.
// Verification stub. The commented-out code below documents the intended
// implementation: toggling CR4.PGE off and back on invalidates the whole
// TLB, global entries included.
#[verifier::external_body]
pub fn tlb_flush_all_including_global() {
    // SAFETY: updates to CR4 here only change the global-page bit, the side effect
    // is only to invalidate the TLB, which doesn't affect the memory safety.
    // unsafe {
    //     // To invalidate all entries, including global-page
    //     // entries, disable global-page extensions (CR4.PGE=0).
    //     x86_64::registers::control::Cr4::update(|cr4| {
    //         *cr4 -= x86_64::registers::control::Cr4Flags::PAGE_GLOBAL;
    //     });
    //     x86_64::registers::control::Cr4::update(|cr4| {
    //         *cr4 |= x86_64::registers::control::Cr4Flags::PAGE_GLOBAL;
    //     });
    // }
    unimplemented!()
}
103
104} // verus!