// ostd/mm/tlb.rs
1// SPDX-License-Identifier: MPL-2.0
2//! TLB flush operations.
3use vstd::prelude::*;
4
5use vstd_extra::ownership::*;
6
7use alloc::vec::Vec;
8use core::{
9 ops::Range,
10 sync::atomic::{AtomicBool, Ordering},
11};
12
13use super::{
14 frame::{meta::AnyFrameMeta, Frame},
15 Vaddr, PAGE_SIZE,
16};
17
18use crate::specs::mm::cpu::{AtomicCpuSet, CpuSet, PinCurrentCpu};
19use crate::specs::mm::page_table::Mapping;
20use crate::specs::mm::tlb::TlbModel;
21
22/*use crate::{
23 arch::irq,
24 cpu::{AtomicCpuSet, CpuSet, PinCurrentCpu},
25 cpu_local,
26 sync::{LocalIrqDisabled, SpinLock},
27};*/
28
29#[verus_verify]
30verus! {
31
/// A TLB flusher that is aware of which CPUs are needed to be flushed.
///
/// The flusher needs to stick to the current CPU.
pub struct TlbFlusher<'a /*, G: PinCurrentCpu*/ > {
    /// The CPUs whose TLBs may hold stale entries; loaded when the pending
    /// requests are dispatched.
    target_cpus: &'a AtomicCpuSet,
    /// CPUs that have been sent a flush request not yet acknowledged.
    have_unsynced_flush: CpuSet,
    /// Flush requests issued but not yet dispatched.
    ops_stack: OpsStack,
    //_pin_current: G,
}
41
impl<'a /*, G: PinCurrentCpu*/ > TlbFlusher<'a /*, G*/ > {
    /// Creates a new TLB flusher with the specified CPUs to be flushed.
    ///
    /// The target CPUs should be a reference to an [`AtomicCpuSet`] that will
    /// be loaded upon [`Self::dispatch_tlb_flush`].
    ///
    /// The flusher needs to stick to the current CPU. So please provide a
    /// guard that implements [`PinCurrentCpu`].
    pub fn new(target_cpus: &'a AtomicCpuSet /*, pin_current_guard: G*/ ) -> Self {
        Self {
            target_cpus,
            // Nothing has been dispatched yet, so no CPU awaits an ACK.
            have_unsynced_flush: CpuSet::new_empty(),
            ops_stack: OpsStack::new(),
            //_pin_current: pin_current_guard,
        }
    }

    /// Issues a pending TLB flush request.
    ///
    /// This function does not guarantee to flush the TLB entries on either
    /// this CPU or remote CPUs. The flush requests are only performed when
    /// [`Self::dispatch_tlb_flush`] is called.
    ///
    /// The ghost `model` parameter tracks the abstract TLB state; the spec
    /// records the issued operation via `issue_tlb_flush_spec`.
    #[verus_spec(
        with Tracked(model): Tracked<&mut TlbModel>
        ensures
            *model == old(model).issue_tlb_flush_spec(op),
            model.inv(),
    )]
    #[verifier::external_body]
    pub fn issue_tlb_flush<M: AnyFrameMeta>(&mut self, op: TlbFlushOp) {
        // `M` only fixes the frame type for the absent keep-alive page.
        self.ops_stack.push(op, None::<Frame<M>>);
    }

    /// Issues a TLB flush request that must happen before dropping the page.
    ///
    /// If we need to remove a mapped page from the page table, we can only
    /// recycle the page after all the relevant TLB entries in all CPUs are
    /// flushed. Otherwise if the page is recycled for other purposes, the user
    /// space program can still access the page through the TLB entries. This
    /// method is designed to be used in such cases.
    #[verus_spec(r =>
        with Tracked(model): Tracked<&mut TlbModel>
        ensures
            *model == old(model).issue_tlb_flush_spec(op),
            model.inv(),
    )]
    #[verifier::external_body]
    pub fn issue_tlb_flush_with<M: AnyFrameMeta>(
        &mut self,
        op: TlbFlushOp,
        drop_after_flush: Frame<M>,
    ) {
        self.ops_stack.push(op, Some(drop_after_flush));
    }

    /// Dispatches all the pending TLB flush requests.
    ///
    /// All previous pending requests issued by [`Self::issue_tlb_flush`] or
    /// [`Self::issue_tlb_flush_with`] starts to be processed after this
    /// function. But it may not be synchronous. Upon the return of this
    /// function, the TLB entries may not be coherent.
    ///
    /// Currently stubbed out pending the port of IRQ guards, CPU-local
    /// queues and IPIs; the intended implementation is kept below.
    #[verifier::external_body]
    #[verus_spec(r =>
        with Tracked(model): Tracked<&mut TlbModel>
        requires
            old(model).inv(),
        ensures
            *model == old(model).dispatch_tlb_flush_spec(),
            model.inv(),
    )]
    pub fn dispatch_tlb_flush(&mut self) {
        // NOTE(review): the commented code below performs a *load* with
        // `Ordering::Release` — confirm `AtomicCpuSet::load`'s ordering
        // semantics before enabling it.
        unimplemented!()
        /*let irq_guard = crate::trap::irq::disable_local();

        if self.ops_stack.is_empty() {
            return;
        }

        // `Release` to make sure our modification on the PT is visible to CPUs
        // that are going to activate the PT.
        let mut target_cpus = self.target_cpus.load(Ordering::Release);

        let cur_cpu = irq_guard.current_cpu();
        let mut need_flush_on_self = false;

        if target_cpus.contains(cur_cpu) {
            target_cpus.remove(cur_cpu);
            need_flush_on_self = true;
        }

        for cpu in target_cpus.iter() {
            {
                let mut flush_ops = FLUSH_OPS.get_on_cpu(cpu).lock();
                flush_ops.push_from(&self.ops_stack);

                // Clear ACK before dropping the lock to avoid false ACKs.
                ACK_REMOTE_FLUSH
                    .get_on_cpu(cpu)
                    .store(false, Ordering::Relaxed);
            }
            self.have_unsynced_flush.add(cpu);
        }

        crate::smp::inter_processor_call(&target_cpus, do_remote_flush);

        // Flush ourselves after sending all IPIs to save some time.
        if need_flush_on_self {
            self.ops_stack.flush_all();
        } else {
            self.ops_stack.clear_without_flush();
        }*/
    }

    /// Waits for all the previous TLB flush requests to be completed.
    ///
    /// After this function, all TLB entries corresponding to previous
    /// dispatched TLB flush requests are guaranteed to be coherent.
    ///
    /// The TLB flush requests are issued with [`Self::issue_tlb_flush`] and
    /// dispatched with [`Self::dispatch_tlb_flush`]. This method will not
    /// dispatch any issued requests so it will not guarantee TLB coherence
    /// of requests that are not dispatched.
    ///
    /// Currently stubbed out pending the port of the CPU-local ACK flags;
    /// the intended implementation is kept below.
    ///
    /// # Panics
    ///
    /// This method panics if the IRQs are disabled. Since the remote flush are
    /// processed in IRQs, two CPUs may deadlock if they are waiting for each
    /// other's TLB coherence.
    #[verifier::external_body]
    pub fn sync_tlb_flush(&mut self) {
        unimplemented!()
        /*
        assert!(
            irq::is_local_enabled(),
            "Waiting for remote flush with IRQs disabled"
        );

        for cpu in self.have_unsynced_flush.iter() {
            while !ACK_REMOTE_FLUSH.get_on_cpu(cpu).load(Ordering::Relaxed) {
                core::hint::spin_loop();
            }
        }

        self.have_unsynced_flush = CpuSet::new_empty();
        */
    }
}
189
/// The operation to flush TLB entries.
///
/// Requests are collected in an [`OpsStack`], which collapses them into a
/// single [`TlbFlushOp::All`] when a range or count threshold is exceeded.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum TlbFlushOp {
    /// Flush all TLB entries except for the global entries.
    All,
    /// Flush the TLB entry for the specified virtual address.
    Address(Vaddr),
    /// Flush the TLB entries for the specified virtual address range.
    Range(Range<Vaddr>),
}
200
impl TlbFlushOp {
    /// Performs the TLB flush operation on the current CPU.
    ///
    /// Currently stubbed out pending the port of the arch-specific flush
    /// primitives; the intended implementation is kept below.
    #[verifier::external_body]
    pub fn perform_on_current(&self) {
        unimplemented!()
        /*use crate::arch::mm::{
            tlb_flush_addr, tlb_flush_addr_range, tlb_flush_all_excluding_global,
        };
        match self {
            TlbFlushOp::All => tlb_flush_all_excluding_global(),
            TlbFlushOp::Address(addr) => tlb_flush_addr(*addr),
            TlbFlushOp::Range(range) => tlb_flush_addr_range(range),
        }*/
    }

    /// Replaces a `Range` flush that covers more than
    /// `FLUSH_ALL_RANGE_THRESHOLD` bytes with a full flush, on the
    /// assumption that per-page invalidation of such a range costs more
    /// than wiping all non-global entries. Other variants pass through.
    #[verifier::external_body]
    fn optimize_for_large_range(self) -> Self {
        match self {
            TlbFlushOp::Range(range) => {
                if range.len() > FLUSH_ALL_RANGE_THRESHOLD {
                    TlbFlushOp::All
                } else {
                    TlbFlushOp::Range(range)
                }
            },
            _ => self,
        }
    }
}
230
231// The queues of pending requests on each CPU.
232/*cpu_local! {
233 static FLUSH_OPS: SpinLock<OpsStack, LocalIrqDisabled> = SpinLock::new(OpsStack::new());
234 /// Whether this CPU finishes the last remote flush request.
235 static ACK_REMOTE_FLUSH: AtomicBool = AtomicBool::new(true);
236}*/
237
/// Handles a TLB flush request sent to the current CPU by another CPU.
///
/// Intended to run as the callback of `inter_processor_call` issued in
/// `dispatch_tlb_flush`. Currently stubbed out pending the port of the
/// CPU-local request queues (see the commented `cpu_local!` block above);
/// the intended implementation is kept below.
#[verifier::external_body]
fn do_remote_flush() {
    unimplemented!()
    /*
    // No races because we are in IRQs or have disabled preemption.
    let current_cpu = crate::cpu::CpuId::current_racy();

    let mut new_op_queue = OpsStack::new();
    {
        let mut op_queue = FLUSH_OPS.get_on_cpu(current_cpu).lock();

        core::mem::swap(&mut *op_queue, &mut new_op_queue);

        // ACK before dropping the lock so that we won't miss flush requests.
        ACK_REMOTE_FLUSH
            .get_on_cpu(current_cpu)
            .store(true, Ordering::Relaxed);
    }
    // Unlock the locks quickly to avoid contention. ACK before flushing is
    // fine since we cannot switch back to userspace now.
    new_op_queue.flush_all();
    */
}
261
/// If a TLB flushing request exceeds this threshold, we flush all.
///
/// 32 pages; beyond this, per-page invalidation is assumed to cost more
/// than a full non-global flush.
const FLUSH_ALL_RANGE_THRESHOLD: usize = 32 * PAGE_SIZE;

/// If the number of pending requests exceeds this threshold, we flush all the
/// TLB entries instead of flushing them one by one.
///
/// Also the fixed capacity of `OpsStack::ops`.
const FLUSH_ALL_OPS_THRESHOLD: usize = 32;
268
/// A fixed-capacity collection of pending TLB flush operations.
struct OpsStack {
    /// Pending operations; only the first `size` slots are `Some`.
    ops: [Option<TlbFlushOp>; FLUSH_ALL_OPS_THRESHOLD],
    /// Whether the pending work has degenerated into one full flush (set
    /// when the capacity is exceeded or an `All` request is pushed).
    need_flush_all: bool,
    /// The number of valid operations stored in `ops`.
    size: usize,
    // page_keeper: Vec<Frame<dyn AnyFrameMeta>>,
}
275
276impl OpsStack {
277 #[verifier::external_body]
278 const fn new() -> Self {
279 Self {
280 ops: [const { None };FLUSH_ALL_OPS_THRESHOLD],
281 need_flush_all: false,
282 size: 0,
283 // page_keeper: Vec::new(),
284 }
285 }
286
287 #[verifier::external_body]
288 fn is_empty(&self) -> bool {
289 !self.need_flush_all && self.size == 0
290 }
291
292 #[verifier::external_body]
293 fn push<M: AnyFrameMeta>(&mut self, op: TlbFlushOp, drop_after_flush: Option<Frame<M>>) {
294 if let Some(frame) = drop_after_flush {
295 // self.page_keeper.push(frame);
296 }
297 if self.need_flush_all {
298 return ;
299 }
300 let op = op.optimize_for_large_range();
301 if op == TlbFlushOp::All || self.size >= FLUSH_ALL_OPS_THRESHOLD {
302 self.need_flush_all = true;
303 self.size = 0;
304 return ;
305 }
306 self.ops[self.size] = Some(op);
307 self.size += 1;
308 }
309
310 #[verifier::external_body]
311 fn push_from(&mut self, other: &OpsStack) {
312 // self.page_keeper.extend(other.page_keeper.iter().cloned());
313 if self.need_flush_all {
314 return ;
315 }
316 if other.need_flush_all || self.size + other.size >= FLUSH_ALL_OPS_THRESHOLD {
317 self.need_flush_all = true;
318 self.size = 0;
319 return ;
320 }
321 for i in 0..other.size {
322 self.ops[self.size] = other.ops[i].clone();
323 self.size += 1;
324 }
325 }
326
327 #[verifier::external_body]
328 fn flush_all(&mut self) {
329 if self.need_flush_all {
330 unimplemented!() /*crate::arch::mm::tlb_flush_all_excluding_global();*/
331
332 } else {
333 for i in 0..self.size {
334 if let Some(op) = &self.ops[i] {
335 op.perform_on_current();
336 }
337 }
338 }
339
340 self.clear_without_flush();
341 }
342
343 #[verifier::external_body]
344 fn clear_without_flush(&mut self) {
345 self.need_flush_all = false;
346 self.size = 0;
347 // self.page_keeper.clear();
348 }
349}
350
351} // verus!