// ostd/task/mod.rs

1// SPDX-License-Identifier: MPL-2.0
2//! Tasks are the unit of code execution.
3use vstd::prelude::*;
4
5/* pub mod atomic_mode;
6mod kernel_stack; */
7mod preempt;
8/* mod processor;
9pub mod scheduler;
10mod utils;
11
12use core::{
13    any::Any,
14    borrow::Borrow,
15    cell::{Cell, SyncUnsafeCell},
16    ops::Deref,
17    ptr::NonNull,
18    sync::atomic::AtomicBool,
19};
20
21use kernel_stack::KernelStack;
22use processor::current_task;
23use spin::Once;
24use utils::ForceSync;
25*/
26pub use self::{
27    preempt::{disable_preempt, DisabledPreemptGuard},
28    /* scheduler::info::{AtomicCpuId, TaskScheduleInfo}, */
29};
30/*
31pub(crate) use crate::arch::task::{context_switch, TaskContext};
32use crate::{cpu::context::UserContext, prelude::*, trap::in_interrupt_context};
33
34static PRE_SCHEDULE_HANDLER: Once<fn()> = Once::new();
35
36static POST_SCHEDULE_HANDLER: Once<fn()> = Once::new();
37
38/// Injects a handler to be executed before scheduling.
39pub fn inject_pre_schedule_handler(handler: fn()) {
40    PRE_SCHEDULE_HANDLER.call_once(|| handler);
41}
42
43/// Injects a handler to be executed after scheduling.
44pub fn inject_post_schedule_handler(handler: fn()) {
45    POST_SCHEDULE_HANDLER.call_once(|| handler);
46}*/
47
48/// A task that executes a function to the end.
49///
50/// Each task is associated with per-task data and an optional user space.
51/// If having a user space, the task can switch to the user space to
52/// execute user code. Multiple tasks can share a single user space.
53#[verus_verify]
54#[derive(Debug)]
55pub struct Task {
56    /*#[expect(clippy::type_complexity)]
57    func: ForceSync<Cell<Option<Box<dyn FnOnce() + Send>>>>,
58
59    data: Box<dyn Any + Send + Sync>,
60    local_data: ForceSync<Box<dyn Any + Send>>,
61
62    user_ctx: Option<Arc<UserContext>>,
63    ctx: SyncUnsafeCell<TaskContext>,
64    /// kernel stack, note that the top is SyscallFrame/TrapFrame
65    kstack: KernelStack,
66
67    /// If we have switched this task to a CPU.
68    ///
69    /// This is to enforce not context switching to an already running task.
70    /// See [`processor::switch_to_task`] for more details.
71    switched_to_cpu: AtomicBool,
72
73    schedule_info: TaskScheduleInfo,*/
74}
/*
impl Task {
    /// Gets the current task.
    ///
    /// It returns `None` if the function is called in the bootstrap context.
    pub fn current() -> Option<CurrentTask> {
        let current_task = current_task()?;

        // SAFETY: `current_task` is the current task.
        Some(unsafe { CurrentTask::new(current_task) })
    }

    pub(super) fn ctx(&self) -> &SyncUnsafeCell<TaskContext> {
        &self.ctx
    }

    /// Sets thread-local storage pointer.
    pub fn set_tls_pointer(&self, tls: usize) {
        let ctx_ptr = self.ctx.get();

        // SAFETY: it's safe to set user tls pointer in kernel context.
        unsafe { (*ctx_ptr).set_tls_pointer(tls) }
    }

    /// Gets thread-local storage pointer.
    pub fn tls_pointer(&self) -> usize {
        let ctx_ptr = self.ctx.get();

        // SAFETY: it's safe to get user tls pointer in kernel context.
        unsafe { (*ctx_ptr).tls_pointer() }
    }

    /// Yields execution so that another task may be scheduled.
    ///
    /// Note that this method cannot be simply named "yield" as the name is
    /// a Rust keyword.
    #[track_caller]
    pub fn yield_now() {
        scheduler::yield_now()
    }

    /// Kicks the task scheduler to run the task.
    ///
    /// BUG: This method highly depends on the current scheduling policy.
    #[track_caller]
    pub fn run(self: &Arc<Self>) {
        scheduler::run_new_task(self.clone());
    }

    /// Returns the task data.
    pub fn data(&self) -> &Box<dyn Any + Send + Sync> {
        &self.data
    }

    /// Gets the attached scheduling information.
    pub fn schedule_info(&self) -> &TaskScheduleInfo {
        &self.schedule_info
    }

    /// Returns the user context of this task, if it has.
    pub fn user_ctx(&self) -> Option<&Arc<UserContext>> {
        if self.user_ctx.is_some() {
            Some(self.user_ctx.as_ref().unwrap())
        } else {
            None
        }
    }
}

/// Options to create or spawn a new task.
pub struct TaskOptions {
    func: Option<Box<dyn FnOnce() + Send>>,
    data: Option<Box<dyn Any + Send + Sync>>,
    local_data: Option<Box<dyn Any + Send>>,
    user_ctx: Option<Arc<UserContext>>,
}

impl TaskOptions {
    /// Creates a set of options for a task.
    pub fn new<F>(func: F) -> Self
    where
        F: FnOnce() + Send + 'static,
    {
        Self {
            func: Some(Box::new(func)),
            data: None,
            local_data: None,
            user_ctx: None,
        }
    }

    /// Sets the function that represents the entry point of the task.
    pub fn func<F>(mut self, func: F) -> Self
    where
        F: Fn() + Send + 'static,
    {
        self.func = Some(Box::new(func));
        self
    }

    /// Sets the data associated with the task.
    pub fn data<T>(mut self, data: T) -> Self
    where
        T: Any + Send + Sync,
    {
        self.data = Some(Box::new(data));
        self
    }

    /// Sets the local data associated with the task.
    pub fn local_data<T>(mut self, data: T) -> Self
    where
        T: Any + Send,
    {
        self.local_data = Some(Box::new(data));
        self
    }

    /// Sets the user context associated with the task.
    pub fn user_ctx(mut self, user_ctx: Option<Arc<UserContext>>) -> Self {
        self.user_ctx = user_ctx;
        self
    }

    /// Builds a new task without running it immediately.
    pub fn build(self) -> Result<Task> {
        /// All tasks enter this function when first scheduled.
        /// It is meant to execute the `func` stored in the `Task`.
        extern "C" fn kernel_task_entry() -> ! {
            // SAFETY: The new task is switched on a CPU for the first time, `after_switching_to`
            // hasn't been called yet.
            unsafe { processor::after_switching_to() };

            let current_task = Task::current()
                .expect("no current task, it should have current task in kernel task entry");

            // SAFETY: The `func` field will only be accessed by the current task in the task
            // context, so the data won't be accessed concurrently.
            let task_func = unsafe { current_task.func.get() };
            let task_func = task_func
                .take()
                .expect("task function is `None` when trying to run");
            task_func();

            // Manually drop all the on-stack variables to prevent memory leakage!
            // This is needed because `scheduler::exit_current()` will never return.
            //
            // However, `current_task` _borrows_ the current task without holding
            // an extra reference count. So we do nothing here.

            scheduler::exit_current();
        }

        let kstack = KernelStack::new_with_guard_page()?;

        let mut ctx = SyncUnsafeCell::new(TaskContext::default());
        if let Some(user_ctx) = self.user_ctx.as_ref() {
            ctx.get_mut().set_tls_pointer(user_ctx.tls_pointer());
        };
        ctx.get_mut()
            .set_instruction_pointer(kernel_task_entry as usize);
        // We should reserve space for the return address in the stack, otherwise
        // we will write across the page boundary due to the implementation of
        // the context switch.
        //
        // According to the System V AMD64 ABI, the stack pointer should be aligned
        // to at least 16 bytes. And a larger alignment is needed if larger arguments
        // are passed to the function. The `kernel_task_entry` function does not
        // have any arguments, so we only need to align the stack pointer to 16 bytes.
        ctx.get_mut().set_stack_pointer(kstack.end_vaddr() - 16);

        let new_task = Task {
            func: ForceSync::new(Cell::new(self.func)),
            data: self.data.unwrap_or_else(|| Box::new(())),
            local_data: ForceSync::new(self.local_data.unwrap_or_else(|| Box::new(()))),
            user_ctx: self.user_ctx,
            ctx,
            kstack,
            schedule_info: TaskScheduleInfo {
                cpu: AtomicCpuId::default(),
            },
            switched_to_cpu: AtomicBool::new(false),
        };

        Ok(new_task)
    }

    /// Builds a new task and runs it immediately.
    #[track_caller]
    pub fn spawn(self) -> Result<Arc<Task>> {
        let task = Arc::new(self.build()?);
        task.run();
        Ok(task)
    }
}

/// The current task.
///
/// This type is not `Send`, so it cannot outlive the current task.
///
/// This type is also not `Sync`, so it can provide access to the local data of the current task.
#[derive(Debug)]
pub struct CurrentTask(NonNull<Task>);

// The intern `NonNull<Task>` contained by `CurrentTask` implies that `CurrentTask` is `!Send` and
// `!Sync`. But it is still good to do this explicitly because these properties are key for
// soundness.
impl !Send for CurrentTask {}
impl !Sync for CurrentTask {}

impl CurrentTask {
    /// # Safety
    ///
    /// The caller must ensure that `task` is the current task.
    unsafe fn new(task: NonNull<Task>) -> Self {
        Self(task)
    }

    /// Returns the local data of the current task.
    ///
    /// Note that the local data is only accessible in the task context. Although there is a
    /// current task in the non-task context (e.g. IRQ handlers), access to the local data is
    /// forbidden as it may cause soundness problems.
    ///
    /// # Panics
    ///
    /// This method will panic if called in a non-task context.
    pub fn local_data(&self) -> &(dyn Any + Send) {
        assert!(!in_interrupt_context());

        let local_data = &self.local_data;

        // SAFETY: The `local_data` field will only be accessed by the current task in the task
        // context, so the data won't be accessed concurrently.
        &**unsafe { local_data.get() }
    }

    /// Returns a cloned `Arc<Task>`.
    pub fn cloned(&self) -> Arc<Task> {
        let ptr = self.0.as_ptr();

        // SAFETY: The current task is always a valid task and it is always contained in an `Arc`.
        unsafe { Arc::increment_strong_count(ptr) };

        // SAFETY: We've increased the reference count in the current `Arc<Task>` above.
        unsafe { Arc::from_raw(ptr) }
    }
}

impl Deref for CurrentTask {
    type Target = Task;

    fn deref(&self) -> &Self::Target {
        // SAFETY: The current task is always a valid task.
        unsafe { self.0.as_ref() }
    }
}

impl AsRef<Task> for CurrentTask {
    fn as_ref(&self) -> &Task {
        self
    }
}

impl Borrow<Task> for CurrentTask {
    fn borrow(&self) -> &Task {
        self
    }
}

/// Trait for manipulating the task context.
pub trait TaskContextApi {
    /// Sets instruction pointer
    fn set_instruction_pointer(&mut self, ip: usize);

    /// Gets instruction pointer
    fn instruction_pointer(&self) -> usize;

    /// Sets stack pointer
    fn set_stack_pointer(&mut self, sp: usize);

    /// Gets stack pointer
    fn stack_pointer(&self) -> usize;
}

#[cfg(ktest)]
mod test {
    use crate::prelude::*;

    #[ktest]
    fn create_task() {
        #[expect(clippy::eq_op)]
        let task = || {
            assert_eq!(1, 1);
        };
        let task = Arc::new(
            crate::task::TaskOptions::new(task)
                .data(())
                .build()
                .unwrap(),
        );
        task.run();
    }

    #[ktest]
    fn spawn_task() {
        #[expect(clippy::eq_op)]
        let task = || {
            assert_eq!(1, 1);
        };
        let _ = crate::task::TaskOptions::new(task).data(()).spawn();
    }
}
*/