ostd/task/mod.rs
// SPDX-License-Identifier: MPL-2.0
//! Tasks are the unit of code execution.
use vstd::prelude::*;

/* pub mod atomic_mode;
mod kernel_stack;
mod preempt;
mod processor;
pub mod scheduler;
mod utils;

use core::{
    any::Any,
    borrow::Borrow,
    cell::{Cell, SyncUnsafeCell},
    ops::Deref,
    ptr::NonNull,
    sync::atomic::AtomicBool,
};

use kernel_stack::KernelStack;
use processor::current_task;
use spin::Once;
use utils::ForceSync;

pub use self::{
    preempt::{disable_preempt, halt_cpu, DisabledPreemptGuard},
    scheduler::info::{AtomicCpuId, TaskScheduleInfo},
};
pub(crate) use crate::arch::task::{context_switch, TaskContext};
use crate::{cpu::context::UserContext, prelude::*, trap::in_interrupt_context};

static PRE_SCHEDULE_HANDLER: Once<fn()> = Once::new();

static POST_SCHEDULE_HANDLER: Once<fn()> = Once::new();

/// Injects a handler to be executed before scheduling.
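///
/// A minimal usage sketch (hedged; `count_schedules` is a hypothetical
/// handler, and a handler can be injected only once since a [`Once`] backs it):
///
/// ```ignore
/// fn count_schedules() {
///     // E.g., update a statistics counter before each scheduling decision.
/// }
/// inject_pre_schedule_handler(count_schedules);
/// ```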
pub fn inject_pre_schedule_handler(handler: fn()) {
    PRE_SCHEDULE_HANDLER.call_once(|| handler);
}

/// Injects a handler to be executed after scheduling.
pub fn inject_post_schedule_handler(handler: fn()) {
    POST_SCHEDULE_HANDLER.call_once(|| handler);
}*/

/// A task that executes a function to the end.
///
/// Each task is associated with per-task data and an optional user space.
/// If a task has a user space, it can switch to the user space to
/// execute user code. Multiple tasks can share a single user space.
#[verus_verify]
#[derive(Debug)]
pub struct Task {
    /*#[expect(clippy::type_complexity)]
    func: ForceSync<Cell<Option<Box<dyn FnOnce() + Send>>>>,

    data: Box<dyn Any + Send + Sync>,
    local_data: ForceSync<Box<dyn Any + Send>>,

    user_ctx: Option<Arc<UserContext>>,
    ctx: SyncUnsafeCell<TaskContext>,
    /// The kernel stack. Note that the top of the stack holds a `SyscallFrame`/`TrapFrame`.
    kstack: KernelStack,

    /// Whether this task has been switched to on a CPU.
    ///
    /// This is to enforce not context switching to an already running task.
    /// See [`processor::switch_to_task`] for more details.
    switched_to_cpu: AtomicBool,

    schedule_info: TaskScheduleInfo,*/
}
/*
impl Task {
    /// Gets the current task.
    ///
    /// It returns `None` if the function is called in the bootstrap context.
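    ///
    /// A minimal sketch of inspecting the current task (hedged):
    ///
    /// ```ignore
    /// if let Some(task) = Task::current() {
    ///     let _data = task.data();
    /// }
    /// ```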
    pub fn current() -> Option<CurrentTask> {
        let current_task = current_task()?;

        // SAFETY: `current_task` is the current task.
        Some(unsafe { CurrentTask::new(current_task) })
    }

    pub(super) fn ctx(&self) -> &SyncUnsafeCell<TaskContext> {
        &self.ctx
    }

    /// Sets the thread-local storage pointer.
    pub fn set_tls_pointer(&self, tls: usize) {
        let ctx_ptr = self.ctx.get();

        // SAFETY: It is safe to set the user TLS pointer in the kernel context.
        unsafe { (*ctx_ptr).set_tls_pointer(tls) }
    }

    /// Gets the thread-local storage pointer.
    pub fn tls_pointer(&self) -> usize {
        let ctx_ptr = self.ctx.get();

        // SAFETY: It is safe to get the user TLS pointer in the kernel context.
        unsafe { (*ctx_ptr).tls_pointer() }
    }

    /// Yields execution so that another task may be scheduled.
    ///
    /// Note that this method cannot simply be named "yield" because the name is
    /// a Rust keyword.
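    ///
    /// A minimal sketch (hedged; `condition_met` is a hypothetical predicate):
    ///
    /// ```ignore
    /// while !condition_met() {
    ///     Task::yield_now(); // Let other runnable tasks make progress.
    /// }
    /// ```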
    #[track_caller]
    pub fn yield_now() {
        scheduler::yield_now()
    }

    /// Kicks the task scheduler to run the task.
    ///
    /// BUG: This method highly depends on the current scheduling policy.
    #[track_caller]
    pub fn run(self: &Arc<Self>) {
        scheduler::run_new_task(self.clone());
    }

    /// Returns the task data.
    pub fn data(&self) -> &Box<dyn Any + Send + Sync> {
        &self.data
    }

    /// Returns the attached scheduling information.
    pub fn schedule_info(&self) -> &TaskScheduleInfo {
        &self.schedule_info
    }

    /// Returns the user context of this task, if it has one.
    pub fn user_ctx(&self) -> Option<&Arc<UserContext>> {
        self.user_ctx.as_ref()
    }
}

/// Options to create or spawn a new task.
pub struct TaskOptions {
    func: Option<Box<dyn FnOnce() + Send>>,
    data: Option<Box<dyn Any + Send + Sync>>,
    local_data: Option<Box<dyn Any + Send>>,
    user_ctx: Option<Arc<UserContext>>,
}

impl TaskOptions {
    /// Creates a set of options for a task.
    pub fn new<F>(func: F) -> Self
    where
        F: FnOnce() + Send + 'static,
    {
        Self {
            func: Some(Box::new(func)),
            data: None,
            local_data: None,
            user_ctx: None,
        }
    }

    /// Sets the function that represents the entry point of the task.
    pub fn func<F>(mut self, func: F) -> Self
    where
        F: Fn() + Send + 'static,
    {
        self.func = Some(Box::new(func));
        self
    }

    /// Sets the data associated with the task.
    pub fn data<T>(mut self, data: T) -> Self
    where
        T: Any + Send + Sync,
    {
        self.data = Some(Box::new(data));
        self
    }

    /// Sets the local data associated with the task.
    pub fn local_data<T>(mut self, data: T) -> Self
    where
        T: Any + Send,
    {
        self.local_data = Some(Box::new(data));
        self
    }

    /// Sets the user context associated with the task.
    pub fn user_ctx(mut self, user_ctx: Option<Arc<UserContext>>) -> Self {
        self.user_ctx = user_ctx;
        self
    }

    /// Builds a new task without running it immediately.
    pub fn build(self) -> Result<Task> {
        /// All tasks enter this function when they are scheduled for the
        /// first time. It is meant to execute the task's `func` to the end.
        extern "C" fn kernel_task_entry() -> ! {
            // SAFETY: The new task is switched on a CPU for the first time, `after_switching_to`
            // hasn't been called yet.
            unsafe { processor::after_switching_to() };

            let current_task = Task::current()
                .expect("the kernel task entry should always run with a current task");

            // SAFETY: The `func` field will only be accessed by the current task in the task
            // context, so the data won't be accessed concurrently.
            let task_func = unsafe { current_task.func.get() };
            let task_func = task_func
                .take()
                .expect("task function is `None` when trying to run");
            task_func();

            // Manually drop all the on-stack variables to prevent memory leakage!
            // This is needed because `scheduler::exit_current()` will never return.
            //
            // However, `current_task` _borrows_ the current task without holding
            // an extra reference count. So we do nothing here.

            scheduler::exit_current();
        }

        let kstack = KernelStack::new_with_guard_page()?;

        let mut ctx = SyncUnsafeCell::new(TaskContext::default());
        if let Some(user_ctx) = self.user_ctx.as_ref() {
            ctx.get_mut().set_tls_pointer(user_ctx.tls_pointer());
        };
        ctx.get_mut()
            .set_instruction_pointer(kernel_task_entry as usize);
        // We should reserve space for the return address in the stack, otherwise
        // we will write across the page boundary due to the implementation of
        // the context switch.
        //
        // According to the System V AMD64 ABI, the stack pointer should be aligned
        // to at least 16 bytes. And a larger alignment is needed if larger arguments
        // are passed to the function. The `kernel_task_entry` function does not
        // have any arguments, so we only need to align the stack pointer to 16 bytes.
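        //
        // A concrete instance (hedged; the example address is hypothetical and
        // assumes `end_vaddr()` is page-aligned): with `end_vaddr()` at
        // 0xffff_8000_0000_8000, the initial stack pointer becomes
        // 0xffff_8000_0000_7ff0, which stays 16-byte aligned while leaving
        // room for the return address pushed by the first context switch.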
        ctx.get_mut().set_stack_pointer(kstack.end_vaddr() - 16);

        let new_task = Task {
            func: ForceSync::new(Cell::new(self.func)),
            data: self.data.unwrap_or_else(|| Box::new(())),
            local_data: ForceSync::new(self.local_data.unwrap_or_else(|| Box::new(()))),
            user_ctx: self.user_ctx,
            ctx,
            kstack,
            schedule_info: TaskScheduleInfo {
                cpu: AtomicCpuId::default(),
            },
            switched_to_cpu: AtomicBool::new(false),
        };

        Ok(new_task)
    }

    /// Builds a new task and runs it immediately.
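    ///
    /// A minimal sketch (hedged; mirrors the `spawn_task` test at the bottom
    /// of this file):
    ///
    /// ```ignore
    /// let task = TaskOptions::new(|| {
    ///     // The task body runs to the end on its own kernel stack.
    /// })
    /// .spawn()
    /// .unwrap();
    /// ```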
    #[track_caller]
    pub fn spawn(self) -> Result<Arc<Task>> {
        let task = Arc::new(self.build()?);
        task.run();
        Ok(task)
    }
}

/// The current task.
///
/// This type is not `Send`, so it cannot outlive the current task.
///
/// This type is also not `Sync`, so it can provide access to the local data of the current task.
#[derive(Debug)]
pub struct CurrentTask(NonNull<Task>);

// The inner `NonNull<Task>` contained by `CurrentTask` implies that `CurrentTask` is `!Send` and
// `!Sync`. But it is still good to state this explicitly because these properties are key for
// soundness.
impl !Send for CurrentTask {}
impl !Sync for CurrentTask {}

impl CurrentTask {
    /// # Safety
    ///
    /// The caller must ensure that `task` is the current task.
    unsafe fn new(task: NonNull<Task>) -> Self {
        Self(task)
    }

    /// Returns the local data of the current task.
    ///
    /// Note that the local data is only accessible in the task context. Although there is a
    /// current task in the non-task context (e.g., IRQ handlers), access to the local data is
    /// forbidden there as it may cause soundness problems.
    ///
    /// # Panics
    ///
    /// This method will panic if called in a non-task context.
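    ///
    /// A minimal sketch (hedged; assumes the task was built with
    /// `TaskOptions::local_data(42u32)`):
    ///
    /// ```ignore
    /// let current = Task::current().unwrap();
    /// let value = *current.local_data().downcast_ref::<u32>().unwrap();
    /// assert_eq!(value, 42);
    /// ```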
    pub fn local_data(&self) -> &(dyn Any + Send) {
        assert!(!in_interrupt_context());

        let local_data = &self.local_data;

        // SAFETY: The `local_data` field will only be accessed by the current task in the task
        // context, so the data won't be accessed concurrently.
        &**unsafe { local_data.get() }
    }

    /// Returns a cloned `Arc<Task>`.
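    ///
    /// A minimal sketch (hedged; unlike `CurrentTask` itself, the returned
    /// `Arc` may outlive the task context):
    ///
    /// ```ignore
    /// let current = Task::current().unwrap();
    /// let owned: Arc<Task> = current.cloned();
    /// ```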
    pub fn cloned(&self) -> Arc<Task> {
        let ptr = self.0.as_ptr();

        // SAFETY: The current task is always a valid task and it is always contained in an `Arc`.
        unsafe { Arc::increment_strong_count(ptr) };

        // SAFETY: We've increased the reference count in the current `Arc<Task>` above.
        unsafe { Arc::from_raw(ptr) }
    }
}

impl Deref for CurrentTask {
    type Target = Task;

    fn deref(&self) -> &Self::Target {
        // SAFETY: The current task is always a valid task.
        unsafe { self.0.as_ref() }
    }
}

impl AsRef<Task> for CurrentTask {
    fn as_ref(&self) -> &Task {
        self
    }
}

impl Borrow<Task> for CurrentTask {
    fn borrow(&self) -> &Task {
        self
    }
}

/// A trait for manipulating the task context.
pub trait TaskContextApi {
    /// Sets the instruction pointer.
    fn set_instruction_pointer(&mut self, ip: usize);

    /// Gets the instruction pointer.
    fn instruction_pointer(&self) -> usize;

    /// Sets the stack pointer.
    fn set_stack_pointer(&mut self, sp: usize);

    /// Gets the stack pointer.
    fn stack_pointer(&self) -> usize;
}

#[cfg(ktest)]
mod test {
    use crate::prelude::*;

    #[ktest]
    fn create_task() {
        #[expect(clippy::eq_op)]
        let task = || {
            assert_eq!(1, 1);
        };
        let task = Arc::new(
            crate::task::TaskOptions::new(task)
                .data(())
                .build()
                .unwrap(),
        );
        task.run();
    }

    #[ktest]
    fn spawn_task() {
        #[expect(clippy::eq_op)]
        let task = || {
            assert_eq!(1, 1);
        };
        let _ = crate::task::TaskOptions::new(task).data(()).spawn();
    }
}
*/