// ostd/task/mod.rs
1// SPDX-License-Identifier: MPL-2.0
2
3//! Tasks are the unit of code execution.
4
5pub mod atomic_mode;
6mod kernel_stack;
7mod preempt;
8mod processor;
9pub mod scheduler;
10mod utils;
11
12use core::{
13 any::Any,
14 borrow::Borrow,
15 cell::{Cell, SyncUnsafeCell},
16 ops::Deref,
17 ptr::NonNull,
18 sync::atomic::AtomicBool,
19};
20
21use kernel_stack::KernelStack;
22use processor::current_task;
23use spin::Once;
24use utils::ForceSync;
25
26pub use self::{
27 preempt::{DisabledPreemptGuard, disable_preempt, halt_cpu},
28 scheduler::info::{AtomicCpuId, TaskScheduleInfo},
29};
30use crate::{arch::task::TaskContext, irq::InterruptLevel, prelude::*};
31
/// A handler to run before scheduling, installed at most once via
/// [`inject_pre_schedule_handler`]. (Call sites live outside this file.)
static PRE_SCHEDULE_HANDLER: Once<fn()> = Once::new();

/// A handler to run after scheduling, installed at most once via
/// [`inject_post_schedule_handler`]. (Call sites live outside this file.)
static POST_SCHEDULE_HANDLER: Once<fn()> = Once::new();
35
36/// Injects a handler to be executed before scheduling.
37pub fn inject_pre_schedule_handler(handler: fn()) {
38 PRE_SCHEDULE_HANDLER.call_once(|| handler);
39}
40
41/// Injects a handler to be executed after scheduling.
42pub fn inject_post_schedule_handler(handler: fn()) {
43 POST_SCHEDULE_HANDLER.call_once(|| handler);
44}
45
/// A task that executes a function to the end.
///
/// Each task is associated with per-task data and an optional user space.
/// If having a user space, the task can switch to the user space to
/// execute user code. Multiple tasks can share a single user space.
#[derive(Debug)]
pub struct Task {
    /// The task's entry function, taken (consumed) exactly once when the task
    /// first runs; `ForceSync` asserts that only the task itself touches it.
    #[expect(clippy::type_complexity)]
    func: ForceSync<Cell<Option<Box<dyn FnOnce() + Send>>>>,

    /// Data shared with other contexts (hence `Sync`); see [`Task::data`].
    data: Box<dyn Any + Send + Sync>,
    /// Data accessible only from the task's own context (need not be `Sync`);
    /// see [`CurrentTask::local_data`].
    local_data: ForceSync<Box<dyn Any + Send>>,

    /// The task's [`TaskContext`], initialized in `TaskOptions::build` and
    /// handed to the context-switch code via [`Task::ctx`].
    ctx: SyncUnsafeCell<TaskContext>,
    /// kernel stack, note that the top is SyscallFrame/TrapFrame
    kstack: KernelStack,

    /// If we have switched this task to a CPU.
    ///
    /// This is to enforce not context switching to an already running task.
    /// See [`processor::switch_to_task`] for more details.
    switched_to_cpu: AtomicBool,

    /// Scheduling metadata attached to this task.
    schedule_info: TaskScheduleInfo,
}
71
impl Task {
    /// Gets the current task.
    ///
    /// It returns `None` if the function is called in the bootstrap context.
    pub fn current() -> Option<CurrentTask> {
        let current_task = current_task()?;

        // SAFETY: `current_task` is the current task.
        Some(unsafe { CurrentTask::new(current_task) })
    }

    /// Returns the task's saved [`TaskContext`].
    ///
    /// The context is initialized in `TaskOptions::build`; the cell is
    /// `SyncUnsafeCell` so that the context-switch code can mutate it.
    pub(super) fn ctx(&self) -> &SyncUnsafeCell<TaskContext> {
        &self.ctx
    }

    /// Yields execution so that another task may be scheduled.
    ///
    /// Note that this method cannot be simply named "yield" as the name is
    /// a Rust keyword.
    #[track_caller]
    pub fn yield_now() {
        scheduler::yield_now()
    }

    /// Kicks the task scheduler to run the task.
    ///
    /// BUG: This method highly depends on the current scheduling policy.
    #[track_caller]
    pub fn run(self: &Arc<Self>) {
        // Cloning the `Arc` hands the scheduler its own reference count.
        scheduler::run_new_task(self.clone());
    }

    /// Returns the task data.
    pub fn data(&self) -> &Box<dyn Any + Send + Sync> {
        &self.data
    }

    /// Get the attached scheduling information.
    pub fn schedule_info(&self) -> &TaskScheduleInfo {
        &self.schedule_info
    }
}
114
/// Options to create or spawn a new task.
pub struct TaskOptions {
    /// The task's entry function; always set by [`TaskOptions::new`].
    func: Option<Box<dyn FnOnce() + Send>>,
    /// Optional shared data; defaults to `()` when left unset.
    data: Option<Box<dyn Any + Send + Sync>>,
    /// Optional task-local data; defaults to `()` when left unset.
    local_data: Option<Box<dyn Any + Send>>,
}
121
122impl TaskOptions {
123 /// Creates a set of options for a task.
124 pub fn new<F>(func: F) -> Self
125 where
126 F: FnOnce() + Send + 'static,
127 {
128 Self {
129 func: Some(Box::new(func)),
130 data: None,
131 local_data: None,
132 }
133 }
134
135 /// Sets the function that represents the entry point of the task.
136 pub fn func<F>(mut self, func: F) -> Self
137 where
138 F: Fn() + Send + 'static,
139 {
140 self.func = Some(Box::new(func));
141 self
142 }
143
144 /// Sets the data associated with the task.
145 pub fn data<T>(mut self, data: T) -> Self
146 where
147 T: Any + Send + Sync,
148 {
149 self.data = Some(Box::new(data));
150 self
151 }
152
153 /// Sets the local data associated with the task.
154 pub fn local_data<T>(mut self, data: T) -> Self
155 where
156 T: Any + Send,
157 {
158 self.local_data = Some(Box::new(data));
159 self
160 }
161
162 /// Builds a new task without running it immediately.
163 pub fn build(self) -> Result<Task> {
164 // All tasks will enter this function. It is meant to execute the `task_fn` in `Task`.
165 //
166 // We provide an assembly wrapper for this function as the end of call stack so we
167 // have to disable name mangling for it.
168 //
169 // # Safety
170 //
171 // This function must be called from `switch.S` when the context is prepared correctly.
172 // SAFETY: The name does not collide with other symbols.
173 #[unsafe(no_mangle)]
174 unsafe extern "C" fn kernel_task_entry() -> ! {
175 // SAFETY: The new task is switched on a CPU for the first time, `after_switching_to`
176 // hasn't been called yet.
177 unsafe { processor::after_switching_to() };
178
179 let current_task = Task::current()
180 .expect("no current task, it should have current task in kernel task entry");
181
182 // SAFETY: The `func` field will only be accessed by the current task in the task
183 // context, so the data won't be accessed concurrently.
184 let task_func = unsafe { current_task.func.get() };
185 let task_func = task_func
186 .take()
187 .expect("task function is `None` when trying to run");
188 task_func();
189
190 // Manually drop all the on-stack variables to prevent memory leakage!
191 // This is needed because `scheduler::exit_current()` will never return.
192 //
193 // However, `current_task` _borrows_ the current task without holding
194 // an extra reference count. So we do nothing here.
195
196 scheduler::exit_current();
197 }
198
199 let kstack = KernelStack::new_with_guard_page()?;
200
201 let mut ctx = TaskContext::new();
202 ctx.set_instruction_pointer(
203 crate::arch::task::kernel_task_entry_wrapper as *const () as usize,
204 );
205 // We should reserve space for the return address in the stack, otherwise
206 // we will write across the page boundary due to the implementation of
207 // the context switch.
208 //
209 // According to the System V AMD64 ABI, the stack pointer should be aligned
210 // to at least 16 bytes. And a larger alignment is needed if larger arguments
211 // are passed to the function. The `kernel_task_entry` function does not
212 // have any arguments, so we only need to align the stack pointer to 16 bytes.
213 ctx.set_stack_pointer(kstack.end_vaddr() - 16);
214
215 let new_task = Task {
216 func: ForceSync::new(Cell::new(self.func)),
217 data: self.data.unwrap_or_else(|| Box::new(())),
218 local_data: ForceSync::new(self.local_data.unwrap_or_else(|| Box::new(()))),
219 ctx: SyncUnsafeCell::new(ctx),
220 kstack,
221 schedule_info: TaskScheduleInfo {
222 cpu: AtomicCpuId::default(),
223 },
224 switched_to_cpu: AtomicBool::new(false),
225 };
226
227 Ok(new_task)
228 }
229
230 /// Builds a new task and runs it immediately.
231 #[track_caller]
232 pub fn spawn(self) -> Result<Arc<Task>> {
233 let task = Arc::new(self.build()?);
234 task.run();
235 Ok(task)
236 }
237}
238
/// The current task.
///
/// This type is not `Send`, so it cannot outlive the current task.
///
/// This type is also not `Sync`, so it can provide access to the local data of the current task.
#[derive(Debug)]
pub struct CurrentTask(NonNull<Task>);

// The intern `NonNull<Task>` contained by `CurrentTask` implies that `CurrentTask` is `!Send` and
// `!Sync`. But it is still good to do this explicitly because these properties are key for
// soundness.
impl !Send for CurrentTask {}
impl !Sync for CurrentTask {}
252
impl CurrentTask {
    /// Creates a `CurrentTask` from a raw task pointer.
    ///
    /// Note that no reference count is taken; the instance borrows the task
    /// (see the comment in `kernel_task_entry`). Use [`CurrentTask::cloned`]
    /// to obtain an owning `Arc<Task>`.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `task` is the current task.
    unsafe fn new(task: NonNull<Task>) -> Self {
        Self(task)
    }

    /// Returns the local data of the current task.
    ///
    /// Note that the local data is only accessible in the task context. Although there is a
    /// current task in the non-task context (e.g. IRQ handlers), access to the local data is
    /// forbidden as it may cause soundness problems.
    ///
    /// # Panics
    ///
    /// This method will panic if called in a non-task context.
    pub fn local_data(&self) -> &(dyn Any + Send) {
        assert!(InterruptLevel::current().is_task_context());

        let local_data = &self.local_data;

        // SAFETY: The `local_data` field will only be accessed by the current task in the task
        // context, so the data won't be accessed concurrently.
        &**unsafe { local_data.get() }
    }

    /// Returns a cloned `Arc<Task>`.
    pub fn cloned(&self) -> Arc<Task> {
        let ptr = self.0.as_ptr();

        // SAFETY: The current task is always a valid task and it is always contained in an `Arc`.
        unsafe { Arc::increment_strong_count(ptr) };

        // SAFETY: We've increased the reference count in the current `Arc<Task>` above.
        unsafe { Arc::from_raw(ptr) }
    }
}
291
// Allows `CurrentTask` to be used wherever a `&Task` is expected.
impl Deref for CurrentTask {
    type Target = Task;

    fn deref(&self) -> &Self::Target {
        // SAFETY: The current task is always a valid task.
        unsafe { self.0.as_ref() }
    }
}
300
301impl AsRef<Task> for CurrentTask {
302 fn as_ref(&self) -> &Task {
303 self
304 }
305}
306
307impl Borrow<Task> for CurrentTask {
308 fn borrow(&self) -> &Task {
309 self
310 }
311}
312
/// A trait that provides methods to manipulate the task context.
///
/// Implemented by the architecture-specific [`TaskContext`]; see the usage
/// in `TaskOptions::build`, which sets the entry point and initial stack.
pub(crate) trait TaskContextApi {
    /// Sets the instruction pointer.
    fn set_instruction_pointer(&mut self, ip: usize);

    /// Sets the stack pointer.
    fn set_stack_pointer(&mut self, sp: usize);
}
321
#[cfg(ktest)]
mod test {
    use crate::prelude::*;

    #[ktest]
    fn create_task() {
        #[expect(clippy::eq_op)]
        let entry = || {
            assert_eq!(1, 1);
        };
        let options = crate::task::TaskOptions::new(entry).data(());
        let built = Arc::new(options.build().unwrap());
        built.run();
    }

    #[ktest]
    fn spawn_task() {
        #[expect(clippy::eq_op)]
        let entry = || {
            assert_eq!(1, 1);
        };
        let _ = crate::task::TaskOptions::new(entry).data(()).spawn();
    }
}
349}