ostd/task/mod.rs
// SPDX-License-Identifier: MPL-2.0
//! Tasks are the unit of code execution.
pub mod atomic_mode;
mod kernel_stack;
mod preempt;
mod processor;
pub mod scheduler;
mod utils;
use core::{
any::Any,
borrow::Borrow,
cell::{Cell, SyncUnsafeCell},
ops::Deref,
ptr::NonNull,
sync::atomic::AtomicBool,
};
use kernel_stack::KernelStack;
use processor::current_task;
use spin::Once;
use utils::ForceSync;
pub use self::{
preempt::{disable_preempt, halt_cpu, DisabledPreemptGuard},
scheduler::info::{AtomicCpuId, TaskScheduleInfo},
};
use crate::{arch::task::TaskContext, prelude::*, trap::in_interrupt_context};
static PRE_SCHEDULE_HANDLER: Once<fn()> = Once::new();
static POST_SCHEDULE_HANDLER: Once<fn()> = Once::new();
/// Injects a handler to be executed before scheduling.
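///
/// Only the first injected handler takes effect; later calls are silently ignored.
///
/// # Example
///
/// A minimal sketch; `before_schedule` is a hypothetical handler. Note that the handler must
/// be a plain `fn` pointer, so capturing closures cannot be used:
///
/// ```
/// use ostd::task::inject_pre_schedule_handler;
///
/// fn before_schedule() {
///     // E.g., update per-CPU bookkeeping before the scheduler picks the next task.
/// }
///
/// inject_pre_schedule_handler(before_schedule);
/// ```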
pub fn inject_pre_schedule_handler(handler: fn()) {
PRE_SCHEDULE_HANDLER.call_once(|| handler);
}
/// Injects a handler to be executed after scheduling.
pub fn inject_post_schedule_handler(handler: fn()) {
POST_SCHEDULE_HANDLER.call_once(|| handler);
}
/// A task that executes a function to the end.
///
/// Each task is associated with per-task data and an optional user space.
/// If the task has a user space, it can switch to the user space to
/// execute user code. Multiple tasks can share a single user space.
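///
/// # Example
///
/// A minimal sketch of spawning a kernel task, assuming the task scheduler has already been
/// initialized and can accept new tasks:
///
/// ```
/// use ostd::task::{Task, TaskOptions};
///
/// TaskOptions::new(|| {
///     // The task body runs in the kernel context until this closure returns.
///     Task::yield_now();
/// })
/// .spawn()
/// .unwrap();
/// ```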
#[derive(Debug)]
pub struct Task {
#[expect(clippy::type_complexity)]
func: ForceSync<Cell<Option<Box<dyn FnOnce() + Send>>>>,
data: Box<dyn Any + Send + Sync>,
local_data: ForceSync<Box<dyn Any + Send>>,
ctx: SyncUnsafeCell<TaskContext>,
/// The kernel stack; note that the top of the stack holds the `SyscallFrame`/`TrapFrame`.
kstack: KernelStack,
/// Whether this task has been switched to a CPU.
///
/// This is used to prevent context switching to a task that is already running.
/// See [`processor::switch_to_task`] for more details.
switched_to_cpu: AtomicBool,
schedule_info: TaskScheduleInfo,
}
impl Task {
/// Gets the current task.
///
/// It returns `None` if the function is called in the bootstrap context.
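///
/// # Example
///
/// A small sketch of inspecting the current task from within a task context:
///
/// ```
/// use ostd::task::Task;
///
/// if let Some(current) = Task::current() {
///     // `CurrentTask` derefs to `&Task`, so `Task` methods can be called directly.
///     let _sched_info = current.schedule_info();
/// }
/// ```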
pub fn current() -> Option<CurrentTask> {
let current_task = current_task()?;
// SAFETY: `current_task` is the current task.
Some(unsafe { CurrentTask::new(current_task) })
}
pub(super) fn ctx(&self) -> &SyncUnsafeCell<TaskContext> {
&self.ctx
}
/// Yields execution so that another task may be scheduled.
///
/// Note that this method cannot be simply named "yield" as the name is
/// a Rust keyword.
#[track_caller]
pub fn yield_now() {
scheduler::yield_now()
}
/// Kicks the task scheduler to run the task.
///
/// BUG: This method highly depends on the current scheduling policy.
#[track_caller]
pub fn run(self: &Arc<Self>) {
scheduler::run_new_task(self.clone());
}
/// Returns the task data.
pub fn data(&self) -> &Box<dyn Any + Send + Sync> {
&self.data
}
/// Returns the attached scheduling information.
pub fn schedule_info(&self) -> &TaskScheduleInfo {
&self.schedule_info
}
}
/// Options to create or spawn a new task.
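///
/// # Example
///
/// A minimal sketch of the builder pattern, assuming the scheduler is ready to run new tasks;
/// the `&str` and `u32` payloads are only illustrative:
///
/// ```
/// use ostd::task::TaskOptions;
///
/// let _task = TaskOptions::new(|| { /* task body */ })
///     .data("shared task data")
///     .local_data(0u32)
///     .spawn()
///     .unwrap();
/// ```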
pub struct TaskOptions {
func: Option<Box<dyn FnOnce() + Send>>,
data: Option<Box<dyn Any + Send + Sync>>,
local_data: Option<Box<dyn Any + Send>>,
}
impl TaskOptions {
/// Creates a set of options for a task.
pub fn new<F>(func: F) -> Self
where
F: FnOnce() + Send + 'static,
{
Self {
func: Some(Box::new(func)),
data: None,
local_data: None,
}
}
/// Sets the function that represents the entry point of the task.
pub fn func<F>(mut self, func: F) -> Self
where
F: Fn() + Send + 'static,
{
self.func = Some(Box::new(func));
self
}
/// Sets the data associated with the task.
pub fn data<T>(mut self, data: T) -> Self
where
T: Any + Send + Sync,
{
self.data = Some(Box::new(data));
self
}
/// Sets the local data associated with the task.
pub fn local_data<T>(mut self, data: T) -> Self
where
T: Any + Send,
{
self.local_data = Some(Box::new(data));
self
}
/// Builds a new task without running it immediately.
pub fn build(self) -> Result<Task> {
// All tasks will enter this function first. It is meant to execute the `func` stored in `Task`.
//
// We provide an assembly wrapper for this function as the end of the call stack, so we
// have to disable name mangling for it.
#[no_mangle]
extern "C" fn kernel_task_entry() -> ! {
// SAFETY: The new task is switched on a CPU for the first time, `after_switching_to`
// hasn't been called yet.
unsafe { processor::after_switching_to() };
let current_task = Task::current()
.expect("the current task should be available in `kernel_task_entry`");
// SAFETY: The `func` field will only be accessed by the current task in the task
// context, so the data won't be accessed concurrently.
let task_func = unsafe { current_task.func.get() };
let task_func = task_func
.take()
.expect("task function is `None` when trying to run");
task_func();
// All on-stack variables must be dropped manually here to prevent memory leakage,
// because `scheduler::exit_current()` will never return.
//
// However, `current_task` only _borrows_ the current task without holding
// an extra reference count, so there is nothing left to drop here.
scheduler::exit_current();
}
let kstack = KernelStack::new_with_guard_page()?;
let mut ctx = TaskContext::new();
ctx.set_instruction_pointer(crate::arch::task::kernel_task_entry_wrapper as usize);
// We should reserve space for the return address in the stack, otherwise
// we will write across the page boundary due to the implementation of
// the context switch.
//
// According to the System V AMD64 ABI, the stack pointer should be aligned
// to at least 16 bytes. And a larger alignment is needed if larger arguments
// are passed to the function. The `kernel_task_entry` function does not
// have any arguments, so we only need to align the stack pointer to 16 bytes.
ctx.set_stack_pointer(kstack.end_vaddr() - 16);
let new_task = Task {
func: ForceSync::new(Cell::new(self.func)),
data: self.data.unwrap_or_else(|| Box::new(())),
local_data: ForceSync::new(self.local_data.unwrap_or_else(|| Box::new(()))),
ctx: SyncUnsafeCell::new(ctx),
kstack,
schedule_info: TaskScheduleInfo {
cpu: AtomicCpuId::default(),
},
switched_to_cpu: AtomicBool::new(false),
};
Ok(new_task)
}
/// Builds a new task and runs it immediately.
#[track_caller]
pub fn spawn(self) -> Result<Arc<Task>> {
let task = Arc::new(self.build()?);
task.run();
Ok(task)
}
}
/// The current task.
///
/// This type is not `Send`, so it cannot outlive the current task.
///
/// This type is also not `Sync`, so it can provide access to the local data of the current task.
#[derive(Debug)]
pub struct CurrentTask(NonNull<Task>);
// The inner `NonNull<Task>` contained by `CurrentTask` implies that `CurrentTask` is `!Send` and
// `!Sync`. But it is still good to state this explicitly because these properties are key to
// soundness.
impl !Send for CurrentTask {}
impl !Sync for CurrentTask {}
impl CurrentTask {
/// # Safety
///
/// The caller must ensure that `task` is the current task.
unsafe fn new(task: NonNull<Task>) -> Self {
Self(task)
}
/// Returns the local data of the current task.
///
/// Note that the local data is only accessible in the task context. Although there is a
/// current task in the non-task context (e.g. IRQ handlers), access to the local data is
/// forbidden as it may cause soundness problems.
///
/// # Panics
///
/// This method will panic if called in a non-task context.
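///
/// # Example
///
/// A small sketch, assuming the current task was built with
/// `TaskOptions::local_data(0u32)` and that this code runs in the task context:
///
/// ```
/// use ostd::task::Task;
///
/// let current = Task::current().unwrap();
/// let _counter = current
///     .local_data()
///     .downcast_ref::<u32>()
///     .expect("the local data was set to a `u32` when the task was built");
/// ```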
pub fn local_data(&self) -> &(dyn Any + Send) {
assert!(!in_interrupt_context());
let local_data = &self.local_data;
// SAFETY: The `local_data` field will only be accessed by the current task in the task
// context, so the data won't be accessed concurrently.
&**unsafe { local_data.get() }
}
/// Returns a cloned `Arc<Task>`.
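///
/// # Example
///
/// A small sketch of turning the borrowed current task into an owned `Arc<Task>` that may
/// outlive the `CurrentTask` borrow (e.g., to store it in some wait queue):
///
/// ```
/// use ostd::task::Task;
///
/// let current = Task::current().unwrap();
/// let owned = current.cloned();
/// drop(current);
/// // `owned` is an `Arc<Task>` and remains valid after the borrow is gone.
/// let _sched_info = owned.schedule_info();
/// ```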
pub fn cloned(&self) -> Arc<Task> {
let ptr = self.0.as_ptr();
// SAFETY: The current task is always a valid task and it is always contained in an `Arc`.
unsafe { Arc::increment_strong_count(ptr) };
// SAFETY: We've increased the reference count in the current `Arc<Task>` above.
unsafe { Arc::from_raw(ptr) }
}
}
impl Deref for CurrentTask {
type Target = Task;
fn deref(&self) -> &Self::Target {
// SAFETY: The current task is always a valid task.
unsafe { self.0.as_ref() }
}
}
impl AsRef<Task> for CurrentTask {
fn as_ref(&self) -> &Task {
self
}
}
impl Borrow<Task> for CurrentTask {
fn borrow(&self) -> &Task {
self
}
}
/// A trait that provides methods to manipulate the task context.
pub(crate) trait TaskContextApi {
/// Sets the instruction pointer.
fn set_instruction_pointer(&mut self, ip: usize);
/// Sets the stack pointer.
fn set_stack_pointer(&mut self, sp: usize);
}
#[cfg(ktest)]
mod test {
use crate::prelude::*;
#[ktest]
fn create_task() {
#[expect(clippy::eq_op)]
let task = || {
assert_eq!(1, 1);
};
let task = Arc::new(
crate::task::TaskOptions::new(task)
.data(())
.build()
.unwrap(),
);
task.run();
}
#[ktest]
fn spawn_task() {
#[expect(clippy::eq_op)]
let task = || {
assert_eq!(1, 1);
};
let _ = crate::task::TaskOptions::new(task).data(()).spawn();
}
}