// ostd/trap/irq.rs
1// SPDX-License-Identifier: MPL-2.0
2//! IRQ line and IRQ guards.
3use vstd::prelude::*;
4
5use core::{fmt::Debug, ops::Deref};
6
7// use id_alloc::IdAlloc;
8// use spin::Once;
9
10use crate::{
11 arch::{
12 irq::{self/* , IrqRemapping, IRQ_NUM_MAX, IRQ_NUM_MIN */},
13 /* trap::TrapFrame, */
14 },
15 prelude::*,
16 sync::{GuardTransfer, RwLock, SpinLock, /* WriteIrqDisabled */},
17 // task::atomic_mode::InAtomicMode,
18 // Error,
19};
20/*
21/// A type alias for the IRQ callback function.
22pub type IrqCallbackFunction = dyn Fn(&TrapFrame) + Sync + Send + 'static;
23
24/// An Interrupt ReQuest (IRQ) line.
25///
26/// Users can use [`alloc`] or [`alloc_specific`] to allocate a (specific) IRQ line.
27///
28/// The IRQ number is guaranteed to be an external IRQ number and users can use [`on_active`] to
29/// safely register callback functions on this IRQ line. When the IRQ line is dropped, all the
30/// registered callbacks will be unregistered automatically.
31///
32/// [`alloc`]: Self::alloc
33/// [`alloc_specific`]: Self::alloc_specific
34/// [`on_active`]: Self::on_active
35#[derive(Debug)]
36#[must_use]
37pub struct IrqLine {
38 inner: Arc<InnerHandle>,
39 callbacks: Vec<CallbackHandle>,
40}
41
42impl IrqLine {
43 /// Allocates an available IRQ line.
44 pub fn alloc() -> Result<Self> {
45 get_or_init_allocator()
46 .lock()
47 .alloc()
48 .map(|id| Self::new(id as u8))
49 .ok_or(Error::NotEnoughResources)
50 }
51
52 /// Allocates a specific IRQ line.
53 pub fn alloc_specific(irq_num: u8) -> Result<Self> {
54 get_or_init_allocator()
55 .lock()
56 .alloc_specific((irq_num - IRQ_NUM_MIN) as usize)
57 .map(|id| Self::new(id as u8))
58 .ok_or(Error::NotEnoughResources)
59 }
60
61 fn new(index: u8) -> Self {
62 let inner = InnerHandle { index };
63 inner.remapping.init(index + IRQ_NUM_MIN);
64
65 Self {
66 inner: Arc::new(inner),
67 callbacks: Vec::new(),
68 }
69 }
70
71 /// Gets the IRQ number.
72 pub fn num(&self) -> u8 {
73 self.inner.index + IRQ_NUM_MIN
74 }
75
76 /// Registers a callback that will be invoked when the IRQ is active.
77 ///
78 /// For each IRQ line, multiple callbacks may be registered.
79 pub fn on_active<F>(&mut self, callback: F)
80 where
81 F: Fn(&TrapFrame) + Sync + Send + 'static,
82 {
83 let callback_handle = {
84 let callback_box = Box::new(callback);
85 let callback_addr = core::ptr::from_ref(&*callback_box).addr();
86
87 let mut callbacks = self.inner.callbacks.write();
88 callbacks.push(callback_box);
89
90 CallbackHandle {
91 irq_index: self.inner.index,
92 callback_addr,
93 }
94 };
95
96 self.callbacks.push(callback_handle);
97 }
98
99 /// Checks if there are no registered callbacks.
100 pub fn is_empty(&self) -> bool {
101 self.callbacks.is_empty()
102 }
103
104 /// Gets the remapping index of the IRQ line.
105 ///
106 /// This method will return `None` if interrupt remapping is disabled or
107 /// not supported by the architecture.
108 pub fn remapping_index(&self) -> Option<u16> {
109 self.inner.remapping.remapping_index()
110 }
111}
112
113impl Clone for IrqLine {
114 fn clone(&self) -> Self {
115 Self {
116 inner: self.inner.clone(),
117 callbacks: Vec::new(),
118 }
119 }
120}
121
122struct Inner {
123 callbacks: RwLock<Vec<Box<IrqCallbackFunction>>, WriteIrqDisabled>,
124 remapping: IrqRemapping,
125}
126
127impl Inner {
128 const fn new() -> Self {
129 Self {
130 callbacks: RwLock::new(Vec::new()),
131 remapping: IrqRemapping::new(),
132 }
133 }
134}
135
136const NUMBER_OF_IRQS: usize = (IRQ_NUM_MAX - IRQ_NUM_MIN) as usize + 1;
137
138static INNERS: [Inner; NUMBER_OF_IRQS] = [const { Inner::new() }; NUMBER_OF_IRQS];
139static ALLOCATOR: Once<SpinLock<IdAlloc>> = Once::new();
140
141fn get_or_init_allocator() -> &'static SpinLock<IdAlloc> {
142 ALLOCATOR.call_once(|| SpinLock::new(IdAlloc::with_capacity(NUMBER_OF_IRQS)))
143}
144
145/// A handle for an allocated IRQ line.
146///
147/// When the handle is dropped, the IRQ line will be released automatically.
148#[must_use]
149#[derive(Debug)]
150struct InnerHandle {
151 index: u8,
152}
153
154impl Deref for InnerHandle {
155 type Target = Inner;
156
157 fn deref(&self) -> &Self::Target {
158 &INNERS[self.index as usize]
159 }
160}
161
162impl Drop for InnerHandle {
163 fn drop(&mut self) {
164 ALLOCATOR.get().unwrap().lock().free(self.index as usize);
165 }
166}
167
168/// A handle for a registered callback on an IRQ line.
169///
170/// When the handle is dropped, the callback will be unregistered automatically.
171#[must_use]
172#[derive(Debug)]
173struct CallbackHandle {
174 irq_index: u8,
175 callback_addr: usize,
176}
177
178impl Drop for CallbackHandle {
179 fn drop(&mut self) {
180 let mut callbacks = INNERS[self.irq_index as usize].callbacks.write();
181
182 let pos = callbacks
183 .iter()
184 .position(|element| core::ptr::from_ref(&**element).addr() == self.callback_addr);
185 let _ = callbacks.swap_remove(pos.unwrap());
186 }
187}
188
189pub(super) fn process_top_half(trap_frame: &TrapFrame, irq_num: usize) {
190 let inner = &INNERS[irq_num - (IRQ_NUM_MIN as usize)];
191 for callback in &*inner.callbacks.read() {
192 callback(trap_frame);
193 }
194}*/
195
196// ####### IRQ Guards #######
197verus! {
198/// Disables all IRQs on the current CPU (i.e., locally).
199///
200/// This function returns a guard object, which will automatically enable local IRQs again when
201/// it is dropped. This function works correctly even when it is called in a _nested_ way.
202/// The local IRQs shall only be re-enabled when the most outer guard is dropped.
203///
204/// This function can play nicely with [`SpinLock`] as the type uses this function internally.
205/// One can invoke this function even after acquiring a spin lock. And the reversed order is also ok.
206///
207/// [`SpinLock`]: crate::sync::SpinLock
208///
209/// # Example
210///
211/// ```rust
212/// use ostd::trap;
213///
214/// {
215/// let _ = trap::irq::disable_local();
216/// todo!("do something when irqs are disabled");
217/// }
218/// ```
219pub fn disable_local() -> DisabledLocalIrqGuard {
220 DisabledLocalIrqGuard::new()
221}
222
/// A guard for disabled local IRQs.
#[clippy::has_significant_drop]
#[must_use]
#[derive(Debug)]
pub struct DisabledLocalIrqGuard {
    // Whether local IRQs were enabled at the moment this guard was created.
    // Only the outermost guard in a nested sequence records `true`; guards
    // created while IRQs are already disabled (and guards drained by
    // `transfer_to`) hold `false`.
    // NOTE(review): the `Drop` impl that consumes this flag to re-enable
    // IRQs is currently commented out at the bottom of this file — confirm
    // whether that is intentional during the verification effort.
    was_enabled: bool,
}
230
// The guard captures per-CPU state: per `disable_local`'s contract, IRQs are
// disabled "locally" (on the current CPU). Forbidding `Send` keeps the guard
// from migrating to another thread, which could be running on a different CPU.
impl !Send for DisabledLocalIrqGuard {}
232
233// SAFETY: The guard disables local IRQs, which meets the first
234// sufficient condition for atomic mode.
235/* unsafe impl InAtomicMode for DisabledLocalIrqGuard {} */
236impl DisabledLocalIrqGuard {
237 #[verifier::external_body]
238 fn new() -> Self {
239 let was_enabled = irq::is_local_enabled();
240 if was_enabled {
241 irq::disable_local();
242 }
243 Self { was_enabled }
244 }
245}
246
247impl GuardTransfer for DisabledLocalIrqGuard {
248 #[verifier::external_body]
249 fn transfer_to(&mut self) -> Self {
250 let was_enabled = self.was_enabled;
251 self.was_enabled = false;
252 Self { was_enabled }
253 }
254}
255}
256/*impl Drop for DisabledLocalIrqGuard {
257 fn drop(&mut self) {
258 if self.was_enabled {
259 irq::enable_local();
260 }
261 }
262} */
263
#[cfg(ktest)]
mod test {
    // NOTE(review): These tests exercise `IrqLine`, `INNERS`, and
    // `IRQ_NUM_MIN`, all of which are currently commented out above (pending
    // verification). Leaving the test bodies active would make any build with
    // `ktest` enabled fail to compile on unresolved names. The bodies are
    // therefore parked in a block comment — the file's existing convention
    // for pending code — and should be restored together with `IrqLine`.
    /*
    use super::*;

    const IRQ_NUM: u8 = 64;
    const IRQ_INDEX: usize = (IRQ_NUM - IRQ_NUM_MIN) as usize;

    #[ktest]
    fn alloc_and_free_irq() {
        let irq_line = IrqLine::alloc_specific(IRQ_NUM).unwrap();
        assert!(IrqLine::alloc_specific(IRQ_NUM).is_err());

        let irq_line_cloned = irq_line.clone();
        assert!(IrqLine::alloc_specific(IRQ_NUM).is_err());

        drop(irq_line);
        assert!(IrqLine::alloc_specific(IRQ_NUM).is_err());

        drop(irq_line_cloned);
        assert!(IrqLine::alloc_specific(IRQ_NUM).is_ok());
    }

    #[ktest]
    fn register_and_unregister_callback() {
        let mut irq_line = IrqLine::alloc_specific(IRQ_NUM).unwrap();
        let mut irq_line_cloned = irq_line.clone();

        assert_eq!(INNERS[IRQ_INDEX].callbacks.read().len(), 0);

        irq_line.on_active(|_| {});
        assert_eq!(INNERS[IRQ_INDEX].callbacks.read().len(), 1);

        irq_line_cloned.on_active(|_| {});
        assert_eq!(INNERS[IRQ_INDEX].callbacks.read().len(), 2);

        irq_line_cloned.on_active(|_| {});
        assert_eq!(INNERS[IRQ_INDEX].callbacks.read().len(), 3);

        drop(irq_line);
        assert_eq!(INNERS[IRQ_INDEX].callbacks.read().len(), 2);

        drop(irq_line_cloned);
        assert_eq!(INNERS[IRQ_INDEX].callbacks.read().len(), 0);
    }
    */
}
308}