use core::sync::atomic::{AtomicBool, Ordering};

use align_ext::AlignExt;
use log::debug;
#[cfg(feature = "intel_tdx")]
use tdx_guest::tdcall;
use trapframe::TrapFrame;

use super::ex_table::ExTable;
#[cfg(feature = "intel_tdx")]
use crate::arch::{cpu::VIRTUALIZATION_EXCEPTION, tdx_guest::handle_virtual_exception};
use crate::{
    cpu::{CpuException, CpuExceptionInfo, PageFaultErrorCode, PAGE_FAULT},
    cpu_local,
    mm::{
        kspace::{KERNEL_PAGE_TABLE, LINEAR_MAPPING_BASE_VADDR, LINEAR_MAPPING_VADDR_RANGE},
        page_prop::{CachePolicy, PageProperty},
        PageFlags, PrivilegedPageFlags as PrivFlags, MAX_USERSPACE_VADDR, PAGE_SIZE,
    },
    task::current_task,
    trap::call_irq_callback_functions,
};
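
// A per-CPU flag recording whether the CPU is currently running the IRQ
// callbacks for an interrupt that preempted kernel-mode code.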
cpu_local! {
    static IS_KERNEL_INTERRUPTED: AtomicBool = AtomicBool::new(false);
}
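
/// Returns whether the current CPU is handling an interrupt that arrived
/// while the kernel was executing.
///
/// A minimal usage sketch (hypothetical caller, not part of this module):
///
/// ```ignore
/// if is_kernel_interrupted() {
///     // The current code was entered from an IRQ that preempted the kernel,
///     // so avoid operations that might re-enter the interrupted context.
/// }
/// ```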
pub fn is_kernel_interrupted() -> bool {
    IS_KERNEL_INTERRUPTED.load(Ordering::Acquire)
}
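
// The common trap entry called from the assembly stubs of the `trapframe`
// crate for traps taken in kernel mode: CPU exceptions are dispatched below,
// and everything else is treated as an external interrupt.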
#[no_mangle]
extern "sysv64" fn trap_handler(f: &mut TrapFrame) {
    if CpuException::is_cpu_exception(f.trap_num as u16) {
        match CpuException::to_cpu_exception(f.trap_num as u16).unwrap() {
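            // Intel TDX: a virtualization exception (#VE) is raised for
            // operations that the TD guest cannot perform directly; fetch the
            // VE info via TDCALL and let the TDX-specific handler process it.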
            #[cfg(feature = "intel_tdx")]
            &VIRTUALIZATION_EXCEPTION => {
                let ve_info =
                    tdcall::get_veinfo().expect("#VE handler: failed to get the VE info");
                handle_virtual_exception(f, &ve_info);
            }
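            // A page fault is dispatched on the faulting address (read from
            // CR2): addresses below `MAX_USERSPACE_VADDR` belong to user
            // space, everything above is handled as a kernel-space fault.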
            &PAGE_FAULT => {
                let page_fault_addr = x86_64::registers::control::Cr2::read().as_u64();
                if (0..MAX_USERSPACE_VADDR).contains(&(page_fault_addr as usize)) {
                    handle_user_page_fault(f, page_fault_addr);
                } else {
                    handle_kernel_page_fault(f, page_fault_addr);
                }
            }
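            // Any other CPU exception raised in kernel mode is a kernel bug.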
            exception => {
                panic!(
                    "Cannot handle kernel CPU exception: {:?}. Error code: {:x?}; Trapframe: {:#x?}.",
                    exception, f.error_code, f
                );
            }
        }
    } else {
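        // Not a CPU exception, so this must be an external interrupt: mark
        // the kernel as interrupted for the duration of the IRQ callbacks.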
        IS_KERNEL_INTERRUPTED.store(true, Ordering::Release);
        call_irq_callback_functions(f);
        IS_KERNEL_INTERRUPTED.store(false, Ordering::Release);
    }
}
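
/// Handles a page fault whose faulting address lies within the user address
/// space by forwarding it to the `VmSpace` of the current task. If the fault
/// cannot be resolved there, execution is recovered through the exception
/// table; otherwise the kernel panics.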
fn handle_user_page_fault(f: &mut TrapFrame, page_fault_addr: u64) {
    let current_task = current_task().unwrap();
    let user_space = current_task
        .user_space()
        .expect("the user space is missing when a user-mode page fault occurs");
    let info = CpuExceptionInfo {
        page_fault_addr: page_fault_addr as usize,
        id: f.trap_num,
        error_code: f.error_code,
    };
    let res = user_space.vm_space().handle_page_fault(&info);
    if res.is_ok() {
        return;
    }
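
    // The `VmSpace` could not resolve the fault. If the faulting instruction
    // has a recovery address registered in the exception table (e.g., a
    // user-memory access routine), resume execution there instead of panicking.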
    if let Some(addr) = ExTable::find_recovery_inst_addr(f.rip) {
        f.rip = addr;
    } else {
        panic!("Cannot handle user page fault; Trapframe: {:#x?}.", f);
    }
}
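
/// Handles a page fault in the kernel's linear (direct) mapping of physical
/// memory by lazily mapping the faulting page to its backing physical frame.
/// Any fault outside this region, or with an unexpected error code, is fatal.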
fn handle_kernel_page_fault(f: &TrapFrame, page_fault_vaddr: u64) {
    let error_code = PageFaultErrorCode::from_bits_truncate(f.error_code);
    debug!(
        "kernel page fault: address {:?}, error code {:?}",
        page_fault_vaddr as *const (), error_code
    );

    assert!(
        LINEAR_MAPPING_VADDR_RANGE.contains(&(page_fault_vaddr as usize)),
        "kernel page fault: the address is outside the range of the linear mapping",
    );
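
    // Only non-present faults caused by reads or writes are expected here;
    // the linear mapping must never be executed, and a fault on a present
    // page would mean the mapping already exists.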
    const SUPPORTED_ERROR_CODES: PageFaultErrorCode = PageFaultErrorCode::PRESENT
        .union(PageFaultErrorCode::WRITE)
        .union(PageFaultErrorCode::INSTRUCTION);
    assert!(
        SUPPORTED_ERROR_CODES.contains(error_code),
        "kernel page fault: the error code is not supported",
    );
    assert!(
        !error_code.contains(PageFaultErrorCode::INSTRUCTION),
        "kernel page fault: the direct mapping cannot be executed",
    );
    assert!(
        !error_code.contains(PageFaultErrorCode::PRESENT),
        "kernel page fault: the direct mapping already exists",
    );
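
    // Establish the missing mapping. In the linear mapping, the backing
    // physical address is the virtual address minus `LINEAR_MAPPING_BASE_VADDR`.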
    let page_table = KERNEL_PAGE_TABLE
        .get()
        .expect("kernel page fault: the kernel page table is not initialized");
    let vaddr = (page_fault_vaddr as usize).align_down(PAGE_SIZE);
    let paddr = vaddr - LINEAR_MAPPING_BASE_VADDR;
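
    // SAFETY: the faulting address has been checked to lie within the linear
    // mapping range and to be unmapped, so mapping this one page to `paddr`
    // with RW, uncacheable properties (the lazily mapped page may be MMIO)
    // upholds the semantics of the linear mapping of physical memory.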
    unsafe {
        page_table
            .map(
                &(vaddr..vaddr + PAGE_SIZE),
                &(paddr..paddr + PAGE_SIZE),
                PageProperty {
                    flags: PageFlags::RW,
                    cache: CachePolicy::Uncacheable,
                    #[cfg(not(feature = "intel_tdx"))]
                    priv_flags: PrivFlags::GLOBAL,
                    #[cfg(feature = "intel_tdx")]
                    priv_flags: PrivFlags::SHARED | PrivFlags::GLOBAL,
                },
            )
            .unwrap();
    }
}