ostd/arch/x86/cpu/context/mod.rs

// SPDX-License-Identifier: MPL-2.0

//! CPU execution context control.

use alloc::boxed::Box;
use core::arch::x86_64::{_fxrstor64, _fxsave64, _xrstor64, _xsave64};

use bitflags::bitflags;
use cfg_if::cfg_if;
use log::debug;
use ostd_pod::Pod;
use spin::Once;
use x86::bits64::segmentation::wrfsbase;
use x86_64::registers::{
    control::{Cr0, Cr0Flags},
    rflags::RFlags,
    xcontrol::XCr0,
};

use crate::{
    arch::{
        irq::HwIrqLine,
        trap::{RawUserContext, TrapFrame},
    },
    cpu::PrivilegeLevel,
    irq::call_irq_callback_functions,
    mm::Vaddr,
    user::{ReturnReason, UserContextApi, UserContextApiInternal},
};

cfg_if! {
    if #[cfg(feature = "cvm_guest")] {
        mod tdx;

        use tdx::VirtualizationExceptionHandler;
    }
}

/// Userspace CPU context, including general-purpose registers and exception information.
#[derive(Clone, Default, Debug)]
#[repr(C)]
pub struct UserContext {
    user_context: RawUserContext,
    exception: Option<CpuException>,
}

/// General registers.
#[derive(Debug, Default, Clone, Copy, Eq, PartialEq)]
#[repr(C)]
#[expect(missing_docs)]
pub struct GeneralRegs {
    pub rax: usize,
    pub rbx: usize,
    pub rcx: usize,
    pub rdx: usize,
    pub rsi: usize,
    pub rdi: usize,
    pub rbp: usize,
    pub rsp: usize,
    pub r8: usize,
    pub r9: usize,
    pub r10: usize,
    pub r11: usize,
    pub r12: usize,
    pub r13: usize,
    pub r14: usize,
    pub r15: usize,
    pub rip: usize,
    pub rflags: usize,
    pub fsbase: usize,
    pub gsbase: usize,
}

/// Architectural CPU exceptions (x86-64 vectors 0-31).
///
/// For the authoritative specification of each vector, see the
/// Intel® 64 and IA-32 Architectures Software Developer’s Manual,
/// Volume 3 “System Programming Guide”, Chapter 6 “Interrupt and Exception
/// Handling”, in particular Section 6.15 “Exception and Interrupt
/// Reference”.
///
/// Every enum variant corresponds to one exception defined by the
/// Intel/AMD architecture.
/// Variants that naturally carry an error code (or other error information)
/// expose it through their associated data fields.
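///
/// # Examples
///
/// A minimal, illustrative sketch (not compiled as a doc test) of how a caller
/// might branch on an exception taken from a [`UserContext`]; `handle_page_fault`
/// and the signal helpers are hypothetical:
///
/// ```ignore
/// if let Some(exception) = user_context.take_exception() {
///     match exception {
///         CpuException::PageFault(info) => {
///             // `info.addr` holds the faulting linear address (CR2) and
///             // `info.error_code` the error code pushed by the CPU.
///             handle_page_fault(info.addr, info.error_code);
///         }
///         CpuException::InvalidOpcode => send_sigill(),
///         _ => send_sigsegv(),
///     }
/// }
/// ```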
//
// TODO: Some exceptions (like `AlignmentCheck`) also push an
//       error code onto the stack, but that detail is not yet represented
//       in this type definition.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum CpuException {
    ///  0 – #DE  Divide-by-zero error.
    DivisionError,
    ///  1 – #DB  Debug.
    Debug,
    ///  2 – NMI  Non-maskable interrupt.
    NonMaskableInterrupt,
    ///  3 – #BP  Breakpoint (INT3).
    BreakPoint,
    ///  4 – #OF  Overflow.
    Overflow,
    ///  5 – #BR  Bound-range exceeded.
    BoundRangeExceeded,
    ///  6 – #UD  Invalid or undefined opcode.
    InvalidOpcode,
    ///  7 – #NM  Device not available (FPU/MMX/SSE disabled).
    DeviceNotAvailable,
    ///  8 – #DF  Double fault (always pushes an error code of 0).
    DoubleFault,
    ///  9 – Coprocessor segment overrun (reserved on modern CPUs).
    CoprocessorSegmentOverrun,
    /// 10 – #TS  Invalid TSS.
    InvalidTss(SelectorErrorCode),
    /// 11 – #NP  Segment not present.
    SegmentNotPresent(SelectorErrorCode),
    /// 12 – #SS  Stack-segment fault.
    StackSegmentFault(SelectorErrorCode),
    /// 13 – #GP  General protection fault.
    GeneralProtectionFault(Option<SelectorErrorCode>),
    /// 14 – #PF  Page fault.
    PageFault(RawPageFaultInfo),
    // 15: Reserved
    /// 16 – #MF  x87 floating-point exception.
    X87FloatingPointException,
    /// 17 – #AC  Alignment check.
    AlignmentCheck,
    /// 18 – #MC  Machine check.
    MachineCheck,
    /// 19 – #XM / #XF  SIMD/FPU floating-point exception.
    SIMDFloatingPointException,
    /// 20 – #VE  Virtualization exception.
    VirtualizationException,
    /// 21 – #CP  Control protection exception (CET).
    ControlProtectionException,
    // 22-27: Reserved
    /// 28 – #HV  Hypervisor injection exception.
    HypervisorInjectionException,
    /// 29 – #VC  VMM communication exception (SEV-ES GHCB).
    VMMCommunicationException,
    /// 30 – #SX  Security exception.
    SecurityException,
    // 31: Reserved
    /// Catch-all for reserved or undefined vector numbers.
    Reserved,
}

impl CpuException {
    pub(crate) fn new(trap_num: usize, error_code: usize) -> Option<Self> {
        let exception = match trap_num {
            0 => Self::DivisionError,
            1 => Self::Debug,
            2 => Self::NonMaskableInterrupt,
            3 => Self::BreakPoint,
            4 => Self::Overflow,
            5 => Self::BoundRangeExceeded,
            6 => Self::InvalidOpcode,
            7 => Self::DeviceNotAvailable,
            8 => {
                // A double fault will always generate an error code with a value of zero.
                debug_assert_eq!(error_code, 0);
                Self::DoubleFault
            }
            9 => Self::CoprocessorSegmentOverrun,
            10 => Self::InvalidTss(SelectorErrorCode(error_code)),
            11 => Self::SegmentNotPresent(SelectorErrorCode(error_code)),
            12 => Self::StackSegmentFault(SelectorErrorCode(error_code)),
            13 => {
                let error_code = if error_code == 0 {
                    None
                } else {
                    Some(SelectorErrorCode(error_code))
                };
                Self::GeneralProtectionFault(error_code)
            }
            14 => {
                let page_fault_addr = x86_64::registers::control::Cr2::read_raw() as usize;
                Self::PageFault(RawPageFaultInfo {
                    error_code: PageFaultErrorCode::from_bits(error_code).unwrap(),
                    addr: page_fault_addr,
                })
            }
            // Reserved 15
            16 => Self::X87FloatingPointException,
            17 => Self::AlignmentCheck,
            18 => Self::MachineCheck,
            19 => Self::SIMDFloatingPointException,
            20 => Self::VirtualizationException,
            21 => Self::ControlProtectionException,
            // Reserved 22-27
            28 => Self::HypervisorInjectionException,
            29 => Self::VMMCommunicationException,
            30 => Self::SecurityException,
            // Reserved 31
            15 | 22..=27 | 31 => Self::Reserved,
            _ => return None,
        };

        Some(exception)
    }

    const fn type_(&self) -> CpuExceptionType {
        match self {
            Self::Debug => CpuExceptionType::FaultOrTrap,
            Self::NonMaskableInterrupt => CpuExceptionType::Interrupt,
            Self::BreakPoint | Self::Overflow => CpuExceptionType::Trap,
            Self::DoubleFault | Self::MachineCheck => CpuExceptionType::Abort,
            Self::Reserved => CpuExceptionType::Reserved,
            _ => CpuExceptionType::Fault,
        }
    }

    pub(crate) const fn is_cpu_exception(trap_num: usize) -> bool {
        trap_num <= 31
    }
}

/// Selector error code.
///
/// Reference: <https://wiki.osdev.org/Exceptions#Selector_Error_Code>.
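///
/// The encoded layout follows the architectural format: bit 0 is the external
/// (EXT) bit, bit 1 indicates a descriptor in the IDT, bit 2 selects the LDT
/// over the GDT when bit 1 is clear, and bits 3-15 hold the selector index.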
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct SelectorErrorCode(usize);

impl UserContext {
    /// Returns a reference to the general registers.
    pub fn general_regs(&self) -> &GeneralRegs {
        &self.user_context.general
    }

    /// Returns a mutable reference to the general registers.
    pub fn general_regs_mut(&mut self) -> &mut GeneralRegs {
        &mut self.user_context.general
    }

    /// Takes the CPU exception out.
    pub fn take_exception(&mut self) -> Option<CpuException> {
        self.exception.take()
    }

    /// Sets the thread-local storage pointer.
    pub fn set_tls_pointer(&mut self, tls: usize) {
        self.set_fsbase(tls)
    }

    /// Gets the thread-local storage pointer.
    pub fn tls_pointer(&self) -> usize {
        self.fsbase()
    }

    /// Activates the thread-local storage pointer for the current task.
    ///
    /// This method is safe by itself because the value of the TLS register does not affect
    /// kernel code. However, if the user program relies on the TLS pointer, make sure that
    /// the pointer is set correctly before entering user space.
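    ///
    /// # Examples
    ///
    /// A minimal, illustrative sketch (not compiled as a doc test) of the intended call
    /// sequence; `tls_addr` is a hypothetical user-provided address:
    ///
    /// ```ignore
    /// // Record the TLS pointer in the saved context, then make it take effect
    /// // immediately so that `fsbase` is correct before returning to user space.
    /// user_context.set_tls_pointer(tls_addr);
    /// user_context.activate_tls_pointer();
    /// ```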
    pub fn activate_tls_pointer(&self) {
        // In x86, context switching preserves `fsbase`, but `fsbase` won't be loaded at
        // `UserContext::execute`, so it must be activated in advance.
        //
        // SAFETY: Setting `fsbase` won't affect kernel code.
        unsafe { wrfsbase(self.fsbase() as u64) }
    }
}

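// As an illustrative sketch (not the actual kernel entry code; the helper
// functions named here are hypothetical), the `execute` loop implemented below
// is typically driven like this:
//
//     match user_context.execute(|| has_pending_kernel_events()) {
//         ReturnReason::UserSyscall => handle_syscall(user_context),
//         ReturnReason::UserException => handle_exception(user_context),
//         ReturnReason::KernelEvent => { /* process kernel events, then re-enter */ },
//     }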
impl UserContextApiInternal for UserContext {
    fn execute<F>(&mut self, mut has_kernel_event: F) -> ReturnReason
    where
        F: FnMut() -> bool,
    {
        // Set the interrupt flag so that external interrupts can be received in user mode,
        // and set the ID flag to indicate that the CPU supports the CPUID instruction.
        self.user_context.general.rflags |= (RFlags::INTERRUPT_FLAG | RFlags::ID).bits() as usize;

        const SYSCALL_TRAPNUM: usize = 0x100;

        // Return when a syscall occurs or when the CPU exception type is a fault or a trap.
        loop {
            crate::task::scheduler::might_preempt();
            self.user_context.run();

            let exception =
                CpuException::new(self.user_context.trap_num, self.user_context.error_code);
            match exception {
                #[cfg(feature = "cvm_guest")]
                Some(CpuException::VirtualizationException) => {
                    let ve_handler = VirtualizationExceptionHandler::new();
                    // Check out the doc of `VirtualizationExceptionHandler::new` to see why IRQs
                    // must be enabled _after_ instantiating a `VirtualizationExceptionHandler`.
                    crate::arch::irq::enable_local();
                    ve_handler.handle(self);
                }
                Some(exception) if exception.type_().is_fault_or_trap() => {
                    crate::arch::irq::enable_local();
                    self.exception = Some(exception);
                    return ReturnReason::UserException;
                }
                Some(exception) => {
                    panic!(
                        "cannot handle user CPU exception: {:?}, trapframe: {:?}",
                        exception,
                        self.as_trap_frame()
                    );
                }
                None if self.user_context.trap_num == SYSCALL_TRAPNUM => {
                    crate::arch::irq::enable_local();
                    return ReturnReason::UserSyscall;
                }
                None => {
                    call_irq_callback_functions(
                        &self.as_trap_frame(),
                        &HwIrqLine::new(self.as_trap_frame().trap_num as u8),
                        PrivilegeLevel::User,
                    );
                    crate::arch::irq::enable_local();
                }
            }

            if has_kernel_event() {
                break ReturnReason::KernelEvent;
            }
        }
    }

    fn as_trap_frame(&self) -> TrapFrame {
        TrapFrame {
            rax: self.user_context.general.rax,
            rbx: self.user_context.general.rbx,
            rcx: self.user_context.general.rcx,
            rdx: self.user_context.general.rdx,
            rsi: self.user_context.general.rsi,
            rdi: self.user_context.general.rdi,
            rbp: self.user_context.general.rbp,
            rsp: self.user_context.general.rsp,
            r8: self.user_context.general.r8,
            r9: self.user_context.general.r9,
            r10: self.user_context.general.r10,
            r11: self.user_context.general.r11,
            r12: self.user_context.general.r12,
            r13: self.user_context.general.r13,
            r14: self.user_context.general.r14,
            r15: self.user_context.general.r15,
            _pad: 0,
            trap_num: self.user_context.trap_num,
            error_code: self.user_context.error_code,
            rip: self.user_context.general.rip,
            cs: 0,
            rflags: self.user_context.general.rflags,
        }
    }
}

/// As the OSDev Wiki defines (<https://wiki.osdev.org/Exceptions>), CPU exceptions are
/// classified as:
///
/// - Faults: These can be corrected, and the program may continue as if nothing happened.
/// - Traps: Traps are reported immediately after the execution of the trapping instruction.
/// - Aborts: Some severe, unrecoverable error.
///
/// However, some vectors are special: vector 1 can be either a fault or a trap, and vector 2
/// is an interrupt. So here we also define `FaultOrTrap` and `Interrupt`.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum CpuExceptionType {
    /// CPU faults. Faults can be corrected, and the program may continue as if nothing happened.
    Fault,
    /// CPU traps. Traps are reported immediately after the execution of the trapping instruction.
    Trap,
    /// Faults or traps.
    FaultOrTrap,
    /// CPU interrupts.
    Interrupt,
    /// Some severe, unrecoverable error.
    Abort,
    /// Reserved for future use.
    Reserved,
}

impl CpuExceptionType {
    /// Returns whether this exception type is a fault or a trap.
    pub fn is_fault_or_trap(self) -> bool {
        match self {
            CpuExceptionType::Trap | CpuExceptionType::Fault | CpuExceptionType::FaultOrTrap => {
                true
            }
            CpuExceptionType::Abort | CpuExceptionType::Interrupt | CpuExceptionType::Reserved => {
                false
            }
        }
    }
}

/// Architecture-specific data reported with a page-fault exception.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct RawPageFaultInfo {
    /// The error code pushed by the CPU for this page fault.
    pub error_code: PageFaultErrorCode,
    /// The linear (virtual) address that triggered the fault (contents of CR2).
    pub addr: Vaddr,
}

bitflags! {
    /// The page fault error code, as defined in the Intel® 64 and IA-32 Architectures
    /// Software Developer's Manual, Volume 3.
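    ///
    /// # Examples
    ///
    /// A minimal, illustrative sketch (not compiled as a doc test) of decoding the error
    /// code carried by [`RawPageFaultInfo`]; `raw_info` and `is_cow_candidate` are hypothetical:
    ///
    /// ```ignore
    /// let code = raw_info.error_code;
    /// let is_user_write =
    ///     code.contains(PageFaultErrorCode::USER) && code.contains(PageFaultErrorCode::WRITE);
    /// // A user write to a present page may call for copy-on-write handling.
    /// let is_cow_candidate = is_user_write && code.contains(PageFaultErrorCode::PRESENT);
    /// ```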
    pub struct PageFaultErrorCode: usize {
        /// 0 if no translation for the linear address.
        const PRESENT       = 1 << 0;
        /// 1 if the access was a write.
        const WRITE         = 1 << 1;
        /// 1 if the access was a user-mode access.
        const USER          = 1 << 2;
        /// 1 if there is no translation for the linear address
        /// because a reserved bit was set.
        const RESERVED      = 1 << 3;
        /// 1 if the access was an instruction fetch.
        const INSTRUCTION   = 1 << 4;
        /// 1 if the access was a data access to a linear address with a protection key for which
        /// the protection-key rights registers disallow access.
        const PROTECTION    = 1 << 5;
        /// 1 if the access was a shadow-stack access.
        const SHADOW_STACK  = 1 << 6;
        /// 1 if there is no translation for the linear address using HLAT paging.
        const HLAT          = 1 << 7;
        /// 1 if the exception is unrelated to paging and resulted from violation of SGX-specific
        /// access-control requirements.
        const SGX           = 1 << 15;
    }
}

impl UserContextApi for UserContext {
    fn trap_number(&self) -> usize {
        self.user_context.trap_num
    }

    fn trap_error_code(&self) -> usize {
        self.user_context.error_code
    }

    fn set_instruction_pointer(&mut self, ip: usize) {
        self.set_rip(ip);
    }

    fn set_stack_pointer(&mut self, sp: usize) {
        self.set_rsp(sp)
    }

    fn stack_pointer(&self) -> usize {
        self.rsp()
    }

    fn instruction_pointer(&self) -> usize {
        self.rip()
    }
}

macro_rules! cpu_context_impl_getter_setter {
    ( $( [ $field: ident, $setter_name: ident] ),*) => {
        impl UserContext {
            $(
                #[doc = concat!("Gets the value of ", stringify!($field))]
                #[inline(always)]
                pub fn $field(&self) -> usize {
                    self.user_context.general.$field
                }

                #[doc = concat!("Sets the value of ", stringify!($field))]
                #[inline(always)]
                pub fn $setter_name(&mut self, $field: usize) {
                    self.user_context.general.$field = $field;
                }
            )*
        }
    };
}

cpu_context_impl_getter_setter!(
    [rax, set_rax],
    [rbx, set_rbx],
    [rcx, set_rcx],
    [rdx, set_rdx],
    [rsi, set_rsi],
    [rdi, set_rdi],
    [rbp, set_rbp],
    [rsp, set_rsp],
    [r8, set_r8],
    [r9, set_r9],
    [r10, set_r10],
    [r11, set_r11],
    [r12, set_r12],
    [r13, set_r13],
    [r14, set_r14],
    [r15, set_r15],
    [rip, set_rip],
    [rflags, set_rflags],
    [fsbase, set_fsbase],
    [gsbase, set_gsbase]
);
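
// The invocation above generates `rax()`/`set_rax()`-style getters and setters for each listed
// register. As an illustrative sketch (the surrounding dispatch code is hypothetical), a syscall
// handler might use them like this:
//
//     let ret = dispatch_syscall(ctx.rax(), ctx.rdi(), ctx.rsi(), ctx.rdx());
//     ctx.set_rax(ret as usize);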

/// The FPU context of a user task.
///
/// This can be used to save both the legacy and the modern FPU state formats.
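///
/// # Examples
///
/// A minimal, illustrative sketch (not compiled as a doc test) of how a context-switch path
/// might save and restore FPU state; the `prev`/`next` task handles are hypothetical:
///
/// ```ignore
/// prev.fpu_context.save(); // snapshot the outgoing task's FPU state
/// next.fpu_context.load(); // restore the incoming task's FPU state
/// ```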
#[derive(Debug)]
pub struct FpuContext {
    xsave_area: Box<XSaveArea>,
    area_size: usize,
}

impl FpuContext {
    /// Creates a new FPU context.
    pub fn new() -> Self {
        let mut area_size = size_of::<FxSaveArea>();
        if let Some(xsave_area_size) = XSAVE_AREA_SIZE.get() {
            area_size = area_size.max(*xsave_area_size);
        }

        Self {
            xsave_area: Box::new(XSaveArea::new()),
            area_size,
        }
    }

    /// Saves the CPU's current FPU context to this instance.
    pub fn save(&mut self) {
        let mem_addr = self.as_bytes_mut().as_mut_ptr();

        if XSTATE_MAX_FEATURES.is_completed() {
            unsafe { _xsave64(mem_addr, XFEATURE_MASK_USER_RESTORE) };
        } else {
            unsafe { _fxsave64(mem_addr) };
        }

        debug!("Save FPU context");
    }

    /// Loads the CPU's FPU context from this instance.
    pub fn load(&mut self) {
        let mem_addr = self.as_bytes().as_ptr();

        if let Some(xstate_max_features) = XSTATE_MAX_FEATURES.get() {
            let rs_mask = XFEATURE_MASK_USER_RESTORE & *xstate_max_features;

            unsafe { _xrstor64(mem_addr, rs_mask) };
        } else {
            unsafe { _fxrstor64(mem_addr) };
        }

        debug!("Load FPU context");
    }

    /// Returns the FPU context as a byte slice.
    pub fn as_bytes(&self) -> &[u8] {
        &self.xsave_area.as_bytes()[..self.area_size]
    }

    /// Returns the FPU context as a mutable byte slice.
    pub fn as_bytes_mut(&mut self) -> &mut [u8] {
        &mut self.xsave_area.as_bytes_mut()[..self.area_size]
    }
}

impl Default for FpuContext {
    fn default() -> Self {
        Self::new()
    }
}

impl Clone for FpuContext {
    fn clone(&self) -> Self {
        let mut xsave_area = Box::new(XSaveArea::new());
        xsave_area.fxsave_area = self.xsave_area.fxsave_area;
        xsave_area.features = self.xsave_area.features;
        xsave_area.compaction = self.xsave_area.compaction;
        if self.area_size > size_of::<FxSaveArea>() {
            let len = self.area_size - size_of::<FxSaveArea>() - 64;
            xsave_area.extended_state_area[..len]
                .copy_from_slice(&self.xsave_area.extended_state_area[..len]);
        }

        Self {
            xsave_area,
            area_size: self.area_size,
        }
    }
}

/// The modern FPU context format (as saved and restored by the `XSAVE` and `XRSTOR` instructions).
#[repr(C)]
#[repr(align(64))]
#[derive(Clone, Copy, Debug, Pod)]
struct XSaveArea {
    fxsave_area: FxSaveArea,
    features: u64,
    compaction: u64,
    reserved: [u64; 6],
    extended_state_area: [u8; MAX_XSAVE_AREA_SIZE - size_of::<FxSaveArea>() - 64],
}

impl XSaveArea {
    fn new() -> Self {
        let features = if let Some(xstate_max_features) = XSTATE_MAX_FEATURES.get() {
            XCr0::read().bits() & *xstate_max_features
        } else {
            0
        };

        let mut xsave_area = Self::new_zeroed();
        // Set the initial values for the FPU context. Refer to Intel SDM, Table 11-1:
        // "IA-32 and Intel® 64 Processor States Following Power-up, Reset, or INIT (Contd.)".
        xsave_area.fxsave_area.control = 0x037F;
        xsave_area.fxsave_area.tag = 0xFFFF;
        xsave_area.fxsave_area.mxcsr = 0x1F80;
        xsave_area.features = features;

        xsave_area
    }
}

/// The legacy SSE/MMX FPU context format (as saved and restored by the `FXSAVE` and `FXRSTOR` instructions).
#[repr(C)]
#[repr(align(16))]
#[derive(Clone, Copy, Debug, Pod)]
struct FxSaveArea {
    control: u16,         // x87 FPU Control Word
    status: u16,          // x87 FPU Status Word
    tag: u16,             // x87 FPU Tag Word
    op: u16,              // x87 FPU Last Instruction Opcode
    ip: u32,              // x87 FPU Instruction Pointer Offset
    cs: u32,              // x87 FPU Instruction Pointer Selector
    dp: u32,              // x87 FPU Instruction Operand (Data) Pointer Offset
    ds: u32,              // x87 FPU Instruction Operand (Data) Pointer Selector
    mxcsr: u32,           // MXCSR Register State
    mxcsr_mask: u32,      // MXCSR Mask
    st_space: [u32; 32], // x87 FPU or MMX technology registers (ST0-ST7 or MM0-MM7, 128 bits per field)
    xmm_space: [u32; 64], // XMM registers (XMM0-XMM15, 128 bits per field)
    padding: [u32; 12],  // Padding
    reserved: [u32; 12], // Software reserved
}

/// The XSTATE features (user & supervisor) supported by the processor.
static XSTATE_MAX_FEATURES: Once<u64> = Once::new();

/// Mask of the features which are restored when returning to user space.
///
/// X87 | SSE | AVX | OPMASK | ZMM_HI256 | HI16_ZMM
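///
/// In terms of XSAVE state-component bit positions (x87 = bit 0, SSE = bit 1, AVX = bit 2,
/// OPMASK = bit 5, ZMM_HI256 = bit 6, HI16_ZMM = bit 7), this equals `0b1110_0111`.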
const XFEATURE_MASK_USER_RESTORE: u64 = 0b1110_0111;

/// The real size in bytes of the XSAVE area containing all states enabled by XCR0 | IA32_XSS.
static XSAVE_AREA_SIZE: Once<usize> = Once::new();

/// The max size in bytes of the XSAVE area.
const MAX_XSAVE_AREA_SIZE: usize = 4096;

pub(in crate::arch) fn enable_essential_features() {
    use super::extension::{IsaExtensions, has_extensions};

    if has_extensions(IsaExtensions::XSAVE) {
        XSTATE_MAX_FEATURES.call_once(|| super::cpuid::query_xstate_max_features().unwrap());
        XSAVE_AREA_SIZE.call_once(|| {
            let xsave_area_size = super::cpuid::query_xsave_area_size().unwrap() as usize;
            assert!(xsave_area_size <= MAX_XSAVE_AREA_SIZE);
            xsave_area_size
        });
    }

    // We now assume that all x86-64 CPUs have the FPU. Otherwise, we should check
    // `has_extensions(IsaExtensions::FPU)` here.
    {
        let mut cr0 = Cr0::read();
        cr0.remove(Cr0Flags::TASK_SWITCHED | Cr0Flags::EMULATE_COPROCESSOR);

        unsafe {
            Cr0::write(cr0);
            // Flush out any pending x87 state.
            core::arch::asm!("fninit");
        }
    }
}