diff --git a/kernel/rustkernel/src/sync/spinlock.rs b/kernel/rustkernel/src/sync/spinlock.rs
index 5e7835d..e7ef0c5 100644
--- a/kernel/rustkernel/src/sync/spinlock.rs
+++ b/kernel/rustkernel/src/sync/spinlock.rs
@@ -1,6 +1,6 @@
 use crate::{
     proc::{mycpu, Cpu},
-    riscv,
+    trap::{push_intr_off, pop_intr_off},
 };
 use core::{
     ffi::c_char,
@@ -37,7 +37,7 @@ impl Spinlock {
         self.cpu == unsafe { mycpu() } && self.locked.load(Ordering::Relaxed)
     }
     pub unsafe fn lock_unguarded(&self) {
-        push_off();
+        push_intr_off();
 
         if self.held_by_current_cpu() {
             panic!("Attempt to acquire twice by the same CPU");
@@ -68,7 +68,7 @@ impl Spinlock {
         this.cpu = null_mut();
         this.locked.store(false, Ordering::Release);
 
-        pop_off();
+        pop_intr_off();
     }
 }
 
@@ -100,37 +100,3 @@ pub unsafe extern "C" fn acquire(lock: *mut Spinlock) {
 pub unsafe extern "C" fn release(lock: *mut Spinlock) {
     (*lock).unlock();
 }
-
-// push_off/pop_off are like intr_off()/intr_on() except that they are matched:
-// it takes two pop_off()s to undo two push_off()s. Also, if interrupts
-// are initially off, then push_off, pop_off leaves them off.
-
-#[no_mangle]
-pub unsafe extern "C" fn push_off() {
-    let old = riscv::intr_get();
-    let cpu = mycpu();
-
-    riscv::intr_off();
-    if (*cpu).interrupt_disable_layers == 0 {
-        (*cpu).previous_interrupts_enabled = old;
-    }
-    (*cpu).interrupt_disable_layers += 1;
-}
-#[no_mangle]
-pub unsafe extern "C" fn pop_off() {
-    let cpu = mycpu();
-
-    if riscv::intr_get() == 1 {
-        // crate::panic_byte(b'0');
-        panic!("pop_off - interruptible");
-    } else if (*cpu).interrupt_disable_layers < 1 {
-        // crate::panic_byte(b'1');
-        panic!("pop_off");
-    }
-
-    (*cpu).interrupt_disable_layers -= 1;
-
-    if (*cpu).interrupt_disable_layers == 0 && (*cpu).previous_interrupts_enabled == 1 {
-        riscv::intr_on();
-    }
-}
diff --git a/kernel/rustkernel/src/sync/spinmutex.rs b/kernel/rustkernel/src/sync/spinmutex.rs
index 890546b..5a618e6 100644
--- a/kernel/rustkernel/src/sync/spinmutex.rs
+++ b/kernel/rustkernel/src/sync/spinmutex.rs
@@ -17,13 +17,22 @@ impl<T> SpinMutex<T> {
         }
     }
     pub fn lock(&self) -> SpinMutexGuard<'_, T> {
+        unsafe {
+            crate::trap::push_intr_off();
+        }
+
         while self.locked.swap(true, Ordering::Acquire) {
             core::hint::spin_loop();
         }
+
         SpinMutexGuard { mutex: self }
     }
     pub unsafe fn unlock(&self) {
         self.locked.store(false, Ordering::Release);
+
+        unsafe {
+            crate::trap::pop_intr_off();
+        }
     }
 }
 unsafe impl<T> Sync for SpinMutex<T> where T: Send {}
diff --git a/kernel/rustkernel/src/trap.rs b/kernel/rustkernel/src/trap.rs
index 4c0ed73..548028a 100644
--- a/kernel/rustkernel/src/trap.rs
+++ b/kernel/rustkernel/src/trap.rs
@@ -278,3 +278,37 @@ pub unsafe extern "C" fn usertrap() {
 
     usertrapret();
 }
+
+// push_intr_off/pop_intr_off are like intr_off()/intr_on() except that they are matched:
+// it takes two pop_intr_off()s to undo two push_intr_off()s. Also, if interrupts
+// are initially off, then push_intr_off, pop_intr_off leaves them off.
+
+#[no_mangle]
+pub unsafe extern "C" fn push_intr_off() {
+    let old = crate::riscv::intr_get();
+    let cpu = mycpu();
+
+    crate::riscv::intr_off();
+    if (*cpu).interrupt_disable_layers == 0 {
+        (*cpu).previous_interrupts_enabled = old;
+    }
+    (*cpu).interrupt_disable_layers += 1;
+}
+#[no_mangle]
+pub unsafe extern "C" fn pop_intr_off() {
+    let cpu = mycpu();
+
+    if crate::riscv::intr_get() == 1 {
+        // crate::panic_byte(b'0');
+        panic!("pop_intr_off - interruptible");
+    } else if (*cpu).interrupt_disable_layers < 1 {
+        // crate::panic_byte(b'1');
+        panic!("pop_intr_off");
+    }
+
+    (*cpu).interrupt_disable_layers -= 1;
+
+    if (*cpu).interrupt_disable_layers == 0 && (*cpu).previous_interrupts_enabled == 1 {
+        crate::riscv::intr_on();
+    }
+}