arch mem

commit cbea3a19f4
parent 934dc13f96
@@ -1,10 +1,13 @@
 //! Architecture-agnostic interrupt handling.
 
 #[cfg(target_arch = "riscv64")]
-pub use super::riscv::plic::plic_claim as handle_interrupt;
-#[cfg(target_arch = "riscv64")]
-pub use super::riscv::plic::plic_complete as complete_interrupt;
-#[cfg(target_arch = "riscv64")]
-pub use super::riscv::plic::plicinit as init;
-#[cfg(target_arch = "riscv64")]
-pub use super::riscv::plic::plicinithart as inithart;
+pub use super::riscv::{
+    asm::{
+        intr_get as interrupts_enabled, intr_off as disable_interrupts,
+        intr_on as enable_interrupts,
+    },
+    plic::{
+        plic_claim as handle_interrupt, plic_complete as complete_interrupt, plicinit as init,
+        plicinithart as inithart,
+    },
+};
kernel/rustkernel/src/arch/mem.rs (new file, 19 lines)
@@ -0,0 +1,19 @@
+//! Architecture-agnostic memory management.
+
+#[cfg(target_arch = "riscv64")]
+pub use super::riscv::{
+    asm::sfence_vma as flush_cached_pages,
+    mem::{
+        Pagetable, PagetableEntry, KERNEL_BASE, PAGE_SIZE, PHYSICAL_END, TRAMPOLINE, TRAPFRAME,
+        PTE_V, PTE_R, PTE_W, PTE_X, PTE_U,
+        VIRTUAL_MAX,
+    },
+};
+
+pub fn round_up_page(size: usize) -> usize {
+    (size + PAGE_SIZE - 1) & !(PAGE_SIZE - 1)
+}
+
+pub fn round_down_page(addr: usize) -> usize {
+    addr & !(PAGE_SIZE - 1)
+}
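A quick standalone illustration of the two rounding helpers added above (not part of the commit; it only assumes the same 4096-byte PAGE_SIZE defined in this file). Because PAGE_SIZE is a power of two, !(PAGE_SIZE - 1) clears the low 12 bits, which is what makes the bitmask form equivalent to dividing and multiplying by the page size.

// Standalone sketch of the page-rounding arithmetic (assumes PAGE_SIZE = 4096).
const PAGE_SIZE: usize = 4096;

fn round_up_page(size: usize) -> usize {
    (size + PAGE_SIZE - 1) & !(PAGE_SIZE - 1)
}

fn round_down_page(addr: usize) -> usize {
    addr & !(PAGE_SIZE - 1)
}

fn main() {
    assert_eq!(round_up_page(1), 4096);                    // partial page rounds up
    assert_eq!(round_up_page(4096), 4096);                 // exact multiple is unchanged
    assert_eq!(round_down_page(0x8000_1234), 0x8000_1000); // drop the 12 offset bits
}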
@@ -1,6 +1,9 @@
 #[cfg(target_arch = "riscv64")]
 pub mod riscv;
+#[cfg(target_arch = "riscv64")]
+pub use riscv::hardware;
 
 pub mod cpu;
 pub mod interrupt;
+pub mod mem;
 pub mod power;
@@ -245,7 +245,7 @@ pub unsafe fn r_ra() -> u64 {
     x
 }
 
-// Flush the TLB.
+// Flush the Translation Look-aside Buffer (TLB).
 #[inline(always)]
 pub unsafe fn sfence_vma() {
     // The "zero, zero" means flush all TLB entries.
kernel/rustkernel/src/arch/riscv/hardware.rs (new file, 7 lines)
@@ -0,0 +1,7 @@
+// QEMU puts UART registers here in physical memory.
+pub const UART0: usize = 0x10000000;
+pub const UART0_IRQ: usize = 10;
+
+// Virtio MMIO interface
+pub const VIRTIO0: usize = 0x10001000;
+pub const VIRTIO0_IRQ: usize = 1;
kernel/rustkernel/src/arch/riscv/mem.rs (new file, 91 lines)
@@ -0,0 +1,91 @@
+// Physical memory layout
+
+// QEMU -machine virt is setup like this,
+// based on QEMU's hw/riscv/virt.c
+//
+// 00001000 - boot ROM, provided by qemu
+// 02000000 - CLINT
+// 0C000000 - PLIC
+// 10000000 - uart0
+// 10001000 - virtio disk
+// 80000000 - boot ROM jumps here in machine mode (kernel loads the kernel here)
+// unused after 8000000
+
+// The kernel uses physical memory as so:
+// 80000000 - entry.S, then kernel text and data
+// end - start of kernel page allocation data
+// PHYSTOP - end of RAM used by the kernel
+
+pub type PagetableEntry = u64;
+pub type Pagetable = *mut [PagetableEntry; 512];
+
+/// The PagetableEntry is valid.
+pub const PTE_V: i32 = 1 << 0;
+/// The PagetableEntry is readable.
+pub const PTE_R: i32 = 1 << 1;
+/// The PagetableEntry is writable.
+pub const PTE_W: i32 = 1 << 2;
+/// The PagetableEntry is executable.
+pub const PTE_X: i32 = 1 << 3;
+/// The PagetableEntry is user-accessible.
+pub const PTE_U: i32 = 1 << 4;
+
+/// Page-based 39-bit virtual addressing.
+/// Details at section 5.4 of the RISC-V specification.
+pub const SATP_SV39: u64 = 8 << 60;
+
+pub fn make_satp(pagetable: Pagetable) -> u64 {
+    SATP_SV39 | (pagetable as usize as u64 >> 12)
+}
+
+/// Bytes per page.
+pub const PAGE_SIZE: usize = 4096;
+/// Bits of offset within a page
+const PAGE_OFFSET: usize = 12;
+/// The kernel starts here.
+pub const KERNEL_BASE: usize = 0x8000_0000;
+/// The end of physical memory.
+pub const PHYSICAL_END: usize = KERNEL_BASE + (128 * 1024 * 1024);
+/// The maximum virtual address.
+///
+/// VIRTUAL_MAX is actually one bit less than the max allowed by
+/// Sv39 to avoid having to sign-extend virtual addresses
+/// that have the high bit set.
+pub const VIRTUAL_MAX: usize = 1 << (9 + 9 + 9 + 12 - 1);
+/// Map the trampoline page to the highest
+/// address in both user and kernel space.
+pub const TRAMPOLINE: usize = VIRTUAL_MAX - PAGE_SIZE;
+/// Map kernel stacks beneath the trampoline,
+/// each surrounded by invalid guard pages.
+pub fn kstack(page: usize) -> usize {
+    TRAMPOLINE - (page + 1) * 2 * PAGE_SIZE
+}
+/// User memory layout.
+/// Address zero first:
+/// - text
+/// - original data and bss
+/// - fixed-size stack
+/// - expandable heap
+/// ...
+/// - TRAPFRAME (p->trapframe, used by the trampoline)
+/// - TRAMPOLINE (the same page as in the kernel)
+pub const TRAPFRAME: usize = TRAMPOLINE - PAGE_SIZE;
+
+// Convert a physical address to a PagetableEntry.
+pub fn pa2pte(pa: usize) -> usize {
+    (pa >> 12) << 10
+}
+// Convert a PagetableEntry to a physical address.
+pub fn pte2pa(pte: usize) -> usize {
+    (pte >> 10) << 12
+}
+
+// Extract the three 9-bit page table indices from a virtual address.
+const PXMASK: usize = 0x1ffusize; // 9 bits.
+
+fn pxshift(level: usize) -> usize {
+    PAGE_OFFSET + (level * 9)
+}
+pub fn px(level: usize, virtual_addr: usize) -> usize {
+    (virtual_addr >> pxshift(level)) & PXMASK
+}
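For readers double-checking the Sv39 arithmetic in the new riscv/mem.rs, here is a small standalone sketch (not part of the commit) that exercises the index-extraction and PTE-conversion helpers with the same constants:

// Standalone check of the Sv39 helpers above (same constants, no kernel code).
const PAGE_OFFSET: usize = 12;
const PXMASK: usize = 0x1ff; // 9 bits per level

fn pxshift(level: usize) -> usize {
    PAGE_OFFSET + level * 9
}

fn px(level: usize, va: usize) -> usize {
    (va >> pxshift(level)) & PXMASK
}

fn pa2pte(pa: usize) -> usize {
    (pa >> 12) << 10
}

fn pte2pa(pte: usize) -> usize {
    (pte >> 10) << 12
}

fn main() {
    // A kernel-space address: level 2 is the root-table index, level 0 the leaf index.
    let va = 0x8000_1000usize;
    println!("indices = [{}, {}, {}]", px(2, va), px(1, va), px(0, va));

    // pa2pte shifts the physical page number into place above the 10 flag bits,
    // so stripping the flags and shifting back recovers the page-aligned address.
    let pa = 0x8020_3000usize;
    assert_eq!(pte2pa(pa2pte(pa)), pa);
}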
@@ -1,54 +0,0 @@
-// Physical memory layout
-
-// QEMU -machine virt is setup like this,
-// based on QEMU's hw/riscv/virt.c
-//
-// 00001000 - boot ROM, provided by qemu
-// 02000000 - CLINT
-// 0C000000 - PLIC
-// 10000000 - uart0
-// 10001000 - virtio disk
-// 80000000 - boot ROM jumps here in machine mode (kernel loads the kernel here)
-// unused after 8000000
-
-// The kernel uses physical memory as so:
-// 80000000 - entry.S, then kernel text and data
-// end - start of kernel page allocation data
-// PHYSTOP - end of RAM used by the kernel
-
-use super::{MAXVA, PGSIZE};
-
-// QEMU puts UART registers here in physical memory.
-pub const UART0: usize = 0x10000000;
-pub const UART0_IRQ: usize = 10;
-
-// Virtio MMIO interface
-pub const VIRTIO0: usize = 0x10001000;
-pub const VIRTIO0_IRQ: usize = 1;
-
-// The kernel expects there to be RAM
-// for use by the kernel and user pages
-// from physical address 0x80000000 to PHYSTOP.
-pub const KERNBASE: u64 = 0x80000000;
-pub const PHYSTOP: u64 = KERNBASE + 128 * 1024 * 1024;
-
-// Map the trampoline page to the highest address,
-// in both user and kernel space.
-pub const TRAMPOLINE: u64 = MAXVA - PGSIZE;
-
-// Map kernel stacks beneath the trampoline,
-// each surrounded by invalid guard pages.
-pub fn kstack(p: u64) -> u64 {
-    TRAMPOLINE - (p + 1) * 2 * PGSIZE
-}
-
-// User memory layout.
-// Address zero first:
-// - text
-// - original data and bss
-// - fixed-size stack
-// - expandable heap
-// ...
-// - TRAPFRAME (p->trapframe, used by the trampoline)
-// - TRAMPOLINE (the same page as in the kernel)
-pub const TRAPFRAME: u64 = TRAMPOLINE - PGSIZE;
@@ -1,17 +1,13 @@
 pub mod asm;
 pub mod clint;
 pub mod cpu;
-pub mod memlayout;
+pub mod mem;
+pub mod hardware;
 pub mod plic;
 pub mod power;
 pub mod start;
 
 pub use asm::*;
-pub use memlayout::*;
-
-pub type Pde = u64;
-pub type PagetableEntry = u64;
-pub type Pagetable = *mut [PagetableEntry; 512];
-
+pub use mem::make_satp;
 /// Previous mode
 pub const MSTATUS_MPP_MASK: u64 = 3 << 11;
@@ -45,70 +41,3 @@ pub const MIE_MEIE: u64 = 1 << 11;
 pub const MIE_MTIE: u64 = 1 << 7;
 /// Machine-mode Software Interrupt Enable
 pub const MIE_MSIE: u64 = 1 << 3;
-
-pub const SATP_SV39: u64 = 8 << 60;
-
-pub fn make_satp(pagetable: Pagetable) -> u64 {
-    SATP_SV39 | (pagetable as usize as u64 >> 12)
-}
-
-/// Bytes per page
-pub const PGSIZE: u64 = 4096;
-/// Bits of offset within a page
-pub const PGSHIFT: usize = 12;
-
-pub fn pg_round_up(sz: u64) -> u64 {
-    (sz + PGSIZE - 1) & !(PGSIZE - 1)
-}
-pub fn pg_round_down(a: u64) -> u64 {
-    a & !(PGSIZE - 1)
-}
-
-// Valid.
-pub const PTE_V: i32 = 1 << 0;
-pub const PTE_R: i32 = 1 << 1;
-pub const PTE_W: i32 = 1 << 2;
-pub const PTE_X: i32 = 1 << 3;
-// User can access.
-pub const PTE_U: i32 = 1 << 4;
-
-/*
-// shift a physical address to the right place for a PTE.
-#define PA2PTE(pa) ((((uint64)pa) >> 12) << 10)
-
-#define PTE2PA(pte) (((pte) >> 10) << 12)
-
-#define PTE_FLAGS(pte) ((pte) & 0x3FF)
-
-// extract the three 9-bit page table indices from a virtual address.
-#define PXMASK 0x1FF // 9 bits
-#define PXSHIFT(level) (PGSHIFT+(9*(level)))
-#define PX(level, va) ((((uint64) (va)) >> PXSHIFT(level)) & PXMASK)
-*/
-
-/// Shift a physical address to the right place for a PTE.
-pub fn pa2pte(pa: usize) -> usize {
-    (pa >> 12) << 10
-}
-
-pub fn pte2pa(pte: usize) -> usize {
-    (pte >> 10) << 12
-}
-
-// Extract the three 9-bit page table indices from a virtual address.
-pub const PXMASK: usize = 0x1ffusize; // 9 bits.
-
-pub fn pxshift(level: usize) -> usize {
-    PGSHIFT + (level * 9)
-}
-
-pub fn px(level: usize, virtual_addr: usize) -> usize {
-    (virtual_addr >> pxshift(level)) & PXMASK
-}
-
-/// One beyond the highest possible virtual address.
-///
-/// MAXVA is actually one bit less than the max allowed by
-/// Sv39, to avoid having to sign-extend virtual addresses
-/// that have the high bit set.
-pub const MAXVA: u64 = 1u64 << (9 + 9 + 9 + 12 - 1);
@@ -1,9 +1,7 @@
 //! The RISC-V Platform Level Interrupt Controller (PLIC)
 
-use crate::{
-    arch::riscv::{UART0_IRQ, VIRTIO0_IRQ},
-    proc::cpu::Cpu,
-};
+use super::hardware::{UART0_IRQ, VIRTIO0_IRQ};
+use crate::proc::cpu::Cpu;
 
 // QEMU puts platform-level interrupt controller (PLIC) here.
 pub const PLIC: usize = 0x0c000000;
@@ -25,7 +25,7 @@ const LSR_RX_READY: u8 = 1 << 0;
 /// THR can accept another character to send
 const LSR_TX_IDLE: u8 = 1 << 5;
 
-pub static UART0: Uart = Uart::new(crate::arch::riscv::memlayout::UART0);
+pub static UART0: Uart = Uart::new(crate::arch::hardware::UART0);
 
 enum Register {
     ReceiveHolding,
@@ -3,7 +3,7 @@
 //! and pipe buffers. Allocates whole 4096-byte pages.
 
 use crate::{
-    arch::riscv::{memlayout::PHYSTOP, pg_round_up, PGSIZE},
+    arch::mem::{round_up_page, PAGE_SIZE, PHYSICAL_END},
     mem::memset,
     sync::spinlock::Spinlock,
 };
@@ -34,15 +34,15 @@ pub struct KernelMemory {
 
 pub unsafe fn kinit() {
     kmem.lock = Spinlock::new();
-    freerange(addr_of_mut!(end).cast(), PHYSTOP as *mut u8)
+    freerange(addr_of_mut!(end).cast(), PHYSICAL_END as *mut u8)
 }
 
 unsafe fn freerange(pa_start: *mut u8, pa_end: *mut u8) {
-    let mut p = pg_round_up(pa_start as usize as u64) as *mut u8;
+    let mut p = round_up_page(pa_start as usize) as *mut u8;
 
-    while p.add(PGSIZE as usize) <= pa_end {
+    while p.add(PAGE_SIZE) <= pa_end {
         kfree(p.cast());
-        p = p.add(PGSIZE as usize);
+        p = p.add(PAGE_SIZE);
     }
 }
 
@@ -52,14 +52,14 @@ unsafe fn freerange(pa_start: *mut u8, pa_end: *mut u8) {
 /// allocator - see kinit above.
 #[no_mangle]
 pub unsafe extern "C" fn kfree(pa: *mut u8) {
-    if (pa as usize as u64 % PGSIZE) != 0
+    if (pa as usize % PAGE_SIZE) != 0
         || pa <= addr_of_mut!(end) as *mut u8
-        || pa >= PHYSTOP as *mut u8
+        || pa >= PHYSICAL_END as *mut u8
     {
         panic!("kfree");
     }
 
-    memset(pa, 0, PGSIZE as u32);
+    memset(pa, 0, PAGE_SIZE as u32);
 
     let run: *mut Run = pa.cast();
 
@@ -82,7 +82,7 @@ pub unsafe extern "C" fn kalloc() -> *mut u8 {
     }
 
     if !run.is_null() {
-        memset(run.cast(), 0, PGSIZE as u32);
+        memset(run.cast(), 0, PAGE_SIZE as u32);
     }
 
     run as *mut u8
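The kfree changes above keep xv6's usual sanity check: a freed pointer must be page-aligned, above the kernel image, and below the end of RAM. A standalone restatement of that predicate follows (not part of the commit; KERNEL_END below is a made-up stand-in for the linker-provided end symbol):

// Standalone sketch of the kfree validity check (bounds are illustrative).
const PAGE_SIZE: usize = 4096;
const KERNEL_END: usize = 0x8010_0000;   // hypothetical stand-in for the linker `end` symbol
const PHYSICAL_END: usize = 0x8800_0000; // KERNEL_BASE + 128 MiB, as in arch/mem.rs

fn is_valid_free(pa: usize) -> bool {
    pa % PAGE_SIZE == 0 && pa > KERNEL_END && pa < PHYSICAL_END
}

fn main() {
    assert!(is_valid_free(0x8020_0000));
    assert!(!is_valid_free(0x8020_0001)); // not page-aligned
    assert!(!is_valid_free(0x9000_0000)); // past the end of RAM
}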
@@ -1,11 +1,17 @@
 use crate::{
-    arch::riscv::{
-        asm, make_satp,
-        memlayout::{KERNBASE, PHYSTOP, TRAMPOLINE, UART0, VIRTIO0},
-        pg_round_down, pg_round_up,
-        plic::PLIC,
-        power::QEMU_POWER,
-        pte2pa, Pagetable, PagetableEntry, MAXVA, PGSIZE, PTE_R, PTE_U, PTE_V, PTE_W, PTE_X,
+    arch::{
+        self,
+        hardware::{UART0, VIRTIO0},
+        mem::{
+            round_down_page, round_up_page, Pagetable, PagetableEntry, KERNEL_BASE, PAGE_SIZE,
+            PHYSICAL_END, TRAMPOLINE, VIRTUAL_MAX, PTE_R, PTE_U, PTE_V, PTE_W, PTE_X,
+        },
+        riscv::{
+            asm, make_satp,
+            plic::PLIC,
+            power::QEMU_POWER,
+            mem::pte2pa,
+        },
     },
     mem::{
         kalloc::{kalloc, kfree},
@@ -28,26 +34,32 @@ pub static mut KERNEL_PAGETABLE: Pagetable = null_mut();
 /// Make a direct-map page table for the kernel.
 pub unsafe fn kvmmake() -> Pagetable {
     let pagetable = kalloc() as Pagetable;
-    memset(pagetable.cast(), 0, PGSIZE as u32);
+    memset(pagetable.cast(), 0, PAGE_SIZE as u32);
 
     // QEMU test interface used for power management.
     kvmmap(
         pagetable,
         QEMU_POWER as u64,
         QEMU_POWER as u64,
-        PGSIZE,
+        PAGE_SIZE as u64,
         PTE_R | PTE_W,
     );
 
     // UART registers
-    kvmmap(pagetable, UART0 as u64, UART0 as u64, PGSIZE, PTE_R | PTE_W);
+    kvmmap(
+        pagetable,
+        UART0 as u64,
+        UART0 as u64,
+        PAGE_SIZE as u64,
+        PTE_R | PTE_W,
+    );
 
     // VirtIO MMIO disk interface
     kvmmap(
         pagetable,
         VIRTIO0 as u64,
         VIRTIO0 as u64,
-        PGSIZE,
+        PAGE_SIZE as u64,
         PTE_R | PTE_W,
     );
 
@@ -65,9 +77,9 @@ pub unsafe fn kvmmake() -> Pagetable {
     // Map kernel text executable and read-only.
     kvmmap(
         pagetable,
-        KERNBASE,
-        KERNBASE,
-        etext_addr - KERNBASE,
+        KERNEL_BASE as u64,
+        KERNEL_BASE as u64,
+        etext_addr - KERNEL_BASE as u64,
         PTE_R | PTE_X,
     );
 
@@ -76,7 +88,7 @@ pub unsafe fn kvmmake() -> Pagetable {
         pagetable,
         etext_addr,
         etext_addr,
-        PHYSTOP - etext_addr,
+        PHYSICAL_END as u64 - etext_addr,
         PTE_R | PTE_W,
     );
 
@@ -84,9 +96,9 @@ pub unsafe fn kvmmake() -> Pagetable {
     // the highest virtual address in the kernel.
     kvmmap(
         pagetable,
-        TRAMPOLINE,
+        TRAMPOLINE as u64,
         addr_of!(trampoline) as usize as u64,
-        PGSIZE,
+        PAGE_SIZE as u64,
         PTE_R | PTE_X,
     );
 
@@ -104,12 +116,12 @@ pub unsafe fn kvminit() {
 /// Switch hardware pagetable register to the kernel's pagetable and enable paging.
 pub unsafe fn kvminithart() {
     // Wait for any previous writes to the pagetable memory to finish.
-    asm::sfence_vma();
+    arch::mem::flush_cached_pages();
 
     asm::w_satp(make_satp(KERNEL_PAGETABLE));
 
     // Flush stale entries from the TLB.
-    asm::sfence_vma();
+    arch::mem::flush_cached_pages();
 }
 
 /// Return the address of the PTE in pagetable
@@ -127,7 +139,7 @@ pub unsafe fn kvminithart() {
 /// - 30..39: 9 bits of level 0 index.
 /// - 39..64: Must be zero.
 pub unsafe fn walk(mut pagetable: Pagetable, virtual_addr: u64, alloc: i32) -> *mut PagetableEntry {
-    if virtual_addr > MAXVA {
+    if virtual_addr > VIRTUAL_MAX as u64 {
         panic!("walk");
     }
 
@@ -150,7 +162,7 @@ pub unsafe fn walk(mut pagetable: Pagetable, virtual_addr: u64, alloc: i32) -> *
             return null_mut();
         }
 
-        memset(pagetable.cast(), 0, PGSIZE as u32);
+        memset(pagetable.cast(), 0, PAGE_SIZE as u32);
         *pte = (((pagetable as usize) >> 12) << 10) as PagetableEntry | PTE_V as u64;
     }
 
@@ -165,7 +177,7 @@ pub unsafe fn walk(mut pagetable: Pagetable, virtual_addr: u64, alloc: i32) -> *
 /// Can only be used to look up user pages.
 #[no_mangle]
 pub unsafe extern "C" fn walkaddr(pagetable: Pagetable, virtual_addr: u64) -> u64 {
-    if virtual_addr > MAXVA {
+    if virtual_addr > VIRTUAL_MAX as u64 {
         return 0;
     }
 
@@ -211,8 +223,8 @@ pub unsafe extern "C" fn mappages(
         panic!("mappages: size = 0");
     }
 
-    let mut a = pg_round_down(virtual_addr);
-    let last = pg_round_down(virtual_addr + size - 1);
+    let mut a = round_down_page(virtual_addr as usize) as u64;
+    let last = round_down_page((virtual_addr + size - 1) as usize) as u64;
 
     loop {
         let pte = walk(pagetable, a, 1);
@@ -229,8 +241,8 @@ pub unsafe extern "C" fn mappages(
         if a == last {
             break;
         } else {
-            a += PGSIZE;
-            physical_addr += PGSIZE;
+            a += PAGE_SIZE as u64;
+            physical_addr += PAGE_SIZE as u64;
         }
     }
 
@@ -248,11 +260,11 @@ pub unsafe extern "C" fn uvmunmap(
     num_pages: u64,
     do_free: i32,
 ) {
-    if virtual_addr % PGSIZE != 0 {
+    if virtual_addr % PAGE_SIZE as u64 != 0 {
         panic!("uvmunmap: not aligned");
     }
     let mut a = virtual_addr;
-    while a < virtual_addr + num_pages * PGSIZE {
+    while a < virtual_addr + num_pages * PAGE_SIZE as u64 {
         let pte = walk(pagetable, a, 0);
         if pte.is_null() {
             panic!("uvmunmap: walk");
@@ -266,7 +278,7 @@ pub unsafe extern "C" fn uvmunmap(
         }
 
         *pte = 0;
-        a += PGSIZE;
+        a += PAGE_SIZE as u64;
     }
 }
 
@@ -279,25 +291,25 @@ pub unsafe extern "C" fn uvmcreate() -> Pagetable {
     if pagetable.is_null() {
         return null_mut();
     }
-    memset(pagetable.cast(), 0, PGSIZE as u32);
+    memset(pagetable.cast(), 0, PAGE_SIZE as u32);
     pagetable
 }
 
 /// Load the user initcode into address 0 of pagetable for the very first process.
 ///
-/// `size` must be less than `PGSIZE`.
+/// `size` must be less than `PAGE_SIZE`.
 #[no_mangle]
 pub unsafe extern "C" fn uvmfirst(pagetable: Pagetable, src: *mut u8, size: u32) {
-    if size >= PGSIZE as u32 {
+    if size >= PAGE_SIZE as u32 {
         panic!("uvmfirst: more than a page");
     }
 
     let mem = kalloc();
-    memset(mem, 0, PGSIZE as u32);
+    memset(mem, 0, PAGE_SIZE as u32);
     mappages(
         pagetable,
         0,
-        PGSIZE,
+        PAGE_SIZE as u64,
         mem as usize as u64,
         PTE_W | PTE_R | PTE_X | PTE_U,
     );
@@ -319,7 +331,7 @@ pub unsafe extern "C" fn uvmalloc(
         return old_size;
     }
 
-    old_size = pg_round_up(old_size);
+    old_size = round_up_page(old_size as usize) as u64;
     let mut a = old_size;
 
     while a < new_size {
@@ -329,12 +341,12 @@ pub unsafe extern "C" fn uvmalloc(
             return 0;
         }
 
-        memset(mem.cast(), 0, PGSIZE as u32);
+        memset(mem.cast(), 0, PAGE_SIZE as u64 as u32);
 
         if mappages(
             pagetable,
             a,
-            PGSIZE,
+            PAGE_SIZE as u64,
             mem as usize as u64,
             PTE_R | PTE_U | xperm,
         ) != 0
@@ -344,7 +356,7 @@ pub unsafe extern "C" fn uvmalloc(
             return 0;
         }
 
-        a += PGSIZE;
+        a += PAGE_SIZE as u64;
     }
 
     new_size
@@ -361,9 +373,15 @@ pub unsafe extern "C" fn uvmdealloc(pagetable: Pagetable, old_size: u64, new_siz
         return old_size;
     }
 
-    if pg_round_up(new_size) < pg_round_up(old_size) {
-        let num_pages = (pg_round_up(old_size) - pg_round_up(new_size)) / PGSIZE;
-        uvmunmap(pagetable, pg_round_up(new_size), num_pages, 1);
+    if round_up_page(new_size as usize) < round_up_page(old_size as usize) {
+        let num_pages =
+            (round_up_page(old_size as usize) - round_up_page(new_size as usize)) / PAGE_SIZE;
+        uvmunmap(
+            pagetable,
+            round_up_page(new_size as usize) as u64,
+            num_pages as u64,
+            1,
+        );
     }
 
     new_size
@@ -393,7 +411,12 @@ pub unsafe extern "C" fn freewalk(pagetable: Pagetable) {
 #[no_mangle]
 pub unsafe extern "C" fn uvmfree(pagetable: Pagetable, size: u64) {
     if size > 0 {
-        uvmunmap(pagetable, 0, pg_round_up(size) / PGSIZE, 1);
+        uvmunmap(
+            pagetable,
+            0,
+            (round_up_page(size as usize) / PAGE_SIZE) as u64,
+            1,
+        );
     }
     freewalk(pagetable);
 }
@@ -421,19 +444,23 @@ pub unsafe extern "C" fn uvmcopy(old: Pagetable, new: Pagetable, size: u64) -> i
 
         let mem = kalloc();
         if mem.is_null() {
-            uvmunmap(new, 0, i / PGSIZE, 1);
+            uvmunmap(new, 0, i / PAGE_SIZE as u64, 1);
             return -1;
         }
 
-        memmove(mem.cast(), (pa as usize as *mut u8).cast(), PGSIZE as u32);
+        memmove(
+            mem.cast(),
+            (pa as usize as *mut u8).cast(),
+            PAGE_SIZE as u64 as u32,
+        );
 
-        if mappages(new, i, PGSIZE, mem as usize as u64, flags as i32) != 0 {
+        if mappages(new, i, PAGE_SIZE as u64, mem as usize as u64, flags as i32) != 0 {
             kfree(mem.cast());
-            uvmunmap(new, 0, i / PGSIZE, 1);
+            uvmunmap(new, 0, i / PAGE_SIZE as u64, 1);
             return -1;
         }
 
-        i += PGSIZE;
+        i += PAGE_SIZE as u64;
     }
 
     0
@@ -463,13 +490,13 @@ pub unsafe extern "C" fn copyout(
     mut len: u64,
 ) -> i32 {
     while len > 0 {
-        let va0 = pg_round_down(dst_virtual_addr);
+        let va0 = round_down_page(dst_virtual_addr as usize) as u64;
         let pa0 = walkaddr(pagetable, va0);
         if pa0 == 0 {
            return -1;
        }
 
-        let mut n = PGSIZE - (dst_virtual_addr - va0);
+        let mut n = PAGE_SIZE as u64 - (dst_virtual_addr - va0);
         if n > len {
             n = len;
         }
@@ -481,7 +508,7 @@ pub unsafe extern "C" fn copyout(
 
         len -= n;
         src = src.add(n as usize);
-        dst_virtual_addr = va0 + PGSIZE;
+        dst_virtual_addr = va0 + PAGE_SIZE as u64;
     }
     0
 }
@@ -498,13 +525,13 @@ pub unsafe extern "C" fn copyin(
     mut len: u64,
 ) -> i32 {
     while len > 0 {
-        let va0 = pg_round_down(src_virtual_addr);
+        let va0 = round_down_page(src_virtual_addr as usize) as u64;
         let pa0 = walkaddr(pagetable, va0);
         if pa0 == 0 {
             return -1;
         }
 
-        let mut n = PGSIZE - (src_virtual_addr - va0);
+        let mut n = PAGE_SIZE as u64 - (src_virtual_addr - va0);
         if n > len {
             n = len;
         }
@@ -516,7 +543,7 @@ pub unsafe extern "C" fn copyin(
 
         len -= n;
         dst = dst.add(n as usize);
-        src_virtual_addr = va0 + PGSIZE;
+        src_virtual_addr = va0 + PAGE_SIZE as u64;
     }
     0
 }
@@ -535,13 +562,13 @@ pub unsafe fn copyinstr(
     let mut got_null = false;
 
     while !got_null && max > 0 {
-        let va0 = pg_round_down(src_virtual_addr);
+        let va0 = round_down_page(src_virtual_addr as usize) as u64;
         let pa0 = walkaddr(pagetable, va0);
         if pa0 == 0 {
             return -1;
         }
 
-        let mut n = PGSIZE - (src_virtual_addr - va0);
+        let mut n = PAGE_SIZE as u64 - (src_virtual_addr - va0);
         if n > max {
             n = max;
         }
@@ -562,7 +589,7 @@ pub unsafe fn copyinstr(
             dst = dst.add(1);
         }
 
-        src_virtual_addr = va0 + PGSIZE;
+        src_virtual_addr = va0 + PAGE_SIZE as u64;
     }
 
     if got_null {
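copyout, copyin, and copyinstr all follow the same page-at-a-time pattern these hunks touch: round the user virtual address down to a page boundary, translate that page, copy up to the end of the page, then advance to the next page. A minimal standalone sketch of the loop shape (not part of the commit; the translate closure below is a stand-in for walkaddr, not the kernel's actual API):

// Standalone sketch of the page-at-a-time loop shared by copyout/copyin/copyinstr.
const PAGE_SIZE: usize = 4096;

fn round_down_page(addr: usize) -> usize {
    addr & !(PAGE_SIZE - 1)
}

// `translate` maps a page-aligned virtual address to a physical one, or None on failure.
fn copy_pagewise(mut va: usize, mut len: usize, mut translate: impl FnMut(usize) -> Option<usize>) -> bool {
    while len > 0 {
        let va0 = round_down_page(va);
        let _pa0 = match translate(va0) {
            Some(pa) => pa,
            None => return false, // unmapped page, give up
        };
        // Copy at most up to the end of this page.
        let n = (PAGE_SIZE - (va - va0)).min(len);
        // ... the real code memmoves `n` bytes at the translated address here ...
        len -= n;
        va = va0 + PAGE_SIZE;
    }
    true
}

fn main() {
    // Identity "translation" just to drive the loop across three partial pages.
    assert!(copy_pagewise(0x1234, 10_000, |va| Some(va)));
}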
@@ -7,9 +7,9 @@ use super::{
     trapframe::Trapframe,
 };
 use crate::{
-    arch::riscv::{
-        memlayout::{TRAMPOLINE, TRAPFRAME},
-        Pagetable, PGSIZE, PTE_R, PTE_W, PTE_X,
+    arch::{
+        mem::{Pagetable, PAGE_SIZE, TRAMPOLINE, TRAPFRAME,
+        PTE_R, PTE_W, PTE_X},
     },
     fs::{
         file::{fileclose, filedup, File, Inode},
@@ -197,7 +197,7 @@ impl Process {
             core::mem::size_of::<Context>() as u32,
         );
         p.context.ra = forkret as usize as u64;
-        p.context.sp = p.kernel_stack + PGSIZE;
+        p.context.sp = p.kernel_stack + PAGE_SIZE as u64;
 
         Ok(p)
     }
@@ -260,8 +260,8 @@ impl Process {
         // to and from user space, so not PTE_U.
         if mappages(
             pagetable,
-            TRAMPOLINE,
-            PGSIZE,
+            TRAMPOLINE as u64,
+            PAGE_SIZE as u64,
             addr_of!(trampoline) as usize as u64,
             PTE_R | PTE_X,
         ) < 0
@@ -273,13 +273,13 @@ impl Process {
         // Map the trapframe page just below the trampoline page for trampoline.S.
         if mappages(
             pagetable,
-            TRAPFRAME,
-            PGSIZE,
+            TRAPFRAME as u64,
+            PAGE_SIZE as u64,
             self.trapframe as usize as u64,
             PTE_R | PTE_W,
         ) < 0
         {
-            uvmunmap(pagetable, TRAMPOLINE, 1, 0);
+            uvmunmap(pagetable, TRAMPOLINE as u64, 1, 0);
             uvmfree(pagetable, 0);
             return Err(ProcessError::Allocation);
         }
@@ -288,8 +288,8 @@ impl Process {
     }
     /// Free a process's pagetable and free the physical memory it refers to.
     pub unsafe fn free_pagetable(pagetable: Pagetable, size: usize) {
-        uvmunmap(pagetable, TRAMPOLINE, 1, 0);
-        uvmunmap(pagetable, TRAPFRAME, 1, 0);
+        uvmunmap(pagetable, TRAMPOLINE as u64, 1, 0);
+        uvmunmap(pagetable, TRAPFRAME as u64, 1, 0);
         uvmfree(pagetable, size as u64)
     }
 
@@ -4,7 +4,7 @@ use super::{
     process::{proc, Process, ProcessState},
 };
 use crate::{
-    arch::riscv::{intr_get, intr_on},
+    arch,
     sync::spinlock::{Spinlock, SpinlockGuard},
 };
 use core::{
@@ -39,7 +39,7 @@ pub unsafe fn scheduler() -> ! {
 
     loop {
         // Avoid deadlock by ensuring that devices can interrupt.
-        intr_on();
+        arch::interrupt::enable_interrupts();
 
         for p in &mut proc {
             let _guard = p.lock.lock();
@@ -76,7 +76,7 @@ pub unsafe fn sched() {
         panic!("sched locks");
     } else if p.state == ProcessState::Running {
         panic!("sched running");
-    } else if intr_get() > 0 {
+    } else if arch::interrupt::interrupts_enabled() > 0 {
         panic!("sched interruptible");
     }
 
@@ -1,5 +1,10 @@
 use crate::{
-    arch::{self, riscv::*},
+    arch::{
+        self,
+        mem::{PAGE_SIZE, TRAMPOLINE},
+        hardware::{UART0_IRQ, VIRTIO0_IRQ},
+        riscv::{asm, SSTATUS_SPP, SSTATUS_SPIE, mem::make_satp},
+    },
     println,
     proc::{
         cpu::Cpu,
@@ -27,7 +32,7 @@ pub static CLOCK_TICKS: Mutex<usize> = Mutex::new(0);
 
 /// Set up to take exceptions and traps while in the kernel.
 pub unsafe fn trapinithart() {
-    w_stvec(kernelvec as usize as u64);
+    asm::w_stvec(kernelvec as usize as u64);
 }
 
 pub fn clockintr() {
@@ -43,7 +48,7 @@ pub fn clockintr() {
 ///
 /// Returns 2 if timer interrupt, 1 if other device, 0 if not recognized.
 pub unsafe fn devintr() -> i32 {
-    let scause = r_scause();
+    let scause = asm::r_scause();
 
     if (scause & 0x8000000000000000 > 0) && (scause & 0xff) == 9 {
         // This is a supervisor external interrupt, via PLIC.
@@ -77,7 +82,7 @@ pub unsafe fn devintr() -> i32 {
 
         // Acknowledge the software interrupt by
         // clearing the SSIP bit in sip.
-        w_sip(r_sip() & !2);
+        asm::w_sip(asm::r_sip() & !2);
 
         2
     } else {
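The scause test in devintr above packs two checks into one expression: bit 63 distinguishes interrupts from exceptions, and the low cause bits identify the source, with 9 meaning a supervisor external interrupt routed through the PLIC. A standalone restatement (not part of the commit):

// Standalone sketch of the devintr scause test.
fn is_supervisor_external(scause: u64) -> bool {
    (scause & 0x8000_0000_0000_0000) != 0 && (scause & 0xff) == 9
}

fn main() {
    assert!(is_supervisor_external(0x8000_0000_0000_0009));
    assert!(!is_supervisor_external(8)); // scause 8 is an ecall exception, not an interrupt
}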
@@ -90,10 +95,10 @@ pub struct InterruptBlocker;
 impl InterruptBlocker {
     pub fn new() -> InterruptBlocker {
         unsafe {
-            let interrupts_before = intr_get();
+            let interrupts_before = arch::interrupt::interrupts_enabled();
             let cpu = Cpu::current();
 
-            intr_off();
+            arch::interrupt::disable_interrupts();
 
             if cpu.interrupt_disable_layers == 0 {
                 cpu.previous_interrupts_enabled = interrupts_before;
@@ -109,7 +114,7 @@ impl core::ops::Drop for InterruptBlocker {
         unsafe {
             let cpu = Cpu::current();
 
-            if intr_get() == 1 || cpu.interrupt_disable_layers < 1 {
+            if arch::interrupt::interrupts_enabled() == 1 || cpu.interrupt_disable_layers < 1 {
                 // panic!("pop_off mismatched");
                 return;
             }
@@ -117,7 +122,7 @@ impl core::ops::Drop for InterruptBlocker {
             cpu.interrupt_disable_layers -= 1;
 
             if cpu.interrupt_disable_layers == 0 && cpu.previous_interrupts_enabled == 1 {
-                intr_on();
+                arch::interrupt::enable_interrupts();
             }
             // crate::sync::spinlock::pop_off();
         }
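InterruptBlocker is an RAII guard: constructing it disables interrupts and records whether they were on, and dropping it re-enables them only when the outermost guard goes away. The following standalone model is not part of the commit and uses global atomics instead of the per-CPU state the kernel actually keeps; it only shows how the layer counting composes:

// Standalone model of the InterruptBlocker nesting behavior (illustrative only).
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};

static INTERRUPTS_ENABLED: AtomicBool = AtomicBool::new(true);
static DISABLE_LAYERS: AtomicUsize = AtomicUsize::new(0);
static WERE_ENABLED: AtomicBool = AtomicBool::new(false);

struct InterruptBlocker;

impl InterruptBlocker {
    fn new() -> Self {
        // Read the flag, then clear it (the intr_get / intr_off pair).
        let before = INTERRUPTS_ENABLED.swap(false, Ordering::SeqCst);
        // Only the outermost guard records the state to restore later.
        if DISABLE_LAYERS.fetch_add(1, Ordering::SeqCst) == 0 {
            WERE_ENABLED.store(before, Ordering::SeqCst);
        }
        InterruptBlocker
    }
}

impl Drop for InterruptBlocker {
    fn drop(&mut self) {
        // Only the outermost guard may re-enable, and only if interrupts
        // were enabled before the first guard was created.
        if DISABLE_LAYERS.fetch_sub(1, Ordering::SeqCst) == 1 && WERE_ENABLED.load(Ordering::SeqCst) {
            INTERRUPTS_ENABLED.store(true, Ordering::SeqCst);
        }
    }
}

fn main() {
    {
        let _outer = InterruptBlocker::new();
        {
            let _inner = InterruptBlocker::new();
        } // dropping the inner guard must not re-enable interrupts
        assert!(!INTERRUPTS_ENABLED.load(Ordering::SeqCst));
    } // dropping the outer guard restores the original state
    assert!(INTERRUPTS_ENABLED.load(Ordering::SeqCst));
}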
@@ -133,36 +138,36 @@ pub unsafe extern "C" fn usertrapret() {
     // We're about to switch the destination of traps from
     // kerneltrap() to usertrap(), so turn off interrupts until
     // we're back in user space, where usertrap() is correct.
-    intr_off();
+    arch::interrupt::disable_interrupts();
 
     // Send syscalls, interrupts, and exceptions to uservec in trampoline.S
     let trampoline_uservec =
-        TRAMPOLINE + (addr_of!(uservec) as usize as u64) - (addr_of!(trampoline) as usize as u64);
-    w_stvec(trampoline_uservec);
+        TRAMPOLINE + (addr_of!(uservec) as usize) - (addr_of!(trampoline) as usize);
+    asm::w_stvec(trampoline_uservec as u64);
 
     // Set up trapframe values that uservec will need when
     // the process next traps into the kernel.
     // kernel page table
-    (*proc.trapframe).kernel_satp = r_satp();
+    (*proc.trapframe).kernel_satp = asm::r_satp();
     // process's kernel stack
-    (*proc.trapframe).kernel_sp = proc.kernel_stack + PGSIZE;
+    (*proc.trapframe).kernel_sp = proc.kernel_stack + PAGE_SIZE as u64;
     (*proc.trapframe).kernel_trap = usertrap as usize as u64;
     // hartid for Cpu::current_id()
-    (*proc.trapframe).kernel_hartid = r_tp();
+    (*proc.trapframe).kernel_hartid = asm::r_tp();
 
     // Set up the registers that trampoline.S's
     // sret will use to get to user space.
 
     // Set S Previous Privelege mode to User.
-    let mut x = r_sstatus();
+    let mut x = asm::r_sstatus();
     // Clear SPP to 0 for user mode.
     x &= !SSTATUS_SPP;
     // Enable interrupts in user mode.
     x |= SSTATUS_SPIE;
-    w_sstatus(x);
+    asm::w_sstatus(x);
 
     // Set S Exception Program Counter to the saved user pc.
-    w_sepc((*proc.trapframe).epc);
+    asm::w_sepc((*proc.trapframe).epc);
 
     // Tell trampoline.S the user page table to switch to.
     let satp = make_satp(proc.pagetable);
@@ -170,8 +175,8 @@ pub unsafe extern "C" fn usertrapret() {
     // Jump to userret in trampoline.S at the top of memory, which
     // switches to the user page table, restores user registers,
     // and switches to user mode with sret.
-    let trampoline_userret = (TRAMPOLINE + (addr_of!(userret) as usize as u64)
-        - (addr_of!(trampoline) as usize as u64)) as usize;
+    let trampoline_userret =
+        TRAMPOLINE + (addr_of!(userret) as usize) - (addr_of!(trampoline) as usize);
     let trampoline_userret = trampoline_userret as *const ();
     // Rust's most dangerous function: core::mem::transmute
     let trampoline_userret = core::mem::transmute::<*const (), fn(u64)>(trampoline_userret);
@@ -182,19 +187,19 @@
 /// on whatever the current kernel stack is.
 #[no_mangle]
 pub unsafe extern "C" fn kerneltrap() {
-    let sepc = r_sepc();
-    let sstatus = r_sstatus();
-    let scause = r_scause();
+    let sepc = asm::r_sepc();
+    let sstatus = asm::r_sstatus();
+    let scause = asm::r_scause();
 
     if sstatus & SSTATUS_SPP == 0 {
         panic!("kerneltrap: not from supervisor mode");
-    } else if intr_get() != 0 {
+    } else if arch::interrupt::interrupts_enabled() != 0 {
         panic!("kerneltrap: interrupts enabled");
     }
 
     let which_dev = devintr();
     if which_dev == 0 {
-        println!("scause {}\nsepc={} stval={}", scause, r_sepc(), r_stval());
+        println!("scause {}\nsepc={} stval={}", scause, asm::r_sepc(), asm::r_stval());
         panic!("kerneltrap");
     } else if which_dev == 2
         && Process::current().is_some()
@@ -206,8 +211,8 @@ pub unsafe extern "C" fn kerneltrap() {
 
     // The yield() may have caused some traps to occur,
    // so restore trap registers for use by kernelvec.S's sepc instruction.
-    w_sepc(sepc);
-    w_sstatus(sstatus);
+    asm::w_sepc(sepc);
+    asm::w_sstatus(sstatus);
 }
 
 /// Handle an interrupt, exception, or system call from userspace.
@@ -215,20 +220,20 @@ pub unsafe extern "C" fn kerneltrap() {
 /// Called from trampoline.S
 #[no_mangle]
 pub unsafe extern "C" fn usertrap() {
-    if r_sstatus() & SSTATUS_SPP != 0 {
+    if asm::r_sstatus() & SSTATUS_SPP != 0 {
         panic!("usertrap: not from user mode");
     }
 
     // Send interrupts and exceptions to kerneltrap(),
     // since we're now in the kernel.
-    w_stvec(kernelvec as usize as u64);
+    asm::w_stvec(kernelvec as usize as u64);
 
     let proc = Process::current().unwrap();
 
     // Save user program counter.
-    (*proc.trapframe).epc = r_sepc();
+    (*proc.trapframe).epc = asm::r_sepc();
 
-    if r_scause() == 8 {
+    if asm::r_scause() == 8 {
         // System call
 
         if proc.is_killed() {
@@ -241,19 +246,19 @@ pub unsafe extern "C" fn usertrap() {
 
         // An interrupt will change sepc, scause, and sstatus,
         // so enable only now that we're done with those registers.
-        intr_on();
+        arch::interrupt::enable_interrupts();
 
         syscall();
     }
 
     let which_dev = devintr();
-    if r_scause() != 8 && which_dev == 0 {
+    if asm::r_scause() != 8 && which_dev == 0 {
         println!(
             "usertrap(): unexpected scause {} {}\n\tsepc={} stval={}",
-            r_scause(),
+            asm::r_scause(),
             proc.pid,
-            r_sepc(),
-            r_stval()
+            asm::r_sepc(),
+            asm::r_stval()
         );
         proc.set_killed(true);
     }
@@ -275,10 +280,10 @@ pub unsafe extern "C" fn usertrap() {
 // are initially off, then push_intr_off, pop_intr_off leaves them off.
 
 pub unsafe fn push_intr_off() {
-    let old = intr_get();
+    let old = arch::interrupt::interrupts_enabled();
     let cpu = Cpu::current();
 
-    intr_off();
+    arch::interrupt::disable_interrupts();
     if cpu.interrupt_disable_layers == 0 {
         cpu.previous_interrupts_enabled = old;
     }
@@ -287,7 +292,7 @@ pub unsafe fn push_intr_off() {
 pub unsafe fn pop_intr_off() {
     let cpu = Cpu::current();
 
-    if intr_get() == 1 {
+    if arch::interrupt::interrupts_enabled() == 1 {
         // crate::panic_byte(b'0');
         panic!("pop_intr_off - interruptible");
     } else if cpu.interrupt_disable_layers < 1 {
@@ -298,6 +303,6 @@ pub unsafe fn pop_intr_off() {
     cpu.interrupt_disable_layers -= 1;
 
     if cpu.interrupt_disable_layers == 0 && cpu.previous_interrupts_enabled == 1 {
-        intr_on();
+        arch::interrupt::enable_interrupts();
    }
 }