Rewrite vm.c in mem::virtual_memory.

Garen Tyler 2023-10-28 20:28:37 -06:00
parent a1f88ff034
commit dd9cf556cb
Signed by: garentyler
GPG Key ID: D7A048C454CB7054
8 changed files with 590 additions and 537 deletions


@@ -2,7 +2,6 @@ R=rustkernel
 KERNEL_SOURCES = \
   entry.c \
-  vm.c \
   proc.c \
   swtch.c \
   trampoline.c \


@@ -156,8 +156,6 @@ void usertrapret(void);
 void uartintr(void);
 
 // vm.c
-void kvminit(void);
-void kvminithart(void);
 void kvmmap(pagetable_t, uint64, uint64, uint64, int);
 int mappages(pagetable_t, uint64, uint64, uint64, int);
 pagetable_t uvmcreate(void);


@@ -1,4 +1,557 @@
use crate::{
mem::{
kalloc::{kalloc, kfree},
memmove, memset,
},
proc::proc_mapstacks,
riscv::{
memlayout::{KERNBASE, PHYSTOP, TRAMPOLINE},
*,
},
};
use core::ptr::{addr_of, addr_of_mut, null_mut};
extern "C" { extern "C" {
pub fn kvminit(); /// kernel.ld sets this to end of kernel code.
pub fn kvminithart(); pub static etext: [u8; 0];
/// trampoline.S
pub static trampoline: [u8; 0];
}
/// The kernel's pagetable.
pub static mut KERNEL_PAGETABLE: Pagetable = null_mut();
/// Make a direct-map page table for the kernel.
pub unsafe fn kvmmake() -> Pagetable {
let pagetable = kalloc() as Pagetable;
memset(pagetable.cast(), 0, PGSIZE as u32);
// QEMU test interface used for power management.
kvmmap(pagetable, QEMU_POWER, QEMU_POWER, PGSIZE, PTE_R | PTE_W);
// UART registers
kvmmap(pagetable, UART0, UART0, PGSIZE, PTE_R | PTE_W);
// VirtIO MMIO disk interface
kvmmap(pagetable, VIRTIO0, VIRTIO0, PGSIZE, PTE_R | PTE_W);
// PLIC
kvmmap(pagetable, PLIC, PLIC, 0x400000u64, PTE_R | PTE_W);
let etext_addr = addr_of!(etext) as usize as u64;
// Map kernel text executable and read-only.
kvmmap(
pagetable,
KERNBASE,
KERNBASE,
etext_addr - KERNBASE,
PTE_R | PTE_X,
);
// Map kernel data and the physical RAM we'll make use of.
kvmmap(
pagetable,
etext_addr,
etext_addr,
PHYSTOP - etext_addr,
PTE_R | PTE_W,
);
// Map the trampoline for trap entry/exit to
// the highest virtual address in the kernel.
kvmmap(
pagetable,
TRAMPOLINE,
addr_of!(trampoline) as usize as u64,
PGSIZE,
PTE_R | PTE_X,
);
// Allocate and map a kernel stack for each process.
proc_mapstacks(pagetable);
pagetable
}
/// Initialize the one kernel_pagetable.
pub unsafe fn kvminit() {
KERNEL_PAGETABLE = kvmmake();
}
/// Switch hardware pagetable register to the kernel's pagetable and enable paging.
pub unsafe fn kvminithart() {
// Wait for any previous writes to the pagetable memory to finish.
asm::sfence_vma();
asm::w_satp(make_satp(KERNEL_PAGETABLE));
// Flush stale entries from the TLB.
asm::sfence_vma();
}
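// A small sketch of the satp arithmetic above (not part of the kernel build;
// the root-table address is hypothetical, chosen only for the math):
// make_satp() puts the Sv39 mode (8) in the top four bits and the root
// pagetable's physical page number in the low 44 bits.
#[cfg(test)]
mod satp_sketch {
    use super::*;
    #[test]
    fn satp_encodes_mode_and_ppn() {
        let root = 0x8020_0000usize as Pagetable; // hypothetical placement
        let satp = make_satp(root);
        assert_eq!(satp >> 60, 8); // SATP_SV39 mode field
        assert_eq!(satp & ((1 << 44) - 1), 0x8020_0000u64 >> 12); // 44-bit PPN
    }
}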
/// Return the address of the PTE in pagetable
/// `pagetable` that corresponds to virtual address
/// `virtual_addr`. If `alloc` != 0, create any
/// required pagetable pages.
///
/// The RISC-V Sv39 scheme has three levels of pagetable
/// pages. A pagetable page contains 512 64-bit PTEs.
///
/// A 64-bit virtual address is split into five fields:
/// - 0..12: 12 bits of byte offset within the page.
/// - 12..21: 9 bits of level-0 index.
/// - 21..30: 9 bits of level-1 index.
/// - 30..39: 9 bits of level-2 index.
/// - 39..64: Must be zero.
#[no_mangle]
pub unsafe extern "C" fn walk(
mut pagetable: Pagetable,
virtual_addr: u64,
alloc: i32,
) -> *mut PagetableEntry {
if virtual_addr >= MAXVA {
panic!("walk");
}
let mut level = 2;
while level > 0 {
let pte = addr_of_mut!(
pagetable.as_mut().unwrap()[((virtual_addr >> (12 + (level * 9))) & 0x1ffu64) as usize]
);
if (*pte) & PTE_V as u64 > 0 {
pagetable = (((*pte) >> 10) << 12) as usize as Pagetable;
} else {
if alloc == 0 {
return null_mut();
}
pagetable = kalloc() as Pagetable;
if pagetable.is_null() {
return null_mut();
}
memset(pagetable.cast(), 0, PGSIZE as u32);
*pte = (((pagetable as usize) >> 12) << 10) as PagetableEntry | PTE_V as u64;
}
level -= 1;
}
addr_of_mut!(pagetable.as_mut().unwrap()[(virtual_addr as usize >> 12) & 0x1ffusize])
}
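// Worked example of the index arithmetic above, for a hypothetical virtual
// address 0x8000_1234:
//   level 2 index: (va >> 30) & 0x1ff == 2
//   level 1 index: (va >> 21) & 0x1ff == 0
//   level 0 index: (va >> 12) & 0x1ff == 1
//   page offset:   va & 0xfff == 0x234
// The px() helper in the riscv module computes the same values:
// px(2, 0x8000_1234) == 2, px(1, 0x8000_1234) == 0, px(0, 0x8000_1234) == 1.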
/// Look up a virtual address and return the physical address or 0 if not mapped.
///
/// Can only be used to look up user pages.
#[no_mangle]
pub unsafe extern "C" fn walkaddr(pagetable: Pagetable, virtual_addr: u64) -> u64 {
if virtual_addr >= MAXVA {
return 0;
}
let pte = walk(pagetable, virtual_addr, 0);
if pte.is_null() || *pte & PTE_V as u64 == 0 || *pte & PTE_U as u64 == 0 {
return 0;
}
pte2pa(*pte as usize) as u64
}
/// Add a mapping to the kernel page table.
///
/// Only used when booting.
/// Does not flush TLB or enable paging.
#[no_mangle]
pub unsafe extern "C" fn kvmmap(
pagetable: Pagetable,
virtual_addr: u64,
physical_addr: u64,
size: u64,
perm: i32,
) {
if mappages(pagetable, virtual_addr, size, physical_addr, perm) != 0 {
panic!("kvmmap");
}
}
/// Create PagetableEntries for virtual addresses starting at `virtual_addr`
/// that refer to physical addresses starting at `physical_addr`.
///
/// `virtual_addr` and size might not be page-aligned.
/// Returns 0 on success, -1 if walk() couldn't allocate a needed pagetable page.
#[no_mangle]
pub unsafe extern "C" fn mappages(
pagetable: Pagetable,
virtual_addr: u64,
size: u64,
mut physical_addr: u64,
perm: i32,
) -> i32 {
if size == 0 {
panic!("mappages: size = 0");
}
let mut a = pg_round_down(virtual_addr);
let last = pg_round_down(virtual_addr + size - 1);
loop {
let pte = walk(pagetable, a, 1);
if pte.is_null() {
return -1;
}
if (*pte) & PTE_V as u64 > 0 {
panic!("mappages: remap");
}
*pte = ((physical_addr >> 12) << 10) | perm as u64 | PTE_V as u64;
if a == last {
break;
} else {
a += PGSIZE;
physical_addr += PGSIZE;
}
}
0
}
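// Worked example of the rounding above (hypothetical addresses):
// mappages(pt, 0x5000, 0x2001, pa, perm) gives a == pg_round_down(0x5000) ==
// 0x5000 and last == pg_round_down(0x5000 + 0x2001 - 1) == 0x7000, so three
// pages are mapped: 0x5000, 0x6000, and 0x7000.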
/// Remove `npages` of mappings starting from `virtual_addr`.
///
/// `virtual_addr` must be page-aligned. The mappings must exist.
/// Optionally free the physical memory.
#[no_mangle]
pub unsafe extern "C" fn uvmunmap(
pagetable: Pagetable,
virtual_addr: u64,
num_pages: u64,
do_free: i32,
) {
if virtual_addr % PGSIZE != 0 {
panic!("uvmunmap: not aligned");
}
let mut a = virtual_addr;
while a < virtual_addr + num_pages * PGSIZE {
let pte = walk(pagetable, a, 0);
if pte.is_null() {
panic!("uvmunmap: walk");
} else if (*pte) & PTE_V as u64 == 0 {
panic!("uvmunmap: not mapped");
} else if ((*pte) & 0x3ffu64) == PTE_V as u64 {
panic!("uvmunmap: not a leaf");
} else if do_free > 0 {
let physical_addr = (((*pte) >> 10) << 12) as usize as *mut u8;
kfree(physical_addr.cast());
}
*pte = 0;
a += PGSIZE;
}
}
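// Note on the leaf check above: the low 10 bits of a PTE are its flag bits,
// so a PTE whose flags are exactly PTE_V (valid, with R, W, and X all clear)
// points at a lower-level pagetable rather than at a mapped page. A typical
// user leaf such as PTE_V | PTE_R | PTE_U has flags 0x13 and passes the check.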
/// Create an empty user pagetable.
///
/// Returns 0 if out of memory.
#[no_mangle]
pub unsafe extern "C" fn uvmcreate() -> Pagetable {
let pagetable = kalloc() as Pagetable;
if pagetable.is_null() {
return null_mut();
}
memset(pagetable.cast(), 0, PGSIZE as u32);
pagetable
}
/// Load the user initcode into address 0 of pagetable for the very first process.
///
/// `size` must be less than `PGSIZE`.
#[no_mangle]
pub unsafe extern "C" fn uvmfirst(pagetable: Pagetable, src: *mut u8, size: u32) {
if size >= PGSIZE as u32 {
panic!("uvmfirst: more than a page");
}
let mem = kalloc();
memset(mem, 0, PGSIZE as u32);
mappages(
pagetable,
0,
PGSIZE,
mem as usize as u64,
PTE_W | PTE_R | PTE_X | PTE_U,
);
memmove(mem, src, size);
}
/// Allocate PagetableEntries and physical memory to grow process
/// from `old_size` to `new_size`, which need not be page aligned.
///
/// Returns new size or 0 on error.
#[no_mangle]
pub unsafe extern "C" fn uvmalloc(
pagetable: Pagetable,
mut old_size: u64,
new_size: u64,
xperm: i32,
) -> u64 {
if new_size < old_size {
return old_size;
}
old_size = pg_round_up(old_size);
let mut a = old_size;
while a < new_size {
let mem = kalloc();
if mem.is_null() {
uvmdealloc(pagetable, a, old_size);
return 0;
}
memset(mem.cast(), 0, PGSIZE as u32);
if mappages(
pagetable,
a,
PGSIZE,
mem as usize as u64,
PTE_R | PTE_U | xperm,
) != 0
{
kfree(mem.cast());
uvmdealloc(pagetable, a, old_size);
return 0;
}
a += PGSIZE;
}
new_size
}
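// Worked example (hypothetical sizes): uvmalloc(pt, 0x1800, 0x4800, PTE_W)
// rounds old_size up to 0x2000, then allocates and maps pages at 0x2000,
// 0x3000, and 0x4000 before returning 0x4800. If kalloc() or mappages()
// fails partway, the uvmdealloc() call unwinds the pages mapped so far.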
/// Deallocate user pages to bring the process size from `old_size` to `new_size`.
///
/// `old_size` and `new_size` need not be page-aligned, nor does `new_size` need
/// to be less than `old_size`. `old_size` can be larger than the actual process
/// size. Returns the new process size.
#[no_mangle]
pub unsafe extern "C" fn uvmdealloc(pagetable: Pagetable, old_size: u64, new_size: u64) -> u64 {
if new_size >= old_size {
return old_size;
}
if pg_round_up(new_size) < pg_round_up(old_size) {
let num_pages = (pg_round_up(old_size) - pg_round_up(new_size)) / PGSIZE;
uvmunmap(pagetable, pg_round_up(new_size), num_pages, 1);
}
new_size
}
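// Worked example: uvmdealloc(pt, 0x2800, 0x1200) compares
// pg_round_up(0x1200) == 0x2000 against pg_round_up(0x2800) == 0x3000 and
// unmaps (0x3000 - 0x2000) / PGSIZE == 1 page at 0x2000, returning 0x1200.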
/// Recursively free pagetable pages.
///
/// All leaf mappings must have already been removed.
#[no_mangle]
pub unsafe extern "C" fn freewalk(pagetable: Pagetable) {
// There are 2^9 = 512 PagetableEntries in a Pagetable.
for i in 0..512 {
let pte: &mut PagetableEntry = &mut pagetable.as_mut().unwrap()[i];
if *pte & PTE_V as u64 > 0 && (*pte & (PTE_R | PTE_W | PTE_X) as u64) == 0 {
// This PagetableEntry points to a lower-level pagetable.
let child = ((*pte) >> 10) << 12;
freewalk(child as usize as Pagetable);
*pte = 0;
} else if *pte & PTE_V as u64 > 0 {
panic!("freewalk: leaf");
}
}
kfree(pagetable.cast());
}
/// Free user memory pages, then free pagetable pages.
#[no_mangle]
pub unsafe extern "C" fn uvmfree(pagetable: Pagetable, size: u64) {
if size > 0 {
uvmunmap(pagetable, 0, pg_round_up(size) / PGSIZE, 1);
}
freewalk(pagetable);
}
/// Given a parent process's pagetable, copy
/// its memory into a child's pagetable.
///
/// Copies both the pagetable and the physical memory.
/// Returns 0 on success, -1 on failure.
/// Frees any allocated pages on failure.
#[no_mangle]
pub unsafe extern "C" fn uvmcopy(old: Pagetable, new: Pagetable, size: u64) -> i32 {
let mut i = 0;
while i < size {
let pte = walk(old, i, 0);
if pte.is_null() {
panic!("uvmcopy: PagetableEntry should exist");
} else if (*pte) & PTE_V as u64 == 0 {
panic!("uvmcopy: page not present");
}
let pa = ((*pte) >> 10) << 12;
let flags = (*pte) & 0x3ffu64;
let mem = kalloc();
if mem.is_null() {
uvmunmap(new, 0, i / PGSIZE, 1);
return -1;
}
memmove(mem.cast(), (pa as usize as *mut u8).cast(), PGSIZE as u32);
if mappages(new, i, PGSIZE, mem as usize as u64, flags as i32) != 0 {
kfree(mem.cast());
uvmunmap(new, 0, i / PGSIZE, 1);
return -1;
}
i += PGSIZE;
}
0
}
/// Mark a PagetableEntry invalid for user access.
///
/// Used by exec for the user stack guard page.
#[no_mangle]
pub unsafe extern "C" fn uvmclear(pagetable: Pagetable, virtual_addr: u64) {
let pte = walk(pagetable, virtual_addr, 0);
if pte.is_null() {
panic!("uvmclear");
}
*pte &= !(PTE_U as u64);
}
/// Copy from kernel to user.
///
/// Copy `len` bytes from `src` to virtual address `dst_virtual_addr` in a given pagetable.
/// Returns 0 on success, -1 on error.
#[no_mangle]
pub unsafe extern "C" fn copyout(
pagetable: Pagetable,
mut dst_virtual_addr: u64,
mut src: *mut u8,
mut len: u64,
) -> i32 {
while len > 0 {
let va0 = pg_round_down(dst_virtual_addr);
let pa0 = walkaddr(pagetable, va0);
if pa0 == 0 {
return -1;
}
let mut n = PGSIZE - (dst_virtual_addr - va0);
if n > len {
n = len;
}
memmove(
((pa0 + dst_virtual_addr - va0) as usize as *mut u8).cast(),
src,
n as u32,
);
len -= n;
src = src.add(n as usize);
dst_virtual_addr = va0 + PGSIZE;
}
0
}
/// Copy from user to kernel.
///
/// Copy `len` bytes to `dst` from virtual address `src_virtual_addr` in a given pagetable.
/// Returns 0 on success, -1 on error.
#[no_mangle]
pub unsafe extern "C" fn copyin(
pagetable: Pagetable,
mut dst: *mut u8,
mut src_virtual_addr: u64,
mut len: u64,
) -> i32 {
while len > 0 {
let va0 = pg_round_down(src_virtual_addr);
let pa0 = walkaddr(pagetable, va0);
if pa0 == 0 {
return -1;
}
let mut n = PGSIZE - (src_virtual_addr - va0);
if n > len {
n = len;
}
memmove(
dst.cast(),
((pa0 + src_virtual_addr - va0) as usize as *mut u8).cast(),
n as u32,
);
len -= n;
dst = dst.add(n as usize);
src_virtual_addr = va0 + PGSIZE;
}
0
}
/// Copy a null-terminated string from user to kernel.
///
/// Copy bytes to `dst` from virtual address `src_virtual_addr`
/// in a given pagetable, until b'\0' or `max` is reached.
/// Returns 0 on success, -1 on error.
#[no_mangle]
pub unsafe extern "C" fn copyinstr(
pagetable: Pagetable,
mut dst: *mut u8,
mut src_virtual_addr: u64,
mut max: u64,
) -> i32 {
let mut got_null = false;
while !got_null && max > 0 {
let va0 = pg_round_down(src_virtual_addr);
let pa0 = walkaddr(pagetable, va0);
if pa0 == 0 {
return -1;
}
let mut n = PGSIZE - (src_virtual_addr - va0);
if n > max {
n = max;
}
let mut p = (pa0 + src_virtual_addr - va0) as usize as *const u8;
while n > 0 {
if *p == b'\0' {
*dst = b'\0';
got_null = true;
break;
} else {
*dst = *p;
}
n -= 1;
max -= 1;
p = p.add(1);
dst = dst.add(1);
}
src_virtual_addr = va0 + PGSIZE;
}
if got_null {
0
} else {
-1
}
}
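// Usage sketch (hypothetical names, not part of this commit): a syscall
// handler fetching a NUL-terminated path from user space might call
// copyinstr() like this:
//
//     let mut path = [0u8; 128];
//     if copyinstr(user_pagetable, path.as_mut_ptr(), user_addr, 128) != 0 {
//         // the string was longer than the buffer, or a page was unmapped
//     }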


@ -32,6 +32,7 @@ extern "C" {
pub fn exit(status: i32) -> !; pub fn exit(status: i32) -> !;
pub fn wait(addr: u64) -> i32; pub fn wait(addr: u64) -> i32;
pub fn procdump(); pub fn procdump();
pub fn proc_mapstacks(kpgtbl: Pagetable);
pub fn proc_pagetable(p: *mut Proc) -> Pagetable; pub fn proc_pagetable(p: *mut Proc) -> Pagetable;
pub fn proc_freepagetable(pagetable: Pagetable, sz: u64); pub fn proc_freepagetable(pagetable: Pagetable, sz: u64);
pub fn wakeup(chan: *const c_void); pub fn wakeup(chan: *const c_void);
@ -253,52 +254,6 @@ pub unsafe extern "C" fn allocpid() -> i32 {
pid pid
} }
/*
/// Look in the process table for an UNUSED proc.
/// If found, initialize state required to run in the kernel,
/// and return with p->lock held. If there are no free procs,
/// or a memory allocation fails, return 0.
#[no_mangle]
pub unsafe extern "C" fn allocproc() -> *mut Proc {
for p in &mut proc {
let lock = addr_of_mut!(p.lock);
(*lock).lock_unguarded();
if p.state != ProcState::Unused {
(*lock).unlock();
continue;
}
let p = addr_of_mut!(*p);
(*p).pid = allocpid();
(*p).state = ProcState::Used;
// Allocate a trapframe page and
// create an empty user page table.
(*p).trapframe = kalloc().cast();
(*p).pagetable = proc_pagetable(p);
if (*p).trapframe.is_null() || (*p).pagetable.is_null() {
freeproc(p);
(*p).lock.unlock();
return null_mut();
}
// Set up new context to start executing
// at forkret which returns to user space.
memset(addr_of_mut!((*p).context).cast(), 0, size_of::<Context>() as u32);
// TODO: convert fn pointer to u64
(*p).context.ra = forkret as usize as u64;
(*p).context.sp = (*p).kstack + PGSIZE;
return p;
}
null_mut()
}
*/
/// Free a proc structure and the data hanging from it, including user pages. /// Free a proc structure and the data hanging from it, including user pages.
/// p->lock must be held. /// p->lock must be held.
#[no_mangle] #[no_mangle]
@ -321,23 +276,6 @@ pub unsafe extern "C" fn freeproc(p: *mut Proc) {
(*p).state = ProcState::Unused; (*p).state = ProcState::Unused;
} }
// /// Wake up all processes sleeping on chan.
// /// Must be called without any p->lock.
// #[no_mangle]
// pub unsafe extern "C" fn wakeup(chan: *mut c_void) {
// for p in &mut proc {
// let p: *mut Proc = addr_of_mut!(*p);
//
// if p != myproc() {
// (*p).lock.lock_unguarded();
// if (*p).state == ProcState::Sleeping && (*p).chan == chan {
// (*p).state = ProcState::Runnable;
// }
// (*p).lock.unlock();
// }
// }
// }
/// Pass p's abandoned children to init. /// Pass p's abandoned children to init.
/// Caller must hold wait_lock. /// Caller must hold wait_lock.
#[no_mangle] #[no_mangle]
@ -358,7 +296,7 @@ pub unsafe extern "C" fn growproc(n: i32) -> i32 {
let mut sz = (*p).sz; let mut sz = (*p).sz;
if n > 0 { if n > 0 {
sz = uvmalloc((*p).pagetable, sz, sz.wrapping_add(n as u64), PTE_W as i32); sz = uvmalloc((*p).pagetable, sz, sz.wrapping_add(n as u64), PTE_W);
if sz == 0 { if sz == 0 {
return -1; return -1;
} }
@ -455,20 +393,6 @@ pub unsafe fn sleep_mutex<T>(chan: *mut c_void, mutex: &mut SpinMutexGuard<T>) {
core::mem::forget(guard); core::mem::forget(guard);
} }
// pub unsafe fn sleep(chan: *mut c_void) {
// let p = myproc();
// let _guard = (*p).lock.lock();
//
// // Go to sleep.
// (*p).chan = chan;
// (*p).state = ProcState::Sleeping;
//
// sched();
//
// // Clean up.
// (*p).chan = null_mut();
// }
/// Kill the process with the given pid. /// Kill the process with the given pid.
/// The victim won't exit until it tries to return /// The victim won't exit until it tries to return
/// to user space (see usertrap() in trap.c). /// to user space (see usertrap() in trap.c).


@@ -5,8 +5,9 @@ pub mod plic;
 pub use asm::*;
 pub use memlayout::*;
 
-pub type Pte = u64;
-pub type Pagetable = *mut [Pte; 512];
+pub type Pde = u64;
+pub type PagetableEntry = u64;
+pub type Pagetable = *mut [PagetableEntry; 512];
 
 /// Previous mode
 pub const MSTATUS_MPP_MASK: u64 = 3 << 11;
@@ -43,14 +44,14 @@ pub const MIE_MSIE: u64 = 1 << 3;
 pub const SATP_SV39: u64 = 8 << 60;
 
-pub fn make_satp(pagetable: u64) -> u64 {
-    SATP_SV39 | (pagetable >> 12)
+pub fn make_satp(pagetable: Pagetable) -> u64 {
+    SATP_SV39 | (pagetable as usize as u64 >> 12)
 }
 
 /// Bytes per page
 pub const PGSIZE: u64 = 4096;
 /// Bits of offset within a page
-pub const PGSHIFT: u64 = 12;
+pub const PGSHIFT: usize = 12;
 
 pub fn pg_round_up(sz: u64) -> u64 {
     (sz + PGSIZE - 1) & !(PGSIZE - 1)
@@ -60,12 +61,12 @@ pub fn pg_round_down(a: u64) -> u64 {
 }
 
 // Valid.
-pub const PTE_V: u64 = 1 << 0;
-pub const PTE_R: u64 = 1 << 1;
-pub const PTE_W: u64 = 1 << 2;
-pub const PTE_X: u64 = 1 << 3;
+pub const PTE_V: i32 = 1 << 0;
+pub const PTE_R: i32 = 1 << 1;
+pub const PTE_W: i32 = 1 << 2;
+pub const PTE_X: i32 = 1 << 3;
 // User can access.
-pub const PTE_U: u64 = 1 << 4;
+pub const PTE_U: i32 = 1 << 4;
 
 /*
 // shift a physical address to the right place for a PTE.
@@ -81,6 +82,26 @@ pub const PTE_U: u64 = 1 << 4;
 #define PX(level, va) ((((uint64) (va)) >> PXSHIFT(level)) & PXMASK)
 */
 
+/// Shift a physical address to the right place for a PTE.
+pub fn pa2pte(pa: usize) -> usize {
+    (pa >> 12) << 10
+}
+
+pub fn pte2pa(pte: usize) -> usize {
+    (pte >> 10) << 12
+}
+
+// Extract the three 9-bit page table indices from a virtual address.
+pub const PXMASK: usize = 0x1ffusize; // 9 bits.
+
+pub fn pxshift(level: usize) -> usize {
+    PGSHIFT + (level * 9)
+}
+
+pub fn px(level: usize, virtual_addr: usize) -> usize {
+    (virtual_addr >> pxshift(level)) & PXMASK
+}
+
 /// One beyond the highest possible virtual address.
 ///
 /// MAXVA is actually one bit less than the max allowed by


@@ -1,6 +1,6 @@
 use crate::{
     proc::{mycpu, Cpu},
-    trap::{push_intr_off, pop_intr_off},
+    trap::{pop_intr_off, push_intr_off},
 };
 use core::{
     ffi::c_char,


@ -177,7 +177,7 @@ pub unsafe extern "C" fn usertrapret() {
w_sepc((*(*p).trapframe).epc); w_sepc((*(*p).trapframe).epc);
// Tell trampoline.S the user page table to switch to. // Tell trampoline.S the user page table to switch to.
let satp = make_satp((*p).pagetable as usize as u64); let satp = make_satp((*p).pagetable);
// Jump to userret in trampoline.S at the top of memory, which // Jump to userret in trampoline.S at the top of memory, which
// switches to the user page table, restores user registers, // switches to the user page table, restores user registers,


@@ -1,442 +0,0 @@
#include "param.h"
#include "types.h"
#include "memlayout.h"
#include "elf.h"
#include "riscv.h"
#include "defs.h"
#include "fs.h"
/*
* the kernel's page table.
*/
pagetable_t kernel_pagetable;
extern char etext[]; // kernel.ld sets this to end of kernel code.
extern char trampoline[]; // trampoline.S
// Make a direct-map page table for the kernel.
pagetable_t
kvmmake(void)
{
pagetable_t kpgtbl;
kpgtbl = (pagetable_t) kalloc();
memset(kpgtbl, 0, PGSIZE);
// QEMU test interface used for power management.
kvmmap(kpgtbl, QEMU_POWER, QEMU_POWER, PGSIZE, PTE_R | PTE_W);
// uart registers
kvmmap(kpgtbl, UART0, UART0, PGSIZE, PTE_R | PTE_W);
// virtio mmio disk interface
kvmmap(kpgtbl, VIRTIO0, VIRTIO0, PGSIZE, PTE_R | PTE_W);
// PLIC
kvmmap(kpgtbl, PLIC, PLIC, 0x400000, PTE_R | PTE_W);
// map kernel text executable and read-only.
kvmmap(kpgtbl, KERNBASE, KERNBASE, (uint64)etext-KERNBASE, PTE_R | PTE_X);
// map kernel data and the physical RAM we'll make use of.
kvmmap(kpgtbl, (uint64)etext, (uint64)etext, PHYSTOP-(uint64)etext, PTE_R | PTE_W);
// map the trampoline for trap entry/exit to
// the highest virtual address in the kernel.
kvmmap(kpgtbl, TRAMPOLINE, (uint64)trampoline, PGSIZE, PTE_R | PTE_X);
// allocate and map a kernel stack for each process.
proc_mapstacks(kpgtbl);
return kpgtbl;
}
// Initialize the one kernel_pagetable
void
kvminit(void)
{
kernel_pagetable = kvmmake();
}
// Switch h/w page table register to the kernel's page table,
// and enable paging.
void
kvminithart()
{
// wait for any previous writes to the page table memory to finish.
sfence_vma();
w_satp(MAKE_SATP(kernel_pagetable));
// flush stale entries from the TLB.
sfence_vma();
}
// Return the address of the PTE in page table pagetable
// that corresponds to virtual address va. If alloc!=0,
// create any required page-table pages.
//
// The risc-v Sv39 scheme has three levels of page-table
// pages. A page-table page contains 512 64-bit PTEs.
// A 64-bit virtual address is split into five fields:
// 39..63 -- must be zero.
// 30..38 -- 9 bits of level-2 index.
// 21..29 -- 9 bits of level-1 index.
// 12..20 -- 9 bits of level-0 index.
// 0..11 -- 12 bits of byte offset within the page.
pte_t *
walk(pagetable_t pagetable, uint64 va, int alloc)
{
if(va >= MAXVA)
panic("walk");
for(int level = 2; level > 0; level--) {
pte_t *pte = &pagetable[PX(level, va)];
if(*pte & PTE_V) {
pagetable = (pagetable_t)PTE2PA(*pte);
} else {
if(!alloc || (pagetable = (pde_t*)kalloc()) == 0)
return 0;
memset(pagetable, 0, PGSIZE);
*pte = PA2PTE(pagetable) | PTE_V;
}
}
return &pagetable[PX(0, va)];
}
// Look up a virtual address, return the physical address,
// or 0 if not mapped.
// Can only be used to look up user pages.
uint64
walkaddr(pagetable_t pagetable, uint64 va)
{
pte_t *pte;
uint64 pa;
if(va >= MAXVA)
return 0;
pte = walk(pagetable, va, 0);
if(pte == 0)
return 0;
if((*pte & PTE_V) == 0)
return 0;
if((*pte & PTE_U) == 0)
return 0;
pa = PTE2PA(*pte);
return pa;
}
// add a mapping to the kernel page table.
// only used when booting.
// does not flush TLB or enable paging.
void
kvmmap(pagetable_t kpgtbl, uint64 va, uint64 pa, uint64 sz, int perm)
{
if(mappages(kpgtbl, va, sz, pa, perm) != 0)
panic("kvmmap");
}
// Create PTEs for virtual addresses starting at va that refer to
// physical addresses starting at pa. va and size might not
// be page-aligned. Returns 0 on success, -1 if walk() couldn't
// allocate a needed page-table page.
int
mappages(pagetable_t pagetable, uint64 va, uint64 size, uint64 pa, int perm)
{
uint64 a, last;
pte_t *pte;
if(size == 0)
panic("mappages: size");
a = PGROUNDDOWN(va);
last = PGROUNDDOWN(va + size - 1);
for(;;){
if((pte = walk(pagetable, a, 1)) == 0)
return -1;
if(*pte & PTE_V)
panic("mappages: remap");
*pte = PA2PTE(pa) | perm | PTE_V;
if(a == last)
break;
a += PGSIZE;
pa += PGSIZE;
}
return 0;
}
// Remove npages of mappings starting from va. va must be
// page-aligned. The mappings must exist.
// Optionally free the physical memory.
void
uvmunmap(pagetable_t pagetable, uint64 va, uint64 npages, int do_free)
{
uint64 a;
pte_t *pte;
if((va % PGSIZE) != 0)
panic("uvmunmap: not aligned");
for(a = va; a < va + npages*PGSIZE; a += PGSIZE){
if((pte = walk(pagetable, a, 0)) == 0)
panic("uvmunmap: walk");
if((*pte & PTE_V) == 0)
panic("uvmunmap: not mapped");
if(PTE_FLAGS(*pte) == PTE_V)
panic("uvmunmap: not a leaf");
if(do_free){
uint64 pa = PTE2PA(*pte);
kfree((void*)pa);
}
*pte = 0;
}
}
// create an empty user page table.
// returns 0 if out of memory.
pagetable_t
uvmcreate()
{
pagetable_t pagetable;
pagetable = (pagetable_t) kalloc();
if(pagetable == 0)
return 0;
memset(pagetable, 0, PGSIZE);
return pagetable;
}
// Load the user initcode into address 0 of pagetable,
// for the very first process.
// sz must be less than a page.
void
uvmfirst(pagetable_t pagetable, uchar *src, uint sz)
{
char *mem;
if(sz >= PGSIZE)
panic("uvmfirst: more than a page");
mem = kalloc();
memset(mem, 0, PGSIZE);
mappages(pagetable, 0, PGSIZE, (uint64)mem, PTE_W|PTE_R|PTE_X|PTE_U);
memmove(mem, src, sz);
}
// Allocate PTEs and physical memory to grow process from oldsz to
// newsz, which need not be page aligned. Returns new size or 0 on error.
uint64
uvmalloc(pagetable_t pagetable, uint64 oldsz, uint64 newsz, int xperm)
{
char *mem;
uint64 a;
if(newsz < oldsz)
return oldsz;
oldsz = PGROUNDUP(oldsz);
for(a = oldsz; a < newsz; a += PGSIZE){
mem = kalloc();
if(mem == 0){
uvmdealloc(pagetable, a, oldsz);
return 0;
}
memset(mem, 0, PGSIZE);
if(mappages(pagetable, a, PGSIZE, (uint64)mem, PTE_R|PTE_U|xperm) != 0){
kfree(mem);
uvmdealloc(pagetable, a, oldsz);
return 0;
}
}
return newsz;
}
// Deallocate user pages to bring the process size from oldsz to
// newsz. oldsz and newsz need not be page-aligned, nor does newsz
// need to be less than oldsz. oldsz can be larger than the actual
// process size. Returns the new process size.
uint64
uvmdealloc(pagetable_t pagetable, uint64 oldsz, uint64 newsz)
{
if(newsz >= oldsz)
return oldsz;
if(PGROUNDUP(newsz) < PGROUNDUP(oldsz)){
int npages = (PGROUNDUP(oldsz) - PGROUNDUP(newsz)) / PGSIZE;
uvmunmap(pagetable, PGROUNDUP(newsz), npages, 1);
}
return newsz;
}
// Recursively free page-table pages.
// All leaf mappings must already have been removed.
void
freewalk(pagetable_t pagetable)
{
// there are 2^9 = 512 PTEs in a page table.
for(int i = 0; i < 512; i++){
pte_t pte = pagetable[i];
if((pte & PTE_V) && (pte & (PTE_R|PTE_W|PTE_X)) == 0){
// this PTE points to a lower-level page table.
uint64 child = PTE2PA(pte);
freewalk((pagetable_t)child);
pagetable[i] = 0;
} else if(pte & PTE_V){
panic("freewalk: leaf");
}
}
kfree((void*)pagetable);
}
// Free user memory pages,
// then free page-table pages.
void
uvmfree(pagetable_t pagetable, uint64 sz)
{
if(sz > 0)
uvmunmap(pagetable, 0, PGROUNDUP(sz)/PGSIZE, 1);
freewalk(pagetable);
}
// Given a parent process's page table, copy
// its memory into a child's page table.
// Copies both the page table and the
// physical memory.
// returns 0 on success, -1 on failure.
// frees any allocated pages on failure.
int
uvmcopy(pagetable_t old, pagetable_t new, uint64 sz)
{
pte_t *pte;
uint64 pa, i;
uint flags;
char *mem;
for(i = 0; i < sz; i += PGSIZE){
if((pte = walk(old, i, 0)) == 0)
panic("uvmcopy: pte should exist");
if((*pte & PTE_V) == 0)
panic("uvmcopy: page not present");
pa = PTE2PA(*pte);
flags = PTE_FLAGS(*pte);
if((mem = kalloc()) == 0)
goto err;
memmove(mem, (char*)pa, PGSIZE);
if(mappages(new, i, PGSIZE, (uint64)mem, flags) != 0){
kfree(mem);
goto err;
}
}
return 0;
err:
uvmunmap(new, 0, i / PGSIZE, 1);
return -1;
}
// mark a PTE invalid for user access.
// used by exec for the user stack guard page.
void
uvmclear(pagetable_t pagetable, uint64 va)
{
pte_t *pte;
pte = walk(pagetable, va, 0);
if(pte == 0)
panic("uvmclear");
*pte &= ~PTE_U;
}
// Copy from kernel to user.
// Copy len bytes from src to virtual address dstva in a given page table.
// Return 0 on success, -1 on error.
int
copyout(pagetable_t pagetable, uint64 dstva, char *src, uint64 len)
{
uint64 n, va0, pa0;
while(len > 0){
va0 = PGROUNDDOWN(dstva);
pa0 = walkaddr(pagetable, va0);
if(pa0 == 0)
return -1;
n = PGSIZE - (dstva - va0);
if(n > len)
n = len;
memmove((void *)(pa0 + (dstva - va0)), src, n);
len -= n;
src += n;
dstva = va0 + PGSIZE;
}
return 0;
}
// Copy from user to kernel.
// Copy len bytes to dst from virtual address srcva in a given page table.
// Return 0 on success, -1 on error.
int
copyin(pagetable_t pagetable, char *dst, uint64 srcva, uint64 len)
{
uint64 n, va0, pa0;
while(len > 0){
va0 = PGROUNDDOWN(srcva);
pa0 = walkaddr(pagetable, va0);
if(pa0 == 0)
return -1;
n = PGSIZE - (srcva - va0);
if(n > len)
n = len;
memmove(dst, (void *)(pa0 + (srcva - va0)), n);
len -= n;
dst += n;
srcva = va0 + PGSIZE;
}
return 0;
}
// Copy a null-terminated string from user to kernel.
// Copy bytes to dst from virtual address srcva in a given page table,
// until a '\0', or max.
// Return 0 on success, -1 on error.
int
copyinstr(pagetable_t pagetable, char *dst, uint64 srcva, uint64 max)
{
uint64 n, va0, pa0;
int got_null = 0;
while(got_null == 0 && max > 0){
va0 = PGROUNDDOWN(srcva);
pa0 = walkaddr(pagetable, va0);
if(pa0 == 0)
return -1;
n = PGSIZE - (srcva - va0);
if(n > max)
n = max;
char *p = (char *) (pa0 + (srcva - va0));
while(n > 0){
if(*p == '\0'){
*dst = '\0';
got_null = 1;
break;
} else {
*dst = *p;
}
--n;
--max;
p++;
dst++;
}
srcva = va0 + PGSIZE;
}
if(got_null){
return 0;
} else {
return -1;
}
}