change u64 to usize where makes sense

This commit is contained in:
Garen Tyler 2023-11-15 21:05:58 -07:00
parent e6c0bfc581
commit ca962854c5
Signed by: garentyler
GPG Key ID: D7A048C454CB7054
11 changed files with 184 additions and 183 deletions

View File

@ -106,7 +106,7 @@ void initsleeplock(struct sleeplock *, char *);
// string.c
void *memmove(void *, const void *, uint);
void *memset(void *, int, uint);
void *memset(void *, uint8, uint64);
char *safestrcpy(char *, const char *, int);
int strlen(const char *);
int strncmp(const char *, const char *, uint);
@ -123,15 +123,7 @@ int fetchaddr(uint64, uint64 *);
void usertrapret(void);
// vm.c
void kvmmap(pagetable_t, uint64, uint64, uint64, int);
int mappages(pagetable_t, uint64, uint64, uint64, int);
pagetable_t uvmcreate(void);
void uvmfirst(pagetable_t, uchar *, uint);
uint64 uvmalloc(pagetable_t, uint64, uint64, int);
uint64 uvmdealloc(pagetable_t, uint64, uint64);
int uvmcopy(pagetable_t, pagetable_t, uint64);
void uvmfree(pagetable_t, uint64);
void uvmunmap(pagetable_t, uint64, uint64, int);
void uvmclear(pagetable_t, uint64);
uint64 walkaddr(pagetable_t, uint64);
int copyout(pagetable_t, uint64, char *, uint64);

View File

@ -47,8 +47,8 @@ pub mod virtual_memory {
#[cfg(target_arch = "riscv64")]
pub use super::riscv::virtual_memory::{
copyin, copyinstr, copyout, either_copyin, either_copyout, kvminit as init,
kvminithart as inithart, mappages, uvmalloc, uvmcopy, uvmcreate, uvmdealloc, uvmfree,
uvmunmap, uvmfirst
kvminithart as inithart, mappages, uvmalloc, uvmcopy, uvmcreate, uvmdealloc, uvmfirst,
uvmfree, uvmunmap,
};
}

View File

@ -40,14 +40,14 @@ pub unsafe fn kvmmake() -> Pagetable {
if pagetable.is_null() {
panic!("kalloc");
}
memset(pagetable.cast(), 0, PAGE_SIZE as u32);
memset(pagetable.cast(), 0, PAGE_SIZE);
// QEMU test interface used for power management.
kvmmap(
pagetable,
QEMU_POWER as u64,
QEMU_POWER as u64,
PAGE_SIZE as u64,
QEMU_POWER,
QEMU_POWER,
PAGE_SIZE,
PTE_R | PTE_W,
);
@ -55,9 +55,9 @@ pub unsafe fn kvmmake() -> Pagetable {
for (_, uart) in &crate::hardware::UARTS {
kvmmap(
pagetable,
uart.base_address as u64,
uart.base_address as u64,
PAGE_SIZE as u64,
uart.base_address,
uart.base_address,
PAGE_SIZE,
PTE_R | PTE_W,
);
}
@ -65,29 +65,29 @@ pub unsafe fn kvmmake() -> Pagetable {
// VirtIO MMIO disk interface
kvmmap(
pagetable,
VIRTIO0 as u64,
VIRTIO0 as u64,
PAGE_SIZE as u64,
VIRTIO0,
VIRTIO0,
PAGE_SIZE,
PTE_R | PTE_W,
);
// PLIC
kvmmap(
pagetable,
PLIC as u64,
PLIC as u64,
0x400000u64,
PLIC,
PLIC,
0x400000,
PTE_R | PTE_W,
);
let etext_addr = addr_of!(etext) as usize as u64;
let etext_addr = addr_of!(etext) as usize;
// Map kernel text executable and read-only.
kvmmap(
pagetable,
KERNEL_BASE as u64,
KERNEL_BASE as u64,
etext_addr - KERNEL_BASE as u64,
KERNEL_BASE,
KERNEL_BASE,
etext_addr - KERNEL_BASE,
PTE_R | PTE_X,
);
@ -96,7 +96,7 @@ pub unsafe fn kvmmake() -> Pagetable {
pagetable,
etext_addr,
etext_addr,
PHYSICAL_END as u64 - etext_addr,
PHYSICAL_END - etext_addr,
PTE_R | PTE_W,
);
@ -104,9 +104,9 @@ pub unsafe fn kvmmake() -> Pagetable {
// the highest virtual address in the kernel.
kvmmap(
pagetable,
TRAMPOLINE as u64,
addr_of!(trampoline) as usize as u64,
PAGE_SIZE as u64,
TRAMPOLINE,
addr_of!(trampoline) as usize,
PAGE_SIZE,
PTE_R | PTE_X,
);
@ -116,12 +116,12 @@ pub unsafe fn kvmmake() -> Pagetable {
if page.is_null() {
panic!("kalloc");
}
let virtual_addr = kstack(i) as u64;
let virtual_addr = kstack(i);
kvmmap(
pagetable,
virtual_addr,
page as u64,
PAGE_SIZE as u64,
page as usize,
PAGE_SIZE,
PTE_R | PTE_W,
);
}
@ -159,21 +159,21 @@ pub unsafe fn kvminithart() {
/// - 21..30: 9 bits of level 1 index.
/// - 30..39: 9 bits of level 2 index.
/// - 39..64: Must be zero.
pub unsafe fn walk(mut pagetable: Pagetable, virtual_addr: u64, alloc: i32) -> *mut PagetableEntry {
if virtual_addr > VIRTUAL_MAX as u64 {
pub unsafe fn walk(mut pagetable: Pagetable, virtual_addr: usize, alloc: bool) -> *mut PagetableEntry {
if virtual_addr > VIRTUAL_MAX {
panic!("walk");
}
let mut level = 2;
while level > 0 {
let pte = addr_of_mut!(
pagetable.as_mut().unwrap()[((virtual_addr >> (12 + (level * 9))) & 0x1ffu64) as usize]
pagetable.as_mut().unwrap()[(virtual_addr >> (12 + (level * 9))) & 0x1ff]
);
if (*pte) & PTE_V as u64 > 0 {
pagetable = (((*pte) >> 10) << 12) as usize as Pagetable;
} else {
if alloc == 0 {
if !alloc {
return null_mut();
}
@ -183,26 +183,26 @@ pub unsafe fn walk(mut pagetable: Pagetable, virtual_addr: u64, alloc: i32) -> *
return null_mut();
}
memset(pagetable.cast(), 0, PAGE_SIZE as u32);
memset(pagetable.cast(), 0, PAGE_SIZE);
*pte = (((pagetable as usize) >> 12) << 10) as PagetableEntry | PTE_V as u64;
}
level -= 1;
}
addr_of_mut!(pagetable.as_mut().unwrap()[(virtual_addr as usize >> 12) & 0x1ffusize])
addr_of_mut!(pagetable.as_mut().unwrap()[(virtual_addr >> 12) & 0x1ff])
}
/// Look up a virtual address and return the physical address or 0 if not mapped.
///
/// Can only be used to look up user pages.
#[no_mangle]
pub unsafe extern "C" fn walkaddr(pagetable: Pagetable, virtual_addr: u64) -> u64 {
if virtual_addr > VIRTUAL_MAX as u64 {
pub unsafe extern "C" fn walkaddr(pagetable: Pagetable, virtual_addr: usize) -> u64 {
if virtual_addr > VIRTUAL_MAX {
return 0;
}
let pte = walk(pagetable, virtual_addr, 0);
let pte = walk(pagetable, virtual_addr , false);
if pte.is_null() || *pte & PTE_V as u64 == 0 || *pte & PTE_U as u64 == 0 {
return 0;
}
@ -214,12 +214,11 @@ pub unsafe extern "C" fn walkaddr(pagetable: Pagetable, virtual_addr: u64) -> u6
///
/// Only used when booting.
/// Does not flush TLB or enable paging.
#[no_mangle]
pub unsafe extern "C" fn kvmmap(
pub unsafe fn kvmmap(
pagetable: Pagetable,
virtual_addr: u64,
physical_addr: u64,
size: u64,
virtual_addr: usize,
physical_addr: usize,
size: usize,
perm: i32,
) {
if mappages(pagetable, virtual_addr, size, physical_addr, perm) != 0 {
@ -232,23 +231,22 @@ pub unsafe extern "C" fn kvmmap(
///
/// `virtual_addr` and size might not be page-aligned.
/// Returns 0 on success, -1 if walk() couldn't allocate a needed pagetable page.
#[no_mangle]
pub unsafe extern "C" fn mappages(
pub unsafe fn mappages(
pagetable: Pagetable,
virtual_addr: u64,
size: u64,
mut physical_addr: u64,
virtual_addr: usize,
size: usize,
mut physical_addr: usize,
perm: i32,
) -> i32 {
if size == 0 {
panic!("mappages: size = 0");
}
let mut a = round_down_page(virtual_addr as usize) as u64;
let last = round_down_page((virtual_addr + size - 1) as usize) as u64;
let mut a = round_down_page(virtual_addr);
let last = round_down_page(virtual_addr + size - 1);
loop {
let pte = walk(pagetable, a, 1);
let pte = walk(pagetable, a, true);
if pte.is_null() {
return -1;
@ -257,13 +255,13 @@ pub unsafe extern "C" fn mappages(
panic!("mappages: remap");
}
*pte = ((physical_addr >> 12) << 10) | perm as u64 | PTE_V as u64;
*pte = ((physical_addr as u64 >> 12) << 10) | perm as u64 | PTE_V as u64;
if a == last {
break;
} else {
a += PAGE_SIZE as u64;
physical_addr += PAGE_SIZE as u64;
a += PAGE_SIZE;
physical_addr += PAGE_SIZE;
}
}
@ -274,67 +272,64 @@ pub unsafe extern "C" fn mappages(
///
/// `virtual_addr` must be page-aligned. The mappings must exist.
/// Optionally free the physical memory.
#[no_mangle]
pub unsafe extern "C" fn uvmunmap(
pub unsafe fn uvmunmap(
pagetable: Pagetable,
virtual_addr: u64,
num_pages: u64,
do_free: i32,
virtual_addr: usize,
num_pages: usize,
free: bool,
) {
if virtual_addr % PAGE_SIZE as u64 != 0 {
if virtual_addr % PAGE_SIZE != 0 {
panic!("uvmunmap: not aligned");
}
let mut a = virtual_addr;
while a < virtual_addr + num_pages * PAGE_SIZE as u64 {
let pte = walk(pagetable, a, 0);
while a < virtual_addr + num_pages * PAGE_SIZE {
let pte = walk(pagetable, a, false);
if pte.is_null() {
panic!("uvmunmap: walk");
} else if (*pte) & PTE_V as u64 == 0 {
panic!("uvmunmap: not mapped");
} else if ((*pte) & 0x3ffu64) == PTE_V as u64 {
panic!("uvmunmap: not a leaf");
} else if do_free > 0 {
} else if free {
let physical_addr = (((*pte) >> 10) << 12) as usize as *mut u8;
kfree(physical_addr.cast());
}
*pte = 0;
a += PAGE_SIZE as u64;
a += PAGE_SIZE;
}
}
/// Create an empty user pagetable.
///
/// Returns 0 if out of memory.
#[no_mangle]
pub unsafe extern "C" fn uvmcreate() -> Pagetable {
pub unsafe fn uvmcreate() -> Pagetable {
let pagetable = kalloc() as Pagetable;
if pagetable.is_null() {
return null_mut();
}
memset(pagetable.cast(), 0, PAGE_SIZE as u32);
memset(pagetable.cast(), 0, PAGE_SIZE);
pagetable
}
/// Load the user initcode into address 0 of pagetable for the very first process.
///
/// `size` must be less than `PAGE_SIZE`.
#[no_mangle]
pub unsafe extern "C" fn uvmfirst(pagetable: Pagetable, src: *mut u8, size: u32) {
if size >= PAGE_SIZE as u32 {
pub unsafe fn uvmfirst(pagetable: Pagetable, src: *mut u8, size: usize) {
if size >= PAGE_SIZE {
panic!("uvmfirst: more than a page");
}
let mem = kalloc();
memset(mem, 0, PAGE_SIZE as u32);
memset(mem, 0, PAGE_SIZE);
mappages(
pagetable,
0,
PAGE_SIZE as u64,
mem as usize as u64,
PAGE_SIZE,
mem as usize,
PTE_W | PTE_R | PTE_X | PTE_U,
);
memmove(mem, src, size);
memmove(mem, src, size as u32);
}
/// Allocate PagetableEntries and physical memory to grow process
@ -344,15 +339,15 @@ pub unsafe extern "C" fn uvmfirst(pagetable: Pagetable, src: *mut u8, size: u32)
#[no_mangle]
pub unsafe extern "C" fn uvmalloc(
pagetable: Pagetable,
mut old_size: u64,
new_size: u64,
mut old_size: usize,
new_size: usize,
xperm: i32,
) -> u64 {
if new_size < old_size {
return old_size;
return old_size as u64;
}
old_size = round_up_page(old_size as usize) as u64;
old_size = round_up_page(old_size);
let mut a = old_size;
while a < new_size {
@ -362,13 +357,13 @@ pub unsafe extern "C" fn uvmalloc(
return 0;
}
memset(mem.cast(), 0, PAGE_SIZE as u64 as u32);
memset(mem.cast(), 0, PAGE_SIZE);
if mappages(
pagetable,
a,
PAGE_SIZE as u64,
mem as usize as u64,
PAGE_SIZE,
mem as usize,
PTE_R | PTE_U | xperm,
) != 0
{
@ -377,10 +372,10 @@ pub unsafe extern "C" fn uvmalloc(
return 0;
}
a += PAGE_SIZE as u64;
a += PAGE_SIZE;
}
new_size
new_size as u64
}
/// Deallocate user pages to bring the process size from `old_size` to `new_size`.
@ -389,30 +384,29 @@ pub unsafe extern "C" fn uvmalloc(
/// to be less than `old_size`. `old_size` can be larger than the actual process
/// size. Returns the new process size.
#[no_mangle]
pub unsafe extern "C" fn uvmdealloc(pagetable: Pagetable, old_size: u64, new_size: u64) -> u64 {
pub unsafe extern "C" fn uvmdealloc(pagetable: Pagetable, old_size: usize, new_size: usize) -> u64 {
if new_size >= old_size {
return old_size;
return old_size as u64;
}
if round_up_page(new_size as usize) < round_up_page(old_size as usize) {
if round_up_page(new_size) < round_up_page(old_size) {
let num_pages =
(round_up_page(old_size as usize) - round_up_page(new_size as usize)) / PAGE_SIZE;
(round_up_page(old_size) - round_up_page(new_size)) / PAGE_SIZE;
uvmunmap(
pagetable,
round_up_page(new_size as usize) as u64,
num_pages as u64,
1,
round_up_page(new_size),
num_pages,
true,
);
}
new_size
new_size as u64
}
/// Recursively free pagetable pages.
///
/// All leaf mappings must have already been removed.
#[no_mangle]
pub unsafe extern "C" fn freewalk(pagetable: Pagetable) {
pub unsafe fn freewalk(pagetable: Pagetable) {
// There are 2^9 = 512 PagetableEntry's in a Pagetable.
for i in 0..512 {
let pte: &mut PagetableEntry = &mut pagetable.as_mut().unwrap()[i];
@ -429,16 +423,13 @@ pub unsafe extern "C" fn freewalk(pagetable: Pagetable) {
}
/// Free user memory pages, then free pagetable pages.
#[no_mangle]
pub unsafe extern "C" fn uvmfree(pagetable: Pagetable, size: u64) {
if size > 0 {
uvmunmap(
pagetable,
0,
(round_up_page(size as usize) / PAGE_SIZE) as u64,
1,
);
}
pub unsafe fn uvmfree(pagetable: Pagetable, size: usize) {
uvmunmap(
pagetable,
0,
round_up_page(size) / PAGE_SIZE,
true,
);
freewalk(pagetable);
}
@ -448,12 +439,11 @@ pub unsafe extern "C" fn uvmfree(pagetable: Pagetable, size: u64) {
/// Copies both the pagetable and the physical memory.
/// Returns 0 on success, -1 on failure.
/// Frees any allocated pages on failure.
#[no_mangle]
pub unsafe extern "C" fn uvmcopy(old: Pagetable, new: Pagetable, size: u64) -> i32 {
pub unsafe fn uvmcopy(old: Pagetable, new: Pagetable, size: usize) -> i32 {
let mut i = 0;
while i < size {
let pte = walk(old, i, 0);
let pte = walk(old, i, false);
if pte.is_null() {
panic!("uvmcopy: PagetableEntry should exist");
} else if (*pte) & PTE_V as u64 == 0 {
@ -465,7 +455,7 @@ pub unsafe extern "C" fn uvmcopy(old: Pagetable, new: Pagetable, size: u64) -> i
let mem = kalloc();
if mem.is_null() {
uvmunmap(new, 0, i / PAGE_SIZE as u64, 1);
uvmunmap(new, 0, i / PAGE_SIZE, true);
return -1;
}
@ -475,13 +465,13 @@ pub unsafe extern "C" fn uvmcopy(old: Pagetable, new: Pagetable, size: u64) -> i
PAGE_SIZE as u64 as u32,
);
if mappages(new, i, PAGE_SIZE as u64, mem as usize as u64, flags as i32) != 0 {
if mappages(new, i, PAGE_SIZE, mem as usize, flags as i32) != 0 {
kfree(mem.cast());
uvmunmap(new, 0, i / PAGE_SIZE as u64, 1);
uvmunmap(new, 0, i / PAGE_SIZE, true);
return -1;
}
i += PAGE_SIZE as u64;
i += PAGE_SIZE;
}
0
@ -491,8 +481,8 @@ pub unsafe extern "C" fn uvmcopy(old: Pagetable, new: Pagetable, size: u64) -> i
///
/// Used by exec for the user stack guard page.
#[no_mangle]
pub unsafe extern "C" fn uvmclear(pagetable: Pagetable, virtual_addr: u64) {
let pte = walk(pagetable, virtual_addr, 0);
pub unsafe extern "C" fn uvmclear(pagetable: Pagetable, virtual_addr: usize) {
let pte = walk(pagetable, virtual_addr, false);
if pte.is_null() {
panic!("uvmclear");
}
@ -506,30 +496,30 @@ pub unsafe extern "C" fn uvmclear(pagetable: Pagetable, virtual_addr: u64) {
#[no_mangle]
pub unsafe extern "C" fn copyout(
pagetable: Pagetable,
mut dst_virtual_addr: u64,
mut dst_virtual_addr: usize,
mut src: *mut u8,
mut len: u64,
mut len: usize,
) -> i32 {
while len > 0 {
let va0 = round_down_page(dst_virtual_addr as usize) as u64;
let pa0 = walkaddr(pagetable, va0);
let va0 = round_down_page(dst_virtual_addr);
let pa0 = walkaddr(pagetable, va0) as usize;
if pa0 == 0 {
return -1;
}
let mut n = PAGE_SIZE as u64 - (dst_virtual_addr - va0);
let mut n = PAGE_SIZE - (dst_virtual_addr - va0);
if n > len {
n = len;
}
memmove(
((pa0 + dst_virtual_addr - va0) as usize as *mut u8).cast(),
((pa0 + dst_virtual_addr - va0) as *mut u8).cast(),
src,
n as u32,
);
len -= n;
src = src.add(n as usize);
dst_virtual_addr = va0 + PAGE_SIZE as u64;
src = src.add(n);
dst_virtual_addr = va0 + PAGE_SIZE;
}
0
}
@ -542,29 +532,29 @@ pub unsafe extern "C" fn copyout(
pub unsafe extern "C" fn copyin(
pagetable: Pagetable,
mut dst: *mut u8,
mut src_virtual_addr: u64,
mut len: u64,
mut src_virtual_addr: usize,
mut len: usize,
) -> i32 {
while len > 0 {
let va0 = round_down_page(src_virtual_addr as usize) as u64;
let pa0 = walkaddr(pagetable, va0);
let va0 = round_down_page(src_virtual_addr);
let pa0 = walkaddr(pagetable, va0) as usize;
if pa0 == 0 {
return -1;
}
let mut n = PAGE_SIZE as u64 - (src_virtual_addr - va0);
let mut n = PAGE_SIZE - (src_virtual_addr - va0);
if n > len {
n = len;
}
memmove(
dst.cast(),
((pa0 + src_virtual_addr - va0) as usize as *mut u8).cast(),
((pa0 + src_virtual_addr - va0) as *mut u8).cast(),
n as u32,
);
len -= n;
dst = dst.add(n as usize);
src_virtual_addr = va0 + PAGE_SIZE as u64;
dst = dst.add(n);
src_virtual_addr = va0 + PAGE_SIZE;
}
0
}
@ -573,7 +563,7 @@ pub unsafe extern "C" fn copyin(
// depending on usr_dst.
// Returns 0 on success, -1 on error.
#[no_mangle]
pub unsafe extern "C" fn either_copyout(user_dst: i32, dst: u64, src: *mut u8, len: u64) -> i32 {
pub unsafe extern "C" fn either_copyout(user_dst: i32, dst: usize, src: *mut u8, len: usize) -> i32 {
let p = Process::current().unwrap();
if user_dst > 0 {
@ -588,7 +578,7 @@ pub unsafe extern "C" fn either_copyout(user_dst: i32, dst: u64, src: *mut u8, l
// depending on usr_src.
// Returns 0 on success, -1 on error.
#[no_mangle]
pub unsafe extern "C" fn either_copyin(dst: *mut u8, user_src: i32, src: u64, len: u64) -> i32 {
pub unsafe extern "C" fn either_copyin(dst: *mut u8, user_src: i32, src: usize, len: usize) -> i32 {
let p = Process::current().unwrap();
if user_src > 0 {
@ -607,19 +597,19 @@ pub unsafe extern "C" fn either_copyin(dst: *mut u8, user_src: i32, src: u64, le
pub unsafe fn copyinstr(
pagetable: Pagetable,
mut dst: *mut u8,
mut src_virtual_addr: u64,
mut max: u64,
mut src_virtual_addr: usize,
mut max: usize,
) -> i32 {
let mut got_null = false;
while !got_null && max > 0 {
let va0 = round_down_page(src_virtual_addr as usize) as u64;
let pa0 = walkaddr(pagetable, va0);
let va0 = round_down_page(src_virtual_addr);
let pa0 = walkaddr(pagetable, va0) as usize;
if pa0 == 0 {
return -1;
}
let mut n = PAGE_SIZE as u64 - (src_virtual_addr - va0);
let mut n = PAGE_SIZE - (src_virtual_addr - va0);
if n > max {
n = max;
}
@ -640,7 +630,7 @@ pub unsafe fn copyinstr(
dst = dst.add(1);
}
src_virtual_addr = va0 + PAGE_SIZE as u64;
src_virtual_addr = va0 + PAGE_SIZE;
}
if got_null {

View File

@ -87,7 +87,13 @@ pub fn consolewrite(user_src: i32, src: u64, n: i32) -> i32 {
for i in 0..n {
let mut c = 0i8;
if either_copyin(addr_of_mut!(c).cast(), user_src, src + i as u64, 1) == -1 {
if either_copyin(
addr_of_mut!(c).cast(),
user_src,
src as usize + i as u32 as usize,
1,
) == -1
{
return i;
} else {
UART0.write_byte_buffered(c as u8);
@ -137,7 +143,7 @@ pub fn consoleread(user_dst: i32, mut dst: u64, mut n: i32) -> i32 {
// Copy the input byte to the user-space buffer.
cbuf = c;
if either_copyout(user_dst, dst, addr_of_mut!(cbuf).cast(), 1) == -1 {
if either_copyout(user_dst, dst as usize, addr_of_mut!(cbuf).cast(), 1) == -1 {
break;
}

View File

@ -147,9 +147,9 @@ pub unsafe fn filestat(file: *mut File, addr: u64) -> i32 {
if copyout(
proc.pagetable,
addr,
addr as usize,
addr_of_mut!(stat).cast(),
core::mem::size_of::<Stat>() as u64,
core::mem::size_of::<Stat>(),
) < 0
{
return -1;

View File

@ -99,7 +99,7 @@ impl Pipe {
guard.sleep(addr_of!(self.bytes_written).cast_mut().cast());
} else {
let mut b = 0u8;
if copyin(proc.pagetable, addr_of_mut!(b), addr + i as u64, 1) == -1 {
if copyin(proc.pagetable, addr_of_mut!(b), addr as usize + i, 1) == -1 {
break;
}
let index = self.bytes_written as usize % PIPESIZE;
@ -134,7 +134,7 @@ impl Pipe {
}
let b = self.data[self.bytes_read as usize % PIPESIZE];
self.as_mut().bytes_read += 1;
if copyout(proc.pagetable, addr + i as u64, addr_of!(b).cast_mut(), 1) == -1 {
if copyout(proc.pagetable, addr as usize + i, addr_of!(b).cast_mut(), 1) == -1 {
break;
}
i += 1;

View File

@ -59,7 +59,7 @@ pub unsafe extern "C" fn kfree(pa: *mut u8) {
panic!("kfree");
}
memset(pa, 0, PAGE_SIZE as u32);
memset(pa, 0, PAGE_SIZE);
let run: *mut Run = pa.cast();
@ -82,7 +82,7 @@ pub unsafe extern "C" fn kalloc() -> *mut u8 {
}
if !run.is_null() {
memset(run.cast(), 0, PAGE_SIZE as u32);
memset(run.cast(), 0, PAGE_SIZE);
}
run as *mut u8

View File

@ -1,7 +1,7 @@
pub mod kalloc;
#[no_mangle]
pub unsafe extern "C" fn memset(dst: *mut u8, data: i32, max_bytes: u32) -> *mut u8 {
pub unsafe extern "C" fn memset(dst: *mut u8, data: u8, max_bytes: usize) -> *mut u8 {
for i in 0..max_bytes {
*dst.add(i as usize) = data as u8;
}

View File

@ -11,7 +11,8 @@ use crate::{
mem::{kstack, Pagetable, PAGE_SIZE, PTE_R, PTE_W, PTE_X, TRAMPOLINE, TRAPFRAME},
trap::{usertrapret, InterruptBlocker},
virtual_memory::{
copyout, mappages, uvmalloc, uvmcopy, uvmcreate, uvmdealloc, uvmfree, uvmunmap, uvmfirst,
copyout, mappages, uvmalloc, uvmcopy, uvmcreate, uvmdealloc, uvmfirst, uvmfree,
uvmunmap,
},
},
fs::{
@ -28,12 +29,12 @@ use crate::{
sync::spinlock::Spinlock,
uprintln,
};
use arrayvec::ArrayVec;
use core::{
ffi::{c_char, c_void, CStr},
ptr::{addr_of, addr_of_mut, null_mut},
sync::atomic::{AtomicI32, Ordering},
};
use arrayvec::ArrayVec;
extern "C" {
// trampoline.S
@ -67,18 +68,15 @@ pub unsafe fn userinit() {
INITPROC = addr_of_mut!(*p) as usize;
let initcode: &[u8] = &[
0x17, 0x05, 0x00, 0x00, 0x13, 0x05, 0x45, 0x02,
0x97, 0x05, 0x00, 0x00, 0x93, 0x85, 0x35, 0x02,
0x93, 0x08, 0x70, 0x00, 0x73, 0x00, 0x00, 0x00,
0x93, 0x08, 0x20, 0x00, 0x73, 0x00, 0x00, 0x00,
0xef, 0xf0, 0x9f, 0xff, 0x2f, 0x69, 0x6e, 0x69,
0x74, 0x00, 0x00, 0x24, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00
0x17, 0x05, 0x00, 0x00, 0x13, 0x05, 0x45, 0x02, 0x97, 0x05, 0x00, 0x00, 0x93, 0x85, 0x35,
0x02, 0x93, 0x08, 0x70, 0x00, 0x73, 0x00, 0x00, 0x00, 0x93, 0x08, 0x20, 0x00, 0x73, 0x00,
0x00, 0x00, 0xef, 0xf0, 0x9f, 0xff, 0x2f, 0x69, 0x6e, 0x69, 0x74, 0x00, 0x00, 0x24, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
];
// Allocate one user page and copy initcode's
// instructions and data into it.
uvmfirst(p.pagetable, initcode.as_ptr().cast_mut(), initcode.len() as u32);
uvmfirst(p.pagetable, initcode.as_ptr().cast_mut(), initcode.len());
p.memory_allocated = PAGE_SIZE as u64;
// Prepare for the very first "return" from kernel to user.
@ -87,7 +85,13 @@ pub unsafe fn userinit() {
// User stack pointer
(*p.trapframe).sp = PAGE_SIZE as u64;
p.current_dir = namei(CStr::from_bytes_with_nul(b"/\0").unwrap().as_ptr().cast_mut().cast());
p.current_dir = namei(
CStr::from_bytes_with_nul(b"/\0")
.unwrap()
.as_ptr()
.cast_mut()
.cast(),
);
p.state = ProcessState::Runnable;
p.lock.unlock();
}
@ -233,7 +237,7 @@ impl Process {
memset(
addr_of_mut!(p.context).cast(),
0,
core::mem::size_of::<Context>() as u32,
core::mem::size_of::<Context>(),
);
p.context.ra = Process::forkret as usize as u64;
p.context.sp = p.kernel_stack + PAGE_SIZE as u64;
@ -268,8 +272,8 @@ impl Process {
if num_bytes > 0 {
size = uvmalloc(
self.pagetable,
size,
size.wrapping_add(num_bytes as u64),
size as usize,
size.wrapping_add(num_bytes as u64) as usize,
PTE_W,
);
@ -277,7 +281,11 @@ impl Process {
return Err(ProcessError::Allocation);
}
} else if num_bytes < 0 {
size = uvmdealloc(self.pagetable, size, size.wrapping_add(num_bytes as u64));
size = uvmdealloc(
self.pagetable,
size as usize,
size.wrapping_add(num_bytes as u64) as usize,
);
}
self.memory_allocated = size;
@ -299,9 +307,9 @@ impl Process {
// to and from user space, so not PTE_U.
if mappages(
pagetable,
TRAMPOLINE as u64,
PAGE_SIZE as u64,
addr_of!(trampoline) as usize as u64,
TRAMPOLINE,
PAGE_SIZE,
addr_of!(trampoline) as usize,
PTE_R | PTE_X,
) < 0
{
@ -312,13 +320,13 @@ impl Process {
// Map the trapframe page just below the trampoline page for trampoline.S.
if mappages(
pagetable,
TRAPFRAME as u64,
PAGE_SIZE as u64,
self.trapframe as usize as u64,
TRAPFRAME,
PAGE_SIZE,
self.trapframe as usize,
PTE_R | PTE_W,
) < 0
{
uvmunmap(pagetable, TRAMPOLINE as u64, 1, 0);
uvmunmap(pagetable, TRAMPOLINE, 1, false);
uvmfree(pagetable, 0);
return Err(ProcessError::Allocation);
}
@ -327,9 +335,9 @@ impl Process {
}
/// Free a process's pagetable and free the physical memory it refers to.
pub unsafe fn free_pagetable(pagetable: Pagetable, size: usize) {
uvmunmap(pagetable, TRAMPOLINE as u64, 1, 0);
uvmunmap(pagetable, TRAPFRAME as u64, 1, 0);
uvmfree(pagetable, size as u64)
uvmunmap(pagetable, TRAMPOLINE, 1, false);
uvmunmap(pagetable, TRAPFRAME, 1, false);
uvmfree(pagetable, size)
}
/// Create a new process, copying the parent.
@ -339,7 +347,12 @@ impl Process {
let child = Process::alloc()?;
// Copy user memory from parent to child.
if uvmcopy(parent.pagetable, child.pagetable, parent.memory_allocated) < 0 {
if uvmcopy(
parent.pagetable,
child.pagetable,
parent.memory_allocated as usize,
) < 0
{
child.free();
child.lock.unlock();
return Err(ProcessError::Allocation);
@ -466,9 +479,9 @@ impl Process {
if addr != 0
&& copyout(
self.pagetable,
addr,
addr as usize,
addr_of_mut!(p.exit_status).cast(),
core::mem::size_of::<i32>() as u64,
core::mem::size_of::<i32>(),
) < 0
{
p.lock.unlock();

View File

@ -1,7 +1,7 @@
use super::{
context::Context,
cpu::Cpu,
process::{PROCESSES, Process, ProcessState},
process::{Process, ProcessState, PROCESSES},
};
use crate::{
arch,

View File

@ -277,8 +277,8 @@ pub unsafe extern "C" fn fetchaddr(addr: u64, ip: *mut u64) -> i32 {
|| copyin(
proc.pagetable,
ip.cast(),
addr,
size_of::<*mut u64>() as u64,
addr as usize,
size_of::<*mut u64>(),
) != 0
{
-1
@ -294,7 +294,7 @@ pub unsafe extern "C" fn fetchaddr(addr: u64, ip: *mut u64) -> i32 {
pub unsafe extern "C" fn fetchstr(addr: u64, buf: *mut u8, max: i32) -> i32 {
let proc = Process::current().unwrap();
if copyinstr(proc.pagetable, buf, addr, max as u64) < 0 {
if copyinstr(proc.pagetable, buf, addr as usize, max as u32 as usize) < 0 {
-1
} else {
strlen(buf.cast())