initial work to adhere to spec in handling memory access faults

Erin 2023-06-21 01:56:26 +02:00 committed by ondra05
parent c95deefcb2
commit bf78cc751a
2 changed files with 55 additions and 38 deletions
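In short: Memory::load and Memory::store now return Result<(), AccessFault> instead of Result<(), ()>, so the VM can forward which fault occurred into its halt reason rather than discarding it. A minimal standalone sketch of that calling pattern (the types and bodies below are stand-ins, not the crate's real API):

#[derive(Clone, Copy, Debug)]
enum AccessFault {
    NoPage { addr: u64, remaining: usize },
    Permission,
}

#[derive(Debug)]
enum HaltReason {
    ProgramEnd,
    LoadAccessEx(AccessFault),
}

// Stand-in for Memory::load; only the signature shape matters here.
fn load(addr: u64) -> Result<(), AccessFault> {
    Err(AccessFault::NoPage { addr, remaining: 8 })
}

fn step(addr: u64) -> HaltReason {
    // Before: `.is_err()` plus a bare LoadAccessEx dropped the detail.
    // After: the fault itself is carried along.
    if let Err(e) = load(addr) {
        return HaltReason::LoadAccessEx(e);
    }
    HaltReason::ProgramEnd
}

fn main() {
    println!("{:?}", step(0xdead_beef));
}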


@@ -52,7 +52,7 @@ impl Memory {
}
/// Load value from an address
pub unsafe fn load(&self, addr: u64, target: *mut u8, count: usize) -> Result<(), ()> {
pub unsafe fn load(&self, addr: u64, target: *mut u8, count: usize) -> Result<(), AccessFault> {
self.memory_access(
addr,
target,
@@ -68,7 +68,12 @@ impl Memory {
}
/// Store value to an address
pub unsafe fn store(&mut self, addr: u64, source: *const u8, count: usize) -> Result<(), ()> {
pub unsafe fn store(
&mut self,
addr: u64,
source: *const u8,
count: usize,
) -> Result<(), AccessFault> {
self.memory_access(
addr,
source.cast_mut(),
@@ -80,7 +85,7 @@ impl Memory {
/// Copy a block of memory
pub unsafe fn block_copy(&mut self, src: u64, dst: u64, count: u64) -> Result<(), ()> {
let count = usize::try_from(count).expect("?conradluget a better CPU");
/* let count = usize::try_from(count).expect("?conradluget a better CPU");
let mut srcs = PageSplitter::new(src, count, self.root_pt);
let mut dsts = PageSplitter::new(dst, count, self.root_pt);
@@ -107,7 +112,9 @@ impl Memory {
(Some(src), Some(dst)) => (c_src, c_dst) = (src, dst),
_ => return Err(()),
}
}
} */
todo!("Block memory copy")
}
#[inline]
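The commented-out loop above zipped two PageSplitter streams and returned a unit Err when either side ran dry; the todo! defers re-plumbing it through AccessFault. One possible shape for that, as a hedged standalone sketch (Fragment and the single fault variant are simplified stand-ins for the crate's types, and fragments are assumed non-empty):

#[derive(Clone, Copy, Debug)]
enum AccessFault {
    NoPage,
}

// Simplified stand-in for PageSplitResult: one contiguous mapped run.
struct Fragment {
    ptr: *mut u8,
    size: usize,
}

// Copy `count` bytes between two streams of mapped fragments, surfacing the
// first fault from either side instead of a bare Err(()).
unsafe fn block_copy(
    mut srcs: impl Iterator<Item = Result<Fragment, AccessFault>>,
    mut dsts: impl Iterator<Item = Result<Fragment, AccessFault>>,
    mut count: usize,
) -> Result<(), AccessFault> {
    if count == 0 {
        return Ok(());
    }
    // Pull the first fragment from each side; an exhausted stream with bytes
    // still to move counts as a missing page.
    let (mut src, mut dst) = match (srcs.next(), dsts.next()) {
        (Some(s), Some(d)) => (s?, d?),
        _ => return Err(AccessFault::NoPage),
    };
    while count > 0 {
        // Copy as much as both current fragments (and the remaining count) allow.
        let chunk = src.size.min(dst.size).min(count);
        core::ptr::copy(src.ptr, dst.ptr, chunk);
        src = Fragment { ptr: src.ptr.add(chunk), size: src.size - chunk };
        dst = Fragment { ptr: dst.ptr.add(chunk), size: dst.size - chunk };
        count -= chunk;
        // Refill whichever side was exhausted.
        if count > 0 {
            if src.size == 0 {
                src = srcs.next().ok_or(AccessFault::NoPage)??;
            }
            if dst.size == 0 {
                dst = dsts.next().ok_or(AccessFault::NoPage)??;
            }
        }
    }
    Ok(())
}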
@@ -127,10 +134,11 @@ impl Memory {
len: usize,
permission_check: impl Fn(Permission) -> bool,
action: impl Fn(*mut u8, *mut u8, usize),
) -> Result<(), ()> {
for PageSplitResult { ptr, size, perm } in PageSplitter::new(src, len, self.root_pt) {
) -> Result<(), AccessFault> {
for item in PageSplitter::new(src, len, self.root_pt) {
let PageSplitResult { ptr, size, perm } = item?;
if !permission_check(perm) {
return Err(());
return Err(AccessFault::Permission);
}
action(ptr, dst, size);
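The loop change above is the standard pattern once an iterator yields Result items: bind the item, then `?` it, since a for loop cannot destructure through a Result. A tiny standalone illustration of the same shape (Chunk and total are placeholder names):

#[derive(Debug)]
struct Chunk {
    size: usize,
}

#[derive(Debug)]
enum AccessFault {
    Permission,
}

fn total(chunks: impl Iterator<Item = Result<Chunk, AccessFault>>) -> Result<usize, AccessFault> {
    let mut sum = 0;
    for item in chunks {
        // `item` is a Result; `?` stops the walk at the first fault.
        let Chunk { size } = item?;
        sum += size;
    }
    Ok(sum)
}

fn main() {
    let ok = [Ok(Chunk { size: 3 }), Ok(Chunk { size: 5 })];
    let bad = [Ok(Chunk { size: 3 }), Err(AccessFault::Permission)];
    println!("{:?}", total(ok.into_iter()));  // Ok(8)
    println!("{:?}", total(bad.into_iter())); // Err(Permission)
}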
@@ -141,6 +149,7 @@ impl Memory {
}
}
#[derive(Debug)]
struct PageSplitResult {
ptr: *mut u8,
size: usize,
@@ -164,7 +173,7 @@ impl PageSplitter {
}
impl Iterator for PageSplitter {
type Item = PageSplitResult;
type Item = Result<PageSplitResult, AccessFault>;
fn next(&mut self) -> Option<Self::Item> {
if self.size == 0 {
@@ -182,7 +191,12 @@ impl Iterator for PageSplitter {
let ptr = entry.ptr();
match entry.permission() {
Permission::Empty => return None,
Permission::Empty => {
return Some(Err(AccessFault::NoPage {
addr: self.addr,
remaining: self.size,
}))
}
Permission::Node => current_pt = ptr as _,
perm => {
break 'a (
@@ -192,7 +206,7 @@ impl Iterator for PageSplitter {
0 => 4096,
1 => 1024_usize.pow(2) * 2,
2 => 1024_usize.pow(3),
_ => return None,
_ => return Some(Err(AccessFault::TooShallow)),
},
self.addr as usize & ((1 << (lvl * 9 + 12)) - 1),
)
@@ -200,16 +214,25 @@ impl Iterator for PageSplitter {
}
}
}
return None;
return Some(Err(AccessFault::TooDeep));
};
let avail = (size - offset).clamp(0, self.size);
self.addr += size as u64;
self.size = self.size.saturating_sub(size);
Some(PageSplitResult {
Some(Ok(PageSplitResult {
ptr: unsafe { base.add(offset) },
size: avail,
perm,
})
}))
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum AccessFault {
NoPage { addr: u64, remaining: usize },
Permission,
TooShallow,
TooDeep,
}
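As read from the walk code above, the variants correspond to distinct outcomes: NoPage for an empty entry (carrying the faulting address and the bytes still unserved), Permission for a mapped page that fails the permission check, TooShallow for a leaf entry above the supported page-size levels, and TooDeep for a walk that runs past the deepest level without hitting a leaf. A hypothetical reporting helper over the enum above, just to show the match shape (String/format! assume an alloc- or std-enabled caller):

fn describe(fault: AccessFault) -> String {
    match fault {
        AccessFault::NoPage { addr, remaining } => {
            format!("no page mapped at {addr:#x} ({remaining} bytes unserved)")
        }
        AccessFault::Permission => "permission check failed".into(),
        AccessFault::TooShallow => "leaf entry above supported page-size levels".into(),
        AccessFault::TooDeep => "page walk ran past the deepest level".into(),
    }
}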


@@ -11,6 +11,8 @@
// program size. If you are (rightfully) worried about the UB, for now just
// append your program with 11 zeroes.
use self::mem::AccessFault;
mod mem;
mod value;
@@ -196,30 +198,22 @@ impl<'a> Vm<'a> {
_ => 0,
};
if self
.memory
.load(
self.read_reg(base).as_u64() + off + n as u64,
self.registers.as_mut_ptr().add(usize::from(dst) + n).cast(),
usize::from(count).saturating_sub(n),
)
.is_err()
{
return HaltReason::LoadAccessEx;
if let Err(e) = self.memory.load(
self.read_reg(base).as_u64() + off + n as u64,
self.registers.as_mut_ptr().add(usize::from(dst) + n).cast(),
usize::from(count).saturating_sub(n),
) {
return HaltReason::LoadAccessEx(e);
}
}
ST => {
let ParamBBDH(dst, base, off, count) = param!(self, ParamBBDH);
if self
.memory
.store(
self.read_reg(base).as_u64() + off,
self.registers.as_ptr().add(usize::from(dst)).cast(),
count.into(),
)
.is_err()
{
return HaltReason::LoadAccessEx;
if let Err(e) = self.memory.store(
self.read_reg(base).as_u64() + off,
self.registers.as_ptr().add(usize::from(dst)).cast(),
count.into(),
) {
return HaltReason::StoreAccessEx(e);
}
}
BMC => {
@@ -233,7 +227,7 @@ impl<'a> Vm<'a> {
)
.is_err()
{
return HaltReason::LoadAccessEx;
todo!("Block memory copy fault");
}
}
BRC => {
@@ -274,7 +268,7 @@ impl<'a> Vm<'a> {
}
ADDFI => binary_op_imm!(self, as_f64, ops::Add::add),
MULFI => binary_op_imm!(self, as_f64, ops::Mul::mul),
_ => return HaltReason::InvalidOpcode,
op => return HaltReason::InvalidOpEx(op),
}
}
}
@@ -302,7 +296,7 @@ impl<'a> Vm<'a> {
pub enum HaltReason {
ProgramEnd,
Ecall,
InvalidOpcode,
LoadAccessEx,
StoreAccessEx,
InvalidOpEx(u8),
LoadAccessEx(AccessFault),
StoreAccessEx(AccessFault),
}
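With the payload-carrying variants, an embedder can report exactly what went wrong instead of a generic access error. A hedged sketch of a host-side handler over the enum above (the handler itself is hypothetical, and println! assumes an std host):

fn report(reason: HaltReason) {
    match reason {
        HaltReason::ProgramEnd => println!("program finished"),
        HaltReason::Ecall => println!("environment call"),
        HaltReason::InvalidOpEx(op) => println!("invalid opcode {op:#04x}"),
        HaltReason::LoadAccessEx(AccessFault::NoPage { addr, remaining }) => {
            println!("load page fault at {addr:#x}, {remaining} bytes left")
        }
        HaltReason::LoadAccessEx(fault) => println!("load fault: {fault:?}"),
        HaltReason::StoreAccessEx(fault) => println!("store fault: {fault:?}"),
    }
}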