Address type, changed behaviour on address overflow

Erin · 2023-08-18 02:31:49 +02:00
commit 3034469e89 · parent 30070818ae
11 changed files with 254 additions and 118 deletions

========================================

@@ -3,11 +3,14 @@
 use {
     hbbytecode::valider::validate,
     hbvm::{
-        mem::softpaging::{
-            paging::{PageTable, Permission},
-            HandlePageFault, PageSize, SoftPagedMem,
+        mem::{
+            softpaging::{
+                paging::{PageTable, Permission},
+                HandlePageFault, PageSize, SoftPagedMem,
+            },
+            Address, MemoryAccessReason,
         },
-        MemoryAccessReason, Vm,
+        Vm,
     },
     libfuzzer_sys::fuzz_target,
 };

@@ -22,7 +25,7 @@ fuzz_target!(|data: &[u8]| {
             root_pt: Box::into_raw(Default::default()),
             icache: Default::default(),
         },
-        0,
+        Address::new(4),
     )
 };

@@ -32,8 +35,6 @@ fuzz_target!(|data: &[u8]| {
         alloc_and_map(&mut vm.memory, 4096),
     ];

-    unsafe { vm.memory.write() };
-
     // Run VM
     let _ = vm.run();

@@ -50,14 +51,14 @@ fn alloc_and_map(memory: &mut SoftPagedMem<TestTrapHandler>, at: u64) -> *mut u8
     let ptr = Box::into_raw(Box::<Page>::default()).cast();
     unsafe {
         memory
-            .map(ptr, at, Permission::Write, PageSize::Size4K)
+            .map(ptr, Address::new(at), Permission::Write, PageSize::Size4K)
             .unwrap()
     };
     ptr
 }

 fn unmap_and_dealloc(memory: &mut SoftPagedMem<TestTrapHandler>, ptr: *mut u8, from: u64) {
-    memory.unmap(from).unwrap();
+    memory.unmap(Address::new(from)).unwrap();
     let _ = unsafe { Box::from_raw(ptr.cast::<Page>()) };
 }

@@ -75,7 +76,7 @@ impl HandlePageFault for TestTrapHandler {
         &mut self,
         _: MemoryAccessReason,
         _: &mut PageTable,
-        _: u64,
+        _: Address,
         _: PageSize,
         _: *mut u8,
     ) -> bool {

========================================

@@ -1,7 +1,8 @@
 //! Block memory copier state machine

 use {
-    super::{Memory, mem::MemoryAccessReason, VmRunError},
+    super::{mem::MemoryAccessReason, Memory, VmRunError},
+    crate::mem::Address,
     core::{mem::MaybeUninit, task::Poll},
 };

@@ -15,9 +16,9 @@ struct AlignedBuf([MaybeUninit<u8>; BUF_SIZE]);
 /// State for block memory copy
 pub struct BlockCopier {
     /// Source address
-    src: u64,
+    src: Address,
     /// Destination address
-    dst: u64,
+    dst: Address,
     /// How many buffer sizes to copy?
     n_buffers: usize,
     /// …and what remainds after?

@@ -27,7 +28,7 @@ pub struct BlockCopier {
 impl BlockCopier {
     /// Construct a new one
     #[inline]
-    pub fn new(src: u64, dst: u64, count: usize) -> Self {
+    pub fn new(src: Address, dst: Address, count: usize) -> Self {
         Self {
             src,
             dst,

@@ -57,17 +58,8 @@
             }

             // Bump source and destination address
-            //
-            // If we are over the address space, bail.
-            match self.src.checked_add(BUF_SIZE as u64) {
-                Some(n) => self.src = n,
-                None => return Poll::Ready(Err(BlkCopyError::OutOfBounds)),
-            };
-
-            match self.dst.checked_add(BUF_SIZE as u64) {
-                Some(n) => self.dst = n,
-                None => return Poll::Ready(Err(BlkCopyError::OutOfBounds)),
-            };
+            self.src += BUF_SIZE;
+            self.dst += BUF_SIZE;

             self.n_buffers -= 1;

@@ -100,15 +92,15 @@ impl BlockCopier {
 #[inline]
 unsafe fn act(
     memory: &mut impl Memory,
-    src: u64,
-    dst: u64,
+    src: Address,
+    dst: Address,
     buf: *mut u8,
     count: usize,
 ) -> Result<(), BlkCopyError> {
     // Load to buffer
     memory
         .load(src, buf, count)
-        .map_err(|super::mem::LoadError(addr)| BlkCopyError::Access {
+        .map_err(|super::mem::LoadError(addr)| BlkCopyError {
             access_reason: MemoryAccessReason::Load,
             addr,
         })?;

@@ -116,7 +108,7 @@ unsafe fn act(
     // Store from buffer
     memory
         .store(dst, buf, count)
-        .map_err(|super::mem::StoreError(addr)| BlkCopyError::Access {
+        .map_err(|super::mem::StoreError(addr)| BlkCopyError {
             access_reason: MemoryAccessReason::Store,
             addr,
         })?;

@@ -126,30 +118,18 @@ unsafe fn act(
 /// Error occured when copying a block of memory
 #[derive(Clone, Copy, Debug, PartialEq, Eq)]
-pub enum BlkCopyError {
-    /// Memory access error
-    Access {
-        /// Kind of access
-        access_reason: MemoryAccessReason,
-        /// VM Address
-        addr: u64,
-    },
-    /// Address out of bounds
-    OutOfBounds,
+pub struct BlkCopyError {
+    /// Kind of access
+    access_reason: MemoryAccessReason,
+    /// VM Address
+    addr: Address,
 }

 impl From<BlkCopyError> for VmRunError {
     fn from(value: BlkCopyError) -> Self {
-        match value {
-            BlkCopyError::Access {
-                access_reason: MemoryAccessReason::Load,
-                addr,
-            } => Self::LoadAccessEx(addr),
-            BlkCopyError::Access {
-                access_reason: MemoryAccessReason::Store,
-                addr,
-            } => Self::StoreAccessEx(addr),
-            BlkCopyError::OutOfBounds => Self::AddrOutOfBounds,
+        match value.access_reason {
+            MemoryAccessReason::Load => Self::LoadAccessEx(value.addr),
+            MemoryAccessReason::Store => Self::StoreAccessEx(value.addr),
         }
     }
 }
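
This hunk is the heart of the behaviour change: bumping the copy cursors can no longer fail, so BlkCopyError loses its OutOfBounds variant and flattens from an enum into a struct. A minimal sketch of the difference (hypothetical snippet; only the `Address` type this commit introduces is assumed):

    // Why BlkCopyError lost its OutOfBounds variant: Address arithmetic
    // now wraps modulo 2^64 instead of failing, so bumping the copy
    // cursors can no longer error by itself.
    use hbvm::mem::Address;

    fn main() {
        let near_end = Address::new(u64::MAX - 8);
        // Before: `near_end.checked_add(512)` returned None -> OutOfBounds.
        // After: `+` is wrapping_add, so the cursor silently wraps around.
        let bumped = near_end + 512_u64;
        assert_eq!(bumped, Address::new(503)); // (2^64 - 9) + 512 mod 2^64
    }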

========================================

@@ -12,9 +12,9 @@
 #![no_std]
 #![cfg_attr(feature = "nightly", feature(fn_align))]
-#![warn(missing_docs, clippy::missing_docs_in_private_items)]
+#![warn(missing_docs)]

-use mem::Memory;
+use mem::{Memory, Address};

 #[cfg(feature = "alloc")]
 extern crate alloc;

@@ -39,7 +39,7 @@ pub struct Vm<Mem, const TIMER_QUOTIENT: usize> {
     pub memory: Mem,

     /// Program counter
-    pub pc: usize,
+    pub pc: Address,

     /// Program timer
     timer: usize,

@@ -56,11 +56,11 @@ where
     ///
     /// # Safety
     /// Program code has to be validated
-    pub unsafe fn new(memory: Mem, entry: u64) -> Self {
+    pub unsafe fn new(memory: Mem, entry: Address) -> Self {
         Self {
             registers: [Value::from(0_u64); 256],
             memory,
-            pc: entry as _,
+            pc: entry,
             timer: 0,
             copier: None,
         }

@@ -75,13 +75,13 @@ pub enum VmRunError {
     InvalidOpcode(u8),

     /// Unhandled load access exception
-    LoadAccessEx(u64),
+    LoadAccessEx(Address),

     /// Unhandled instruction load access exception
-    ProgramFetchLoadEx(u64),
+    ProgramFetchLoadEx(Address),

     /// Unhandled store access exception
-    StoreAccessEx(u64),
+    StoreAccessEx(Address),

     /// Register out-of-bounds access
     RegOutOfBounds,
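
With VmRunError now carrying an Address, a host can report faults through the type's hex Display directly. A hypothetical caller-side sketch (variant names are from this commit; the `report` helper is illustrative, and it assumes VmRunError derives Debug, as the other error types here do):

    use hbvm::VmRunError;

    fn report(err: VmRunError) {
        match err {
            // Address's Display prints bare hex, e.g. "dead" for 0xdead.
            VmRunError::LoadAccessEx(a) => eprintln!("load fault at {a}"),
            VmRunError::StoreAccessEx(a) => eprintln!("store fault at {a}"),
            other => eprintln!("vm error: {other:?}"),
        }
    }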

========================================

@@ -1,3 +1,5 @@
+use hbvm::mem::Address;
+
 use {
     hbbytecode::valider::validate,
     hbvm::{

@@ -26,7 +28,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
             root_pt: Box::into_raw(Default::default()),
             icache: Default::default(),
         },
-        4,
+        Address::new(4),
     );

     let data = {
         let ptr = std::alloc::alloc_zeroed(std::alloc::Layout::from_size_align_unchecked(

@@ -41,7 +43,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
         vm.memory
             .map(
                 data,
-                8192,
+                Address::new(8192),
                 hbvm::mem::softpaging::paging::Permission::Write,
                 PageSize::Size4K,
             )

@@ -54,7 +56,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
             data,
             std::alloc::Layout::from_size_align_unchecked(4096, 4096),
         );
-        vm.memory.unmap(8192).unwrap();
+        vm.memory.unmap(Address::new(8192)).unwrap();
         let _ = Box::from_raw(vm.memory.root_pt);
     }
 }

@@ -72,7 +74,7 @@ impl HandlePageFault for TestTrapHandler {
         &mut self,
         _: MemoryAccessReason,
         _: &mut PageTable,
-        _: u64,
+        _: Address,
         _: PageSize,
         _: *mut u8,
     ) -> bool {

========================================

hbvm/src/mem/addr.rs (new file)

@@ -0,0 +1,108 @@
//! Virtual(?) memory address

use {
    core::{fmt::Debug, ops},
    derive_more::Display,
};

/// Memory address
#[derive(Clone, Copy, Display, PartialEq, Eq, PartialOrd, Ord)]
#[display(fmt = "{_0:x}")]
pub struct Address(u64);

impl Address {
    /// A null address
    pub const NULL: Self = Self(0);

    /// Saturating integer addition. Computes self + rhs, saturating at the numeric bounds instead of overflowing.
    #[inline]
    pub fn saturating_add<T: AddressOp>(self, rhs: T) -> Self {
        Self(self.0.saturating_add(rhs.cast_u64()))
    }

    /// Saturating integer subtraction. Computes self - rhs, saturating at the numeric bounds instead of overflowing.
    #[inline]
    pub fn saturating_sub<T: AddressOp>(self, rhs: T) -> Self {
        Self(self.0.saturating_sub(rhs.cast_u64()))
    }

    /// Cast or if smaller, truncate to [`usize`]
    pub fn truncate_usize(self) -> usize {
        self.0 as _
    }

    /// Get inner value
    #[inline(always)]
    pub fn get(self) -> u64 {
        self.0
    }

    /// Construct new address
    #[inline(always)]
    pub fn new(val: u64) -> Self {
        Self(val)
    }

    /// Do something with inner value
    #[inline(always)]
    pub fn map(self, f: impl Fn(u64) -> u64) -> Self {
        Self(f(self.0))
    }
}

impl<T: AddressOp> ops::Add<T> for Address {
    type Output = Self;

    #[inline]
    fn add(self, rhs: T) -> Self::Output {
        Self(self.0.wrapping_add(rhs.cast_u64()))
    }
}

impl<T: AddressOp> ops::Sub<T> for Address {
    type Output = Self;

    #[inline]
    fn sub(self, rhs: T) -> Self::Output {
        Self(self.0.wrapping_sub(rhs.cast_u64()))
    }
}

impl<T: AddressOp> ops::AddAssign<T> for Address {
    fn add_assign(&mut self, rhs: T) {
        self.0 = self.0.wrapping_add(rhs.cast_u64())
    }
}

impl<T: AddressOp> ops::SubAssign<T> for Address {
    fn sub_assign(&mut self, rhs: T) {
        self.0 = self.0.wrapping_sub(rhs.cast_u64())
    }
}

impl From<Address> for u64 {
    #[inline(always)]
    fn from(value: Address) -> Self {
        value.0
    }
}

impl Debug for Address {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        write!(f, "[{:0x}]", self.0)
    }
}

/// Can perform address operations with
pub trait AddressOp {
    /// Cast to u64, truncating or extending
    fn cast_u64(self) -> u64;
}

macro_rules! impl_address_ops(($($ty:ty),* $(,)?) => {
    $(impl AddressOp for $ty {
        #[inline(always)]
        fn cast_u64(self) -> u64 { self as _ }
    })*
});

impl_address_ops!(u8, u16, u32, u64, usize);
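
A quick tour of the API above (hypothetical snippet, not part of the commit); note that `+`/`-` wrap while the `saturating_*` methods clamp:

    use hbvm::mem::Address;

    fn main() {
        let a = Address::new(0x1000);
        assert_eq!(a.get(), 0x1000);
        assert_eq!(a + 0x20_u64, Address::new(0x1020)); // wrapping add
        assert_eq!(a - 0x2000_u64, Address::new(0xFFFF_FFFF_FFFF_F000)); // wraps past zero
        assert_eq!(a.saturating_sub(0x2000_u64), Address::NULL); // clamps instead
        assert_eq!(a.map(|x| x & !0xFFF), Address::new(0x1000)); // mask via map
        assert_eq!(format!("{a}"), "1000"); // Display is bare hex
    }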

========================================

@@ -1,16 +1,24 @@
 //! Memory implementations

-use {derive_more::Display, hbbytecode::ProgramVal};
-
 pub mod softpaging;

+mod addr;
+
+pub use addr::Address;
+use {derive_more::Display, hbbytecode::ProgramVal};
+
 /// Load-store memory access
 pub trait Memory {
     /// Load data from memory on address
     ///
     /// # Safety
     /// - Shall not overrun the buffer
-    unsafe fn load(&mut self, addr: u64, target: *mut u8, count: usize) -> Result<(), LoadError>;
+    unsafe fn load(
+        &mut self,
+        addr: Address,
+        target: *mut u8,
+        count: usize,
+    ) -> Result<(), LoadError>;

@@ -18,7 +26,7 @@ pub trait Memory {
     /// Store data to memory on address
     ///
     /// # Safety
     /// - Shall not overrun the buffer
     unsafe fn store(
         &mut self,
-        addr: u64,
+        addr: Address,
         source: *const u8,
         count: usize,
     ) -> Result<(), StoreError>;

@@ -27,24 +35,24 @@ pub trait Memory {
     ///
     /// # Safety
     /// - Data read have to be valid
-    unsafe fn prog_read<T: ProgramVal>(&mut self, addr: u64) -> Option<T>;
+    unsafe fn prog_read<T: ProgramVal>(&mut self, addr: Address) -> Option<T>;

     /// Read from program memory to exectue
     ///
     /// # Safety
     /// - You have to be really sure that these bytes are there, understand?
-    unsafe fn prog_read_unchecked<T: ProgramVal>(&mut self, addr: u64) -> T;
+    unsafe fn prog_read_unchecked<T: ProgramVal>(&mut self, addr: Address) -> T;
 }

 /// Unhandled load access trap
 #[derive(Clone, Copy, Display, Debug, PartialEq, Eq)]
-#[display(fmt = "Load access error at address {_0:#x}")]
-pub struct LoadError(pub u64);
+#[display(fmt = "Load access error at address {_0}")]
+pub struct LoadError(pub Address);

 /// Unhandled store access trap
 #[derive(Clone, Copy, Display, Debug, PartialEq, Eq)]
-#[display(fmt = "Store access error at address {_0:#x}")]
-pub struct StoreError(pub u64);
+#[display(fmt = "Store access error at address {_0}")]
+pub struct StoreError(pub Address);

 /// Reason to access memory
 #[derive(Clone, Copy, Display, Debug, PartialEq, Eq)]
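
For a sense of what implementing the reworked trait looks like, here is a hypothetical flat-buffer Memory backend against the new `Address`-taking signatures (the method signatures and error types are from this commit; the `FlatMem` type is illustrative only):

    use hbvm::mem::{Address, LoadError, Memory, StoreError};

    struct FlatMem {
        ram: Vec<u8>,
    }

    impl Memory for FlatMem {
        unsafe fn load(
            &mut self,
            addr: Address,
            target: *mut u8,
            count: usize,
        ) -> Result<(), LoadError> {
            let a = addr.truncate_usize();
            // Out-of-range loads report the faulting Address back.
            let src = self.ram.get(a..a + count).ok_or(LoadError(addr))?;
            core::ptr::copy_nonoverlapping(src.as_ptr(), target, count);
            Ok(())
        }

        unsafe fn store(
            &mut self,
            addr: Address,
            source: *const u8,
            count: usize,
        ) -> Result<(), StoreError> {
            let a = addr.truncate_usize();
            let dst = self.ram.get_mut(a..a + count).ok_or(StoreError(addr))?;
            core::ptr::copy_nonoverlapping(source, dst.as_mut_ptr(), count);
            Ok(())
        }

        unsafe fn prog_read<T: hbbytecode::ProgramVal>(&mut self, addr: Address) -> Option<T> {
            let a = addr.truncate_usize();
            let bytes = self.ram.get(a..a + core::mem::size_of::<T>())?;
            Some(bytes.as_ptr().cast::<T>().read_unaligned())
        }

        unsafe fn prog_read_unchecked<T: hbbytecode::ProgramVal>(&mut self, addr: Address) -> T {
            self.ram
                .as_ptr()
                .add(addr.truncate_usize())
                .cast::<T>()
                .read_unaligned()
        }
    }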

========================================

@@ -1,5 +1,7 @@
 //! Program instruction cache

+use crate::mem::Address;
+
 use {
     super::{lookup::AddrPageLookuper, paging::PageTable, PageSize},
     core::{

@@ -12,7 +14,7 @@ use {
 #[derive(Clone, Debug)]
 pub struct ICache {
     /// Current page address base
-    base: u64,
+    base: Address,
     /// Curent page pointer
     data: Option<NonNull<u8>>,
     /// Current page size

@@ -24,7 +26,7 @@ pub struct ICache {
 impl Default for ICache {
     fn default() -> Self {
         Self {
-            base: Default::default(),
+            base: Address::NULL,
             data: Default::default(),
             size: PageSize::Size4K,
             mask: Default::default(),

@@ -37,22 +39,26 @@ impl ICache {
     ///
     /// # Safety
     /// `T` should be valid to read from instruction memory
-    pub(super) unsafe fn fetch<T>(&mut self, addr: u64, root_pt: *const PageTable) -> Option<T> {
+    pub(super) unsafe fn fetch<T>(
+        &mut self,
+        addr: Address,
+        root_pt: *const PageTable,
+    ) -> Option<T> {
         let mut ret = MaybeUninit::<T>::uninit();

         let pbase = self
             .data
-            .or_else(|| self.fetch_page(self.base.checked_add(self.size as _)?, root_pt))?;
+            .or_else(|| self.fetch_page(self.base + self.size, root_pt))?;

         // Get address base
-        let base = addr & self.mask;
+        let base = addr.map(|x| x & self.mask);

         // Base not matching, fetch anew
         if base != self.base {
             self.fetch_page(base, root_pt)?;
         };

-        let offset = addr & !self.mask;
+        let offset = addr.get() & !self.mask;
         let requ_size = size_of::<T>();

@@ -66,7 +72,7 @@
         // Copy overflow
         if rem != 0 {
-            let pbase = self.fetch_page(self.base.checked_add(self.size as _)?, root_pt)?;
+            let pbase = self.fetch_page(self.base + self.size, root_pt)?;

             // Unlikely, unsupported scenario
             if rem > self.size as _ {

@@ -84,7 +90,7 @@ impl ICache {
     }

     /// Fetch a page
-    unsafe fn fetch_page(&mut self, addr: u64, pt: *const PageTable) -> Option<NonNull<u8>> {
+    unsafe fn fetch_page(&mut self, addr: Address, pt: *const PageTable) -> Option<NonNull<u8>> {
         let res = AddrPageLookuper::new(addr, 0, pt).next()?.ok()?;
         if !super::perm_check::executable(res.perm) {
             return None;

@@ -97,7 +103,7 @@ impl ICache {
             _ => return None,
         };
         self.data = Some(NonNull::new(res.ptr)?);
-        self.base = addr & self.mask;
+        self.base = addr.map(|x| x & self.mask);
         self.data
     }
 }
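
The cache now keeps `base` as a typed Address and splits incoming addresses with `map`/`get`. The same split, stand-alone (hypothetical numbers; mask is `!(page_size - 1)`, as for a 4 KiB page):

    use hbvm::mem::Address;

    fn main() {
        let mask: u64 = !(4096 - 1);
        let addr = Address::new(0x1_2345);
        let base = addr.map(|x| x & mask); // stays an Address: 0x1_2000
        let offset = addr.get() & !mask;   // plain u64 offset within the page
        assert_eq!(base, Address::new(0x1_2000));
        assert_eq!(offset, 0x345);
    }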

========================================

@@ -1,5 +1,7 @@
 //! Address lookup

+use crate::mem::addr::Address;
+
 use super::{
     addr_extract_index,
     paging::{PageTable, Permission},

@@ -9,7 +11,7 @@ use super::{
 /// Good result from address split
 pub struct AddrPageLookupOk {
     /// Virtual address
-    pub vaddr: u64,
+    pub vaddr: Address,

     /// Pointer to the start for perform operation
     pub ptr: *mut u8,

@@ -24,7 +26,7 @@ pub struct AddrPageLookupOk {
 /// Errornous address split result
 pub struct AddrPageLookupError {
     /// Address of failure
-    pub addr: u64,
+    pub addr: Address,

     /// Requested page size
     pub size: PageSize,

@@ -33,7 +35,7 @@ pub struct AddrPageLookupError {
 /// Address splitter into pages
 pub struct AddrPageLookuper {
     /// Current address
-    addr: u64,
+    addr: Address,

     /// Size left
     size: usize,

@@ -45,7 +47,7 @@ pub struct AddrPageLookuper {
 impl AddrPageLookuper {
     /// Create a new page lookuper
     #[inline]
-    pub const fn new(addr: u64, size: usize, pagetable: *const PageTable) -> Self {
+    pub const fn new(addr: Address, size: usize, pagetable: *const PageTable) -> Self {
         Self {
             addr,
             size,

@@ -55,7 +57,7 @@ impl AddrPageLookuper {
     /// Bump address by size X
     pub fn bump(&mut self, page_size: PageSize) {
-        self.addr += page_size as u64;
+        self.addr += page_size;
         self.size = self.size.saturating_sub(page_size as _);
     }
 }

========================================

@@ -1,5 +1,7 @@
 //! Automatic memory mapping

+use crate::mem::addr::Address;
+
 use {
     super::{
         addr_extract_index,

@@ -10,7 +12,7 @@ use {
     derive_more::Display,
 };

 impl<'p, A, const OUT_PROG_EXEC: bool> SoftPagedMem<'p, A, OUT_PROG_EXEC> {
     /// Maps host's memory into VM's memory
     ///
     /// # Safety

@@ -20,7 +22,7 @@ impl<'p, A, const OUT_PROG_EXEC: bool> SoftPagedMem<'p, A, OUT_PROG_EXEC> {
     pub unsafe fn map(
         &mut self,
         host: *mut u8,
-        target: u64,
+        target: Address,
         perm: Permission,
         pagesize: PageSize,
     ) -> Result<(), MapError> {

@@ -82,7 +84,7 @@ impl<'p, A, const OUT_PROG_EXEC: bool> SoftPagedMem<'p, A, OUT_PROG_EXEC> {
     ///
     /// If errors, it only means there is no entry to unmap and in most cases
     /// just should be ignored.
-    pub fn unmap(&mut self, addr: u64) -> Result<(), NothingToUnmap> {
+    pub fn unmap(&mut self, addr: Address) -> Result<(), NothingToUnmap> {
         let mut current_pt = self.root_pt;
         let mut page_tables = [core::ptr::null_mut(); 5];

========================================

@@ -1,9 +1,5 @@
 //! Platform independent, software paged memory implementation

-use core::mem::size_of;
-
-use self::icache::ICache;
-
 pub mod icache;
 pub mod lookup;
 pub mod paging;

@@ -12,7 +8,9 @@ pub mod paging;
 pub mod mapping;

 use {
-    super::{LoadError, Memory, MemoryAccessReason, StoreError},
+    super::{addr::Address, LoadError, Memory, MemoryAccessReason, StoreError},
+    core::mem::size_of,
+    icache::ICache,
     lookup::{AddrPageLookupError, AddrPageLookupOk, AddrPageLookuper},
     paging::{PageTable, Permission},
 };

@@ -41,7 +39,12 @@ impl<'p, PfH: HandlePageFault, const OUT_PROG_EXEC: bool> Memory
     ///
     /// # Safety
     /// Applies same conditions as for [`core::ptr::copy_nonoverlapping`]
-    unsafe fn load(&mut self, addr: u64, target: *mut u8, count: usize) -> Result<(), LoadError> {
+    unsafe fn load(
+        &mut self,
+        addr: Address,
+        target: *mut u8,
+        count: usize,
+    ) -> Result<(), LoadError> {
         self.memory_access(
             MemoryAccessReason::Load,
             addr,

@@ -59,7 +62,7 @@ impl<'p, PfH: HandlePageFault, const OUT_PROG_EXEC: bool> Memory
     /// Applies same conditions as for [`core::ptr::copy_nonoverlapping`]
     unsafe fn store(
         &mut self,
-        addr: u64,
+        addr: Address,
         source: *const u8,
         count: usize,
     ) -> Result<(), StoreError> {

@@ -75,27 +78,31 @@ impl<'p, PfH: HandlePageFault, const OUT_PROG_EXEC: bool> Memory
     }

     #[inline(always)]
-    unsafe fn prog_read<T>(&mut self, addr: u64) -> Option<T> {
-        if OUT_PROG_EXEC && addr as usize > self.program.len() {
+    unsafe fn prog_read<T>(&mut self, addr: Address) -> Option<T> {
+        if OUT_PROG_EXEC && addr.truncate_usize() > self.program.len() {
             return self.icache.fetch::<T>(addr, self.root_pt);
         }

-        let addr = addr as usize;
+        let addr = addr.truncate_usize();
         self.program
             .get(addr..addr + size_of::<T>())
             .map(|x| x.as_ptr().cast::<T>().read())
     }

     #[inline(always)]
-    unsafe fn prog_read_unchecked<T>(&mut self, addr: u64) -> T {
-        if OUT_PROG_EXEC && addr as usize > self.program.len() {
+    unsafe fn prog_read_unchecked<T>(&mut self, addr: Address) -> T {
+        if OUT_PROG_EXEC && addr.truncate_usize() > self.program.len() {
             return self
                 .icache
-                .fetch::<T>(addr as _, self.root_pt)
+                .fetch::<T>(addr, self.root_pt)
                 .unwrap_or_else(|| core::mem::zeroed());
         }

-        self.program.as_ptr().add(addr as _).cast::<T>().read()
+        self.program
+            .as_ptr()
+            .add(addr.truncate_usize())
+            .cast::<T>()
+            .read()
     }
 }

@@ -110,32 +117,32 @@ impl<'p, PfH: HandlePageFault, const OUT_PROG_EXEC: bool> SoftPagedMem<'p, PfH,
     fn memory_access(
         &mut self,
         reason: MemoryAccessReason,
-        src: u64,
+        src: Address,
         mut dst: *mut u8,
         len: usize,
         permission_check: fn(Permission) -> bool,
         action: fn(*mut u8, *mut u8, usize),
-    ) -> Result<(), u64> {
+    ) -> Result<(), Address> {
         // Memory load from program section
-        let (src, len) = if src < self.program.len() as _ {
+        let (src, len) = if src.truncate_usize() < self.program.len() as _ {
             // Allow only loads
             if reason != MemoryAccessReason::Load {
                 return Err(src);
             }

             // Determine how much data to copy from here
-            let to_copy = len.clamp(0, self.program.len().saturating_sub(src as _));
+            let to_copy = len.clamp(0, self.program.len().saturating_sub(src.truncate_usize()));

             // Perform action
             action(
-                unsafe { self.program.as_ptr().add(src as _).cast_mut() },
+                unsafe { self.program.as_ptr().add(src.truncate_usize()).cast_mut() },
                 dst,
                 to_copy,
             );

             // Return shifted from what we've already copied
             (
-                src.saturating_add(to_copy as _),
+                src.saturating_add(to_copy as u64),
                 len.saturating_sub(to_copy),
             )
         } else {

@@ -196,8 +203,9 @@ impl<'p, PfH: HandlePageFault, const OUT_PROG_EXEC: bool> SoftPagedMem<'p, PfH,
 ///
 /// The level shall not be larger than 4, otherwise
 /// the output of the function is unspecified (yes, it can also panic :)
-pub fn addr_extract_index(addr: u64, lvl: u8) -> usize {
+pub fn addr_extract_index(addr: Address, lvl: u8) -> usize {
     debug_assert!(lvl <= 4);
+    let addr = addr.get();
     usize::try_from((addr >> (lvl * 8 + 12)) & ((1 << 8) - 1)).expect("?conradluget a better CPU")
 }

@@ -226,6 +234,22 @@ impl PageSize {
     }
 }

+impl core::ops::Add<PageSize> for Address {
+    type Output = Self;
+
+    #[inline(always)]
+    fn add(self, rhs: PageSize) -> Self::Output {
+        self + (rhs as u64)
+    }
+}
+
+impl core::ops::AddAssign<PageSize> for Address {
+    #[inline(always)]
+    fn add_assign(&mut self, rhs: PageSize) {
+        *self = Self::new(self.get().wrapping_add(rhs as u64));
+    }
+}
+
 /// Permisison checks
 pub mod perm_check {
     use super::paging::Permission;

@@ -263,7 +287,7 @@ pub trait HandlePageFault {
         &mut self,
         reason: MemoryAccessReason,
         pagetable: &mut PageTable,
-        vaddr: u64,
+        vaddr: Address,
         size: PageSize,
         dataptr: *mut u8,
     ) -> bool
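
The two impls added above let page-granular code step an Address by a PageSize directly. A hypothetical walk over four 4 KiB pages:

    use hbvm::mem::{softpaging::PageSize, Address};

    fn main() {
        let mut at = Address::new(0x4000);
        for _ in 0..4 {
            println!("page at {at}");
            at += PageSize::Size4K; // wraps on overflow, like all Address math
        }
        assert_eq!(at, Address::new(0x8000));
    }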

========================================

@@ -2,6 +2,8 @@
 //!
 //! Have fun

+use crate::mem::Address;
+
 use {
     super::{
         bmc::BlockCopier,

@@ -202,8 +204,8 @@ where
                     self.pc -= size_of::<ParamBBD>() + 1;
                     self.copier = Some(BlockCopier::new(
-                        self.read_reg(src).cast(),
-                        self.read_reg(dst).cast(),
+                        Address::new(self.read_reg(src).cast()),
+                        Address::new(self.read_reg(dst).cast()),
                         count as _,
                     ));

@@ -244,16 +246,16 @@
                     // Jump and link. Save PC after this instruction to
                     // specified register and jump to reg + offset.
                     let ParamBBD(save, reg, offset) = self.decode();
-                    self.write_reg(save, self.pc as u64);
+                    self.write_reg(save, self.pc.get());
                     self.pc =
-                        (self.read_reg(reg).cast::<u64>().saturating_add(offset)) as usize;
+                        Address::new(self.read_reg(reg).cast::<u64>().saturating_add(offset));
                 }

                 // Conditional jumps, jump only to immediates
                 JEQ => self.cond_jmp::<u64>(Ordering::Equal),
                 JNE => {
                     let ParamBBD(a0, a1, jt) = self.decode();
                     if self.read_reg(a0).cast::<u64>() != self.read_reg(a1).cast::<u64>() {
-                        self.pc = jt as usize;
+                        self.pc = Address::new(jt);
                     }
                 }
                 JLT => self.cond_jmp::<u64>(Ordering::Less),

@@ -317,7 +319,7 @@ where
     /// Decode instruction operands
     #[inline(always)]
     unsafe fn decode<T: ProgramVal>(&mut self) -> T {
-        let pc1 = self.pc + 1;
+        let pc1 = self.pc + 1_u64;
         let data = self.memory.prog_read_unchecked::<T>(pc1 as _);
         self.pc += 1 + size_of::<T>();
         data

@@ -360,7 +362,7 @@ where
                 .cmp(&self.read_reg(a1).cast::<T>())
                 == expected
             {
-                self.pc = ja as usize;
+                self.pc = Address::new(ja);
             }
         }

@@ -388,7 +390,7 @@ where
         offset: u64,
         size: u16,
         adder: u8,
-    ) -> Result<u64, VmRunError> {
+    ) -> Result<Address, VmRunError> {
         let reg = dst.checked_add(adder).ok_or(VmRunError::RegOutOfBounds)?;

         if usize::from(reg) * 8 + usize::from(size) > 2048 {

@@ -399,6 +401,7 @@ where
             .checked_add(offset)
             .and_then(|x| x.checked_add(adder.into()))
             .ok_or(VmRunError::AddrOutOfBounds)
+            .map(Address::new)
     }
 }
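
Since `usize` implements `AddressOp`, the `self.pc += 1 + size_of::<T>()` bump in `decode` works unchanged on the new type. A stand-alone rendition (hypothetical; the operand size is purely illustrative):

    use hbvm::mem::Address;

    fn bump_pc<T>(pc: Address) -> Address {
        // Skip the opcode byte plus the operand block, as decode() does;
        // usize implements AddressOp, so plain `+` works and wraps on overflow.
        pc + (1 + core::mem::size_of::<T>())
    }

    fn main() {
        assert_eq!(bump_pc::<[u8; 10]>(Address::new(4)), Address::new(15));
    }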