diff --git a/hbvm/fuzz/fuzz_targets/vm.rs b/hbvm/fuzz/fuzz_targets/vm.rs
index f733600..5cd2d2a 100644
--- a/hbvm/fuzz/fuzz_targets/vm.rs
+++ b/hbvm/fuzz/fuzz_targets/vm.rs
@@ -3,11 +3,14 @@
 use {
     hbbytecode::valider::validate,
     hbvm::{
-        mem::softpaging::{
-            paging::{PageTable, Permission},
-            HandlePageFault, PageSize, SoftPagedMem,
+        mem::{
+            softpaging::{
+                paging::{PageTable, Permission},
+                HandlePageFault, PageSize, SoftPagedMem,
+            },
+            Address, MemoryAccessReason,
         },
-        MemoryAccessReason, Vm,
+        Vm,
     },
     libfuzzer_sys::fuzz_target,
 };
@@ -22,7 +25,7 @@ fuzz_target!(|data: &[u8]| {
                 root_pt: Box::into_raw(Default::default()),
                 icache: Default::default(),
             },
-            0,
+            Address::new(4),
         )
     };
 
@@ -32,8 +35,6 @@ fuzz_target!(|data: &[u8]| {
        alloc_and_map(&mut vm.memory, 4096),
     ];
 
-    unsafe { vm.memory.write() };
-
     // Run VM
     let _ = vm.run();
 
@@ -50,14 +51,14 @@ fn alloc_and_map(memory: &mut SoftPagedMem, at: u64) -> *mut u8
     let ptr = Box::into_raw(Box::<PageTable>::default()).cast();
     unsafe {
         memory
-            .map(ptr, at, Permission::Write, PageSize::Size4K)
+            .map(ptr, Address::new(at), Permission::Write, PageSize::Size4K)
             .unwrap()
     };
     ptr
 }
 
 fn unmap_and_dealloc(memory: &mut SoftPagedMem, ptr: *mut u8, from: u64) {
-    memory.unmap(from).unwrap();
+    memory.unmap(Address::new(from)).unwrap();
     let _ = unsafe { Box::from_raw(ptr.cast::<PageTable>()) };
 }
 
@@ -75,7 +76,7 @@ impl HandlePageFault for TestTrapHandler {
         &mut self,
         _: MemoryAccessReason,
         _: &mut PageTable,
-        _: u64,
+        _: Address,
         _: PageSize,
         _: *mut u8,
     ) -> bool {
diff --git a/hbvm/src/bmc.rs b/hbvm/src/bmc.rs
index ef0d9cd..21d30b6 100644
--- a/hbvm/src/bmc.rs
+++ b/hbvm/src/bmc.rs
@@ -1,7 +1,8 @@
 //! Block memory copier state machine
 
 use {
-    super::{Memory, mem::MemoryAccessReason, VmRunError},
+    super::{mem::MemoryAccessReason, Memory, VmRunError},
+    crate::mem::Address,
     core::{mem::MaybeUninit, task::Poll},
 };
 
@@ -15,9 +16,9 @@ struct AlignedBuf([MaybeUninit<u8>; BUF_SIZE]);
 /// State for block memory copy
 pub struct BlockCopier {
     /// Source address
-    src: u64,
+    src: Address,
     /// Destination address
-    dst: u64,
+    dst: Address,
     /// How many buffer sizes to copy?
     n_buffers: usize,
     /// …and what remains after?
@@ -27,7 +28,7 @@ impl BlockCopier {
     /// Construct a new one
     #[inline]
-    pub fn new(src: u64, dst: u64, count: usize) -> Self {
+    pub fn new(src: Address, dst: Address, count: usize) -> Self {
         Self {
             src,
             dst,
@@ -57,17 +58,8 @@
             }
 
             // Bump source and destination address
-            //
-            // If we are over the address space, bail.
-            match self.src.checked_add(BUF_SIZE as u64) {
-                Some(n) => self.src = n,
-                None => return Poll::Ready(Err(BlkCopyError::OutOfBounds)),
-            };
-
-            match self.dst.checked_add(BUF_SIZE as u64) {
-                Some(n) => self.dst = n,
-                None => return Poll::Ready(Err(BlkCopyError::OutOfBounds)),
-            };
+            self.src += BUF_SIZE;
+            self.dst += BUF_SIZE;
 
             self.n_buffers -= 1;
 
@@ -100,15 +92,15 @@
 #[inline]
 unsafe fn act(
     memory: &mut impl Memory,
-    src: u64,
-    dst: u64,
+    src: Address,
+    dst: Address,
     buf: *mut u8,
     count: usize,
 ) -> Result<(), BlkCopyError> {
     // Load to buffer
     memory
         .load(src, buf, count)
-        .map_err(|super::mem::LoadError(addr)| BlkCopyError::Access {
+        .map_err(|super::mem::LoadError(addr)| BlkCopyError {
             access_reason: MemoryAccessReason::Load,
             addr,
         })?;
@@ -116,7 +108,7 @@
     // Store from buffer
     memory
         .store(dst, buf, count)
-        .map_err(|super::mem::StoreError(addr)| BlkCopyError::Access {
+        .map_err(|super::mem::StoreError(addr)| BlkCopyError {
             access_reason: MemoryAccessReason::Store,
             addr,
         })?;
@@ -126,30 +118,18 @@
 /// Error occurred when copying a block of memory
 #[derive(Clone, Copy, Debug, PartialEq, Eq)]
-pub enum BlkCopyError {
-    /// Memory access error
-    Access {
-        /// Kind of access
-        access_reason: MemoryAccessReason,
-        /// VM Address
-        addr: u64,
-    },
-    /// Address out of bounds
-    OutOfBounds,
+pub struct BlkCopyError {
+    /// Kind of access
+    access_reason: MemoryAccessReason,
+    /// VM Address
+    addr: Address,
 }
 
 impl From<BlkCopyError> for VmRunError {
     fn from(value: BlkCopyError) -> Self {
-        match value {
-            BlkCopyError::Access {
-                access_reason: MemoryAccessReason::Load,
-                addr,
-            } => Self::LoadAccessEx(addr),
-            BlkCopyError::Access {
-                access_reason: MemoryAccessReason::Store,
-                addr,
-            } => Self::StoreAccessEx(addr),
-            BlkCopyError::OutOfBounds => Self::AddrOutOfBounds,
+        match value.access_reason {
+            MemoryAccessReason::Load => Self::LoadAccessEx(value.addr),
+            MemoryAccessReason::Store => Self::StoreAccessEx(value.addr),
         }
     }
 }
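
Side note on the hunk above: replacing `checked_add` with `Address`'s `+=` trades the explicit `OutOfBounds` error for wrapping arithmetic (see the `wrapping_add` impls in the new addr.rs below), which is why `BlkCopyError` shrinks to a plain access-error struct. A standalone sketch of the behavioural difference, using a plain u64 wrapper as a stand-in for `Address` — illustration only, not part of the patch:

#[derive(Clone, Copy, PartialEq, Debug)]
struct Addr(u64); // stand-in for hbvm's Address

impl core::ops::AddAssign<u64> for Addr {
    fn add_assign(&mut self, rhs: u64) {
        // New policy: wrap around on overflow instead of reporting OutOfBounds
        self.0 = self.0.wrapping_add(rhs);
    }
}

fn main() {
    let mut a = Addr(u64::MAX);
    // Old code: checked_add(1) == None => Poll::Ready(Err(BlkCopyError::OutOfBounds))
    a += 1; // New code: silently wraps
    assert_eq!(a, Addr(0));
}
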
diff --git a/hbvm/src/lib.rs b/hbvm/src/lib.rs
index b0ee143..9651262 100644
--- a/hbvm/src/lib.rs
+++ b/hbvm/src/lib.rs
@@ -12,9 +12,9 @@
 #![no_std]
 #![cfg_attr(feature = "nightly", feature(fn_align))]
-#![warn(missing_docs, clippy::missing_docs_in_private_items)]
+#![warn(missing_docs)]
 
-use mem::Memory;
+use mem::{Memory, Address};
 
 #[cfg(feature = "alloc")]
 extern crate alloc;
@@ -39,7 +39,7 @@ pub struct Vm {
     pub memory: Mem,
 
     /// Program counter
-    pub pc: usize,
+    pub pc: Address,
 
     /// Program timer
     timer: usize,
@@ -56,11 +56,11 @@ where
     ///
    /// # Safety
     /// Program code has to be validated
-    pub unsafe fn new(memory: Mem, entry: u64) -> Self {
+    pub unsafe fn new(memory: Mem, entry: Address) -> Self {
         Self {
             registers: [Value::from(0_u64); 256],
             memory,
-            pc: entry as _,
+            pc: entry,
             timer: 0,
             copier: None,
         }
@@ -75,13 +75,13 @@ pub enum VmRunError {
     InvalidOpcode(u8),
 
     /// Unhandled load access exception
-    LoadAccessEx(u64),
+    LoadAccessEx(Address),
 
     /// Unhandled instruction load access exception
-    ProgramFetchLoadEx(u64),
+    ProgramFetchLoadEx(Address),
 
     /// Unhandled store access exception
-    StoreAccessEx(u64),
+    StoreAccessEx(Address),
 
     /// Register out-of-bounds access
     RegOutOfBounds,
diff --git a/hbvm/src/main.rs b/hbvm/src/main.rs
index b28173f..d23e994 100644
--- a/hbvm/src/main.rs
+++ b/hbvm/src/main.rs
@@ -1,3 +1,5 @@
+use hbvm::mem::Address;
+
 use {
     hbbytecode::valider::validate,
     hbvm::{
@@ -26,7 +28,7 @@ fn main() -> Result<(), Box> {
                 root_pt: Box::into_raw(Default::default()),
                 icache: Default::default(),
             },
-            4,
+            Address::new(4),
         );
 
         let data = {
             let ptr = std::alloc::alloc_zeroed(std::alloc::Layout::from_size_align_unchecked(
@@ -41,7 +43,7 @@ fn main() -> Result<(), Box> {
         vm.memory
             .map(
                 data,
-                8192,
+                Address::new(8192),
                 hbvm::mem::softpaging::paging::Permission::Write,
                 PageSize::Size4K,
             )
@@ -54,7 +56,7 @@ fn main() -> Result<(), Box> {
             data,
             std::alloc::Layout::from_size_align_unchecked(4096, 4096),
         );
-        vm.memory.unmap(8192).unwrap();
+        vm.memory.unmap(Address::new(8192)).unwrap();
         let _ = Box::from_raw(vm.memory.root_pt);
     }
 }
@@ -72,7 +74,7 @@ impl HandlePageFault for TestTrapHandler {
         &mut self,
         _: MemoryAccessReason,
         _: &mut PageTable,
-        _: u64,
+        _: Address,
         _: PageSize,
         _: *mut u8,
     ) -> bool {
diff --git a/hbvm/src/mem/addr.rs b/hbvm/src/mem/addr.rs
new file mode 100644
index 0000000..cd0bbd3
--- /dev/null
+++ b/hbvm/src/mem/addr.rs
@@ -0,0 +1,108 @@
+//! Virtual(?) memory address
+
+use {
+    core::{fmt::Debug, ops},
+    derive_more::Display,
+};
+
+/// Memory address
+#[derive(Clone, Copy, Display, PartialEq, Eq, PartialOrd, Ord)]
+#[display(fmt = "{_0:x}")]
+pub struct Address(u64);
+impl Address {
+    /// A null address
+    pub const NULL: Self = Self(0);
+
+    /// Saturating integer addition. Computes self + rhs, saturating at the numeric bounds instead of overflowing.
+    #[inline]
+    pub fn saturating_add<T: AddressOp>(self, rhs: T) -> Self {
+        Self(self.0.saturating_add(rhs.cast_u64()))
+    }
+
+    /// Saturating integer subtraction. Computes self - rhs, saturating at the numeric bounds instead of overflowing.
+    #[inline]
+    pub fn saturating_sub<T: AddressOp>(self, rhs: T) -> Self {
+        Self(self.0.saturating_sub(rhs.cast_u64()))
+    }
+
+    /// Cast or if smaller, truncate to [`usize`]
+    pub fn truncate_usize(self) -> usize {
+        self.0 as _
+    }
+
+    /// Get inner value
+    #[inline(always)]
+    pub fn get(self) -> u64 {
+        self.0
+    }
+
+    /// Construct new address
+    #[inline(always)]
+    pub fn new(val: u64) -> Self {
+        Self(val)
+    }
+
+    /// Do something with inner value
+    #[inline(always)]
+    pub fn map(self, f: impl Fn(u64) -> u64) -> Self {
+        Self(f(self.0))
+    }
+}
+
+impl<T: AddressOp> ops::Add<T> for Address {
+    type Output = Self;
+
+    #[inline]
+    fn add(self, rhs: T) -> Self::Output {
+        Self(self.0.wrapping_add(rhs.cast_u64()))
+    }
+}
+
+impl<T: AddressOp> ops::Sub<T> for Address {
+    type Output = Self;
+
+    #[inline]
+    fn sub(self, rhs: T) -> Self::Output {
+        Self(self.0.wrapping_sub(rhs.cast_u64()))
+    }
+}
+
+impl<T: AddressOp> ops::AddAssign<T> for Address {
+    fn add_assign(&mut self, rhs: T) {
+        self.0 = self.0.wrapping_add(rhs.cast_u64())
+    }
+}
+
+impl<T: AddressOp> ops::SubAssign<T> for Address {
+    fn sub_assign(&mut self, rhs: T) {
+        self.0 = self.0.wrapping_sub(rhs.cast_u64())
+    }
+}
+
+impl From<Address> for u64 {
+    #[inline(always)]
+    fn from(value: Address) -> Self {
+        value.0
+    }
+}
+
+impl Debug for Address {
+    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+        write!(f, "[{:0x}]", self.0)
+    }
+}
+
+/// Can perform address operations with
+pub trait AddressOp {
+    /// Cast to u64, truncating or extending
+    fn cast_u64(self) -> u64;
+}
+
+macro_rules! impl_address_ops(($($ty:ty),* $(,)?) => {
+    $(impl AddressOp for $ty {
+        #[inline(always)]
+        fn cast_u64(self) -> u64 { self as _ }
+    })*
+});
+
+impl_address_ops!(u8, u16, u32, u64, usize);
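
For orientation, a quick tour of the `Address` API defined above — a standalone sketch assuming the `hbvm::mem::Address` re-export introduced in mem/mod.rs below:

use hbvm::mem::Address;

fn main() {
    let a = Address::new(0x1000);
    assert_eq!(a.get(), 0x1000);
    assert_eq!(u64::from(a + 0x234_u64), 0x1234); // Add/Sub wrap on overflow
    assert_eq!(a.saturating_sub(0x2000_u64), Address::NULL); // saturating variants clamp
    assert_eq!(a.map(|x| x | 0xFFF).get(), 0x1FFF); // transform the inner value
    assert_eq!(a.truncate_usize(), 0x1000_usize); // cast (or truncate) to usize
}
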
diff --git a/hbvm/src/mem/mod.rs b/hbvm/src/mem/mod.rs
index 318f8e3..0d9522b 100644
--- a/hbvm/src/mem/mod.rs
+++ b/hbvm/src/mem/mod.rs
@@ -1,16 +1,24 @@
 //! Memory implementations
 
-use {derive_more::Display, hbbytecode::ProgramVal};
-
 pub mod softpaging;
 
+mod addr;
+
+pub use addr::Address;
+use {derive_more::Display, hbbytecode::ProgramVal};
+
 /// Load-store memory access
 pub trait Memory {
     /// Load data from memory on address
     ///
     /// # Safety
     /// - Shall not overrun the buffer
-    unsafe fn load(&mut self, addr: u64, target: *mut u8, count: usize) -> Result<(), LoadError>;
+    unsafe fn load(
+        &mut self,
+        addr: Address,
+        target: *mut u8,
+        count: usize,
+    ) -> Result<(), LoadError>;
 
     /// Store data to memory on address
     ///
@@ -18,7 +26,7 @@ pub trait Memory {
     /// - Shall not overrun the buffer
     unsafe fn store(
         &mut self,
-        addr: u64,
+        addr: Address,
         source: *const u8,
         count: usize,
     ) -> Result<(), StoreError>;
@@ -27,24 +35,24 @@
     ///
     /// # Safety
     /// - Data read has to be valid
-    unsafe fn prog_read<T: ProgramVal>(&mut self, addr: u64) -> Option<T>;
+    unsafe fn prog_read<T: ProgramVal>(&mut self, addr: Address) -> Option<T>;
 
     /// Read from program memory to execute
     ///
     /// # Safety
     /// - You have to be really sure that these bytes are there, understand?
-    unsafe fn prog_read_unchecked<T: ProgramVal>(&mut self, addr: u64) -> T;
+    unsafe fn prog_read_unchecked<T: ProgramVal>(&mut self, addr: Address) -> T;
 }
 
 /// Unhandled load access trap
 #[derive(Clone, Copy, Display, Debug, PartialEq, Eq)]
-#[display(fmt = "Load access error at address {_0:#x}")]
-pub struct LoadError(pub u64);
+#[display(fmt = "Load access error at address {_0}")]
+pub struct LoadError(pub Address);
 
 /// Unhandled store access trap
 #[derive(Clone, Copy, Display, Debug, PartialEq, Eq)]
-#[display(fmt = "Store access error at address {_0:#x}")]
-pub struct StoreError(pub u64);
+#[display(fmt = "Store access error at address {_0}")]
+pub struct StoreError(pub Address);
 
 /// Reason to access memory
 #[derive(Clone, Copy, Display, Debug, PartialEq, Eq)]
diff --git a/hbvm/src/mem/softpaging/icache.rs b/hbvm/src/mem/softpaging/icache.rs
index 049d26b..963970a 100644
--- a/hbvm/src/mem/softpaging/icache.rs
+++ b/hbvm/src/mem/softpaging/icache.rs
@@ -1,5 +1,7 @@
 //! Program instruction cache
 
+use crate::mem::Address;
+
 use {
     super::{lookup::AddrPageLookuper, paging::PageTable, PageSize},
     core::{
@@ -12,7 +14,7 @@ use {
 #[derive(Clone, Debug)]
 pub struct ICache {
     /// Current page address base
-    base: u64,
+    base: Address,
     /// Current page pointer
     data: Option<NonNull<u8>>,
     /// Current page size
     size: PageSize,
@@ -24,7 +26,7 @@
 impl Default for ICache {
     fn default() -> Self {
         Self {
-            base: Default::default(),
+            base: Address::NULL,
             data: Default::default(),
             size: PageSize::Size4K,
             mask: Default::default(),
@@ -37,22 +39,26 @@ impl ICache {
     ///
     /// # Safety
     /// `T` should be valid to read from instruction memory
-    pub(super) unsafe fn fetch<T>(&mut self, addr: u64, root_pt: *const PageTable) -> Option<T> {
+    pub(super) unsafe fn fetch<T>(
+        &mut self,
+        addr: Address,
+        root_pt: *const PageTable,
+    ) -> Option<T> {
         let mut ret = MaybeUninit::<T>::uninit();
 
         let pbase = self
             .data
-            .or_else(|| self.fetch_page(self.base.checked_add(self.size as _)?, root_pt))?;
+            .or_else(|| self.fetch_page(self.base + self.size, root_pt))?;
 
         // Get address base
-        let base = addr & self.mask;
+        let base = addr.map(|x| x & self.mask);
 
         // Base not matching, fetch anew
         if base != self.base {
             self.fetch_page(base, root_pt)?;
         };
 
-        let offset = addr & !self.mask;
+        let offset = addr.get() & !self.mask;
         let requ_size = size_of::<T>();
 
         // Page overflow
@@ -66,7 +72,7 @@ impl ICache {
 
         // Copy overflow
         if rem != 0 {
-            let pbase = self.fetch_page(self.base.checked_add(self.size as _)?, root_pt)?;
+            let pbase = self.fetch_page(self.base + self.size, root_pt)?;
 
             // Unlikely, unsupported scenario
             if rem > self.size as _ {
@@ -84,7 +90,7 @@ impl ICache {
     }
 
     /// Fetch a page
-    unsafe fn fetch_page(&mut self, addr: u64, pt: *const PageTable) -> Option<NonNull<u8>> {
+    unsafe fn fetch_page(&mut self, addr: Address, pt: *const PageTable) -> Option<NonNull<u8>> {
         let res = AddrPageLookuper::new(addr, 0, pt).next()?.ok()?;
         if !super::perm_check::executable(res.perm) {
             return None;
@@ -97,7 +103,7 @@ impl ICache {
             _ => return None,
         };
         self.data = Some(NonNull::new(res.ptr)?);
-        self.base = addr & self.mask;
+        self.base = addr.map(|x| x & self.mask);
         self.data
     }
 }
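
The arithmetic in `fetch` above splits an address against the cached page mask: `addr & mask` yields the page base (compared against `ICache::base`), and `addr & !mask` the offset into the page. A worked example in plain u64, assuming a 4 KiB page so that the mask is `!(4096 - 1)`:

fn main() {
    let mask: u64 = !(4096 - 1); // 4 KiB page mask: 0xFFFF_FFFF_FFFF_F000
    let addr: u64 = 0x1_2ABC;
    assert_eq!(addr & mask, 0x1_2000); // page base, kept in ICache::base
    assert_eq!(addr & !mask, 0xABC); // byte offset of the fetch within the page
}
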
diff --git a/hbvm/src/mem/softpaging/lookup.rs b/hbvm/src/mem/softpaging/lookup.rs
index 1e9c3e3..04b17d2 100644
--- a/hbvm/src/mem/softpaging/lookup.rs
+++ b/hbvm/src/mem/softpaging/lookup.rs
@@ -1,5 +1,7 @@
 //! Address lookup
 
+use crate::mem::addr::Address;
+
 use super::{
     addr_extract_index,
     paging::{PageTable, Permission},
@@ -9,7 +11,7 @@ use super::{
 /// Good result from address split
 pub struct AddrPageLookupOk {
     /// Virtual address
-    pub vaddr: u64,
+    pub vaddr: Address,
 
     /// Pointer to the start for performing the operation
     pub ptr: *mut u8,
@@ -24,7 +26,7 @@ pub struct AddrPageLookupOk {
 /// Erroneous address split result
 pub struct AddrPageLookupError {
     /// Address of failure
-    pub addr: u64,
+    pub addr: Address,
 
     /// Requested page size
     pub size: PageSize,
@@ -33,7 +35,7 @@ pub struct AddrPageLookupError {
 /// Address splitter into pages
 pub struct AddrPageLookuper {
     /// Current address
-    addr: u64,
+    addr: Address,
 
     /// Size left
     size: usize,
@@ -45,7 +47,7 @@ impl AddrPageLookuper {
     /// Create a new page lookuper
     #[inline]
-    pub const fn new(addr: u64, size: usize, pagetable: *const PageTable) -> Self {
+    pub const fn new(addr: Address, size: usize, pagetable: *const PageTable) -> Self {
         Self {
             addr,
             size,
@@ -55,7 +57,7 @@ impl AddrPageLookuper {
 
     /// Bump address by size X
     pub fn bump(&mut self, page_size: PageSize) {
-        self.addr += page_size as u64;
+        self.addr += page_size;
         self.size = self.size.saturating_sub(page_size as _);
     }
 }
diff --git a/hbvm/src/mem/softpaging/mapping.rs b/hbvm/src/mem/softpaging/mapping.rs
index 8f406a4..e07c0dd 100644
--- a/hbvm/src/mem/softpaging/mapping.rs
+++ b/hbvm/src/mem/softpaging/mapping.rs
@@ -1,5 +1,7 @@
 //! Automatic memory mapping
 
+use crate::mem::addr::Address;
+
 use {
     super::{
         addr_extract_index,
@@ -10,7 +12,7 @@ use {
     derive_more::Display,
 };
 
-impl<'p, A, const OUT_PROG_EXEC: bool> SoftPagedMem<'p, A, OUT_PROG_EXEC> {
+impl<'p, A, const OUT_PROG_EXEC: bool> SoftPagedMem<'p, A, OUT_PROG_EXEC> {
     /// Maps host's memory into VM's memory
     ///
     /// # Safety
     ///
@@ -20,7 +22,7 @@ impl<'p, A, const OUT_PROG_EXEC: bool> SoftPagedMem<'p, A, OUT_PROG_EXEC> {
     pub unsafe fn map(
         &mut self,
         host: *mut u8,
-        target: u64,
+        target: Address,
         perm: Permission,
         pagesize: PageSize,
     ) -> Result<(), MapError> {
@@ -82,7 +84,7 @@ impl<'p, A, const OUT_PROG_EXEC: bool> SoftPagedMem<'p, A, OUT_PROG_EXEC> {
     ///
     /// If errors, it only means there is no entry to unmap and in most cases
     /// just should be ignored.
-    pub fn unmap(&mut self, addr: u64) -> Result<(), NothingToUnmap> {
+    pub fn unmap(&mut self, addr: Address) -> Result<(), NothingToUnmap> {
         let mut current_pt = self.root_pt;
         let mut page_tables = [core::ptr::null_mut(); 5];
diff --git a/hbvm/src/mem/softpaging/mod.rs b/hbvm/src/mem/softpaging/mod.rs
index bec6d76..331ad4d 100644
--- a/hbvm/src/mem/softpaging/mod.rs
+++ b/hbvm/src/mem/softpaging/mod.rs
@@ -1,9 +1,5 @@
 //! Platform independent, software paged memory implementation
 
-use core::mem::size_of;
-
-use self::icache::ICache;
-
 pub mod icache;
 pub mod lookup;
 pub mod paging;
@@ -12,7 +8,9 @@ pub mod paging;
 pub mod mapping;
 
 use {
-    super::{LoadError, Memory, MemoryAccessReason, StoreError},
+    super::{addr::Address, LoadError, Memory, MemoryAccessReason, StoreError},
+    core::mem::size_of,
+    icache::ICache,
     lookup::{AddrPageLookupError, AddrPageLookupOk, AddrPageLookuper},
     paging::{PageTable, Permission},
 };
@@ -41,7 +39,12 @@ impl<'p, PfH: HandlePageFault, const OUT_PROG_EXEC: bool> Memory
     ///
     /// # Safety
     /// Applies same conditions as for [`core::ptr::copy_nonoverlapping`]
-    unsafe fn load(&mut self, addr: u64, target: *mut u8, count: usize) -> Result<(), LoadError> {
+    unsafe fn load(
+        &mut self,
+        addr: Address,
+        target: *mut u8,
+        count: usize,
+    ) -> Result<(), LoadError> {
         self.memory_access(
             MemoryAccessReason::Load,
             addr,
@@ -59,7 +62,7 @@ impl<'p, PfH: HandlePageFault, const OUT_PROG_EXEC: bool> Memory
     /// Applies same conditions as for [`core::ptr::copy_nonoverlapping`]
     unsafe fn store(
         &mut self,
-        addr: u64,
+        addr: Address,
         source: *const u8,
         count: usize,
     ) -> Result<(), StoreError> {
@@ -75,27 +78,31 @@ impl<'p, PfH: HandlePageFault, const OUT_PROG_EXEC: bool> Memory
     }
 
     #[inline(always)]
-    unsafe fn prog_read<T: ProgramVal>(&mut self, addr: u64) -> Option<T> {
-        if OUT_PROG_EXEC && addr as usize > self.program.len() {
+    unsafe fn prog_read<T: ProgramVal>(&mut self, addr: Address) -> Option<T> {
+        if OUT_PROG_EXEC && addr.truncate_usize() > self.program.len() {
             return self.icache.fetch::<T>(addr, self.root_pt);
         }
 
-        let addr = addr as usize;
+        let addr = addr.truncate_usize();
         self.program
             .get(addr..addr + size_of::<T>())
             .map(|x| x.as_ptr().cast::<T>().read())
     }
 
     #[inline(always)]
-    unsafe fn prog_read_unchecked<T: ProgramVal>(&mut self, addr: u64) -> T {
-        if OUT_PROG_EXEC && addr as usize > self.program.len() {
+    unsafe fn prog_read_unchecked<T: ProgramVal>(&mut self, addr: Address) -> T {
+        if OUT_PROG_EXEC && addr.truncate_usize() > self.program.len() {
             return self
                 .icache
-                .fetch::<T>(addr as _, self.root_pt)
+                .fetch::<T>(addr, self.root_pt)
                 .unwrap_or_else(|| core::mem::zeroed());
         }
 
-        self.program.as_ptr().add(addr as _).cast::<T>().read()
+        self.program
+            .as_ptr()
+            .add(addr.truncate_usize())
+            .cast::<T>()
+            .read()
     }
 }
@@ -110,32 +117,32 @@ impl<'p, PfH: HandlePageFault, const OUT_PROG_EXEC: bool> SoftPagedMem<'p, PfH,
     fn memory_access(
         &mut self,
         reason: MemoryAccessReason,
-        src: u64,
+        src: Address,
         mut dst: *mut u8,
         len: usize,
         permission_check: fn(Permission) -> bool,
         action: fn(*mut u8, *mut u8, usize),
-    ) -> Result<(), u64> {
+    ) -> Result<(), Address> {
         // Memory load from program section
-        let (src, len) = if src < self.program.len() as _ {
+        let (src, len) = if src.truncate_usize() < self.program.len() as _ {
             // Allow only loads
             if reason != MemoryAccessReason::Load {
                 return Err(src);
             }
 
             // Determine how much data to copy from here
-            let to_copy = len.clamp(0, self.program.len().saturating_sub(src as _));
+            let to_copy = len.clamp(0, self.program.len().saturating_sub(src.truncate_usize()));
 
             // Perform action
             action(
-                unsafe { self.program.as_ptr().add(src as _).cast_mut() },
+                unsafe { self.program.as_ptr().add(src.truncate_usize()).cast_mut() },
                 dst,
                 to_copy,
             );
 
             // Return shifted from what we've already copied
             (
-                src.saturating_add(to_copy as _),
+                src.saturating_add(to_copy as u64),
                 len.saturating_sub(to_copy),
             )
         } else {
@@ -196,8 +203,9 @@ impl<'p, PfH: HandlePageFault, const OUT_PROG_EXEC: bool> SoftPagedMem<'p, PfH,
 ///
 /// The level shall not be larger than 4, otherwise
 /// the output of the function is unspecified (yes, it can also panic :)
-pub fn addr_extract_index(addr: u64, lvl: u8) -> usize {
+pub fn addr_extract_index(addr: Address, lvl: u8) -> usize {
     debug_assert!(lvl <= 4);
+    let addr = addr.get();
     usize::try_from((addr >> (lvl * 8 + 12)) & ((1 << 8) - 1)).expect("?conradluget a better CPU")
 }
@@ -226,6 +234,22 @@ impl PageSize {
     }
 }
 
+impl core::ops::Add<PageSize> for Address {
+    type Output = Self;
+
+    #[inline(always)]
+    fn add(self, rhs: PageSize) -> Self::Output {
+        self + (rhs as u64)
+    }
+}
+
+impl core::ops::AddAssign<PageSize> for Address {
+    #[inline(always)]
+    fn add_assign(&mut self, rhs: PageSize) {
+        *self = Self::new(self.get().wrapping_add(rhs as u64));
+    }
+}
+
 /// Permission checks
 pub mod perm_check {
     use super::paging::Permission;
@@ -263,7 +287,7 @@ pub trait HandlePageFault {
         &mut self,
         reason: MemoryAccessReason,
         pagetable: &mut PageTable,
-        vaddr: u64,
+        vaddr: Address,
         size: PageSize,
         dataptr: *mut u8,
     ) -> bool
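
To make the index extraction above concrete, here is a plain-u64 restatement of `addr_extract_index` with a worked example; each level selects the next-higher 8-bit slice above the 12-bit page offset:

fn addr_extract_index(addr: u64, lvl: u8) -> usize {
    debug_assert!(lvl <= 4);
    ((addr >> (lvl * 8 + 12)) & ((1 << 8) - 1)) as usize
}

fn main() {
    let addr = 0x0000_0012_3456_7ABC_u64;
    assert_eq!(addr_extract_index(addr, 0), 0x67); // bits 12..20
    assert_eq!(addr_extract_index(addr, 1), 0x45); // bits 20..28
    assert_eq!(addr_extract_index(addr, 2), 0x23); // bits 28..36
}
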
diff --git a/hbvm/src/vmrun.rs b/hbvm/src/vmrun.rs
index c0e78fd..ecbeb12 100644
--- a/hbvm/src/vmrun.rs
+++ b/hbvm/src/vmrun.rs
@@ -2,6 +2,8 @@
 //!
 //! Have fun
 
+use crate::mem::Address;
+
 use {
     super::{
         bmc::BlockCopier,
@@ -202,8 +204,8 @@ where
                     self.pc -= size_of::<ParamBBD>() + 1;
 
                     self.copier = Some(BlockCopier::new(
-                        self.read_reg(src).cast(),
-                        self.read_reg(dst).cast(),
+                        Address::new(self.read_reg(src).cast()),
+                        Address::new(self.read_reg(dst).cast()),
                         count as _,
                     ));
@@ -244,16 +246,16 @@
                     // Jump and link. Save PC after this instruction to
                     // specified register and jump to reg + offset.
                     let ParamBBD(save, reg, offset) = self.decode();
-                    self.write_reg(save, self.pc as u64);
+                    self.write_reg(save, self.pc.get());
                     self.pc =
-                        (self.read_reg(reg).cast::<u64>().saturating_add(offset)) as usize;
+                        Address::new(self.read_reg(reg).cast::<u64>().saturating_add(offset));
                 }
                 // Conditional jumps, jump only to immediates
                 JEQ => self.cond_jmp::<u64>(Ordering::Equal),
                 JNE => {
                     let ParamBBD(a0, a1, jt) = self.decode();
                     if self.read_reg(a0).cast::<u64>() != self.read_reg(a1).cast::<u64>() {
-                        self.pc = jt as usize;
+                        self.pc = Address::new(jt);
                     }
                 }
                 JLT => self.cond_jmp::<i64>(Ordering::Less),
@@ -317,7 +319,7 @@
     /// Decode instruction operands
     #[inline(always)]
     unsafe fn decode<T: ProgramVal>(&mut self) -> T {
-        let pc1 = self.pc + 1;
+        let pc1 = self.pc + 1_u64;
        let data = self.memory.prog_read_unchecked::<T>(pc1 as _);
         self.pc += 1 + size_of::<T>();
         data
     }
@@ -360,7 +362,7 @@
                         .cmp(&self.read_reg(a1).cast::<T>())
                         == expected
                     {
-                        self.pc = ja as usize;
+                        self.pc = Address::new(ja);
                     }
                 }
@@ -388,7 +390,7 @@
         offset: u64,
         size: u16,
         adder: u8,
-    ) -> Result<u64, VmRunError> {
+    ) -> Result<Address, VmRunError> {
         let reg = dst.checked_add(adder).ok_or(VmRunError::RegOutOfBounds)?;
 
         if usize::from(reg) * 8 + usize::from(size) > 2048 {
@@ -399,6 +401,7 @@
             .checked_add(offset)
             .and_then(|x| x.checked_add(adder.into()))
             .ok_or(VmRunError::AddrOutOfBounds)
+            .map(Address::new)
     }
 }
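
Taken together, every VM-visible address now crosses the API boundary as an `Address`. A condensed usage sketch in the spirit of main.rs above; `NopHandler` is hypothetical, and the `SoftPagedMem`/`Vm` type parameters (lifetimes, const generics, trap-handler impl) are elided here, so treat this as an outline rather than compile-ready code:

use hbvm::{
    mem::{
        softpaging::{paging::Permission, PageSize, SoftPagedMem},
        Address,
    },
    Vm,
};

struct NopHandler; // hypothetical page-fault handler (HandlePageFault impl elided)

unsafe fn boot(mem: SoftPagedMem<NopHandler>, page: *mut u8) {
    let mut vm = Vm::new(mem, Address::new(4)); // entry PC is an Address now
    vm.memory
        .map(page, Address::new(8192), Permission::Write, PageSize::Size4K)
        .unwrap();
    let _ = vm.run();
    vm.memory.unmap(Address::new(8192)).unwrap();
}
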