From 66ef81d8a005a8b4ad0238d58bb2c2ed6c44a162 Mon Sep 17 00:00:00 2001
From: Erin
Date: Wed, 26 Jul 2023 02:04:26 +0200
Subject: [PATCH] BMC is now interruptible

---
 hbvm/fuzz/fuzz_targets/vm.rs |   2 +-
 hbvm/src/lib.rs              |  55 ++++++++++---
 hbvm/src/mem/bmc.rs          | 152 +++++++++++++++++++++++++++++++++++
 hbvm/src/mem/mod.rs          | 107 +----------------------
 rustfmt.toml                 |   4 +-
 5 files changed, 199 insertions(+), 121 deletions(-)
 create mode 100644 hbvm/src/mem/bmc.rs

diff --git a/hbvm/fuzz/fuzz_targets/vm.rs b/hbvm/fuzz/fuzz_targets/vm.rs
index 72e6da5..82df873 100644
--- a/hbvm/fuzz/fuzz_targets/vm.rs
+++ b/hbvm/fuzz/fuzz_targets/vm.rs
@@ -9,7 +9,7 @@ use {
 };
 
 fuzz_target!(|data: &[u8]| {
-    if let Ok(mut vm) = Vm::<_, 0>::new_validated(data, TestTrapHandler, Default::default()) {
+    if let Ok(mut vm) = Vm::<_, 100>::new_validated(data, TestTrapHandler, Default::default()) {
         let _ = vm.run();
     }
 });
diff --git a/hbvm/src/lib.rs b/hbvm/src/lib.rs
index 4fdf7a0..c2c28a4 100644
--- a/hbvm/src/lib.rs
+++ b/hbvm/src/lib.rs
@@ -19,13 +19,12 @@ pub mod mem;
 pub mod value;
 
 use {
-    self::{mem::HandlePageFault, value::ValueVariant},
-    core::{cmp::Ordering, ops},
+    core::{cmp::Ordering, mem::size_of, ops},
     hbbytecode::{
         valider, OpParam, ParamBB, ParamBBB, ParamBBBB, ParamBBD, ParamBBDH, ParamBBW, ParamBD,
     },
-    mem::Memory,
-    value::Value,
+    mem::{bmc::BlockCopier, HandlePageFault, Memory},
+    value::{Value, ValueVariant},
 };
 
 /// HoleyBytes Virtual Machine
@@ -53,6 +52,9 @@ pub struct Vm<'a, PfHandler, const TIMER_QUOTIENT: usize> {
 
     /// Program timer
     timer: usize,
+
+    /// Saved block copier
+    copier: Option<BlockCopier>,
 }
 
 impl<'a, PfHandler: HandlePageFault, const TIMER_QUOTIENT: usize>
@@ -71,6 +73,7 @@ impl<'a, PfHandler: HandlePageFault, const TIMER_QUOTIENT: usize>
             program_len: program.len() - 12,
             program,
             timer: 0,
+            copier: None,
         }
     }
 
@@ -255,13 +258,41 @@ impl<'a, PfHandler: HandlePageFault, const TIMER_QUOTIENT: usize>
                 }
                 BMC => {
                     // Block memory copy
-                    let ParamBBD(src, dst, count) = self.decode();
-                    self.memory.block_copy(
-                        self.read_reg(src).cast::<u64>(),
-                        self.read_reg(dst).cast::<u64>(),
-                        count as _,
-                        &mut self.pfhandler,
-                    )?;
+                    match if let Some(copier) = &mut self.copier {
+                        // There is some copier, poll.
+                        copier.poll(&mut self.memory, &mut self.pfhandler)
+                    } else {
+                        // There is none, make one!
+                        let ParamBBD(src, dst, count) = self.decode();
+
+                        // So we are still on BMC on next cycle
+                        self.pc -= size_of::<ParamBBD>() + 1;
+
+                        self.copier = Some(BlockCopier::new(
+                            self.read_reg(src).cast(),
+                            self.read_reg(dst).cast(),
+                            count as _,
+                        ));
+
+                        self.copier
+                            .as_mut()
+                            .unwrap_unchecked() // SAFETY: We just assigned there
+                            .poll(&mut self.memory, &mut self.pfhandler)
+                    } {
+                        // We are done, shift program counter
+                        core::task::Poll::Ready(Ok(())) => {
+                            self.copier = None;
+                            self.pc += size_of::<ParamBBD>() + 1;
+                        }
+                        // Error, shift program counter (for consistency)
+                        // and yield error
+                        core::task::Poll::Ready(Err(e)) => {
+                            self.pc += size_of::<ParamBBD>() + 1;
+                            return Err(e.into());
+                        }
+                        // Not done yet, proceed to next cycle
+                        core::task::Poll::Pending => (),
+                    }
                 }
                 BRC => {
                     // Block register copy
@@ -353,7 +384,7 @@ impl<'a, PfHandler: HandlePageFault, const TIMER_QUOTIENT: usize>
     #[inline]
     unsafe fn decode<T: OpParam>(&mut self) -> T {
         let data = self.program.as_ptr().add(self.pc + 1).cast::<T>().read();
-        self.pc += 1 + core::mem::size_of::<T>();
+        self.pc += 1 + size_of::<T>();
         data
     }
 
diff --git a/hbvm/src/mem/bmc.rs b/hbvm/src/mem/bmc.rs
new file mode 100644
index 0000000..b561b90
--- /dev/null
+++ b/hbvm/src/mem/bmc.rs
@@ -0,0 +1,152 @@
+use {
+    super::MemoryAccessReason,
+    crate::{
+        mem::{perm_check, HandlePageFault, Memory},
+        VmRunError,
+    },
+    core::{mem::MaybeUninit, task::Poll},
+};
+
+// Buffer size (defaults to 4 KiB, the smallest page size on most platforms)
+const BUF_SIZE: usize = 4096;
+
+// This should be equal to `BUF_SIZE`
+#[repr(align(4096))]
+struct AlignedBuf([MaybeUninit<u8>; BUF_SIZE]);
+
+pub struct BlockCopier {
+    /// Source address
+    src: u64,
+    /// Destination address
+    dst: u64,
+    /// How many buffer sizes to copy?
+    n_buffers: usize,
+    /// …and what remains after?
+    rem: usize,
+}
+
+impl BlockCopier {
+    pub fn new(src: u64, dst: u64, count: usize) -> Self {
+        Self {
+            src,
+            dst,
+            n_buffers: count / BUF_SIZE,
+            rem: count % BUF_SIZE,
+        }
+    }
+
+    /// Copy one block
+    ///
+    /// # Safety
+    /// - Same as for [`Memory::load`] and [`Memory::store`]
+    pub unsafe fn poll(
+        &mut self,
+        memory: &mut Memory,
+        traph: &mut impl HandlePageFault,
+    ) -> Poll<Result<(), BlkCopyError>> {
+        // Safety: Assuming uninit of array of MaybeUninit is sound
+        let mut buf = AlignedBuf(MaybeUninit::uninit().assume_init());
+
+        if self.n_buffers != 0 {
+            if let Err(e) = act(
+                memory,
+                self.src,
+                self.dst,
+                buf.0.as_mut_ptr().cast(),
+                BUF_SIZE,
+                traph,
+            ) {
+                return Poll::Ready(Err(e));
+            }
+
+            self.src += BUF_SIZE as u64;
+            self.dst += BUF_SIZE as u64;
+            self.n_buffers -= 1;
+
+            return if self.n_buffers + self.rem == 0 {
+                // If there is nothing left, we are done
+                Poll::Ready(Ok(()))
+            } else {
+                // Otherwise, advise the caller to run it again
+                Poll::Pending
+            };
+        }
+
+        if self.rem != 0 {
+            if let Err(e) = act(
+                memory,
+                self.src,
+                self.dst,
+                buf.0.as_mut_ptr().cast(),
+                self.rem,
+                traph,
+            ) {
+                return Poll::Ready(Err(e));
+            }
+        }
+
+        Poll::Ready(Ok(()))
+    }
+}
+
+#[inline]
+unsafe fn act(
+    memory: &mut Memory,
+    src: u64,
+    dst: u64,
+    buf: *mut u8,
+    count: usize,
+    traph: &mut impl HandlePageFault,
+) -> Result<(), BlkCopyError> {
+    // Load to buffer
+    memory
+        .memory_access(
+            MemoryAccessReason::Load,
+            src,
+            buf,
+            count,
+            perm_check::readable,
+            |src, dst, count| core::ptr::copy(src, dst, count),
+            traph,
+        )
+        .map_err(|addr| BlkCopyError {
+            access_reason: MemoryAccessReason::Load,
+            addr,
+        })?;
+
+    // Store from buffer
+    memory
+        .memory_access(
+            MemoryAccessReason::Store,
+            dst,
+            buf,
+            count,
+            perm_check::writable,
+            |dst, src, count| core::ptr::copy(src, dst, count),
+            traph,
+        )
+        .map_err(|addr| BlkCopyError {
+            access_reason: MemoryAccessReason::Store,
+            addr,
+        })?;
+
+    Ok(())
+}
+
+/// Error occurred when copying a block of memory
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub struct BlkCopyError {
+    /// Kind of access
+    access_reason: MemoryAccessReason,
+    /// VM Address
+    addr: u64,
+}
+
+impl From<BlkCopyError> for VmRunError {
+    fn from(value: BlkCopyError) -> Self {
+        match value.access_reason {
+            MemoryAccessReason::Load => Self::LoadAccessEx(value.addr),
+            MemoryAccessReason::Store => Self::StoreAccessEx(value.addr),
+        }
+    }
+}
diff --git a/hbvm/src/mem/mod.rs b/hbvm/src/mem/mod.rs
index acc190d..493de63 100644
--- a/hbvm/src/mem/mod.rs
+++ b/hbvm/src/mem/mod.rs
@@ -1,6 +1,7 @@
 //! Program memory implementation
 
 pub mod paging;
+pub mod bmc;
 
 mod pfhandler;
 
@@ -8,7 +9,6 @@ pub use pfhandler::HandlePageFault;
 
 use {
     super::VmRunError,
-    core::mem::MaybeUninit,
     derive_more::Display,
     paging::{PageTable, Permission},
 };
@@ -215,93 +215,6 @@ impl Memory {
             .map_err(StoreError)
     }
 
-    /// Copy a block of memory
-    ///
-    /// # Safety
-    /// - Same as for [`Self::load`] and [`Self::store`]
-    /// - This function has been rewritten and is now pretty much boring
-    pub unsafe fn block_copy(
-        &mut self,
-        mut src: u64,
-        mut dst: u64,
-        count: usize,
-        traph: &mut impl HandlePageFault,
-    ) -> Result<(), BlkCopyError> {
-        // Yea, i know it is possible to do this more efficiently, but I am too lazy.
-
-        impl Memory {
-            #[inline]
-            unsafe fn act(
-                &mut self,
-                src: u64,
-                dst: u64,
-                buf: *mut u8,
-                count: usize,
-                traph: &mut impl HandlePageFault,
-            ) -> Result<(), BlkCopyError> {
-                // Load to buffer
-                self.memory_access(
-                    MemoryAccessReason::Load,
-                    src,
-                    buf,
-                    count,
-                    perm_check::readable,
-                    |src, dst, count| core::ptr::copy(src, dst, count),
-                    traph,
-                )
-                .map_err(|addr| BlkCopyError {
-                    access_reason: MemoryAccessReason::Load,
-                    addr,
-                })?;
-
-                // Store from buffer
-                self.memory_access(
-                    MemoryAccessReason::Store,
-                    dst,
-                    buf,
-                    count,
-                    perm_check::writable,
-                    |dst, src, count| core::ptr::copy(src, dst, count),
-                    traph,
-                )
-                .map_err(|addr| BlkCopyError {
-                    access_reason: MemoryAccessReason::Store,
-                    addr,
-                })?;
-
-                Ok(())
-            }
-        }
-
-        // Buffer size (defaults to 4 KiB, a smallest page size on most platforms)
-        const BUF_SIZE: usize = 4096;
-
-        // This should be equal to `BUF_SIZE`
-        #[repr(align(4096))]
-        struct AlignedBuf([MaybeUninit<u8>; BUF_SIZE]);
-
-        // Safety: Assuming uninit of array of MaybeUninit is sound
-        let mut buf = AlignedBuf(MaybeUninit::uninit().assume_init());
-
-        // Calculate how many times we need to copy buffer-sized blocks if any and the rest.
-        let n_buffers = count / BUF_SIZE;
-        let rem = count % BUF_SIZE;
-
-        // Copy buffer-sized blocks
-        for _ in 0..n_buffers {
-            self.act(src, dst, buf.0.as_mut_ptr().cast(), BUF_SIZE, traph)?;
-            src += BUF_SIZE as u64;
-            dst += BUF_SIZE as u64;
-        }
-
-        // Copy the rest (if any)
-        if rem != 0 {
-            self.act(src, dst, buf.0.as_mut_ptr().cast(), rem, traph)?;
-        }
-
-        Ok(())
-    }
-
     // Everyone behold, the holy function, the god of HBVM memory accesses!
 
     /// Split address to pages, check their permissions and feed pointers with offset
@@ -534,24 +447,6 @@ pub enum MemoryAccessReason {
     Store,
 }
 
-/// Error occured when copying a block of memory
-#[derive(Clone, Copy, Debug, PartialEq, Eq)]
-pub struct BlkCopyError {
-    /// Kind of access
-    access_reason: MemoryAccessReason,
-    /// VM Address
-    addr: u64,
-}
-
-impl From<BlkCopyError> for VmRunError {
-    fn from(value: BlkCopyError) -> Self {
-        match value.access_reason {
-            MemoryAccessReason::Load => Self::LoadAccessEx(value.addr),
-            MemoryAccessReason::Store => Self::StoreAccessEx(value.addr),
-        }
-    }
-}
-
 impl From<LoadError> for VmRunError {
     fn from(value: LoadError) -> Self {
         Self::LoadAccessEx(value.0)
diff --git a/rustfmt.toml b/rustfmt.toml
index 90a36c7..907ba34 100644
--- a/rustfmt.toml
+++ b/rustfmt.toml
@@ -1,4 +1,4 @@
 hex_literal_case = "Upper"
 imports_granularity = "One"
-struct_field_align_threshold = 5
-enum_discrim_align_threshold = 5
\ No newline at end of file
+struct_field_align_threshold = 8
+enum_discrim_align_threshold = 8
\ No newline at end of file
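
Below is a minimal sketch, not part of the patch itself, of how the poll-based BlockCopier added in hbvm/src/mem/bmc.rs can be driven outside the VM: each poll call copies at most one BUF_SIZE (4 KiB) block and returns Poll::Pending until the whole range is done, which is what makes BMC interruptible. It assumes the crate is used as the `hbvm` library and that a `Memory` and a `HandlePageFault` implementation already exist; the function name `copy_all` and the `memory`/`handler` parameters are illustrative only.

use core::task::Poll;
use hbvm::{
    mem::{bmc::BlockCopier, HandlePageFault, Memory},
    VmRunError,
};

/// Drive one interruptible block copy to completion.
///
/// # Safety
/// Same contract as `BlockCopier::poll` (valid mappings for `src`/`dst`).
unsafe fn copy_all(
    memory: &mut Memory,
    handler: &mut impl HandlePageFault,
    src: u64,
    dst: u64,
    count: usize,
) -> Result<(), VmRunError> {
    // All copy state lives in the copier, so the loop could be paused
    // between iterations and resumed later.
    let mut copier = BlockCopier::new(src, dst, count);
    loop {
        match copier.poll(memory, handler) {
            Poll::Ready(Ok(())) => return Ok(()),        // everything copied
            Poll::Ready(Err(e)) => return Err(e.into()), // load/store fault
            Poll::Pending => continue,                   // one 4 KiB block done
        }
    }
}

Inside the VM the same state machine is resumed across cycles instead: the BMC arm rewinds pc by size_of::<ParamBBD>() + 1 so the instruction is decoded again on the next cycle, and only advances pc once poll returns Ready.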