diff --git a/Cargo.lock b/Cargo.lock
index 708a868..4cb5d25 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -126,7 +126,6 @@ version = "0.1.0"
 dependencies = [
  "delegate",
  "derive_more",
- "hashbrown 0.13.2",
  "hbbytecode",
  "log",
  "paste",
diff --git a/hbvm/Cargo.toml b/hbvm/Cargo.toml
index 0346a40..87b74fa 100644
--- a/hbvm/Cargo.toml
+++ b/hbvm/Cargo.toml
@@ -6,10 +6,13 @@ edition = "2021"
 [profile.release]
 lto = true
 
+[features]
+default = ["alloc"]
+alloc = []
+
 [dependencies]
 delegate = "0.9"
 derive_more = "0.99"
-hashbrown = "0.13"
 hbbytecode.path = "../hbbytecode"
 log = "0.4"
 paste = "1.0"
diff --git a/hbvm/fuzz/fuzz_targets/vm.rs b/hbvm/fuzz/fuzz_targets/vm.rs
index 1371903..72e6da5 100644
--- a/hbvm/fuzz/fuzz_targets/vm.rs
+++ b/hbvm/fuzz/fuzz_targets/vm.rs
@@ -1,7 +1,7 @@
 #![no_main]
 
 use {
-    hbvm::vm::{
+    hbvm::{
         mem::{HandlePageFault, Memory, MemoryAccessReason, PageSize},
         Vm,
     },
@@ -9,7 +9,7 @@ use {
 };
 
 fuzz_target!(|data: &[u8]| {
-    if let Ok(mut vm) = Vm::<_, 0>::new_validated(data, TestTrapHandler) {
+    if let Ok(mut vm) = Vm::<_, 0>::new_validated(data, TestTrapHandler, Default::default()) {
        let _ = vm.run();
     }
 });
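Both constructors now take the `Memory` explicitly, which is what the extra `Default::default()` argument above is. A minimal sketch of the new calling convention under the default `alloc` feature — the `boot` helper is illustrative, and the exact `valider::Error` name is taken from the `new_validated` signature below:

```rust
use hbvm::{
    mem::{HandlePageFault, Memory},
    Vm,
};

// Validate the bytecode, then run it with a freshly allocated root
// page table. `Memory::default()` exists only with `alloc` enabled;
// no_std-without-alloc hosts must construct a `Memory` by hand.
fn boot(prog: &[u8], handler: impl HandlePageFault) -> Result<(), hbbytecode::valider::Error> {
    let mut vm = Vm::<_, 0>::new_validated(prog, handler, Memory::default())?;
    let _ = vm.run();
    Ok(())
}
```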
diff --git a/hbvm/src/lib.rs b/hbvm/src/lib.rs
index 1feaa73..4fdf7a0 100644
--- a/hbvm/src/lib.rs
+++ b/hbvm/src/lib.rs
@@ -1,6 +1,458 @@
-#![doc = include_str!("../README.md")]
+//! HoleyBytes Virtual Machine
+//!
+//! # Alloc feature
+//! - Enabled by default
+//! - Provides [`mem::Memory`] mapping / unmapping, as well as
+//!   [`Default`] and [`Drop`] implementations
+
+// # General safety notice:
+// - Validation has to assure there are 256 registers (r0 - r255)
+// - Instructions have to be valid as specified (values and sizes)
+// - Mapped pages should be at least 4 KiB
+
 #![no_std]
 
+#[cfg(feature = "alloc")]
 extern crate alloc;
 
-pub mod vm;
+pub mod mem;
+pub mod value;
+
+use {
+    self::{mem::HandlePageFault, value::ValueVariant},
+    core::{cmp::Ordering, ops},
+    hbbytecode::{
+        valider, OpParam, ParamBB, ParamBBB, ParamBBBB, ParamBBD, ParamBBDH, ParamBBW, ParamBD,
+    },
+    mem::Memory,
+    value::Value,
+};
+
+/// HoleyBytes Virtual Machine
+pub struct Vm<'a, PfHandler, const TIMER_QUOTIENT: usize> {
+    /// Holds 256 registers
+    ///
+    /// Writing to register 0 is considered undefined behaviour
+    /// in terms of HoleyBytes program execution
+    pub registers: [Value; 256],
+
+    /// Memory implementation
+    pub memory: Memory,
+
+    /// Trap handler
+    pub pfhandler: PfHandler,
+
+    /// Program counter
+    pub pc: usize,
+
+    /// Program
+    program: &'a [u8],
+
+    /// Cached program length (without the unreachable end)
+    program_len: usize,
+
+    /// Program timer
+    timer: usize,
+}
+
+impl<'a, PfHandler: HandlePageFault, const TIMER_QUOTIENT: usize>
+    Vm<'a, PfHandler, TIMER_QUOTIENT>
+{
+    /// Create a new VM with program, trap handler and memory
+    ///
+    /// # Safety
+    /// Program code has to be validated
+    pub unsafe fn new_unchecked(program: &'a [u8], traph: PfHandler, memory: Memory) -> Self {
+        Self {
+            registers: [Value::from(0_u64); 256],
+            memory,
+            pfhandler: traph,
+            pc: 0,
+            program_len: program.len() - 12,
+            program,
+            timer: 0,
+        }
+    }
+
+    /// Create a new VM with program, trap handler and memory,
+    /// but only if the program passes validation
+    pub fn new_validated(
+        program: &'a [u8],
+        traph: PfHandler,
+        memory: Memory,
+    ) -> Result<Self, valider::Error> {
+        valider::validate(program)?;
+        Ok(unsafe { Self::new_unchecked(program, traph, memory) })
+    }
+
+    /// Execute the program
+    ///
+    /// Returns [`VmRunError`] if trap handling failed
+    pub fn run(&mut self) -> Result<VmRunOk, VmRunError> {
+        use hbbytecode::opcode::*;
+        loop {
+            // Check instruction boundary
+            if self.pc >= self.program_len {
+                return Ok(VmRunOk::End);
+            }
+
+            // Big match
+            //
+            // Contribution guide:
+            // - The zero register shall never be overwritten. Its value has to always be 0.
+            // - Prefer the `Self::read_reg` and `Self::write_reg` functions
+            // - Extract parameters using the `param!` macro
+            // - Prioritise speed over code size
+            //     - Memory is cheap, CPUs not so much
+            // - Do not heap allocate at any cost
+            //     - Yes, a user-provided trap handler may allocate,
+            //       but that is not our »fault«.
+            // - Unsafe is kind of a must, but be sure you have validated everything
+            //     - Your contributions have to pass sanitizers and Miri
+            // - Strictly follow the spec
+            //     - The spec does not specify how you perform actions, in what order,
+            //       just that the observable effects have to be performed in order and
+            //       correctly.
+            // - Yes, we assume you run a 64 bit CPU. Else, get a better CPU
+            //   sorry 8 bit fans, HBVM won't run on your Speccy :(
+            unsafe {
+                match *self.program.get_unchecked(self.pc) {
+                    UN => {
+                        self.decode::<()>();
+                        return Err(VmRunError::Unreachable);
+                    }
+                    NOP => self.decode::<()>(),
+                    ADD => self.binary_op(u64::wrapping_add),
+                    SUB => self.binary_op(u64::wrapping_sub),
+                    MUL => self.binary_op(u64::wrapping_mul),
+                    AND => self.binary_op::<u64>(ops::BitAnd::bitand),
+                    OR => self.binary_op::<u64>(ops::BitOr::bitor),
+                    XOR => self.binary_op::<u64>(ops::BitXor::bitxor),
+                    SL => self.binary_op(|l, r| u64::wrapping_shl(l, r as u32)),
+                    SR => self.binary_op(|l, r| u64::wrapping_shr(l, r as u32)),
+                    SRS => self.binary_op(|l, r| i64::wrapping_shr(l, r as u32)),
+                    CMP => {
+                        // Compare a0 <=> a1
+                        // < → -1
+                        // > →  1
+                        // = →  0
+
+                        let ParamBBB(tg, a0, a1) = self.decode();
+                        self.write_reg(
+                            tg,
+                            self.read_reg(a0)
+                                .cast::<i64>()
+                                .cmp(&self.read_reg(a1).cast::<i64>())
+                                as i64,
+                        );
+                    }
+                    CMPU => {
+                        // Unsigned comparison
+                        let ParamBBB(tg, a0, a1) = self.decode();
+                        self.write_reg(
+                            tg,
+                            self.read_reg(a0)
+                                .cast::<u64>()
+                                .cmp(&self.read_reg(a1).cast::<u64>())
+                                as i64,
+                        );
+                    }
+                    NOT => {
+                        // Bitwise negation
+                        let ParamBB(tg, a0) = self.decode();
+                        self.write_reg(tg, !self.read_reg(a0).cast::<u64>());
+                    }
+                    NEG => {
+                        // Logical negation
+                        let ParamBB(tg, a0) = self.decode();
+                        self.write_reg(
+                            tg,
+                            match self.read_reg(a0).cast::<u64>() {
+                                0 => 1_u64,
+                                _ => 0,
+                            },
+                        );
+                    }
+                    DIR => {
+                        // Fused division-remainder; division by zero
+                        // saturates both results to u64::MAX
+                        let ParamBBBB(dt, rt, a0, a1) = self.decode();
+                        let a0 = self.read_reg(a0).cast::<u64>();
+                        let a1 = self.read_reg(a1).cast::<u64>();
+                        self.write_reg(dt, a0.checked_div(a1).unwrap_or(u64::MAX));
+                        self.write_reg(rt, a0.checked_rem(a1).unwrap_or(u64::MAX));
+                    }
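                    // Aside on the comparison and DIR arms above, assuming
                    // only plain `core` semantics: `core::cmp::Ordering` is
                    // `repr(i8)` with Less = -1, Equal = 0, Greater = 1, so
                    // the `as i64` casts yield exactly the -1 / 0 / 1 that
                    // CMP documents, and DIR turns division by zero into
                    // all-ones results:
                    //
                    //     assert_eq!(core::cmp::Ordering::Less as i64, -1);
                    //     assert_eq!(7_u64.checked_div(0).unwrap_or(u64::MAX), u64::MAX);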
+                    ADDI => self.binary_op_imm(u64::wrapping_add),
+                    MULI => self.binary_op_imm(u64::wrapping_mul),
+                    ANDI => self.binary_op_imm::<u64>(ops::BitAnd::bitand),
+                    ORI => self.binary_op_imm::<u64>(ops::BitOr::bitor),
+                    XORI => self.binary_op_imm::<u64>(ops::BitXor::bitxor),
+                    SLI => self.binary_op_ims(u64::wrapping_shl),
+                    SRI => self.binary_op_ims(u64::wrapping_shr),
+                    SRSI => self.binary_op_ims(i64::wrapping_shr),
+                    CMPI => {
+                        let ParamBBD(tg, a0, imm) = self.decode();
+                        self.write_reg(
+                            tg,
+                            self.read_reg(a0)
+                                .cast::<i64>()
+                                .cmp(&Value::from(imm).cast::<i64>())
+                                as i64,
+                        );
+                    }
+                    CMPUI => {
+                        let ParamBBD(tg, a0, imm) = self.decode();
+                        self.write_reg(tg, self.read_reg(a0).cast::<u64>().cmp(&imm) as i64);
+                    }
+                    CP => {
+                        let ParamBB(tg, a0) = self.decode();
+                        self.write_reg(tg, self.read_reg(a0));
+                    }
+                    SWA => {
+                        // Swap registers
+                        let ParamBB(r0, r1) = self.decode();
+                        match (r0, r1) {
+                            (0, 0) => (),
+                            (dst, 0) | (0, dst) => self.write_reg(dst, 0_u64),
+                            (r0, r1) => {
+                                core::ptr::swap(
+                                    self.registers.get_unchecked_mut(usize::from(r0)),
+                                    self.registers.get_unchecked_mut(usize::from(r1)),
+                                );
+                            }
+                        }
+                    }
+                    LI => {
+                        let ParamBD(tg, imm) = self.decode();
+                        self.write_reg(tg, imm);
+                    }
+                    LD => {
+                        // Load. If loading more than register size, continue on adjacent registers
+                        let ParamBBDH(dst, base, off, count) = self.decode();
+                        ldst_bound_check(dst, count)?;
+
+                        // Handle the zero register: target r0 is skipped,
+                        // loading starts at r1 instead
+                        let n: usize = match dst {
+                            0 => 1,
+                            _ => 0,
+                        };
+
+                        self.memory.load(
+                            self.read_reg(base).cast::<u64>() + off + n as u64,
+                            self.registers.as_mut_ptr().add(usize::from(dst) + n).cast(),
+                            usize::from(count).saturating_sub(n),
+                            &mut self.pfhandler,
+                        )?;
+                    }
+                    ST => {
+                        // Store. Same rules apply as for LD
+                        let ParamBBDH(dst, base, off, count) = self.decode();
+                        ldst_bound_check(dst, count)?;
+
+                        self.memory.store(
+                            self.read_reg(base).cast::<u64>() + off,
+                            self.registers.as_ptr().add(usize::from(dst)).cast(),
+                            count.into(),
+                            &mut self.pfhandler,
+                        )?;
+                    }
+                    BMC => {
+                        // Block memory copy
+                        let ParamBBD(src, dst, count) = self.decode();
+                        self.memory.block_copy(
+                            self.read_reg(src).cast::<u64>(),
+                            self.read_reg(dst).cast::<u64>(),
+                            count as _,
+                            &mut self.pfhandler,
+                        )?;
+                    }
+                    BRC => {
+                        // Block register copy
+                        let ParamBBB(src, dst, count) = self.decode();
+                        if src.checked_add(count).is_none() || dst.checked_add(count).is_none() {
+                            return Err(VmRunError::RegOutOfBounds);
+                        }
+
+                        core::ptr::copy(
+                            self.registers.get_unchecked(usize::from(src)),
+                            self.registers.get_unchecked_mut(usize::from(dst)),
+                            usize::from(count),
+                        );
+                    }
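                    // Note on the LD/ST bound checks above: the register file
                    // is 256 registers × 8 bytes = 2048 bytes, and
                    // `ldst_bound_check` (defined near the end of this file)
                    // rejects any transfer that would run past r255 before a
                    // single byte is copied. Illustrative checks:
                    //
                    //     assert!(ldst_bound_check(254, 16).is_ok());  // r254..=r255
                    //     assert!(ldst_bound_check(254, 24).is_err()); // past r255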
+                    JAL => {
+                        // Jump and link. Save PC after this instruction to
+                        // the specified register and jump to reg + offset.
+                        let ParamBBD(save, reg, offset) = self.decode();
+                        self.write_reg(save, self.pc as u64);
+                        self.pc = (self.read_reg(reg).cast::<u64>() + offset) as usize;
+                    }
+                    // Conditional jumps, jump only to immediates
+                    JEQ => self.cond_jmp::<u64>(Ordering::Equal),
+                    JNE => {
+                        let ParamBBD(a0, a1, jt) = self.decode();
+                        if self.read_reg(a0).cast::<u64>() != self.read_reg(a1).cast::<u64>() {
+                            self.pc = jt as usize;
+                        }
+                    }
+                    JLT => self.cond_jmp::<i64>(Ordering::Less),
+                    JGT => self.cond_jmp::<i64>(Ordering::Greater),
+                    JLTU => self.cond_jmp::<u64>(Ordering::Less),
+                    JGTU => self.cond_jmp::<u64>(Ordering::Greater),
+                    ECALL => {
+                        self.decode::<()>();
+
+                        // So we don't get a timer interrupt right after ECALL
+                        if TIMER_QUOTIENT != 0 {
+                            self.timer = self.timer.wrapping_add(1);
+                        }
+                        return Ok(VmRunOk::Ecall);
+                    }
+                    ADDF => self.binary_op::<f64>(ops::Add::add),
+                    SUBF => self.binary_op::<f64>(ops::Sub::sub),
+                    MULF => self.binary_op::<f64>(ops::Mul::mul),
+                    DIRF => {
+                        let ParamBBBB(dt, rt, a0, a1) = self.decode();
+                        let a0 = self.read_reg(a0).cast::<f64>();
+                        let a1 = self.read_reg(a1).cast::<f64>();
+                        self.write_reg(dt, a0 / a1);
+                        self.write_reg(rt, a0 % a1);
+                    }
+                    FMAF => {
+                        let ParamBBBB(dt, a0, a1, a2) = self.decode();
+                        self.write_reg(
+                            dt,
+                            self.read_reg(a0).cast::<f64>() * self.read_reg(a1).cast::<f64>()
+                                + self.read_reg(a2).cast::<f64>(),
+                        );
+                    }
+                    NEGF => {
+                        let ParamBB(dt, a0) = self.decode();
+                        self.write_reg(dt, -self.read_reg(a0).cast::<f64>());
+                    }
+                    ITF => {
+                        let ParamBB(dt, a0) = self.decode();
+                        self.write_reg(dt, self.read_reg(a0).cast::<i64>() as f64);
+                    }
+                    FTI => {
+                        let ParamBB(dt, a0) = self.decode();
+                        self.write_reg(dt, self.read_reg(a0).cast::<f64>() as i64);
+                    }
+                    ADDFI => self.binary_op_imm::<f64>(ops::Add::add),
+                    MULFI => self.binary_op_imm::<f64>(ops::Mul::mul),
+                    op => return Err(VmRunError::InvalidOpcode(op)),
+                }
+            }
+
+            if TIMER_QUOTIENT != 0 {
+                self.timer = self.timer.wrapping_add(1);
+                if self.timer % TIMER_QUOTIENT == 0 {
+                    return Ok(VmRunOk::Timer);
+                }
+            }
+        }
+    }
+
+    /// Decode instruction operands
+    #[inline]
+    unsafe fn decode<T: OpParam>(&mut self) -> T {
+        let data = self.program.as_ptr().add(self.pc + 1).cast::<T>().read();
+        self.pc += 1 + core::mem::size_of::<T>();
+        data
+    }
+
+    /// Perform a binary operation over two registers
+    #[inline]
+    unsafe fn binary_op<T: ValueVariant>(&mut self, op: impl Fn(T, T) -> T) {
+        let ParamBBB(tg, a0, a1) = self.decode();
+        self.write_reg(
+            tg,
+            op(self.read_reg(a0).cast::<T>(), self.read_reg(a1).cast::<T>()),
+        );
+    }
+
+    /// Perform a binary operation over a register and an immediate
+    #[inline]
+    unsafe fn binary_op_imm<T: ValueVariant>(&mut self, op: impl Fn(T, T) -> T) {
+        let ParamBBD(tg, reg, imm) = self.decode();
+        self.write_reg(
+            tg,
+            op(self.read_reg(reg).cast::<T>(), Value::from(imm).cast::<T>()),
+        );
+    }
+
+    /// Perform a binary operation over a register and a shift immediate
+    #[inline]
+    unsafe fn binary_op_ims<T: ValueVariant>(&mut self, op: impl Fn(T, u32) -> T) {
+        let ParamBBW(tg, reg, imm) = self.decode();
+        self.write_reg(tg, op(self.read_reg(reg).cast::<T>(), imm));
+    }
+
+    /// Jump to `#3` if the ordering of `#0 <=> #1` matches the expected one
+    #[inline]
+    unsafe fn cond_jmp<T: ValueVariant + Ord>(&mut self, expected: Ordering) {
+        let ParamBBD(a0, a1, ja) = self.decode();
+        if self
+            .read_reg(a0)
+            .cast::<T>()
+            .cmp(&self.read_reg(a1).cast::<T>())
+            == expected
+        {
+            self.pc = ja as usize;
+        }
+    }
+
+    /// Read register
+    #[inline]
+    unsafe fn read_reg(&self, n: u8) -> Value {
+        *self.registers.get_unchecked(n as usize)
+    }
+
+    /// Write a register.
+    /// Writing to register 0 is a no-op.
+    #[inline]
+    unsafe fn write_reg(&mut self, n: u8, value: impl Into<Value>) {
+        if n != 0 {
+            *self.registers.get_unchecked_mut(n as usize) = value.into();
+        }
+    }
+}
+
+/// Load/store register-range bound checking
+#[inline]
+fn ldst_bound_check(reg: u8, size: u16) -> Result<(), VmRunError> {
+    // 256 registers × 8 bytes = 2048 bytes of register file
+    if usize::from(reg) * 8 + usize::from(size) > 2048 {
+        Err(VmRunError::RegOutOfBounds)
+    } else {
+        Ok(())
+    }
+}
+
+/// Virtual machine halt error
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+#[repr(u8)]
+pub enum VmRunError {
+    /// Tried to execute an invalid instruction
+    InvalidOpcode(u8),
+
+    /// Unhandled load access exception
+    LoadAccessEx(u64),
+
+    /// Unhandled store access exception
+    StoreAccessEx(u64),
+
+    /// Register out-of-bounds access
+    RegOutOfBounds,
+
+    /// Reached unreachable code
+    Unreachable,
+}
+
+/// Virtual machine halt ok
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum VmRunOk {
+    /// Program has reached its end
+    End,
+
+    /// Program was interrupted by a timer
+    Timer,
+
+    /// Environment call
+    Ecall,
+}
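`TIMER_QUOTIENT` is the preemption knob: the timer ticks once per dispatched instruction and `run` returns `VmRunOk::Timer` every `TIMER_QUOTIENT` ticks, while `0` (as in the `Vm::<_, 0>` instantiations in this patch) disables it. A sketch of a host loop over the three `VmRunOk` variants; the quotient `1024` and the ecall handling are placeholders:

```rust
use hbvm::{mem::HandlePageFault, Vm, VmRunError, VmRunOk};

// Drive one guest until completion, yielding to the host scheduler
// roughly every 1024 instructions.
fn drive<P: HandlePageFault>(vm: &mut Vm<'_, P, 1024>) -> Result<(), VmRunError> {
    loop {
        match vm.run()? {
            // PC ran past the cached program end
            VmRunOk::End => return Ok(()),
            // Timer slice expired: reschedule here, then loop to resume
            VmRunOk::Timer => {}
            // ECALL: a real host would dispatch on vm.registers here
            VmRunOk::Ecall => {}
        }
    }
}
```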
diff --git a/hbvm/src/main.rs b/hbvm/src/main.rs
index b9d7a24..f3cc2d0 100644
--- a/hbvm/src/main.rs
+++ b/hbvm/src/main.rs
@@ -1,8 +1,9 @@
-use hbvm::vm::mem::{HandlePageFault, Memory, MemoryAccessReason, PageSize};
-
 use {
     hbbytecode::valider::validate,
-    hbvm::vm::Vm,
+    hbvm::{
+        mem::{HandlePageFault, Memory, MemoryAccessReason, PageSize},
+        Vm,
+    },
     std::io::{stdin, Read},
 };
 
@@ -15,7 +16,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
         return Ok(());
     } else {
         unsafe {
-            let mut vm = Vm::<_, 0>::new_unchecked(&prog, TestTrapHandler);
+            let mut vm = Vm::<_, 0>::new_unchecked(&prog, TestTrapHandler, Default::default());
             let data = {
                 let ptr = std::alloc::alloc_zeroed(std::alloc::Layout::from_size_align_unchecked(
                     4096, 4096,
@@ -30,7 +31,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
                 .map(
                     data,
                     0,
-                    hbvm::vm::mem::paging::Permission::Write,
+                    hbvm::mem::paging::Permission::Write,
                     PageSize::Size4K,
                 )
                 .unwrap();
diff --git a/hbvm/src/vm/mem/mod.rs b/hbvm/src/mem/mod.rs
similarity index 98%
rename from hbvm/src/vm/mem/mod.rs
rename to hbvm/src/mem/mod.rs
index df14682..acc190d 100644
--- a/hbvm/src/vm/mem/mod.rs
+++ b/hbvm/src/mem/mod.rs
@@ -7,28 +7,32 @@ mod pfhandler;
 
 pub use pfhandler::HandlePageFault;
 
 use {
-    self::paging::{PageTable, Permission, PtEntry},
     super::VmRunError,
-    alloc::boxed::Box,
     core::mem::MaybeUninit,
     derive_more::Display,
+    paging::{PageTable, Permission},
 };
 
+#[cfg(feature = "alloc")]
+use {alloc::boxed::Box, paging::PtEntry};
+
 /// HoleyBytes virtual memory
 #[derive(Clone, Debug)]
 pub struct Memory {
     /// Root page table
-    root_pt: *mut PageTable,
+    pub root_pt: *mut PageTable,
 }
 
+#[cfg(feature = "alloc")]
 impl Default for Memory {
     fn default() -> Self {
         Self {
-            root_pt: Box::into_raw(Box::default()),
+            root_pt: Box::into_raw(Default::default()),
         }
     }
 }
 
+#[cfg(feature = "alloc")]
 impl Drop for Memory {
     fn drop(&mut self) {
         let _ = unsafe { Box::from_raw(self.root_pt) };
@@ -42,6 +46,7 @@ impl Memory {
     /// - Your faith in the gods of UB
     /// - Addr-san claims it's fine, but who knows if she isn't lying :ferrisSus:
     /// - Alright, Miri-sama is also fine with this, who knows why
+    #[cfg(feature = "alloc")]
     pub unsafe fn map(
         &mut self,
         host: *mut u8,
@@ -107,6 +112,7 @@ impl Memory {
     ///
     /// If this errors, it only means there is no entry to unmap; in most cases
     /// it should just be ignored.
+    #[cfg(feature = "alloc")]
     pub fn unmap(&mut self, addr: u64) -> Result<(), NothingToUnmap> {
         let mut current_pt = self.root_pt;
         let mut page_tables = [core::ptr::null_mut(); 5];
diff --git a/hbvm/src/vm/mem/paging.rs b/hbvm/src/mem/paging.rs
similarity index 100%
rename from hbvm/src/vm/mem/paging.rs
rename to hbvm/src/mem/paging.rs
diff --git a/hbvm/src/vm/mem/pfhandler.rs b/hbvm/src/mem/pfhandler.rs
similarity index 100%
rename from hbvm/src/vm/mem/pfhandler.rs
rename to hbvm/src/mem/pfhandler.rs
diff --git a/hbvm/src/vm/value.rs b/hbvm/src/value.rs
similarity index 100%
rename from hbvm/src/vm/value.rs
rename to hbvm/src/value.rs
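With `alloc` disabled, `Memory` loses `Default`, `Drop`, `map` and `unmap`, and the newly public `root_pt` in the hunk above is the escape hatch: a freestanding host owns the root page table itself and just points `Memory` at it. A rough sketch under stated assumptions — namely that `PageTable` (from the unchanged `mem/paging.rs`) is plain old data for which an all-zero bit pattern is a valid empty table, which must be verified against that file:

```rust
#![no_std]

use {
    core::mem::MaybeUninit,
    hbvm::mem::{paging::PageTable, Memory},
};

// Host-owned backing store; without `alloc` no `Drop` impl exists,
// so `Memory` never tries to free this.
static mut ROOT_PT: MaybeUninit<PageTable> = MaybeUninit::zeroed();

// Safety: the zeroed-table assumption above must hold, and the caller
// must not create two `Memory` values aliasing the same table mutably.
unsafe fn freestanding_memory() -> Memory {
    Memory {
        root_pt: ROOT_PT.as_mut_ptr(),
    }
}
```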
diff --git a/hbvm/src/vm/mod.rs b/hbvm/src/vm/mod.rs
deleted file mode 100644
index fcbfacb..0000000
--- a/hbvm/src/vm/mod.rs
+++ /dev/null
@@ -1,446 +0,0 @@
-//! HoleyBytes Virtual Machine
-//!
-//! All unsafe code here should be sound, if input bytecode passes validation.
-
-// # General safety notice:
-// - Validation has to assure there is 256 registers (r0 - r255)
-// - Instructions have to be valid as specified (values and sizes)
-// - Mapped pages should be at least 4 KiB
-
-pub mod mem;
-pub mod value;
-
-use {
-    self::{mem::HandlePageFault, value::ValueVariant},
-    core::{cmp::Ordering, ops},
-    hbbytecode::{
-        valider, OpParam, ParamBB, ParamBBB, ParamBBBB, ParamBBD, ParamBBDH, ParamBBW, ParamBD,
-    },
-    mem::Memory,
-    value::Value,
-};
-
-/// HoleyBytes Virtual Machine
-pub struct Vm<'a, PfHandler, const TIMER_QUOTIENT: usize> {
-    /// Holds 256 registers
-    ///
-    /// Writing to register 0 is considered undefined behaviour
-    /// in terms of HoleyBytes program execution
-    pub registers: [Value; 256],
-
-    /// Memory implementation
-    pub memory: Memory,
-
-    /// Trap handler
-    pub pfhandler: PfHandler,
-
-    /// Program counter
-    pub pc: usize,
-
-    /// Program
-    program: &'a [u8],
-
-    /// Cached program length (without unreachable end)
-    program_len: usize,
-
-    /// Program timer
-    timer: usize,
-}
-
-impl<'a, PfHandler: HandlePageFault, const TIMER_QUOTIENT: usize>
-    Vm<'a, PfHandler, TIMER_QUOTIENT>
-{
-    /// Create a new VM with program and trap handler
-    ///
-    /// # Safety
-    /// Program code has to be validated
-    pub unsafe fn new_unchecked(program: &'a [u8], traph: PfHandler) -> Self {
-        Self {
-            registers: [Value::from(0_u64); 256],
-            memory: Default::default(),
-            pfhandler: traph,
-            pc: 0,
-            program_len: program.len() - 12,
-            program,
-            timer: 0,
-        }
-    }
-
-    /// Create a new VM with program and trap handler only if it passes validation
-    pub fn new_validated(program: &'a [u8], traph: PfHandler) -> Result<Self, valider::Error> {
-        valider::validate(program)?;
-        Ok(unsafe { Self::new_unchecked(program, traph) })
-    }
-
-    /// Execute program
-    ///
-    /// Program can return [`VmRunError`] if a trap handling failed
-    pub fn run(&mut self) -> Result<VmRunOk, VmRunError> {
-        use hbbytecode::opcode::*;
-        loop {
-            // Check instruction boundary
-            if self.pc >= self.program_len {
-                return Ok(VmRunOk::End);
-            }
-
-            // Big match
-            //
-            // Contribution guide:
-            // - Zero register shall never be overwitten. It's value has to always be 0.
-            // - Prefer `Self::read_reg` and `Self::write_reg` functions
-            // - Extract parameters using `param!` macro
-            // - Prioritise speed over code size
-            //     - Memory is cheap, CPUs not that much
-            // - Do not heap allocate at any cost
-            //     - Yes, user-provided trap handler may allocate,
-            //       but that is not our »fault«.
-            // - Unsafe is kinda must, but be sure you have validated everything
-            //     - Your contributions have to pass sanitizers and Miri
-            // - Strictly follow the spec
-            //     - The spec does not specify how you perform actions, in what order,
-            //       just that the observable effects have to be performed in order and
-            //       correctly.
-            // - Yes, we assume you run 64 bit CPU. Else ?conradluget a better CPU
-            //   sorry 8 bit fans, HBVM won't run on your Speccy :(
-            unsafe {
-                match *self.program.get_unchecked(self.pc) {
-                    UN => {
-                        self.decode::<()>();
-                        return Err(VmRunError::Unreachable);
-                    }
-                    NOP => self.decode::<()>(),
-                    ADD => self.binary_op(u64::wrapping_add),
-                    SUB => self.binary_op(u64::wrapping_sub),
-                    MUL => self.binary_op(u64::wrapping_mul),
-                    AND => self.binary_op::<u64>(ops::BitAnd::bitand),
-                    OR => self.binary_op::<u64>(ops::BitOr::bitor),
-                    XOR => self.binary_op::<u64>(ops::BitXor::bitxor),
-                    SL => self.binary_op(|l, r| u64::wrapping_shl(l, r as u32)),
-                    SR => self.binary_op(|l, r| u64::wrapping_shr(l, r as u32)),
-                    SRS => self.binary_op(|l, r| i64::wrapping_shl(l, r as u32)),
-                    CMP => {
-                        // Compare a0 <=> a1
-                        // < → -1
-                        // > →  1
-                        // = →  0
-
-                        let ParamBBB(tg, a0, a1) = self.decode();
-                        self.write_reg(
-                            tg,
-                            self.read_reg(a0)
-                                .cast::<i64>()
-                                .cmp(&self.read_reg(a1).cast::<i64>())
-                                as i64,
-                        );
-                    }
-                    CMPU => {
-                        // Unsigned comparsion
-                        let ParamBBB(tg, a0, a1) = self.decode();
-                        self.write_reg(
-                            tg,
-                            self.read_reg(a0)
-                                .cast::<u64>()
-                                .cmp(&self.read_reg(a1).cast::<u64>())
-                                as i64,
-                        );
-                    }
-                    NOT => {
-                        // Logical negation
-                        let ParamBB(tg, a0) = self.decode();
-                        self.write_reg(tg, !self.read_reg(a0).cast::<u64>());
-                    }
-                    NEG => {
-                        // Bitwise negation
-                        let ParamBB(tg, a0) = self.decode();
-                        self.write_reg(
-                            tg,
-                            match self.read_reg(a0).cast::<u64>() {
-                                0 => 1_u64,
-                                _ => 0,
-                            },
-                        );
-                    }
-                    DIR => {
-                        // Fused Division-Remainder
-                        let ParamBBBB(dt, rt, a0, a1) = self.decode();
-                        let a0 = self.read_reg(a0).cast::<u64>();
-                        let a1 = self.read_reg(a1).cast::<u64>();
-                        self.write_reg(dt, a0.checked_div(a1).unwrap_or(u64::MAX));
-                        self.write_reg(rt, a0.checked_rem(a1).unwrap_or(u64::MAX));
-                    }
-                    ADDI => self.binary_op_imm(u64::wrapping_add),
-                    MULI => self.binary_op_imm(u64::wrapping_sub),
-                    ANDI => self.binary_op_imm::<u64>(ops::BitAnd::bitand),
-                    ORI => self.binary_op_imm::<u64>(ops::BitOr::bitor),
-                    XORI => self.binary_op_imm::<u64>(ops::BitXor::bitxor),
-                    SLI => self.binary_op_ims(u64::wrapping_shl),
-                    SRI => self.binary_op_ims(u64::wrapping_shr),
-                    SRSI => self.binary_op_ims(i64::wrapping_shr),
-                    CMPI => {
-                        let ParamBBD(tg, a0, imm) = self.decode();
-                        self.write_reg(
-                            tg,
-                            self.read_reg(a0)
-                                .cast::<i64>()
-                                .cmp(&Value::from(imm).cast::<i64>())
-                                as i64,
-                        );
-                    }
-                    CMPUI => {
-                        let ParamBBD(tg, a0, imm) = self.decode();
-                        self.write_reg(tg, self.read_reg(a0).cast::<u64>().cmp(&imm) as i64);
-                    }
-                    CP => {
-                        let ParamBB(tg, a0) = self.decode();
-                        self.write_reg(tg, self.read_reg(a0));
-                    }
-                    SWA => {
-                        // Swap registers
-                        let ParamBB(r0, r1) = self.decode();
-                        match (r0, r1) {
-                            (0, 0) => (),
-                            (dst, 0) | (0, dst) => self.write_reg(dst, 0_u64),
-                            (r0, r1) => {
-                                core::ptr::swap(
-                                    self.registers.get_unchecked_mut(usize::from(r0)),
-                                    self.registers.get_unchecked_mut(usize::from(r1)),
-                                );
-                            }
-                        }
-                    }
-                    LI => {
-                        let ParamBD(tg, imm) = self.decode();
-                        self.write_reg(tg, imm);
-                    }
-                    LD => {
-                        // Load. If loading more than register size, continue on adjecent registers
-                        let ParamBBDH(dst, base, off, count) = self.decode();
-                        ldst_bound_check(dst, count)?;
-
-                        let n: usize = match dst {
-                            0 => 1,
-                            _ => 0,
-                        };
-
-                        self.memory.load(
-                            self.read_reg(base).cast::<u64>() + off + n as u64,
-                            self.registers.as_mut_ptr().add(usize::from(dst) + n).cast(),
-                            usize::from(count).saturating_sub(n),
-                            &mut self.pfhandler,
-                        )?;
-                    }
-                    ST => {
-                        // Store. Same rules apply as to LD
-                        let ParamBBDH(dst, base, off, count) = self.decode();
-                        ldst_bound_check(dst, count)?;
-
-                        self.memory.store(
-                            self.read_reg(base).cast::<u64>() + off,
-                            self.registers.as_ptr().add(usize::from(dst)).cast(),
-                            count.into(),
-                            &mut self.pfhandler,
-                        )?;
-                    }
-                    BMC => {
-                        // Block memory copy
-                        let ParamBBD(src, dst, count) = self.decode();
-                        self.memory.block_copy(
-                            self.read_reg(src).cast::<u64>(),
-                            self.read_reg(dst).cast::<u64>(),
-                            count as _,
-                            &mut self.pfhandler,
-                        )?;
-                    }
-                    BRC => {
-                        // Block register copy
-                        let ParamBBB(src, dst, count) = self.decode();
-                        if src.checked_add(count).is_none() || dst.checked_add(count).is_none() {
-                            return Err(VmRunError::RegOutOfBounds);
-                        }
-
-                        core::ptr::copy(
-                            self.registers.get_unchecked(usize::from(src)),
-                            self.registers.get_unchecked_mut(usize::from(dst)),
-                            usize::from(count),
-                        );
-                    }
-                    JAL => {
-                        // Jump and link. Save PC after this instruction to
-                        // specified register and jump to reg + offset.
-                        let ParamBBD(save, reg, offset) = self.decode();
-                        self.write_reg(save, self.pc as u64);
-                        self.pc = (self.read_reg(reg).cast::<u64>() + offset) as usize;
-                    }
-                    // Conditional jumps, jump only to immediates
-                    JEQ => self.cond_jmp::<u64>(Ordering::Equal),
-                    JNE => {
-                        let ParamBBD(a0, a1, jt) = self.decode();
-                        if self.read_reg(a0).cast::<u64>() != self.read_reg(a1).cast::<u64>() {
-                            self.pc = jt as usize;
-                        }
-                    }
-                    JLT => self.cond_jmp::<i64>(Ordering::Less),
-                    JGT => self.cond_jmp::<i64>(Ordering::Greater),
-                    JLTU => self.cond_jmp::<u64>(Ordering::Less),
-                    JGTU => self.cond_jmp::<u64>(Ordering::Greater),
-                    ECALL => {
-                        self.decode::<()>();
-
-                        // So we don't get timer interrupt after ECALL
-                        if TIMER_QUOTIENT != 0 {
-                            self.timer = self.timer.wrapping_add(1);
-                        }
-                        return Ok(VmRunOk::Ecall);
-                    }
-                    ADDF => self.binary_op::<f64>(ops::Add::add),
-                    SUBF => self.binary_op::<f64>(ops::Sub::sub),
-                    MULF => self.binary_op::<f64>(ops::Mul::mul),
-                    DIRF => {
-                        let ParamBBBB(dt, rt, a0, a1) = self.decode();
-                        let a0 = self.read_reg(a0).cast::<f64>();
-                        let a1 = self.read_reg(a1).cast::<f64>();
-                        self.write_reg(dt, a0 / a1);
-                        self.write_reg(rt, a0 % a1);
-                    }
-                    FMAF => {
-                        let ParamBBBB(dt, a0, a1, a2) = self.decode();
-                        self.write_reg(
-                            dt,
-                            self.read_reg(a0).cast::<f64>() * self.read_reg(a1).cast::<f64>()
-                                + self.read_reg(a2).cast::<f64>(),
-                        );
-                    }
-                    NEGF => {
-                        let ParamBB(dt, a0) = self.decode();
-                        self.write_reg(dt, -self.read_reg(a0).cast::<f64>());
-                    }
-                    ITF => {
-                        let ParamBB(dt, a0) = self.decode();
-                        self.write_reg(dt, self.read_reg(a0).cast::<i64>() as f64);
-                    }
-                    FTI => {
-                        let ParamBB(dt, a0) = self.decode();
-                        self.write_reg(dt, self.read_reg(a0).cast::<f64>() as i64);
-                    }
-                    ADDFI => self.binary_op_imm::<f64>(ops::Add::add),
-                    MULFI => self.binary_op_imm::<f64>(ops::Mul::mul),
-                    op => return Err(VmRunError::InvalidOpcode(op)),
-                }
-            }
-
-            if TIMER_QUOTIENT != 0 {
-                self.timer = self.timer.wrapping_add(1);
-                if self.timer % TIMER_QUOTIENT == 0 {
-                    return Ok(VmRunOk::Timer);
-                }
-            }
-        }
-    }
-
-    /// Decode instruction operands
-    #[inline]
-    unsafe fn decode<T: OpParam>(&mut self) -> T {
-        let data = self.program.as_ptr().add(self.pc + 1).cast::<T>().read();
-        self.pc += 1 + core::mem::size_of::<T>();
-        data
-    }
-
-    /// Perform binary operating over two registers
-    #[inline]
-    unsafe fn binary_op<T: ValueVariant>(&mut self, op: impl Fn(T, T) -> T) {
-        let ParamBBB(tg, a0, a1) = self.decode();
-        self.write_reg(
-            tg,
-            op(self.read_reg(a0).cast::<T>(), self.read_reg(a1).cast::<T>()),
-        );
-    }
-
-    /// Perform binary operation over register and immediate
-    #[inline]
-    unsafe fn binary_op_imm<T: ValueVariant>(&mut self, op: impl Fn(T, T) -> T) {
-        let ParamBBD(tg, reg, imm) = self.decode();
-        self.write_reg(
-            tg,
-            op(self.read_reg(reg).cast::<T>(), Value::from(imm).cast::<T>()),
-        );
-    }
-    /// Perform binary operation over register and shift immediate
-    #[inline]
-    unsafe fn binary_op_ims<T: ValueVariant>(&mut self, op: impl Fn(T, u32) -> T) {
-        let ParamBBW(tg, reg, imm) = self.decode();
-        self.write_reg(tg, op(self.read_reg(reg).cast::<T>(), imm));
-    }
-
-    /// Jump at `#3` if ordering on `#0 <=> #1` is equal to expected
-    #[inline]
-    unsafe fn cond_jmp<T: ValueVariant + Ord>(&mut self, expected: Ordering) {
-        let ParamBBD(a0, a1, ja) = self.decode();
-        if self
-            .read_reg(a0)
-            .cast::<T>()
-            .cmp(&self.read_reg(a1).cast::<T>())
-            == expected
-        {
-            self.pc = ja as usize;
-        }
-    }
-
-    /// Read register
-    #[inline]
-    unsafe fn read_reg(&self, n: u8) -> Value {
-        *self.registers.get_unchecked(n as usize)
-    }
-
-    /// Write a register.
-    /// Writing to register 0 is no-op.
-    #[inline]
-    unsafe fn write_reg(&mut self, n: u8, value: impl Into<Value>) {
-        if n != 0 {
-            *self.registers.get_unchecked_mut(n as usize) = value.into();
-        }
-    }
-}
-
-/// Load/Store target/source register range bound checking
-#[inline]
-fn ldst_bound_check(reg: u8, size: u16) -> Result<(), VmRunError> {
-    if usize::from(reg) * 8 + usize::from(size) > 2048 {
-        Err(VmRunError::RegOutOfBounds)
-    } else {
-        Ok(())
-    }
-}
-
-/// Virtual machine halt error
-#[derive(Copy, Clone, Debug, PartialEq, Eq)]
-#[repr(u8)]
-pub enum VmRunError {
-    /// Tried to execute invalid instruction
-    InvalidOpcode(u8),
-
-    /// Unhandled load access exception
-    LoadAccessEx(u64),
-
-    /// Unhandled store access exception
-    StoreAccessEx(u64),
-
-    /// Register out-of-bounds access
-    RegOutOfBounds,
-
-    /// Reached unreachable code
-    Unreachable,
-}
-
-/// Virtual machine halt ok
-#[derive(Copy, Clone, Debug, PartialEq, Eq)]
-pub enum VmRunOk {
-    /// Program has eached its end
-    End,
-
-    /// Program was interrupted by a timer
-    Timer,
-
-    /// Environment call
-    Ecall,
-}
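Neither binary's `TestTrapHandler` is shown in this diff, and `mem/pfhandler.rs` moves without content changes, so the trait itself stays off-screen. Below is a deny-everything sketch of a handler; the method name and signature are assumptions reconstructed from the call sites (`&mut self.pfhandler` handed to `Memory::load`/`store`), so defer to the real `HandlePageFault` in `mem/pfhandler.rs`:

```rust
use hbvm::mem::{HandlePageFault, Memory, MemoryAccessReason, PageSize};

struct DenyAll;

impl HandlePageFault for DenyAll {
    // Assumed shape of the trait method: return true if the fault was
    // repaired (e.g. a page was mapped in) and the access should be retried.
    fn page_fault(
        &mut self,
        _reason: MemoryAccessReason,
        _memory: &mut Memory,
        _vaddr: u64,
        _size: PageSize,
        _dataptr: *mut u8,
    ) -> bool {
        false // unhandled → run() surfaces LoadAccessEx / StoreAccessEx
    }
}
```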