diff --git a/hbvm/src/vmrun.rs b/hbvm/src/vmrun.rs
index 152a992e..5dac0a69 100644
--- a/hbvm/src/vmrun.rs
+++ b/hbvm/src/vmrun.rs
@@ -80,17 +80,18 @@ where
             AND => self.binary_op::<u64>(ops::BitAnd::bitand),
             OR => self.binary_op::<u64>(ops::BitOr::bitor),
             XOR => self.binary_op::<u64>(ops::BitXor::bitxor),
-            SLU8 => self.binary_op::<u8>(ops::Shl::shl),
-            SLU16 => self.binary_op::<u16>(ops::Shl::shl),
-            SLU32 => self.binary_op::<u32>(ops::Shl::shl),
-            SLU64 => self.binary_op::<u64>(ops::Shl::shl),
-            SRU8 => self.binary_op::<u8>(ops::Shr::shr),
-            SRU16 => self.binary_op::<u16>(ops::Shr::shr),
-            SRU32 => self.binary_op::<u32>(ops::Shr::shr),
-            SRS8 => self.binary_op::<i8>(ops::Shr::shr),
-            SRS16 => self.binary_op::<i16>(ops::Shr::shr),
-            SRS32 => self.binary_op::<i32>(ops::Shr::shr),
-            SRS64 => self.binary_op::<i64>(ops::Shr::shr),
+            SLU8 => self.binary_op_shift::<u8>(u8::wrapping_shl),
+            SLU16 => self.binary_op_shift::<u16>(u16::wrapping_shl),
+            SLU32 => self.binary_op_shift::<u32>(u32::wrapping_shl),
+            SLU64 => self.binary_op_shift::<u64>(u64::wrapping_shl),
+            SRU8 => self.binary_op_shift::<u8>(u8::wrapping_shr),
+            SRU16 => self.binary_op_shift::<u16>(u16::wrapping_shr),
+            SRU32 => self.binary_op_shift::<u32>(u32::wrapping_shr),
+            SRU64 => self.binary_op_shift::<u64>(u64::wrapping_shr),
+            SRS8 => self.binary_op_shift::<i8>(i8::wrapping_shr),
+            SRS16 => self.binary_op_shift::<i16>(i16::wrapping_shr),
+            SRS32 => self.binary_op_shift::<i32>(i32::wrapping_shr),
+            SRS64 => self.binary_op_shift::<i64>(i64::wrapping_shr),
             CMPU => handler!(self, |OpsRRR(tg, a0, a1)| self.cmp(
                 tg,
                 a0,
@@ -137,18 +138,18 @@ where
             ANDI => self.binary_op_imm::<u64>(ops::BitAnd::bitand),
             ORI => self.binary_op_imm::<u64>(ops::BitOr::bitor),
             XORI => self.binary_op_imm::<u64>(ops::BitXor::bitxor),
-            SLUI8 => self.binary_op_ims::<u8>(ops::Shl::shl),
-            SLUI16 => self.binary_op_ims::<u16>(ops::Shl::shl),
-            SLUI32 => self.binary_op_ims::<u32>(ops::Shl::shl),
-            SLUI64 => self.binary_op_ims::<u64>(ops::Shl::shl),
-            SRUI8 => self.binary_op_ims::<u8>(ops::Shr::shr),
-            SRUI16 => self.binary_op_ims::<u16>(ops::Shr::shr),
-            SRUI32 => self.binary_op_ims::<u32>(ops::Shr::shr),
-            SRUI64 => self.binary_op_ims::<u64>(ops::Shr::shr),
-            SRSI8 => self.binary_op_ims::<i8>(ops::Shr::shr),
-            SRSI16 => self.binary_op_ims::<i16>(ops::Shr::shr),
-            SRSI32 => self.binary_op_ims::<i32>(ops::Shr::shr),
-            SRSI64 => self.binary_op_ims::<i64>(ops::Shr::shr),
+            SLUI8 => self.binary_op_ims::<u8>(u8::wrapping_shl),
+            SLUI16 => self.binary_op_ims::<u16>(u16::wrapping_shl),
+            SLUI32 => self.binary_op_ims::<u32>(u32::wrapping_shl),
+            SLUI64 => self.binary_op_ims::<u64>(u64::wrapping_shl),
+            SRUI8 => self.binary_op_ims::<u8>(u8::wrapping_shr),
+            SRUI16 => self.binary_op_ims::<u16>(u16::wrapping_shr),
+            SRUI32 => self.binary_op_ims::<u32>(u32::wrapping_shr),
+            SRUI64 => self.binary_op_ims::<u64>(u64::wrapping_shr),
+            SRSI8 => self.binary_op_ims::<i8>(i8::wrapping_shr),
+            SRSI16 => self.binary_op_ims::<i16>(i16::wrapping_shr),
+            SRSI32 => self.binary_op_ims::<i32>(i32::wrapping_shr),
+            SRSI64 => self.binary_op_ims::<i64>(i64::wrapping_shr),
             CMPUI => handler!(self, |OpsRRD(tg, a0, imm)| { self.cmp(tg, a0, imm) }),
             CMPSI => handler!(self, |OpsRRD(tg, a0, imm)| { self.cmp(tg, a0, imm as i64) }),
             CP => handler!(self, |OpsRR(tg, a0)| self.write_reg(tg, self.read_reg(a0))),
@@ -404,7 +405,7 @@ where
                     .as_mut_ptr()
                     .add(usize::from(dst) + usize::from(n))
                     .cast(),
-                usize::from(count).wrapping_sub(n.into()),
+                usize::from(count).saturating_sub(n.into()),
             )?;
 
         Ok(())
@@ -456,9 +457,23 @@ where
     /// Perform binary operation over register and shift immediate
     #[inline(always)]
-    unsafe fn binary_op_ims<T: ValueVariant>(&mut self, op: impl Fn(T, u8) -> T) {
+    unsafe fn binary_op_shift<T: ValueVariant>(&mut self, op: impl Fn(T, u32) -> T) {
+        let OpsRRR(tg, a0, a1) = self.decode();
+        self.write_reg(
+            tg,
+            op(
+                self.read_reg(a0).cast::<T>(),
+                self.read_reg(a1).cast::<u32>(),
+            ),
+        );
+        self.bump_pc::<OpsRRR>();
+    }
+
+    /// Perform binary operation over register and shift immediate
+    #[inline(always)]
+    unsafe fn binary_op_ims<T: ValueVariant>(&mut self, op: impl Fn(T, u32) -> T) {
         let OpsRRB(tg, reg, imm) = self.decode();
-        self.write_reg(tg, op(self.read_reg(reg).cast::<T>(), imm));
+        self.write_reg(tg, op(self.read_reg(reg).cast::<T>(), imm.into()));
         self.bump_pc::<OpsRRB>();
     }
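
Reviewer note (mine, not part of the patch): as I read it, the switch from
ops::Shl::shl / ops::Shr::shr to the wrapping_* methods matters because Rust's
shift operators panic in a debug build once the shift amount reaches the
type's bit width, so a guest program could bring down the interpreter;
wrapping_shl/wrapping_shr mask the amount instead, which is the usual hardware
semantics. Note also that the first hunk adds an SRU64 arm that previously had
no handler (11 arms removed, 12 added). A standalone sketch of the shift
difference, under those assumptions:

    fn main() {
        let x: u8 = 0b1010_0000;

        // wrapping_shl masks the shift amount modulo u8::BITS (9 & 7 == 1),
        // so an out-of-range guest shift cannot panic the host VM.
        assert_eq!(x.wrapping_shl(9), x << 1);

        // The operator form used before this patch would panic here in a
        // debug build (and is rejected outright for constant operands):
        // let _ = x << 9;
    }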
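Likewise mine, not from the patch: the wrapping_sub -> saturating_sub change
in the copy path guards the case where the chunk offset n overtakes count; a
wrapped subtraction would hand a near-usize::MAX length to the raw memory
copy, while saturating_sub clamps the remainder to zero. Illustrated with
made-up values:

    fn main() {
        let count: usize = 100; // bytes requested
        let n: usize = 128;     // offset after the last full chunk

        // Before: wraps around to an absurd "remaining" byte count.
        assert_eq!(count.wrapping_sub(n), usize::MAX - 27);

        // After: clamps at zero, making the trailing copy a no-op.
        assert_eq!(count.saturating_sub(n), 0);
    }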