diff --git a/hbbytecode/src/lib.rs b/hbbytecode/src/lib.rs
index 45ed510d..2c463ecb 100644
--- a/hbbytecode/src/lib.rs
+++ b/hbbytecode/src/lib.rs
@@ -1,15 +1,15 @@
 #![no_std]
 
-pub type OpR = u8;
+type OpR = u8;
 
-pub type OpA = u64;
-pub type OpO = u32;
-pub type OpP = u16;
+type OpA = u64;
+type OpO = u32;
+type OpP = u16;
 
-pub type OpB = u8;
-pub type OpH = u16;
-pub type OpW = u32;
-pub type OpD = u64;
+type OpB = u8;
+type OpH = u16;
+type OpW = u32;
+type OpD = u64;
 
 /// # Safety
 /// Has to be valid to be decoded from bytecode.
@@ -38,13 +38,12 @@ define_items! {
     OpsRRPH (OpR, OpR, OpP, OpH),
     OpsRRO  (OpR, OpR, OpO     ),
     OpsRRP  (OpR, OpR, OpP     ),
+    OpsO    (OpO,              ),
+    OpsP    (OpP,              ),
+    OpsN    (                  ),
 }
 
-unsafe impl BytecodeItem for OpA {}
-unsafe impl BytecodeItem for OpB {}
-unsafe impl BytecodeItem for OpO {}
-unsafe impl BytecodeItem for OpP {}
-unsafe impl BytecodeItem for () {}
+unsafe impl BytecodeItem for u8 {}
 
 ::with_builtin_macros::with_builtin! {
     let $spec = include_from_root!("instructions.in") in {
diff --git a/hbvm/src/vmrun.rs b/hbvm/src/vmrun.rs
index a3d886f0..e633e24b 100644
--- a/hbvm/src/vmrun.rs
+++ b/hbvm/src/vmrun.rs
@@ -2,6 +2,8 @@
 //!
 //! Have fun
 
+use hbbytecode::OpsN;
+
 use {
     super::{
         bmc::BlockCopier,
@@ -9,14 +11,22 @@ use {
         value::{Value, ValueVariant},
         Vm, VmRunError, VmRunOk,
     },
-    crate::mem::{addr::AddressOp, Address},
-    core::{cmp::Ordering, mem::size_of, ops},
+    crate::mem::Address,
+    core::{cmp::Ordering, ops},
     hbbytecode::{
-        BytecodeItem, OpA, OpO, OpP, OpsRD, OpsRR, OpsRRAH, OpsRRB, OpsRRD, OpsRRH, OpsRRO,
-        OpsRROH, OpsRRP, OpsRRPH, OpsRRR, OpsRRRR, OpsRRW,
+        BytecodeItem, OpsO, OpsP, OpsRD, OpsRR, OpsRRAH, OpsRRB, OpsRRD, OpsRRH, OpsRRO, OpsRROH,
+        OpsRRP, OpsRRPH, OpsRRR, OpsRRRR, OpsRRW,
     },
 };
 
+macro_rules! handler {
+    ($self:expr, |$ty:ident ($($ident:pat),* $(,)?)| $expr:expr) => {{
+        let $ty($($ident),*) = $self.decode::<$ty>();
+        #[allow(clippy::no_effect)] $expr;
+        $self.bump_pc::<$ty>();
+    }};
+}
+
 impl<Mem, const TIMER_QUOTIENT: usize> Vm<Mem, TIMER_QUOTIENT>
 where
     Mem: Memory,
@@ -54,14 +64,14 @@ where
                 .ok_or(VmRunError::ProgramFetchLoadEx(self.pc as _))?
             {
                 UN => {
-                    self.decode::<()>();
+                    self.bump_pc::<OpsN>();
                     return Err(VmRunError::Unreachable);
                 }
                 TX => {
-                    self.decode::<()>();
+                    self.bump_pc::<OpsN>();
                     return Ok(VmRunOk::End);
                 }
-                NOP => self.decode::<()>(),
+                NOP => handler!(self, |OpsN()| ()),
                 ADD => self.binary_op(u64::wrapping_add),
                 SUB => self.binary_op(u64::wrapping_sub),
                 MUL => self.binary_op(u64::wrapping_mul),
@@ -71,13 +81,12 @@ where
                 SL => self.binary_op(|l, r| u64::wrapping_shl(l, r as u32)),
                 SR => self.binary_op(|l, r| u64::wrapping_shr(l, r as u32)),
                 SRS => self.binary_op(|l: u64, r| i64::wrapping_shl(l as i64, r as u32) as u64),
-                CMP => {
+                CMP => handler!(self, |OpsRRR(tg, a0, a1)| {
                     // Compare a0 <=> a1
                     // < → 0
                     // > → 1
                     // = → 2
-                    let OpsRRR(tg, a0, a1) = self.decode();
                     self.write_reg(
                         tg,
                         self.read_reg(a0)
@@ -86,10 +95,9 @@ where
                             as i64
                             + 1,
                     );
-                }
-                CMPU => {
+                }),
+                CMPU => handler!(self, |OpsRRR(tg, a0, a1)| {
                     // Unsigned comparison
-                    let OpsRRR(tg, a0, a1) = self.decode();
                     self.write_reg(
                         tg,
                         self.read_reg(a0)
@@ -98,25 +106,22 @@ where
                             as i64
                             + 1,
                     );
-                }
-                NEG => {
+                }),
+                NEG => handler!(self, |OpsRR(tg, a0)| {
                     // Bit negation
-                    let OpsRR(tg, a0) = self.decode();
                     self.write_reg(tg, !self.read_reg(a0).cast::<u64>())
-                }
-                NOT => {
+                }),
+                NOT => handler!(self, |OpsRR(tg, a0)| {
                     // Logical negation
-                    let OpsRR(tg, a0) = self.decode();
                     self.write_reg(tg, u64::from(self.read_reg(a0).cast::<u64>() == 0));
-                }
-                DIR => {
+                }),
+                DIR => handler!(self, |OpsRRRR(dt, rt, a0, a1)| {
                     // Fused Division-Remainder
-                    let OpsRRRR(dt, rt, a0, a1) = self.decode();
                     let a0 = self.read_reg(a0).cast::<u64>();
                     let a1 = self.read_reg(a1).cast::<u64>();
                     self.write_reg(dt, a0.checked_div(a1).unwrap_or(u64::MAX));
                     self.write_reg(rt, a0.checked_rem(a1).unwrap_or(u64::MAX));
-                }
+                }),
                 ADDI => self.binary_op_imm(u64::wrapping_add),
                 MULI => self.binary_op_imm(u64::wrapping_mul),
                 ANDI => self.binary_op_imm::<u64>(ops::BitAnd::bitand),
@@ -125,8 +130,7 @@ where
                 SLI => self.binary_op_ims(u64::wrapping_shl),
                 SRI => self.binary_op_ims(u64::wrapping_shr),
                 SRSI => self.binary_op_ims(i64::wrapping_shr),
-                CMPI => {
-                    let OpsRRD(tg, a0, imm) = self.decode();
+                CMPI => handler!(self, |OpsRRD(tg, a0, imm)| {
                     self.write_reg(
                         tg,
                         self.read_reg(a0)
@@ -134,18 +138,15 @@ where
                             .cmp(&Value::from(imm).cast::<i64>())
                             as i64,
                     );
-                }
-                CMPUI => {
-                    let OpsRRD(tg, a0, imm) = self.decode();
+                }),
+                CMPUI => handler!(self, |OpsRRD(tg, a0, imm)| {
                     self.write_reg(tg, self.read_reg(a0).cast::<u64>().cmp(&imm) as i64);
-                }
-                CP => {
-                    let OpsRR(tg, a0) = self.decode();
+                }),
+                CP => handler!(self, |OpsRR(tg, a0)| {
                     self.write_reg(tg, self.read_reg(a0));
-                }
-                SWA => {
+                }),
+                SWA => handler!(self, |OpsRR(r0, r1)| {
                     // Swap registers
-                    let OpsRR(r0, r1) = self.decode();
                     match (r0, r1) {
                         (0, 0) => (),
                         (dst, 0) | (0, dst) => self.write_reg(dst, 0_u64),
@@ -156,36 +157,41 @@ where
                             );
                         }
                     }
-                }
-                LI => {
-                    let OpsRD(tg, imm) = self.decode();
+                }),
+                LI => handler!(self, |OpsRD(tg, imm)| {
                     self.write_reg(tg, imm);
-                }
-                LRA => {
-                    let OpsRRO(tg, reg, imm) = self.decode();
-                    self.write_reg(tg, self.rel_addr(reg, imm).get());
-                }
-                LD => {
+                }),
+                LRA => handler!(self, |OpsRRO(tg, reg, imm)| {
+                    self.write_reg(
+                        tg,
+                        (self.pc + self.read_reg(reg).cast::<u64>() + imm + 3_u16).get(),
+                    );
+                }),
+                LD => handler!(self, |OpsRRAH(dst, base, off, count)| {
                     // Load. If loading more than register size, continue on adjacent registers
-                    let OpsRRAH(dst, base, off, count) = self.decode();
                     self.load(dst, base, off, count)?;
-                }
-                ST => {
+                }),
+                ST => handler!(self, |OpsRRAH(dst, base, off, count)| {
                     // Store. Same rules apply as to LD
-                    let OpsRRAH(dst, base, off, count) = self.decode();
                     self.store(dst, base, off, count)?;
-                }
-                LDR => {
-                    let OpsRROH(dst, base, off, count) = self.decode();
-                    self.load(dst, base, u64::from(off).wrapping_add(self.pc.get()), count)?;
-                }
-                STR => {
-                    let OpsRROH(dst, base, off, count) = self.decode();
-                    self.store(dst, base, u64::from(off).wrapping_add(self.pc.get()), count)?;
-                }
+                }),
+                LDR => handler!(self, |OpsRROH(dst, base, off, count)| {
+                    self.load(
+                        dst,
+                        base,
+                        u64::from(off).wrapping_add((self.pc + 3_u64).get()),
+                        count,
+                    )?;
+                }),
+                STR => handler!(self, |OpsRROH(dst, base, off, count)| {
+                    self.store(
+                        dst,
+                        base,
+                        u64::from(off).wrapping_add((self.pc + 3_u64).get()),
+                        count,
+                    )?;
+                }),
                 BMC => {
-                    const INS_SIZE: usize = size_of::<OpsRRH>() + 1;
-
                     // Block memory copy
                     match if let Some(copier) = &mut self.copier {
                         // There is some copier, poll.
@@ -194,9 +200,6 @@ where
                         // There is none, make one!
                         let OpsRRH(src, dst, count) = self.decode();
 
-                        // So we are still on BMC on next cycle
-                        self.pc -= INS_SIZE;
-
                         self.copier = Some(BlockCopier::new(
                             Address::new(self.read_reg(src).cast()),
                             Address::new(self.read_reg(dst).cast()),
@@ -211,21 +214,19 @@ where
                         // We are done, shift program counter
                         core::task::Poll::Ready(Ok(())) => {
                             self.copier = None;
-                            self.pc += INS_SIZE;
+                            self.bump_pc::<OpsRRH>();
                         }
                         // Error, shift program counter (for consistency)
                         // and yield error
                         core::task::Poll::Ready(Err(e)) => {
-                            self.pc += INS_SIZE;
                             return Err(e.into());
                         }
                         // Not done yet, proceed to next cycle
                         core::task::Poll::Pending => (),
                     }
                 }
-                BRC => {
+                BRC => handler!(self, |OpsRRB(src, dst, count)| {
                     // Block register copy
-                    let OpsRRB(src, dst, count) = self.decode();
                     if src.checked_add(count).is_none() || dst.checked_add(count).is_none() {
                         return Err(VmRunError::RegOutOfBounds);
                     }
@@ -235,89 +236,83 @@ where
                         self.registers.get_unchecked_mut(usize::from(dst)),
                         usize::from(count),
                     );
-                }
-                JMP => self.pc = self.pc.wrapping_add(self.decode::<OpO>()),
-                JAL => {
+                }),
+                JMP => handler!(self, |OpsO(off)| self.pc = self.pc.wrapping_add(off)),
+                JAL => handler!(self, |OpsRRW(save, reg, offset)| {
                     // Jump and link. Save PC after this instruction to
                     // specified register and jump to reg + offset.
-                    let OpsRRW(save, reg, offset) = self.decode();
                     self.write_reg(save, self.pc.get());
                     self.pc = Address::new(
                         self.read_reg(reg).cast::<u64>().wrapping_add(offset.into()),
                     );
-                }
+                }),
                 // Conditional jumps, jump only to immediates
                 JEQ => self.cond_jmp::<u64>(Ordering::Equal),
-                JNE => {
-                    let OpsRRP(a0, a1, ja) = self.decode();
+                JNE => handler!(self, |OpsRRP(a0, a1, ja)| {
                     if self.read_reg(a0).cast::<u64>() != self.read_reg(a1).cast::<u64>() {
                         self.pc = Address::new(
                             ((self.pc.get() as i64).wrapping_add(ja as i64)) as u64,
                         )
                     }
-                }
+                }),
                 JLT => self.cond_jmp::<i64>(Ordering::Less),
                 JGT => self.cond_jmp::<i64>(Ordering::Greater),
                 JLTU => self.cond_jmp::<u64>(Ordering::Less),
                 JGTU => self.cond_jmp::<u64>(Ordering::Greater),
                 ECA => {
-                    self.decode::<()>();
-
                     // So we don't get timer interrupt after ECALL
                     if TIMER_QUOTIENT != 0 {
                         self.timer = self.timer.wrapping_add(1);
                     }
+
+                    self.bump_pc::<OpsN>();
                     return Ok(VmRunOk::Ecall);
                 }
                 EBP => {
-                    self.decode::<()>();
+                    self.bump_pc::<OpsN>();
                    return Ok(VmRunOk::Breakpoint);
                 }
                 ADDF => self.binary_op::<f64>(ops::Add::add),
                 SUBF => self.binary_op::<f64>(ops::Sub::sub),
                 MULF => self.binary_op::<f64>(ops::Mul::mul),
-                DIRF => {
-                    let OpsRRRR(dt, rt, a0, a1) = self.decode();
+                DIRF => handler!(self, |OpsRRRR(dt, rt, a0, a1)| {
                     let a0 = self.read_reg(a0).cast::<f64>();
                     let a1 = self.read_reg(a1).cast::<f64>();
                     self.write_reg(dt, a0 / a1);
                     self.write_reg(rt, a0 % a1);
-                }
-                FMAF => {
-                    let OpsRRRR(dt, a0, a1, a2) = self.decode();
+                }),
+                FMAF => handler!(self, |OpsRRRR(dt, a0, a1, a2)| {
                     self.write_reg(
                         dt,
                         self.read_reg(a0).cast::<f64>() * self.read_reg(a1).cast::<f64>()
                             + self.read_reg(a2).cast::<f64>(),
                     );
-                }
-                NEGF => {
-                    let OpsRR(dt, a0) = self.decode();
+                }),
+                NEGF => handler!(self, |OpsRR(dt, a0)| {
                     self.write_reg(dt, -self.read_reg(a0).cast::<f64>());
-                }
-                ITF => {
-                    let OpsRR(dt, a0) = self.decode();
+                }),
+                ITF => handler!(self, |OpsRR(dt, a0)| {
                     self.write_reg(dt, self.read_reg(a0).cast::<i64>() as f64);
-                }
+                }),
                 FTI => {
                     let OpsRR(dt, a0) = self.decode();
                     self.write_reg(dt, self.read_reg(a0).cast::<f64>() as i64);
                 }
                 ADDFI => self.binary_op_imm::<f64>(ops::Add::add),
                 MULFI => self.binary_op_imm::<f64>(ops::Mul::mul),
-                LRA16 => {
-                    let OpsRRP(tg, reg, imm) = self.decode();
-                    self.write_reg(tg, self.rel_addr(reg, imm).get());
-                }
-                LDR16 => {
-                    let OpsRRPH(dst, base, off, count) = self.decode();
+                LRA16 => handler!(self, |OpsRRP(tg, reg, imm)| {
+                    self.write_reg(
+                        tg,
+                        (self.pc + self.read_reg(reg).cast::<u64>() + imm + 3_u16).get(),
+                    );
+                }),
+                LDR16 => handler!(self, |OpsRRPH(dst, base, off, count)| {
                     self.load(dst, base, u64::from(off).wrapping_add(self.pc.get()), count)?;
-                }
-                STR16 => {
-                    let OpsRRPH(dst, base, off, count) = self.decode();
+                }),
+                STR16 => handler!(self, |OpsRRPH(dst, base, off, count)| {
                     self.store(dst, base, u64::from(off).wrapping_add(self.pc.get()), count)?;
-                }
-                JMPR16 => self.pc = self.pc.wrapping_add(self.decode::<OpP>()),
+                }),
+                JMPR16 => handler!(self, |OpsP(off)| self.pc = self.pc.wrapping_add(off)),
                 op => return Err(VmRunError::InvalidOpcode(op)),
             }
         }
@@ -331,13 +326,16 @@ where
         }
     }
 
+    /// Bump instruction pointer
+    #[inline(always)]
+    fn bump_pc<T: BytecodeItem>(&mut self) {
+        self.pc = self.pc.wrapping_add(core::mem::size_of::<T>() + 1);
+    }
+
     /// Decode instruction operands
     #[inline(always)]
     unsafe fn decode<T: BytecodeItem>(&mut self) -> T {
-        let pc1 = self.pc + 1_u64;
-        let data = self.memory.prog_read_unchecked::<T>(pc1 as _);
-        self.pc += 1 + size_of::<T>();
-        data
+        self.memory.prog_read_unchecked::<T>(self.pc + 1_u64)
     }
 
     /// Load
@@ -391,6 +389,7 @@ where
             tg,
             op(self.read_reg(a0).cast::<T>(), self.read_reg(a1).cast::<T>()),
         );
+        self.bump_pc::<OpsRRR>();
     }
 
     /// Perform binary operation over register and immediate
@@ -401,6 +400,7 @@ where
             tg,
             op(self.read_reg(reg).cast::<T>(), Value::from(imm).cast::<T>()),
         );
+        self.bump_pc::<OpsRRD>();
     }
 
     /// Perform binary operation over register and shift immediate
@@ -408,14 +408,7 @@ where
     unsafe fn binary_op_ims<T: ValueVariant>(&mut self, op: impl Fn(T, u32) -> T) {
         let OpsRRW(tg, reg, imm) = self.decode();
         self.write_reg(tg, op(self.read_reg(reg).cast::<T>(), imm));
-    }
-
-    /// Compute address relative to program counter and register value
-    #[inline(always)]
-    fn rel_addr(&self, reg: u8, imm: impl AddressOp) -> Address {
-        self.pc
-            .wrapping_add(self.read_reg(reg).cast::<u64>())
-            .wrapping_add(imm)
+        self.bump_pc::<OpsRRW>();
     }
 
     /// Jump at `PC + #3` if ordering on `#0 <=> #1` is equal to expected
@@ -430,6 +423,8 @@ where
         {
             self.pc = Address::new(((self.pc.get() as i64).wrapping_add(ja as i64)) as u64);
         }
+
+        self.bump_pc::<OpsRRP>();
    }
 
     /// Read register
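
A note for reviewers, since the patch's key mechanic is easy to miss: `decode` no longer advances the program counter, so while a handler runs, `self.pc` still points at the instruction's opcode byte; each arm advances the PC explicitly afterwards via `bump_pc`. That is what lets `BMC` stay parked on the same instruction across `Poll::Pending` cycles without the old `self.pc -= INS_SIZE` rewind, and why `ECA`/`EBP` bump before returning. As a rough, hand-written sketch (not rustc output), an arm written with the new `handler!` macro, such as `CP`, expands to approximately:

    CP => {
        // Decode operands; PC still points at the opcode byte.
        let OpsRR(tg, a0) = self.decode::<OpsRR>();
        // The arm body; the allow exists because bodies like NOP's `()`
        // would otherwise trip clippy::no_effect.
        #[allow(clippy::no_effect)]
        {
            self.write_reg(tg, self.read_reg(a0));
        };
        // Advance PC past opcode byte + operands: 1 + size_of::<OpsRR>().
        self.bump_pc::<OpsRR>();
    }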