Changed relative addressing

Erin 2023-10-01 16:02:06 +02:00
parent 59be906835
commit 889aefe87a
2 changed files with 117 additions and 123 deletions
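
In short: decode() no longer advances the program counter. Each handler decodes its operands, executes, and only then bumps pc past the whole instruction. Relative addressing changes with it: LRA, LRA16, LDR and STR now resolve offsets against the current instruction's address plus 3 bytes (the opcode plus two register operands, which is the position of the offset field), replacing the removed rel_addr helper that was based on the already-advanced pc. A minimal sketch of the two schemes (the standalone helper names and signatures are illustrative, not code from this repo):

    // Old scheme: by the time a handler ran, decode() had already moved
    // pc past the instruction, so rel_addr() resolved offsets relative
    // to the *next* instruction.
    fn rel_addr_old(pc_after_instr: u64, reg_val: u64, imm: u64) -> u64 {
        pc_after_instr.wrapping_add(reg_val).wrapping_add(imm)
    }

    // New scheme: pc still points at the opcode inside the handler, so
    // the base is the instruction start plus 3 bytes (opcode byte plus
    // two register bytes), the location of the offset field itself.
    fn rel_addr_new(pc_at_opcode: u64, reg_val: u64, imm: u64) -> u64 {
        pc_at_opcode
            .wrapping_add(reg_val)
            .wrapping_add(imm)
            .wrapping_add(3)
    }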

File: bytecode operand and instruction definitions (hbbytecode)

@@ -1,15 +1,15 @@
 #![no_std]
-pub type OpR = u8;
-pub type OpA = u64;
-pub type OpO = u32;
-pub type OpP = u16;
-pub type OpB = u8;
-pub type OpH = u16;
-pub type OpW = u32;
-pub type OpD = u64;
+type OpR = u8;
+type OpA = u64;
+type OpO = u32;
+type OpP = u16;
+type OpB = u8;
+type OpH = u16;
+type OpW = u32;
+type OpD = u64;
 /// # Safety
 /// Has to be valid to be decoded from bytecode.
@@ -38,13 +38,12 @@ define_items! {
     OpsRRPH (OpR, OpR, OpP, OpH),
     OpsRRO  (OpR, OpR, OpO     ),
     OpsRRP  (OpR, OpR, OpP     ),
+    OpsO    (OpO,              ),
+    OpsP    (OpP,              ),
+    OpsN    (                  ),
 }
-unsafe impl BytecodeItem for OpA {}
-unsafe impl BytecodeItem for OpB {}
-unsafe impl BytecodeItem for OpO {}
-unsafe impl BytecodeItem for OpP {}
-unsafe impl BytecodeItem for () {}
+unsafe impl BytecodeItem for u8 {}
 ::with_builtin_macros::with_builtin! {
     let $spec = include_from_root!("instructions.in") in {

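The new OpsO, OpsP and OpsN tuples give the 32-bit-offset, 16-bit-offset and no-operand payloads (previously raw OpO, OpP and ()) proper operand types, so the interpreter below can size every pc bump from the operand tuple alone. Roughly, assuming the packed layout define_items! produces (inst_size is an illustrative helper, not repo code):

    // Instruction size = operand bytes + 1 opcode byte; this is exactly
    // what the new bump_pc::<T>() in the next file computes.
    fn inst_size<T>() -> usize {
        core::mem::size_of::<T>() + 1
    }
    // e.g. inst_size::<OpsN>() == 1, inst_size::<OpsO>() == 5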
File: the VM interpreter loop

@@ -2,6 +2,8 @@
 //!
 //! Have fun
+use hbbytecode::OpsN;
 use {
     super::{
         bmc::BlockCopier,
@@ -9,14 +11,22 @@ use {
         value::{Value, ValueVariant},
         Vm, VmRunError, VmRunOk,
     },
-    crate::mem::{addr::AddressOp, Address},
-    core::{cmp::Ordering, mem::size_of, ops},
+    crate::mem::Address,
+    core::{cmp::Ordering, ops},
     hbbytecode::{
-        BytecodeItem, OpA, OpO, OpP, OpsRD, OpsRR, OpsRRAH, OpsRRB, OpsRRD, OpsRRH, OpsRRO,
-        OpsRROH, OpsRRP, OpsRRPH, OpsRRR, OpsRRRR, OpsRRW,
+        BytecodeItem, OpsO, OpsP, OpsRD, OpsRR, OpsRRAH, OpsRRB, OpsRRD, OpsRRH, OpsRRO, OpsRROH,
+        OpsRRP, OpsRRPH, OpsRRR, OpsRRRR, OpsRRW,
     },
 };
+macro_rules! handler {
+    ($self:expr, |$ty:ident ($($ident:pat),* $(,)?)| $expr:expr) => {{
+        let $ty($($ident),*) = $self.decode::<$ty>();
+        #[allow(clippy::no_effect)] $expr;
+        $self.bump_pc::<$ty>();
+    }};
+}
 impl<Mem, const TIMER_QUOTIENT: usize> Vm<Mem, TIMER_QUOTIENT>
 where
     Mem: Memory,
@@ -54,14 +64,14 @@ where
                 .ok_or(VmRunError::ProgramFetchLoadEx(self.pc as _))?
             {
                 UN => {
-                    self.decode::<()>();
+                    self.bump_pc::<OpsN>();
                     return Err(VmRunError::Unreachable);
                 }
                 TX => {
-                    self.decode::<()>();
+                    self.bump_pc::<OpsN>();
                     return Ok(VmRunOk::End);
                 }
-                NOP => self.decode::<()>(),
+                NOP => handler!(self, |OpsN()| ()),
                 ADD => self.binary_op(u64::wrapping_add),
                 SUB => self.binary_op(u64::wrapping_sub),
                 MUL => self.binary_op(u64::wrapping_mul),
@@ -71,13 +81,12 @@ where
                 SL => self.binary_op(|l, r| u64::wrapping_shl(l, r as u32)),
                 SR => self.binary_op(|l, r| u64::wrapping_shr(l, r as u32)),
                 SRS => self.binary_op(|l: u64, r| i64::wrapping_shl(l as i64, r as u32) as u64),
-                CMP => {
+                CMP => handler!(self, |OpsRRR(tg, a0, a1)| {
                     // Compare a0 <=> a1
                     // < → 0
                     // > → 1
                     // = → 2
-                    let OpsRRR(tg, a0, a1) = self.decode();
                     self.write_reg(
                         tg,
                         self.read_reg(a0)
@@ -86,10 +95,9 @@ where
                             as i64
                             + 1,
                     );
-                }
-                CMPU => {
+                }),
+                CMPU => handler!(self, |OpsRRR(tg, a0, a1)| {
                     // Unsigned comparsion
-                    let OpsRRR(tg, a0, a1) = self.decode();
                     self.write_reg(
                         tg,
                         self.read_reg(a0)
@@ -98,25 +106,22 @@ where
                             as i64
                             + 1,
                     );
-                }
-                NEG => {
+                }),
+                NEG => handler!(self, |OpsRR(tg, a0)| {
                     // Bit negation
-                    let OpsRR(tg, a0) = self.decode();
                     self.write_reg(tg, !self.read_reg(a0).cast::<u64>())
-                }
-                NOT => {
+                }),
+                NOT => handler!(self, |OpsRR(tg, a0)| {
                     // Logical negation
-                    let OpsRR(tg, a0) = self.decode();
                     self.write_reg(tg, u64::from(self.read_reg(a0).cast::<u64>() == 0));
-                }
-                DIR => {
+                }),
+                DIR => handler!(self, |OpsRRRR(dt, rt, a0, a1)| {
                     // Fused Division-Remainder
-                    let OpsRRRR(dt, rt, a0, a1) = self.decode();
                     let a0 = self.read_reg(a0).cast::<u64>();
                     let a1 = self.read_reg(a1).cast::<u64>();
                     self.write_reg(dt, a0.checked_div(a1).unwrap_or(u64::MAX));
                     self.write_reg(rt, a0.checked_rem(a1).unwrap_or(u64::MAX));
-                }
+                }),
                 ADDI => self.binary_op_imm(u64::wrapping_add),
                 MULI => self.binary_op_imm(u64::wrapping_sub),
                 ANDI => self.binary_op_imm::<u64>(ops::BitAnd::bitand),
@@ -125,8 +130,7 @@ where
                 SLI => self.binary_op_ims(u64::wrapping_shl),
                 SRI => self.binary_op_ims(u64::wrapping_shr),
                 SRSI => self.binary_op_ims(i64::wrapping_shr),
-                CMPI => {
-                    let OpsRRD(tg, a0, imm) = self.decode();
+                CMPI => handler!(self, |OpsRRD(tg, a0, imm)| {
                     self.write_reg(
                         tg,
                         self.read_reg(a0)
@@ -134,18 +138,15 @@ where
                             .cmp(&Value::from(imm).cast::<i64>())
                             as i64,
                     );
-                }
-                CMPUI => {
-                    let OpsRRD(tg, a0, imm) = self.decode();
+                }),
+                CMPUI => handler!(self, |OpsRRD(tg, a0, imm)| {
                     self.write_reg(tg, self.read_reg(a0).cast::<u64>().cmp(&imm) as i64);
-                }
-                CP => {
-                    let OpsRR(tg, a0) = self.decode();
+                }),
+                CP => handler!(self, |OpsRR(tg, a0)| {
                     self.write_reg(tg, self.read_reg(a0));
-                }
-                SWA => {
+                }),
+                SWA => handler!(self, |OpsRR(r0, r1)| {
                     // Swap registers
-                    let OpsRR(r0, r1) = self.decode();
                     match (r0, r1) {
                         (0, 0) => (),
                         (dst, 0) | (0, dst) => self.write_reg(dst, 0_u64),
@@ -156,36 +157,41 @@ where
                             );
                         }
                     }
-                }
-                LI => {
-                    let OpsRD(tg, imm) = self.decode();
+                }),
+                LI => handler!(self, |OpsRD(tg, imm)| {
                     self.write_reg(tg, imm);
-                }
-                LRA => {
-                    let OpsRRO(tg, reg, imm) = self.decode();
-                    self.write_reg(tg, self.rel_addr(reg, imm).get());
-                }
-                LD => {
+                }),
+                LRA => handler!(self, |OpsRRO(tg, reg, imm)| {
+                    self.write_reg(
+                        tg,
+                        (self.pc + self.read_reg(reg).cast::<u64>() + imm + 3_u16).get(),
+                    );
+                }),
+                LD => handler!(self, |OpsRRAH(dst, base, off, count)| {
                     // Load. If loading more than register size, continue on adjecent registers
-                    let OpsRRAH(dst, base, off, count) = self.decode();
                     self.load(dst, base, off, count)?;
-                }
-                ST => {
+                }),
+                ST => handler!(self, |OpsRRAH(dst, base, off, count)| {
                     // Store. Same rules apply as to LD
-                    let OpsRRAH(dst, base, off, count) = self.decode();
                     self.store(dst, base, off, count)?;
-                }
-                LDR => {
-                    let OpsRROH(dst, base, off, count) = self.decode();
-                    self.load(dst, base, u64::from(off).wrapping_add(self.pc.get()), count)?;
-                }
-                STR => {
-                    let OpsRROH(dst, base, off, count) = self.decode();
-                    self.store(dst, base, u64::from(off).wrapping_add(self.pc.get()), count)?;
-                }
+                }),
+                LDR => handler!(self, |OpsRROH(dst, base, off, count)| {
+                    self.load(
+                        dst,
+                        base,
+                        u64::from(off).wrapping_add((self.pc + 3_u64).get()),
+                        count,
+                    )?;
+                }),
+                STR => handler!(self, |OpsRROH(dst, base, off, count)| {
+                    self.store(
+                        dst,
+                        base,
+                        u64::from(off).wrapping_add((self.pc + 3_u64).get()),
+                        count,
+                    )?;
+                }),
                 BMC => {
-                    const INS_SIZE: usize = size_of::<OpsRRH>() + 1;
                     // Block memory copy
                     match if let Some(copier) = &mut self.copier {
                         // There is some copier, poll.
@@ -194,9 +200,6 @@ where
                         // There is none, make one!
                         let OpsRRH(src, dst, count) = self.decode();
-                        // So we are still on BMC on next cycle
-                        self.pc -= INS_SIZE;
                         self.copier = Some(BlockCopier::new(
                             Address::new(self.read_reg(src).cast()),
                             Address::new(self.read_reg(dst).cast()),
@@ -211,21 +214,19 @@ where
                         // We are done, shift program counter
                         core::task::Poll::Ready(Ok(())) => {
                             self.copier = None;
-                            self.pc += INS_SIZE;
+                            self.bump_pc::<OpsRRH>();
                         }
                         // Error, shift program counter (for consistency)
                         // and yield error
                         core::task::Poll::Ready(Err(e)) => {
-                            self.pc += INS_SIZE;
                             return Err(e.into());
                         }
                         // Not done yet, proceed to next cycle
                         core::task::Poll::Pending => (),
                     }
                 }
-                BRC => {
+                BRC => handler!(self, |OpsRRB(src, dst, count)| {
                     // Block register copy
-                    let OpsRRB(src, dst, count) = self.decode();
                     if src.checked_add(count).is_none() || dst.checked_add(count).is_none() {
                         return Err(VmRunError::RegOutOfBounds);
                     }
@@ -235,89 +236,83 @@ where
                         self.registers.get_unchecked_mut(usize::from(dst)),
                         usize::from(count),
                     );
-                }
-                JMP => self.pc = self.pc.wrapping_add(self.decode::<OpO>()),
-                JAL => {
+                }),
+                JMP => handler!(self, |OpsO(off)| self.pc = self.pc.wrapping_add(off)),
+                JAL => handler!(self, |OpsRRW(save, reg, offset)| {
                     // Jump and link. Save PC after this instruction to
                     // specified register and jump to reg + offset.
-                    let OpsRRW(save, reg, offset) = self.decode();
                     self.write_reg(save, self.pc.get());
                     self.pc = Address::new(
                         self.read_reg(reg).cast::<u64>().wrapping_add(offset.into()),
                     );
-                }
+                }),
                 // Conditional jumps, jump only to immediates
                 JEQ => self.cond_jmp::<u64>(Ordering::Equal),
-                JNE => {
-                    let OpsRRP(a0, a1, ja) = self.decode();
+                JNE => handler!(self, |OpsRRP(a0, a1, ja)| {
                     if self.read_reg(a0).cast::<u64>() != self.read_reg(a1).cast::<u64>() {
                         self.pc = Address::new(
                             ((self.pc.get() as i64).wrapping_add(ja as i64)) as u64,
                         )
                     }
-                }
+                }),
                 JLT => self.cond_jmp::<u64>(Ordering::Less),
                 JGT => self.cond_jmp::<u64>(Ordering::Greater),
                 JLTU => self.cond_jmp::<i64>(Ordering::Less),
                 JGTU => self.cond_jmp::<i64>(Ordering::Greater),
                 ECA => {
-                    self.decode::<()>();
                     // So we don't get timer interrupt after ECALL
                     if TIMER_QUOTIENT != 0 {
                         self.timer = self.timer.wrapping_add(1);
                     }
+                    self.bump_pc::<OpsN>();
                     return Ok(VmRunOk::Ecall);
                 }
                 EBP => {
-                    self.decode::<()>();
+                    self.bump_pc::<OpsN>();
                     return Ok(VmRunOk::Breakpoint);
                 }
                 ADDF => self.binary_op::<f64>(ops::Add::add),
                 SUBF => self.binary_op::<f64>(ops::Sub::sub),
                 MULF => self.binary_op::<f64>(ops::Mul::mul),
-                DIRF => {
-                    let OpsRRRR(dt, rt, a0, a1) = self.decode();
+                DIRF => handler!(self, |OpsRRRR(dt, rt, a0, a1)| {
                     let a0 = self.read_reg(a0).cast::<f64>();
                     let a1 = self.read_reg(a1).cast::<f64>();
                     self.write_reg(dt, a0 / a1);
                     self.write_reg(rt, a0 % a1);
-                }
-                FMAF => {
-                    let OpsRRRR(dt, a0, a1, a2) = self.decode();
+                }),
+                FMAF => handler!(self, |OpsRRRR(dt, a0, a1, a2)| {
                     self.write_reg(
                         dt,
                         self.read_reg(a0).cast::<f64>() * self.read_reg(a1).cast::<f64>()
                             + self.read_reg(a2).cast::<f64>(),
                     );
-                }
-                NEGF => {
-                    let OpsRR(dt, a0) = self.decode();
+                }),
+                NEGF => handler!(self, |OpsRR(dt, a0)| {
                     self.write_reg(dt, -self.read_reg(a0).cast::<f64>());
-                }
-                ITF => {
-                    let OpsRR(dt, a0) = self.decode();
+                }),
+                ITF => handler!(self, |OpsRR(dt, a0)| {
                     self.write_reg(dt, self.read_reg(a0).cast::<i64>() as f64);
-                }
+                }),
                 FTI => {
                     let OpsRR(dt, a0) = self.decode();
                     self.write_reg(dt, self.read_reg(a0).cast::<f64>() as i64);
                 }
                 ADDFI => self.binary_op_imm::<f64>(ops::Add::add),
                 MULFI => self.binary_op_imm::<f64>(ops::Mul::mul),
-                LRA16 => {
-                    let OpsRRP(tg, reg, imm) = self.decode();
-                    self.write_reg(tg, self.rel_addr(reg, imm).get());
-                }
-                LDR16 => {
-                    let OpsRRPH(dst, base, off, count) = self.decode();
+                LRA16 => handler!(self, |OpsRRP(tg, reg, imm)| {
+                    self.write_reg(
+                        tg,
+                        (self.pc + self.read_reg(reg).cast::<u64>() + imm + 3_u16).get(),
+                    );
+                }),
+                LDR16 => handler!(self, |OpsRRPH(dst, base, off, count)| {
                     self.load(dst, base, u64::from(off).wrapping_add(self.pc.get()), count)?;
-                }
-                STR16 => {
-                    let OpsRRPH(dst, base, off, count) = self.decode();
+                }),
+                STR16 => handler!(self, |OpsRRPH(dst, base, off, count)| {
                     self.store(dst, base, u64::from(off).wrapping_add(self.pc.get()), count)?;
-                }
-                JMPR16 => self.pc = self.pc.wrapping_add(self.decode::<OpP>()),
+                }),
+                JMPR16 => handler!(self, |OpsP(off)| self.pc = self.pc.wrapping_add(off)),
                 op => return Err(VmRunError::InvalidOpcode(op)),
             }
         }
@@ -331,13 +326,16 @@ where
         }
     }

+    /// Bump instruction pointer
+    #[inline(always)]
+    fn bump_pc<T: BytecodeItem>(&mut self) {
+        self.pc = self.pc.wrapping_add(core::mem::size_of::<T>() + 1);
+    }
+
     /// Decode instruction operands
     #[inline(always)]
     unsafe fn decode<T: BytecodeItem>(&mut self) -> T {
-        let pc1 = self.pc + 1_u64;
-        let data = self.memory.prog_read_unchecked::<T>(pc1 as _);
-        self.pc += 1 + size_of::<T>();
-        data
+        self.memory.prog_read_unchecked::<T>(self.pc + 1_u64)
     }

     /// Load
@@ -391,6 +389,7 @@ where
             tg,
             op(self.read_reg(a0).cast::<T>(), self.read_reg(a1).cast::<T>()),
         );
+        self.bump_pc::<OpsRRR>();
     }

     /// Perform binary operation over register and immediate
@@ -401,6 +400,7 @@ where
             tg,
             op(self.read_reg(reg).cast::<T>(), Value::from(imm).cast::<T>()),
         );
+        self.bump_pc::<OpsRRD>();
     }

     /// Perform binary operation over register and shift immediate
@@ -408,14 +408,7 @@ where
     unsafe fn binary_op_ims<T: ValueVariant>(&mut self, op: impl Fn(T, u32) -> T) {
         let OpsRRW(tg, reg, imm) = self.decode();
         self.write_reg(tg, op(self.read_reg(reg).cast::<T>(), imm));
-    }
-
-    /// Compute address relative to program counter an register value
-    #[inline(always)]
-    fn rel_addr(&self, reg: u8, imm: impl AddressOp) -> Address {
-        self.pc
-            .wrapping_add(self.read_reg(reg).cast::<u64>())
-            .wrapping_add(imm)
+        self.bump_pc::<OpsRRW>();
     }

     /// Jump at `PC + #3` if ordering on `#0 <=> #1` is equal to expected
@@ -430,6 +423,8 @@ where
         {
             self.pc = Address::new(((self.pc.get() as i64).wrapping_add(ja as i64)) as u64);
         }
+
+        self.bump_pc::<OpsRRP>();
     }

     /// Read register
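
Taken together, dispatch now follows a single decode, execute, bump cycle; that is all the handler! macro expands to. A condensed, self-contained model of the cycle (MiniVm, the opcode value and the flat Vec<u8> program are illustrative only; the real Vm reads through its Memory trait):

    #[derive(Clone, Copy)]
    #[repr(packed)]
    struct OpsRR(u8, u8);

    struct MiniVm {
        pc: usize,
        regs: [u64; 256],
        prog: Vec<u8>,
    }

    impl MiniVm {
        /// Read operands sitting right after the opcode byte, without
        /// moving pc: this is what decode() does after this commit.
        fn decode<T: Copy>(&self) -> T {
            unsafe { core::ptr::read_unaligned(self.prog[self.pc + 1..].as_ptr().cast()) }
        }

        /// Advance pc over the opcode byte plus the operand bytes,
        /// mirroring the new bump_pc::<T>().
        fn bump_pc<T>(&mut self) {
            self.pc += core::mem::size_of::<T>() + 1;
        }

        fn step(&mut self) {
            match self.prog[self.pc] {
                // A CP-like register copy: decode, execute, then bump,
                // the three steps handler! expands to. Note that the
                // bump runs after the body even for pc-modifying arms.
                0x30 => {
                    let OpsRR(tg, a0) = self.decode::<OpsRR>();
                    self.regs[tg as usize] = self.regs[a0 as usize];
                    self.bump_pc::<OpsRR>();
                }
                _ => {}
            }
        }
    }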