Removed some macros

commit 193be0bd5a (parent fce3fa5210)
Author: Erin
Date: 2023-07-24 18:48:42 +02:00
10 changed files with 238 additions and 197 deletions

Cargo.lock (generated, 19 lines changed)

@@ -130,9 +130,16 @@ dependencies = [
  "hbbytecode",
  "log",
  "paste",
+ "sealed",
  "static_assertions",
 ]
 
+[[package]]
+name = "heck"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
+
 [[package]]
 name = "lasso"
 version = "0.7.2"
@@ -246,6 +253,18 @@ dependencies = [
  "semver",
 ]
 
+[[package]]
+name = "sealed"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f4a8caec23b7800fb97971a1c6ae365b6239aaeddfb934d6265f8505e795699d"
+dependencies = [
+ "heck",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.25",
+]
+
 [[package]]
 name = "semver"
 version = "1.0.17"


@@ -27,8 +27,10 @@ macros::impl_both!(
     bbdh(p0: R, p1: R, p2: I, p3: u16)
         => [LD, ST],
     bbd(p0: R, p1: R, p2: I)
-        => [ADDI, MULI, ANDI, ORI, XORI, SLI, SRI, SRSI, CMPI, CMPUI,
-            BMC, JAL, JEQ, JNE, JLT, JGT, JLTU, JGTU, ADDFI, MULFI],
+        => [ADDI, MULI, ANDI, ORI, XORI, CMPI, CMPUI, BMC, JAL, JEQ, JNE, JLT, JGT, JLTU,
+            JGTU, ADDFI, MULFI],
+    bbw(p0: R, p1: R, p2: u32)
+        => [SLI, SRI, SRSI],
     bb(p0: R, p1: R)
         => [NEG, NOT, CP, SWA, NEGF, ITF, FTI],
     bd(p0: R, p1: I)
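
The new `bbw` class routes the shift-immediate mnemonics (SLI, SRI, SRSI) through a 32-bit immediate instead of the 64-bit `bbd` form. For orientation, a standalone miniature of this kind of class-based table follows; the `impl_ops!` macro, the `Asm` struct, and its byte-appending bodies are invented for the sketch and are not hbasm's actual macro or generated API:

```rust
// Illustrative miniature of a class-based instruction table: one macro arm
// per operand shape, expanding to one method per listed mnemonic.
macro_rules! impl_ops {
    ($($class:ident($($p:ident: $t:ty),*) => [$($name:ident),* $(,)?]),* $(,)?) => {
        struct Asm {
            buf: Vec<u8>,
        }

        impl Asm {
            $($(
                #[allow(dead_code)]
                fn $name(&mut self, $($p: $t),*) {
                    // Stand-in encoding: just append the operands little-endian.
                    $(self.buf.extend_from_slice(&$p.to_le_bytes());)*
                }
            )*)*
        }
    };
}

impl_ops!(
    bbd(p0: u8, p1: u8, p2: u64) => [addi, muli],
    bbw(p0: u8, p1: u8, p2: u32) => [sli, sri, srsi],
);

fn main() {
    let mut asm = Asm { buf: Vec::new() };
    asm.sli(1, 2, 4);
    // Two register bytes plus a 32-bit immediate: the 48-bit BBW payload.
    assert_eq!(asm.buf.len(), 6);
}
```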


@@ -215,6 +215,10 @@ macro_rules! extract_pat {
     };
 }
 
+/// Generate extract macro
+macro_rules! gen_extract {
+    // Integer types have same body
+    ($($int:ident),* $(,)?) => {
 /// Extract operand from code
 macro_rules! extract {
     // Register (require prefixing with r)

@@ -233,19 +237,17 @@ macro_rules! extract {
         };
     };
-    // Get u8, if not fitting, the token is claimed invalid
-    ($self:expr, u8, $id:ident) => {
+    // Get $int, if not fitting, the token is claimed invalid
+    $(($self:expr, $int, $id:ident) => {
         extract_pat!($self, Token::Integer($id));
-        let $id = u8::try_from($id).map_err(|_| ErrorKind::InvalidToken)?;
-    };
-    // Get u16, if not fitting, the token is claimed invalid
-    ($self:expr, u16, $id:ident) => {
-        extract_pat!($self, Token::Integer($id));
-        let $id = u16::try_from($id).map_err(|_| ErrorKind::InvalidToken)?;
+        let $id = $int::try_from($id).map_err(|_| ErrorKind::InvalidToken)?;
+    });*;
+    }
     };
 }
+
+gen_extract!(u8, u16, u32);
 
 /// Parameter extract incremental token-tree muncher
 ///
 /// What else would it mean?
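
`gen_extract!` stamps out one `extract!` arm per integer width instead of the hand-written `u8`/`u16` arms, which is also what lets `u32` join the list for the new BBW shifts. A simplified, standalone re-creation of that macro-generating-macro pattern (the `gen_parse!`/`parse_int!` names and the error value are invented for the sketch):

```rust
// Standalone re-creation of the pattern above (not the assembler's real
// code): an outer macro generates one `parse_int!` arm per integer type.
macro_rules! gen_parse {
    ($($int:ident),* $(,)?) => {
        macro_rules! parse_int {
            $(($int, $val:expr) => {
                // Narrow the wide literal into the requested type.
                $int::try_from($val).map_err(|_| "invalid token")
            };)*
        }
    };
}

gen_parse!(u8, u16, u32);

fn main() {
    let ok: Result<u16, &str> = parse_int!(u16, 300i64);
    let too_big: Result<u8, &str> = parse_int!(u8, 300i64);
    assert_eq!(ok, Ok(300));
    assert!(too_big.is_err());
}
```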


@@ -44,6 +44,11 @@ struct hbbc_ParamBBD
 typedef hbbc_ParamBBD;
 static_assert(sizeof(hbbc_ParamBBD) == 80 / 8);
 
+struct hbbc_ParamBBW
+    { uint8_t _0; uint8_t _1; uint32_t _2; }
+typedef hbbc_ParamBBW;
+static_assert(sizeof(hbbc_ParamBBW) == 48 / 8);
+
 struct hbbc_ParamBB
     { uint8_t _0; uint8_t _1; }
 typedef hbbc_ParamBB;


@@ -41,9 +41,9 @@ constmod!(pub opcode(u8) {
     ANDI = 18, "BBD; #0 ← #1 & imm #2";
     ORI = 19, "BBD; #0 ← #1 | imm #2";
     XORI = 20, "BBD; #0 ← #1 ^ imm #2";
-    SLI = 21, "BBD; #0 ← #1 « imm #2";
-    SRI = 22, "BBD; #0 ← #1 » imm #2";
-    SRSI = 23, "BBD; #0 ← #1 » imm #2 (signed)";
+    SLI = 21, "BBW; #0 ← #1 « imm #2";
+    SRI = 22, "BBW; #0 ← #1 » imm #2";
+    SRSI = 23, "BBW; #0 ← #1 » imm #2 (signed)";
     CMPI = 24, "BBD; #0 ← #1 <=> imm #2";
     CMPUI = 25, "BBD; #0 ← #1 <=> imm #2 (unsigned)";

@@ -89,6 +89,9 @@ pub struct ParamBBDH(pub u8, pub u8, pub u64, pub u16);
 #[repr(packed)]
 pub struct ParamBBD(pub u8, pub u8, pub u64);
 
+#[repr(packed)]
+pub struct ParamBBW(pub u8, pub u8, pub u32);
+
 #[repr(packed)]
 pub struct ParamBB(pub u8, pub u8);

@@ -102,6 +105,7 @@ unsafe impl OpParam for ParamBBBB {}
 unsafe impl OpParam for ParamBBB {}
 unsafe impl OpParam for ParamBBDH {}
 unsafe impl OpParam for ParamBBD {}
+unsafe impl OpParam for ParamBBW {}
 unsafe impl OpParam for ParamBB {}
 unsafe impl OpParam for ParamBD {}
 unsafe impl OpParam for u64 {}
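
The new BBW form packs two register indices and a 32-bit immediate, 8 + 8 + 32 = 48 bits, matching the C header's `static_assert` earlier in this commit. A standalone sketch of that layout check, using a local `ParamBbw` stand-in rather than the crate's `ParamBBW`:

```rust
// Mirror of the packed BBW layout: 8 + 8 + 32 bits with no padding.
#[allow(dead_code)]
#[repr(packed)]
struct ParamBbw(u8, u8, u32);

fn main() {
    assert_eq!(core::mem::size_of::<ParamBbw>(), 48 / 8);
    // With the one-byte opcode in front, the VM's decode step advances
    // 1 + 6 = 7 bytes for SLI/SRI/SRSI.
    println!("BBW payload: {} bytes", core::mem::size_of::<ParamBbw>());
}
```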


@@ -13,4 +13,5 @@ hashbrown = "0.13"
 hbbytecode.path = "../hbbytecode"
 log = "0.4"
 paste = "1.0"
+sealed = "0.5"
 static_assertions = "1.0"


@@ -69,7 +69,8 @@ pub fn validate(mut program: &[u8]) -> Result<(), Error> {
             | [ADD..=CMPU | BRC | ADDF..=MULF, _, _, _, rest @ ..]
             | [NEG..=NOT | CP..=SWA | NEGF..=FTI, _, _, rest @ ..]
             | [LI, _, _, _, _, _, _, _, _, _, rest @ ..]
-            | [ADDI..=CMPUI | BMC | JAL..=JGTU | ADDFI..=MULFI, _, _, _, _, _, _, _, _, _, _, rest @ ..]
+            | [ADDI..=XORI | CMPI..=CMPUI | BMC | JAL..=JGTU | ADDFI..=MULFI, _, _, _, _, _, _, _, _, _, _, rest @ ..]
+            | [SLI..=SRSI, _, _, _, _, rest @ ..]
            | [LD..=ST, _, _, _, _, _, _, _, _, _, _, _, _, rest @ ..] => rest,
             _ => {
                 return Err(Error {
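
The validator gains a dedicated arm for the shift-immediate opcodes, using the same slice-pattern style as the existing arms: each pattern consumes one instruction's bytes and binds the remainder as `rest`. A simplified, standalone illustration of that technique (opcode numbers, operand sizes, and the error type are invented for the sketch):

```rust
// Simplified illustration of slice-pattern validation: each arm consumes one
// instruction's bytes and yields the rest; anything else is an error.
const NOP: u8 = 0; // no operands
const ADD: u8 = 1; // three register bytes

fn validate(mut program: &[u8]) -> Result<(), usize> {
    let len = program.len();
    while !program.is_empty() {
        program = match program {
            [NOP, rest @ ..] => rest,
            [ADD, _, _, _, rest @ ..] => rest,
            // Unknown opcode or truncated instruction: report its offset.
            _ => return Err(len - program.len()),
        };
    }
    Ok(())
}

fn main() {
    assert!(validate(&[ADD, 1, 2, 3, NOP]).is_ok());
    assert_eq!(validate(&[ADD, 1, 2]), Err(0));
}
```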


@@ -7,91 +7,18 @@
 // - Instructions have to be valid as specified (values and sizes)
 // - Mapped pages should be at least 4 KiB
 
-use self::mem::HandlePageFault;
-
 pub mod mem;
 pub mod value;
 
 use {
+    self::{mem::HandlePageFault, value::ValueVariant},
     crate::validate,
-    core::ops,
-    hbbytecode::{OpParam, ParamBB, ParamBBB, ParamBBBB, ParamBBD, ParamBBDH, ParamBD},
+    core::{cmp::Ordering, ops},
+    hbbytecode::{OpParam, ParamBB, ParamBBB, ParamBBBB, ParamBBD, ParamBBDH, ParamBBW, ParamBD},
     mem::Memory,
-    static_assertions::assert_impl_one,
     value::Value,
 };
 
-/// Extract a parameter from program
-macro_rules! param {
-    ($self:expr, $ty:ty) => {{
-        assert_impl_one!($ty: OpParam);
-        let data = $self
-            .program
-            .as_ptr()
-            .add($self.pc + 1)
-            .cast::<$ty>()
-            .read();
-
-        $self.pc += 1 + core::mem::size_of::<$ty>();
-        data
-    }};
-}
-
-/// Perform binary operation `#0 ← #1 OP #2`
-macro_rules! binary_op {
-    ($self:expr, $ty:ident, $handler:expr) => {{
-        let ParamBBB(tg, a0, a1) = param!($self, ParamBBB);
-        $self.write_reg(
-            tg,
-            $handler(
-                Value::$ty(&$self.read_reg(a0)),
-                Value::$ty(&$self.read_reg(a1)),
-            ),
-        );
-    }};
-
-    ($self:expr, $ty:ident, $handler:expr, $con:ty) => {{
-        let ParamBBB(tg, a0, a1) = param!($self, ParamBBB);
-        $self.write_reg(
-            tg,
-            $handler(
-                Value::$ty(&$self.read_reg(a0)),
-                Value::$ty(&$self.read_reg(a1)) as $con,
-            ),
-        );
-    }};
-}
-
-/// Perform binary operation with immediate `#0 ← #1 OP imm #2`
-macro_rules! binary_op_imm {
-    ($self:expr, $ty:ident, $handler:expr) => {{
-        let ParamBBD(tg, a0, imm) = param!($self, ParamBBD);
-        $self.write_reg(
-            tg,
-            $handler(Value::$ty(&$self.read_reg(a0)), Value::$ty(&imm.into())),
-        );
-    }};
-
-    ($self:expr, $ty:ident, $handler:expr, $con:ty) => {{
-        let ParamBBD(tg, a0, imm) = param!($self, ParamBBD);
-        $self.write_reg(
-            tg,
-            $handler(Value::$ty(&$self.read_reg(a0)), Value::$ty(&imm.into()) as $con),
-        );
-    }};
-}
-
-/// Jump at `#3` if ordering on `#0 <=> #1` is equal to expected
-macro_rules! cond_jump {
-    ($self:expr, $ty:ident, $expected:ident) => {{
-        let ParamBBD(a0, a1, jt) = param!($self, ParamBBD);
-        if core::cmp::Ord::cmp(&$self.read_reg(a0).as_u64(), &$self.read_reg(a1).as_u64())
-            == core::cmp::Ordering::$expected
-        {
-            $self.pc = jt as usize;
-        }
-    }};
-}
-
 /// HoleyBytes Virtual Machine
 pub struct Vm<'a, PfHandler, const TIMER_QUOTIENT: usize> {
     /// Holds 256 registers
@@ -107,7 +34,7 @@ pub struct Vm<'a, PfHandler, const TIMER_QUOTIENT: usize> {
     pub pfhandler: PfHandler,
 
     /// Program counter
-    pc: usize,
+    pub pc: usize,
 
     /// Program
     program: &'a [u8],
@@ -177,50 +104,56 @@ impl<'a, PfHandler: HandlePageFault, const TIMER_QUOTIENT: usize>
         unsafe {
             match *self.program.get_unchecked(self.pc) {
                 UN => {
-                    param!(self, ());
+                    self.decode::<()>();
                     return Err(VmRunError::Unreachable);
                 }
-                NOP => param!(self, ()),
-                ADD => binary_op!(self, as_u64, u64::wrapping_add),
-                SUB => binary_op!(self, as_u64, u64::wrapping_sub),
-                MUL => binary_op!(self, as_u64, u64::wrapping_mul),
-                AND => binary_op!(self, as_u64, ops::BitAnd::bitand),
-                OR => binary_op!(self, as_u64, ops::BitOr::bitor),
-                XOR => binary_op!(self, as_u64, ops::BitXor::bitxor),
-                SL => binary_op!(self, as_u64, u64::wrapping_shl, u32),
-                SR => binary_op!(self, as_u64, u64::wrapping_shr, u32),
-                SRS => binary_op!(self, as_i64, i64::wrapping_shr, u32),
+                NOP => self.decode::<()>(),
+                ADD => self.binary_op(u64::wrapping_add),
+                SUB => self.binary_op(u64::wrapping_sub),
+                MUL => self.binary_op(u64::wrapping_mul),
+                AND => self.binary_op::<u64>(ops::BitAnd::bitand),
+                OR => self.binary_op::<u64>(ops::BitOr::bitor),
+                XOR => self.binary_op::<u64>(ops::BitXor::bitxor),
+                SL => self.binary_op(|l, r| u64::wrapping_shl(l, r as u32)),
+                SR => self.binary_op(|l, r| u64::wrapping_shr(l, r as u32)),
+                SRS => self.binary_op(|l, r| i64::wrapping_shl(l, r as u32)),
                 CMP => {
                     // Compare a0 <=> a1
                     // < → -1
                     // > → 1
                     // = → 0
-                    let ParamBBB(tg, a0, a1) = param!(self, ParamBBB);
+                    let ParamBBB(tg, a0, a1) = self.decode();
                     self.write_reg(
                         tg,
-                        self.read_reg(a0).as_i64().cmp(&self.read_reg(a1).as_i64()) as i64,
+                        self.read_reg(a0)
+                            .cast::<i64>()
+                            .cmp(&self.read_reg(a1).cast::<i64>())
+                            as i64,
                     );
                 }
                 CMPU => {
                     // Unsigned comparsion
-                    let ParamBBB(tg, a0, a1) = param!(self, ParamBBB);
+                    let ParamBBB(tg, a0, a1) = self.decode();
                     self.write_reg(
                         tg,
-                        self.read_reg(a0).as_u64().cmp(&self.read_reg(a1).as_u64()) as i64,
+                        self.read_reg(a0)
+                            .cast::<u64>()
+                            .cmp(&self.read_reg(a1).cast::<u64>())
+                            as i64,
                     );
                 }
                 NOT => {
                     // Logical negation
-                    let param = param!(self, ParamBB);
-                    self.write_reg(param.0, !self.read_reg(param.1).as_u64());
+                    let ParamBB(tg, a0) = self.decode();
+                    self.write_reg(tg, !self.read_reg(a0).cast::<u64>());
                 }
                 NEG => {
                     // Bitwise negation
-                    let param = param!(self, ParamBB);
+                    let ParamBB(tg, a0) = self.decode();
                     self.write_reg(
-                        param.0,
-                        match self.read_reg(param.1).as_u64() {
+                        tg,
+                        match self.read_reg(a0).cast::<u64>() {
                             0 => 1_u64,
                             _ => 0,
                         },
@@ -228,38 +161,41 @@ impl<'a, PfHandler: HandlePageFault, const TIMER_QUOTIENT: usize>
                 }
                 DIR => {
                     // Fused Division-Remainder
-                    let ParamBBBB(dt, rt, a0, a1) = param!(self, ParamBBBB);
-                    let a0 = self.read_reg(a0).as_u64();
-                    let a1 = self.read_reg(a1).as_u64();
+                    let ParamBBBB(dt, rt, a0, a1) = self.decode();
+                    let a0 = self.read_reg(a0).cast::<u64>();
+                    let a1 = self.read_reg(a1).cast::<u64>();
                     self.write_reg(dt, a0.checked_div(a1).unwrap_or(u64::MAX));
                     self.write_reg(rt, a0.checked_rem(a1).unwrap_or(u64::MAX));
                 }
-                ADDI => binary_op_imm!(self, as_u64, ops::Add::add),
-                MULI => binary_op_imm!(self, as_u64, ops::Mul::mul),
-                ANDI => binary_op_imm!(self, as_u64, ops::BitAnd::bitand),
-                ORI => binary_op_imm!(self, as_u64, ops::BitOr::bitor),
-                XORI => binary_op_imm!(self, as_u64, ops::BitXor::bitxor),
-                SLI => binary_op_imm!(self, as_u64, u64::wrapping_shl, u32),
-                SRI => binary_op_imm!(self, as_u64, u64::wrapping_shr, u32),
-                SRSI => binary_op_imm!(self, as_i64, i64::wrapping_shr, u32),
+                ADDI => self.binary_op_imm(u64::wrapping_add),
+                MULI => self.binary_op_imm(u64::wrapping_sub),
+                ANDI => self.binary_op_imm::<u64>(ops::BitAnd::bitand),
+                ORI => self.binary_op_imm::<u64>(ops::BitOr::bitor),
+                XORI => self.binary_op_imm::<u64>(ops::BitXor::bitxor),
+                SLI => self.binary_op_ims(u64::wrapping_shl),
+                SRI => self.binary_op_ims(u64::wrapping_shr),
+                SRSI => self.binary_op_ims(i64::wrapping_shr),
                 CMPI => {
-                    let ParamBBD(tg, a0, imm) = param!(self, ParamBBD);
+                    let ParamBBD(tg, a0, imm) = self.decode();
                     self.write_reg(
                         tg,
-                        self.read_reg(a0).as_i64().cmp(&Value::from(imm).as_i64()) as i64,
+                        self.read_reg(a0)
+                            .cast::<i64>()
+                            .cmp(&Value::from(imm).cast::<i64>())
+                            as i64,
                     );
                 }
                 CMPUI => {
-                    let ParamBBD(tg, a0, imm) = param!(self, ParamBBD);
-                    self.write_reg(tg, self.read_reg(a0).as_u64().cmp(&imm) as i64);
+                    let ParamBBD(tg, a0, imm) = self.decode();
+                    self.write_reg(tg, self.read_reg(a0).cast::<u64>().cmp(&imm) as i64);
                 }
                 CP => {
-                    let param = param!(self, ParamBB);
-                    self.write_reg(param.0, self.read_reg(param.1));
+                    let ParamBB(tg, a0) = self.decode();
+                    self.write_reg(tg, self.read_reg(a0));
                 }
                 SWA => {
                     // Swap registers
-                    let ParamBB(r0, r1) = param!(self, ParamBB);
+                    let ParamBB(r0, r1) = self.decode();
                     match (r0, r1) {
                         (0, 0) => (),
                         (dst, 0) | (0, dst) => self.write_reg(dst, 0_u64),
@@ -272,19 +208,19 @@ impl<'a, PfHandler: HandlePageFault, const TIMER_QUOTIENT: usize>
                     }
                 }
                 LI => {
-                    let param = param!(self, ParamBD);
-                    self.write_reg(param.0, param.1);
+                    let ParamBD(tg, imm) = self.decode();
+                    self.write_reg(tg, imm);
                 }
                 LD => {
                     // Load. If loading more than register size, continue on adjecent registers
-                    let ParamBBDH(dst, base, off, count) = param!(self, ParamBBDH);
+                    let ParamBBDH(dst, base, off, count) = self.decode();
                     let n: usize = match dst {
                         0 => 1,
                         _ => 0,
                     };
                     self.memory.load(
-                        self.read_reg(base).as_u64() + off + n as u64,
+                        self.read_reg(base).cast::<u64>() + off + n as u64,
                         self.registers.as_mut_ptr().add(usize::from(dst) + n).cast(),
                         usize::from(count).saturating_sub(n),
                         &mut self.pfhandler,
@@ -292,9 +228,9 @@ impl<'a, PfHandler: HandlePageFault, const TIMER_QUOTIENT: usize>
                 }
                 ST => {
                     // Store. Same rules apply as to LD
-                    let ParamBBDH(dst, base, off, count) = param!(self, ParamBBDH);
+                    let ParamBBDH(dst, base, off, count) = self.decode();
                     self.memory.store(
-                        self.read_reg(base).as_u64() + off,
+                        self.read_reg(base).cast::<u64>() + off,
                         self.registers.as_ptr().add(usize::from(dst)).cast(),
                         count.into(),
                         &mut self.pfhandler,
@@ -302,17 +238,17 @@ impl<'a, PfHandler: HandlePageFault, const TIMER_QUOTIENT: usize>
                 }
                 BMC => {
                     // Block memory copy
-                    let ParamBBD(src, dst, count) = param!(self, ParamBBD);
+                    let ParamBBD(src, dst, count) = self.decode();
                     self.memory.block_copy(
-                        self.read_reg(src).as_u64(),
-                        self.read_reg(dst).as_u64(),
+                        self.read_reg(src).cast::<u64>(),
+                        self.read_reg(dst).cast::<u64>(),
                         count as _,
                         &mut self.pfhandler,
                     )?;
                 }
                 BRC => {
                     // Block register copy
-                    let ParamBBB(src, dst, count) = param!(self, ParamBBB);
+                    let ParamBBB(src, dst, count) = self.decode();
                     core::ptr::copy(
                         self.registers.get_unchecked(usize::from(src)),
                         self.registers.get_unchecked_mut(usize::from(dst)),
@@ -322,24 +258,24 @@ impl<'a, PfHandler: HandlePageFault, const TIMER_QUOTIENT: usize>
                 JAL => {
                     // Jump and link. Save PC after this instruction to
                     // specified register and jump to reg + offset.
-                    let ParamBBD(save, reg, offset) = param!(self, ParamBBD);
+                    let ParamBBD(save, reg, offset) = self.decode();
                     self.write_reg(save, self.pc as u64);
-                    self.pc = (self.read_reg(reg).as_u64() + offset) as usize;
+                    self.pc = (self.read_reg(reg).cast::<u64>() + offset) as usize;
                 }
                 // Conditional jumps, jump only to immediates
-                JEQ => cond_jump!(self, int, Equal),
+                JEQ => self.cond_jmp::<u64>(Ordering::Equal),
                 JNE => {
-                    let ParamBBD(a0, a1, jt) = param!(self, ParamBBD);
-                    if self.read_reg(a0).as_u64() != self.read_reg(a1).as_u64() {
+                    let ParamBBD(a0, a1, jt) = self.decode();
+                    if self.read_reg(a0).cast::<u64>() != self.read_reg(a1).cast::<u64>() {
                         self.pc = jt as usize;
                     }
                 }
-                JLT => cond_jump!(self, int, Less),
-                JGT => cond_jump!(self, int, Greater),
-                JLTU => cond_jump!(self, sint, Less),
-                JGTU => cond_jump!(self, sint, Greater),
+                JLT => self.cond_jmp::<u64>(Ordering::Less),
+                JGT => self.cond_jmp::<u64>(Ordering::Greater),
+                JLTU => self.cond_jmp::<i64>(Ordering::Less),
+                JGTU => self.cond_jmp::<i64>(Ordering::Greater),
                 ECALL => {
-                    param!(self, ());
+                    self.decode::<()>();
 
                     // So we don't get timer interrupt after ECALL
                     if TIMER_QUOTIENT != 0 {
@@ -347,38 +283,38 @@ impl<'a, PfHandler: HandlePageFault, const TIMER_QUOTIENT: usize>
                     }
 
                     return Ok(VmRunOk::Ecall);
                 }
-                ADDF => binary_op!(self, as_f64, ops::Add::add),
-                SUBF => binary_op!(self, as_f64, ops::Sub::sub),
-                MULF => binary_op!(self, as_f64, ops::Mul::mul),
+                ADDF => self.binary_op::<f64>(ops::Add::add),
+                SUBF => self.binary_op::<f64>(ops::Sub::sub),
+                MULF => self.binary_op::<f64>(ops::Mul::mul),
                 DIRF => {
-                    let ParamBBBB(dt, rt, a0, a1) = param!(self, ParamBBBB);
-                    let a0 = self.read_reg(a0).as_f64();
-                    let a1 = self.read_reg(a1).as_f64();
+                    let ParamBBBB(dt, rt, a0, a1) = self.decode();
+                    let a0 = self.read_reg(a0).cast::<f64>();
+                    let a1 = self.read_reg(a1).cast::<f64>();
                     self.write_reg(dt, a0 / a1);
                     self.write_reg(rt, a0 % a1);
                 }
                 FMAF => {
-                    let ParamBBBB(dt, a0, a1, a2) = param!(self, ParamBBBB);
+                    let ParamBBBB(dt, a0, a1, a2) = self.decode();
                     self.write_reg(
                         dt,
-                        self.read_reg(a0).as_f64() * self.read_reg(a1).as_f64()
-                            + self.read_reg(a2).as_f64(),
+                        self.read_reg(a0).cast::<f64>() * self.read_reg(a1).cast::<f64>()
+                            + self.read_reg(a2).cast::<f64>(),
                     );
                 }
                 NEGF => {
-                    let ParamBB(dt, a0) = param!(self, ParamBB);
-                    self.write_reg(dt, -self.read_reg(a0).as_f64());
+                    let ParamBB(dt, a0) = self.decode();
+                    self.write_reg(dt, -self.read_reg(a0).cast::<f64>());
                 }
                 ITF => {
-                    let ParamBB(dt, a0) = param!(self, ParamBB);
-                    self.write_reg(dt, self.read_reg(a0).as_i64() as f64);
+                    let ParamBB(dt, a0) = self.decode();
+                    self.write_reg(dt, self.read_reg(a0).cast::<i64>() as f64);
                 }
                 FTI => {
-                    let ParamBB(dt, a0) = param!(self, ParamBB);
-                    self.write_reg(dt, self.read_reg(a0).as_f64() as i64);
+                    let ParamBB(dt, a0) = self.decode();
+                    self.write_reg(dt, self.read_reg(a0).cast::<f64>() as i64);
                 }
-                ADDFI => binary_op_imm!(self, as_f64, ops::Add::add),
-                MULFI => binary_op_imm!(self, as_f64, ops::Mul::mul),
+                ADDFI => self.binary_op_imm::<f64>(ops::Add::add),
+                MULFI => self.binary_op_imm::<f64>(ops::Mul::mul),
                 op => return Err(VmRunError::InvalidOpcode(op)),
             }
         }
@@ -392,6 +328,55 @@ impl<'a, PfHandler: HandlePageFault, const TIMER_QUOTIENT: usize>
         }
     }
 
+    /// Decode instruction operands
+    #[inline]
+    unsafe fn decode<T: OpParam>(&mut self) -> T {
+        let data = self.program.as_ptr().add(self.pc + 1).cast::<T>().read();
+        self.pc += 1 + core::mem::size_of::<T>();
+        data
+    }
+
+    /// Perform binary operating over two registers
+    #[inline]
+    unsafe fn binary_op<T: ValueVariant>(&mut self, op: impl Fn(T, T) -> T) {
+        let ParamBBB(tg, a0, a1) = self.decode();
+        self.write_reg(
+            tg,
+            op(self.read_reg(a0).cast::<T>(), self.read_reg(a1).cast::<T>()),
+        );
+    }
+
+    /// Perform binary operation over register and immediate
+    #[inline]
+    unsafe fn binary_op_imm<T: ValueVariant>(&mut self, op: impl Fn(T, T) -> T) {
+        let ParamBBD(tg, reg, imm) = self.decode();
+        self.write_reg(
+            tg,
+            op(self.read_reg(reg).cast::<T>(), Value::from(imm).cast::<T>()),
+        );
+    }
+
+    /// Perform binary operation over register and shift immediate
+    #[inline]
+    unsafe fn binary_op_ims<T: ValueVariant>(&mut self, op: impl Fn(T, u32) -> T) {
+        let ParamBBW(tg, reg, imm) = self.decode();
+        self.write_reg(tg, op(self.read_reg(reg).cast::<T>(), imm));
+    }
+
+    /// Jump at `#3` if ordering on `#0 <=> #1` is equal to expected
+    #[inline]
+    unsafe fn cond_jmp<T: ValueVariant + Ord>(&mut self, expected: Ordering) {
+        let ParamBBD(a0, a1, ja) = self.decode();
+        if self
+            .read_reg(a0)
+            .cast::<T>()
+            .cmp(&self.read_reg(a1).cast::<T>())
+            == expected
+        {
+            self.pc = ja as usize;
+        }
+    }
+
     /// Read register
     #[inline]
     unsafe fn read_reg(&self, n: u8) -> Value {
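
The removed `param!`, `binary_op!`, `binary_op_imm!`, and `cond_jump!` macros are replaced by the generic `decode`, `binary_op*`, and `cond_jmp` methods added above, with closures taking over the old `$con:ty` cast parameter (see SL/SR/SRS). A minimal standalone sketch of the idea, a union-backed register value plus one generic helper; the types and numbers are illustrative, not the crate's API:

```rust
// Minimal standalone sketch of the macro-to-generic move: a union-backed
// register value, a `cast` helper, and one generic `binary_op`.
#[allow(dead_code)]
#[derive(Clone, Copy)]
union Value {
    u64: u64,
    i64: i64,
    f64: f64,
}

/// Types allowed to alias the 8 register bytes in this sketch.
trait ValueVariant: Copy {}
impl ValueVariant for u64 {}
impl ValueVariant for i64 {}
impl ValueVariant for f64 {}

impl Value {
    fn cast<T: ValueVariant>(self) -> T {
        // All variants here are exactly 8 bytes, so a bitwise reinterpret is fine.
        unsafe { core::mem::transmute_copy(&self) }
    }
}

fn binary_op<T: ValueVariant>(a: Value, b: Value, op: impl Fn(T, T) -> T) -> T {
    op(a.cast::<T>(), b.cast::<T>())
}

fn main() {
    let (a, b) = (Value { u64: 6 }, Value { u64: 7 });
    // ADD-style op: plain function item.
    assert_eq!(binary_op(a, b, u64::wrapping_mul), 42);
    // SL-style op: a closure adapts the u32 shift amount, replacing the old
    // macro's extra `$con:ty` cast parameter.
    assert_eq!(binary_op(a, b, |l: u64, r| u64::wrapping_shl(l, r as u32)), 768);
}
```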


@@ -1,6 +1,6 @@
 //! HoleyBytes register value definition
 
-use core::fmt::Debug;
+use sealed::sealed;
 
 /// Define [`Value`] union
 ///
@@ -16,15 +16,6 @@ macro_rules! value_def {
             $(pub $ty: $ty),*
         }
 
-        paste::paste! {
-            impl Value {$(
-                #[doc = "Byte-reinterpret [`Value`] as [`" $ty "`]"]
-                #[inline]
-                pub fn [<as_ $ty>](&self) -> $ty {
-                    unsafe { self.$ty }
-                }
-            )*}
-        }
-
         $(
             impl From<$ty> for Value {
@@ -33,16 +24,41 @@ macro_rules! value_def {
                     Self { $ty: value }
                 }
             }
+
+            static_assertions::const_assert_eq!(
+                core::mem::size_of::<$ty>(),
+                core::mem::size_of::<Value>(),
+            );
+
+            #[sealed]
+            unsafe impl ValueVariant for $ty {}
         )*
     };
 }
 
+impl Value {
+    #[inline]
+    pub fn cast<Variant: ValueVariant>(self) -> Variant {
+        union Transmute<Variant: ValueVariant> {
+            src: Value,
+            variant: Variant,
+        }
+
+        unsafe { Transmute { src: self }.variant }
+    }
+}
+
+/// # Safety
+/// - N/A, not to be implemented manually
+#[sealed]
+pub unsafe trait ValueVariant: Copy + Into<Value> {}
+
 value_def!(u64, i64, f64);
 static_assertions::const_assert_eq!(core::mem::size_of::<Value>(), 8);
 
-impl Debug for Value {
+impl core::fmt::Debug for Value {
     fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
         // Print formatted as hexadecimal, unsigned integer
-        write!(f, "{:x}", self.as_u64())
+        write!(f, "{:x}", self.cast::<u64>())
     }
 }
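
`ValueVariant` is sealed so downstream code cannot implement it for a type whose layout differs from the 8-byte register `Value`, which would make `cast` unsound. A hedged sketch of the pattern that the `sealed` crate's `#[sealed]` attribute automates here (the module and trait names below are illustrative, not the crate's generated internals):

```rust
// Hand-written sealed-trait pattern: the supertrait lives in a private
// module, so external impls of ValueVariant are impossible.
mod private {
    pub trait Sealed {}
    impl Sealed for u64 {}
    impl Sealed for i64 {}
    impl Sealed for f64 {}
}

/// Only the 8-byte variants chosen here may implement this.
pub unsafe trait ValueVariant: Copy + private::Sealed {}
unsafe impl ValueVariant for u64 {}
unsafe impl ValueVariant for i64 {}
unsafe impl ValueVariant for f64 {}

fn size_of_variant<T: ValueVariant>() -> usize {
    core::mem::size_of::<T>()
}

fn main() {
    assert_eq!(size_of_variant::<f64>(), 8);
}
```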


@@ -21,6 +21,7 @@
 | BBB  | 24 bits |
 | BBDH | 96 bits |
 | BBD  | 80 bits |
+| BBW  | 48 bits |
 | BB   | 16 bits |
 | BD   | 72 bits |
 | D    | 64 bits |
@@ -99,6 +100,11 @@
 | 18 | ANDI | Bitand |
 | 19 | ORI  | Bitor  |
 | 20 | XORI | Bitxor |
+
+### Bitshifts
+- Type BBW
+
+| Opcode | Name | Action |
+|:------:|:----:|:-----------------------:|
 | 21 | SLI  | Unsigned left bitshift  |
 | 22 | SRI  | Unsigned right bitshift |
 | 23 | SRSI | Signed right bitshift   |