Removed some macros

Authored by Erin on 2023-07-24 18:48:42 +02:00; committed by ondra05
parent df41adffde
commit ab4440ce3c
10 changed files with 238 additions and 197 deletions

Cargo.lock (generated): 19 lines changed
View file

@@ -130,9 +130,16 @@ dependencies = [
"hbbytecode",
"log",
"paste",
"sealed",
"static_assertions",
]
[[package]]
name = "heck"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
[[package]]
name = "lasso"
version = "0.7.2"
@@ -246,6 +253,18 @@ dependencies = [
"semver",
]
[[package]]
name = "sealed"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f4a8caec23b7800fb97971a1c6ae365b6239aaeddfb934d6265f8505e795699d"
dependencies = [
"heck",
"proc-macro2",
"quote",
"syn 2.0.25",
]
[[package]]
name = "semver"
version = "1.0.17"

View file

@@ -27,8 +27,10 @@ macros::impl_both!(
bbdh(p0: R, p1: R, p2: I, p3: u16)
=> [LD, ST],
bbd(p0: R, p1: R, p2: I)
=> [ADDI, MULI, ANDI, ORI, XORI, SLI, SRI, SRSI, CMPI, CMPUI,
BMC, JAL, JEQ, JNE, JLT, JGT, JLTU, JGTU, ADDFI, MULFI],
=> [ADDI, MULI, ANDI, ORI, XORI, CMPI, CMPUI, BMC, JAL, JEQ, JNE, JLT, JGT, JLTU,
JGTU, ADDFI, MULFI],
bbw(p0: R, p1: R, p2: u32)
=> [SLI, SRI, SRSI],
bb(p0: R, p1: R)
=> [NEG, NOT, CP, SWA, NEGF, ITF, FTI],
bd(p0: R, p1: I)

View file

@@ -215,8 +215,12 @@ macro_rules! extract_pat {
};
}
/// Extract operand from code
macro_rules! extract {
/// Generate extract macro
macro_rules! gen_extract {
// Integer types have same body
($($int:ident),* $(,)?) => {
/// Extract operand from code
macro_rules! extract {
// Register (require prefixing with r)
($self:expr, R, $id:ident) => {
extract_pat!($self, Token::Register($id));
@@ -233,19 +237,17 @@ macro_rules! extract {
};
};
// Get u8, if not fitting, the token is claimed invalid
($self:expr, u8, $id:ident) => {
// Get $int, if not fitting, the token is claimed invalid
$(($self:expr, $int, $id:ident) => {
extract_pat!($self, Token::Integer($id));
let $id = u8::try_from($id).map_err(|_| ErrorKind::InvalidToken)?;
};
// Get u16, if not fitting, the token is claimed invalid
($self:expr, u16, $id:ident) => {
extract_pat!($self, Token::Integer($id));
let $id = u16::try_from($id).map_err(|_| ErrorKind::InvalidToken)?;
let $id = $int::try_from($id).map_err(|_| ErrorKind::InvalidToken)?;
});*;
}
};
}
gen_extract!(u8, u16, u32);
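The integer arms above are stamped out by an outer macro so the u8/u16/u32 bodies are not repeated by hand. A minimal standalone sketch of that macro-generating-macro technique (toy names, converting a literal instead of parsing tokens):

```rust
// Outer macro emits one arm of the inner macro per listed integer type,
// mirroring the gen_extract!/extract! pair above. `narrow!` and the error
// string are invented for this sketch.
macro_rules! gen_narrow {
    ($($int:ident),* $(,)?) => {
        macro_rules! narrow {
            // `$val` is left unbound here on purpose: it passes through the
            // outer expansion and becomes the inner macro's metavariable.
            $(($val:expr, $int) => {
                $int::try_from($val).map_err(|_| "invalid token")
            });*;
        }
    };
}

gen_narrow!(u8, u16, u32);

fn main() -> Result<(), &'static str> {
    let a = narrow!(200_i128, u8)?;      // fits into u8
    let b = narrow!(70_000_i128, u32)?;  // fits into u32, would not fit into u16
    assert!(narrow!(300_i128, u8).is_err()); // out of range, reported as invalid token
    println!("{a} {b}");
    Ok(())
}
```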
/// Parameter extract incremental token-tree muncher
///
/// What else would it mean?

View file

@@ -44,6 +44,11 @@ struct hbbc_ParamBBD
typedef hbbc_ParamBBD;
static_assert(sizeof(hbbc_ParamBBD) == 80 / 8);
struct hbbc_ParamBBW
{ uint8_t _0; uint8_t _1; uint32_t _2; }
typedef hbbc_ParamBBW;
static_assert(sizeof(hbbc_ParamBBW) == 48 / 8);
struct hbbc_ParamBB
{ uint8_t _0; uint8_t _1; }
typedef hbbc_ParamBB;

View file

@@ -41,9 +41,9 @@ constmod!(pub opcode(u8) {
ANDI = 18, "BBD; #0 ← #1 & imm #2";
ORI = 19, "BBD; #0 ← #1 | imm #2";
XORI = 20, "BBD; #0 ← #1 ^ imm #2";
SLI = 21, "BBD; #0 ← #1 « imm #2";
SRI = 22, "BBD; #0 ← #1 » imm #2";
SRSI = 23, "BBD; #0 ← #1 » imm #2 (signed)";
SLI = 21, "BBW; #0 ← #1 « imm #2";
SRI = 22, "BBW; #0 ← #1 » imm #2";
SRSI = 23, "BBW; #0 ← #1 » imm #2 (signed)";
CMPI = 24, "BBD; #0 ← #1 <=> imm #2";
CMPUI = 25, "BBD; #0 ← #1 <=> imm #2 (unsigned)";
@@ -89,6 +89,9 @@ pub struct ParamBBDH(pub u8, pub u8, pub u64, pub u16);
#[repr(packed)]
pub struct ParamBBD(pub u8, pub u8, pub u64);
#[repr(packed)]
pub struct ParamBBW(pub u8, pub u8, pub u32);
#[repr(packed)]
pub struct ParamBB(pub u8, pub u8);
@@ -102,6 +105,7 @@ unsafe impl OpParam for ParamBBBB {}
unsafe impl OpParam for ParamBBB {}
unsafe impl OpParam for ParamBBDH {}
unsafe impl OpParam for ParamBBD {}
unsafe impl OpParam for ParamBBW {}
unsafe impl OpParam for ParamBB {}
unsafe impl OpParam for ParamBD {}
unsafe impl OpParam for u64 {}
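As a quick cross-check of the new 48-bit shape (a standalone sketch, not part of the crate), the packed Rust layout matches the `static_assert(sizeof(hbbc_ParamBBW) == 48 / 8)` in the generated C header and the BBW row in the spec table:

```rust
// New packed operand struct for the 32-bit shift immediate: 1 + 1 + 4 bytes.
#[repr(packed)]
struct ParamBBW(pub u8, pub u8, pub u32);

fn main() {
    // #[repr(packed)] removes padding, so the size is exactly 48 bits.
    assert_eq!(core::mem::size_of::<ParamBBW>(), 48 / 8);
    println!("ParamBBW is {} bytes", core::mem::size_of::<ParamBBW>());
}
```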

View file

@@ -13,4 +13,5 @@ hashbrown = "0.13"
hbbytecode.path = "../hbbytecode"
log = "0.4"
paste = "1.0"
sealed = "0.5"
static_assertions = "1.0"

View file

@@ -69,7 +69,8 @@ pub fn validate(mut program: &[u8]) -> Result<(), Error> {
| [ADD..=CMPU | BRC | ADDF..=MULF, _, _, _, rest @ ..]
| [NEG..=NOT | CP..=SWA | NEGF..=FTI, _, _, rest @ ..]
| [LI, _, _, _, _, _, _, _, _, _, rest @ ..]
| [ADDI..=CMPUI | BMC | JAL..=JGTU | ADDFI..=MULFI, _, _, _, _, _, _, _, _, _, _, rest @ ..]
| [ADDI..=XORI | CMPI..=CMPUI | BMC | JAL..=JGTU | ADDFI..=MULFI, _, _, _, _, _, _, _, _, _, _, rest @ ..]
| [SLI..=SRSI, _, _, _, _, rest @ ..]
| [LD..=ST, _, _, _, _, _, _, _, _, _, _, _, _, rest @ ..] => rest,
_ => {
return Err(Error {

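The validator consumes one instruction per arm: an opcode (or opcode range) followed by one `_` per operand byte, with the remaining bytes rebound as `rest`. A reduced standalone sketch of that slice-pattern technique, with invented opcode values rather than the real ISA numbers:

```rust
// Toy validator: each arm fixes the instruction length, `rest` carries on.
const NOP: u8 = 1;   // opcode values invented for this sketch
const ADDI: u8 = 13;
const XORI: u8 = 20;

fn skip_one(program: &[u8]) -> Option<&[u8]> {
    match program {
        // no operands
        [NOP, rest @ ..] => Some(rest),
        // BBD shape: 2 register bytes + 8 immediate bytes after the opcode
        [ADDI..=XORI, _, _, _, _, _, _, _, _, _, _, rest @ ..] => Some(rest),
        _ => None,
    }
}

fn main() {
    let prog = [NOP, ADDI, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0];
    let mut rest: &[u8] = &prog;
    while !rest.is_empty() {
        match skip_one(rest) {
            Some(r) => rest = r,
            None => panic!("truncated or unknown instruction"),
        }
    }
    println!("program validated");
}
```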
View file

@@ -7,91 +7,18 @@
// - Instructions have to be valid as specified (values and sizes)
// - Mapped pages should be at least 4 KiB
use self::mem::HandlePageFault;
pub mod mem;
pub mod value;
use {
self::{mem::HandlePageFault, value::ValueVariant},
crate::validate,
core::ops,
hbbytecode::{OpParam, ParamBB, ParamBBB, ParamBBBB, ParamBBD, ParamBBDH, ParamBD},
core::{cmp::Ordering, ops},
hbbytecode::{OpParam, ParamBB, ParamBBB, ParamBBBB, ParamBBD, ParamBBDH, ParamBBW, ParamBD},
mem::Memory,
static_assertions::assert_impl_one,
value::Value,
};
/// Extract a parameter from program
macro_rules! param {
($self:expr, $ty:ty) => {{
assert_impl_one!($ty: OpParam);
let data = $self
.program
.as_ptr()
.add($self.pc + 1)
.cast::<$ty>()
.read();
$self.pc += 1 + core::mem::size_of::<$ty>();
data
}};
}
/// Perform binary operation `#0 ← #1 OP #2`
macro_rules! binary_op {
($self:expr, $ty:ident, $handler:expr) => {{
let ParamBBB(tg, a0, a1) = param!($self, ParamBBB);
$self.write_reg(
tg,
$handler(
Value::$ty(&$self.read_reg(a0)),
Value::$ty(&$self.read_reg(a1)),
),
);
}};
($self:expr, $ty:ident, $handler:expr, $con:ty) => {{
let ParamBBB(tg, a0, a1) = param!($self, ParamBBB);
$self.write_reg(
tg,
$handler(
Value::$ty(&$self.read_reg(a0)),
Value::$ty(&$self.read_reg(a1)) as $con,
),
);
}};
}
/// Perform binary operation with immediate `#0 ← #1 OP imm #2`
macro_rules! binary_op_imm {
($self:expr, $ty:ident, $handler:expr) => {{
let ParamBBD(tg, a0, imm) = param!($self, ParamBBD);
$self.write_reg(
tg,
$handler(Value::$ty(&$self.read_reg(a0)), Value::$ty(&imm.into())),
);
}};
($self:expr, $ty:ident, $handler:expr, $con:ty) => {{
let ParamBBD(tg, a0, imm) = param!($self, ParamBBD);
$self.write_reg(
tg,
$handler(Value::$ty(&$self.read_reg(a0)), Value::$ty(&imm.into()) as $con),
);
}};
}
/// Jump at `#3` if ordering on `#0 <=> #1` is equal to expected
macro_rules! cond_jump {
($self:expr, $ty:ident, $expected:ident) => {{
let ParamBBD(a0, a1, jt) = param!($self, ParamBBD);
if core::cmp::Ord::cmp(&$self.read_reg(a0).as_u64(), &$self.read_reg(a1).as_u64())
== core::cmp::Ordering::$expected
{
$self.pc = jt as usize;
}
}};
}
/// HoleyBytes Virtual Machine
pub struct Vm<'a, PfHandler, const TIMER_QUOTIENT: usize> {
/// Holds 256 registers
@@ -107,7 +34,7 @@ pub struct Vm<'a, PfHandler, const TIMER_QUOTIENT: usize> {
pub pfhandler: PfHandler,
/// Program counter
pc: usize,
pub pc: usize,
/// Program
program: &'a [u8],
@@ -177,50 +104,56 @@ impl<'a, PfHandler: HandlePageFault, const TIMER_QUOTIENT: usize>
unsafe {
match *self.program.get_unchecked(self.pc) {
UN => {
param!(self, ());
self.decode::<()>();
return Err(VmRunError::Unreachable);
}
NOP => param!(self, ()),
ADD => binary_op!(self, as_u64, u64::wrapping_add),
SUB => binary_op!(self, as_u64, u64::wrapping_sub),
MUL => binary_op!(self, as_u64, u64::wrapping_mul),
AND => binary_op!(self, as_u64, ops::BitAnd::bitand),
OR => binary_op!(self, as_u64, ops::BitOr::bitor),
XOR => binary_op!(self, as_u64, ops::BitXor::bitxor),
SL => binary_op!(self, as_u64, u64::wrapping_shl, u32),
SR => binary_op!(self, as_u64, u64::wrapping_shr, u32),
SRS => binary_op!(self, as_i64, i64::wrapping_shr, u32),
NOP => self.decode::<()>(),
ADD => self.binary_op(u64::wrapping_add),
SUB => self.binary_op(u64::wrapping_sub),
MUL => self.binary_op(u64::wrapping_mul),
AND => self.binary_op::<u64>(ops::BitAnd::bitand),
OR => self.binary_op::<u64>(ops::BitOr::bitor),
XOR => self.binary_op::<u64>(ops::BitXor::bitxor),
SL => self.binary_op(|l, r| u64::wrapping_shl(l, r as u32)),
SR => self.binary_op(|l, r| u64::wrapping_shr(l, r as u32)),
SRS => self.binary_op(|l, r| i64::wrapping_shl(l, r as u32)),
CMP => {
// Compare a0 <=> a1
// < → -1
// > → 1
// = → 0
let ParamBBB(tg, a0, a1) = param!(self, ParamBBB);
let ParamBBB(tg, a0, a1) = self.decode();
self.write_reg(
tg,
self.read_reg(a0).as_i64().cmp(&self.read_reg(a1).as_i64()) as i64,
self.read_reg(a0)
.cast::<i64>()
.cmp(&self.read_reg(a1).cast::<i64>())
as i64,
);
}
CMPU => {
// Unsigned comparison
let ParamBBB(tg, a0, a1) = param!(self, ParamBBB);
let ParamBBB(tg, a0, a1) = self.decode();
self.write_reg(
tg,
self.read_reg(a0).as_u64().cmp(&self.read_reg(a1).as_u64()) as i64,
self.read_reg(a0)
.cast::<u64>()
.cmp(&self.read_reg(a1).cast::<u64>())
as i64,
);
}
NOT => {
// Logical negation
let param = param!(self, ParamBB);
self.write_reg(param.0, !self.read_reg(param.1).as_u64());
let ParamBB(tg, a0) = self.decode();
self.write_reg(tg, !self.read_reg(a0).cast::<u64>());
}
NEG => {
// Bitwise negation
let param = param!(self, ParamBB);
let ParamBB(tg, a0) = self.decode();
self.write_reg(
param.0,
match self.read_reg(param.1).as_u64() {
tg,
match self.read_reg(a0).cast::<u64>() {
0 => 1_u64,
_ => 0,
},
@@ -228,38 +161,41 @@ impl<'a, PfHandler: HandlePageFault, const TIMER_QUOTIENT: usize>
}
DIR => {
// Fused Division-Remainder
let ParamBBBB(dt, rt, a0, a1) = param!(self, ParamBBBB);
let a0 = self.read_reg(a0).as_u64();
let a1 = self.read_reg(a1).as_u64();
let ParamBBBB(dt, rt, a0, a1) = self.decode();
let a0 = self.read_reg(a0).cast::<u64>();
let a1 = self.read_reg(a1).cast::<u64>();
self.write_reg(dt, a0.checked_div(a1).unwrap_or(u64::MAX));
self.write_reg(rt, a0.checked_rem(a1).unwrap_or(u64::MAX));
}
ADDI => binary_op_imm!(self, as_u64, ops::Add::add),
MULI => binary_op_imm!(self, as_u64, ops::Mul::mul),
ANDI => binary_op_imm!(self, as_u64, ops::BitAnd::bitand),
ORI => binary_op_imm!(self, as_u64, ops::BitOr::bitor),
XORI => binary_op_imm!(self, as_u64, ops::BitXor::bitxor),
SLI => binary_op_imm!(self, as_u64, u64::wrapping_shl, u32),
SRI => binary_op_imm!(self, as_u64, u64::wrapping_shr, u32),
SRSI => binary_op_imm!(self, as_i64, i64::wrapping_shr, u32),
ADDI => self.binary_op_imm(u64::wrapping_add),
MULI => self.binary_op_imm(u64::wrapping_sub),
ANDI => self.binary_op_imm::<u64>(ops::BitAnd::bitand),
ORI => self.binary_op_imm::<u64>(ops::BitOr::bitor),
XORI => self.binary_op_imm::<u64>(ops::BitXor::bitxor),
SLI => self.binary_op_ims(u64::wrapping_shl),
SRI => self.binary_op_ims(u64::wrapping_shr),
SRSI => self.binary_op_ims(i64::wrapping_shr),
CMPI => {
let ParamBBD(tg, a0, imm) = param!(self, ParamBBD);
let ParamBBD(tg, a0, imm) = self.decode();
self.write_reg(
tg,
self.read_reg(a0).as_i64().cmp(&Value::from(imm).as_i64()) as i64,
self.read_reg(a0)
.cast::<i64>()
.cmp(&Value::from(imm).cast::<i64>())
as i64,
);
}
CMPUI => {
let ParamBBD(tg, a0, imm) = param!(self, ParamBBD);
self.write_reg(tg, self.read_reg(a0).as_u64().cmp(&imm) as i64);
let ParamBBD(tg, a0, imm) = self.decode();
self.write_reg(tg, self.read_reg(a0).cast::<u64>().cmp(&imm) as i64);
}
CP => {
let param = param!(self, ParamBB);
self.write_reg(param.0, self.read_reg(param.1));
let ParamBB(tg, a0) = self.decode();
self.write_reg(tg, self.read_reg(a0));
}
SWA => {
// Swap registers
let ParamBB(r0, r1) = param!(self, ParamBB);
let ParamBB(r0, r1) = self.decode();
match (r0, r1) {
(0, 0) => (),
(dst, 0) | (0, dst) => self.write_reg(dst, 0_u64),
@@ -272,19 +208,19 @@ impl<'a, PfHandler: HandlePageFault, const TIMER_QUOTIENT: usize>
}
}
LI => {
let param = param!(self, ParamBD);
self.write_reg(param.0, param.1);
let ParamBD(tg, imm) = self.decode();
self.write_reg(tg, imm);
}
LD => {
// Load. If loading more than register size, continue on adjacent registers
let ParamBBDH(dst, base, off, count) = param!(self, ParamBBDH);
let ParamBBDH(dst, base, off, count) = self.decode();
let n: usize = match dst {
0 => 1,
_ => 0,
};
self.memory.load(
self.read_reg(base).as_u64() + off + n as u64,
self.read_reg(base).cast::<u64>() + off + n as u64,
self.registers.as_mut_ptr().add(usize::from(dst) + n).cast(),
usize::from(count).saturating_sub(n),
&mut self.pfhandler,
@@ -292,9 +228,9 @@ impl<'a, PfHandler: HandlePageFault, const TIMER_QUOTIENT: usize>
}
ST => {
// Store. Same rules apply as to LD
let ParamBBDH(dst, base, off, count) = param!(self, ParamBBDH);
let ParamBBDH(dst, base, off, count) = self.decode();
self.memory.store(
self.read_reg(base).as_u64() + off,
self.read_reg(base).cast::<u64>() + off,
self.registers.as_ptr().add(usize::from(dst)).cast(),
count.into(),
&mut self.pfhandler,
@@ -302,17 +238,17 @@ impl<'a, PfHandler: HandlePageFault, const TIMER_QUOTIENT: usize>
}
BMC => {
// Block memory copy
let ParamBBD(src, dst, count) = param!(self, ParamBBD);
let ParamBBD(src, dst, count) = self.decode();
self.memory.block_copy(
self.read_reg(src).as_u64(),
self.read_reg(dst).as_u64(),
self.read_reg(src).cast::<u64>(),
self.read_reg(dst).cast::<u64>(),
count as _,
&mut self.pfhandler,
)?;
}
BRC => {
// Block register copy
let ParamBBB(src, dst, count) = param!(self, ParamBBB);
let ParamBBB(src, dst, count) = self.decode();
core::ptr::copy(
self.registers.get_unchecked(usize::from(src)),
self.registers.get_unchecked_mut(usize::from(dst)),
@@ -322,24 +258,24 @@ impl<'a, PfHandler: HandlePageFault, const TIMER_QUOTIENT: usize>
JAL => {
// Jump and link. Save PC after this instruction to
// specified register and jump to reg + offset.
let ParamBBD(save, reg, offset) = param!(self, ParamBBD);
let ParamBBD(save, reg, offset) = self.decode();
self.write_reg(save, self.pc as u64);
self.pc = (self.read_reg(reg).as_u64() + offset) as usize;
self.pc = (self.read_reg(reg).cast::<u64>() + offset) as usize;
}
// Conditional jumps, jump only to immediates
JEQ => cond_jump!(self, int, Equal),
JEQ => self.cond_jmp::<u64>(Ordering::Equal),
JNE => {
let ParamBBD(a0, a1, jt) = param!(self, ParamBBD);
if self.read_reg(a0).as_u64() != self.read_reg(a1).as_u64() {
let ParamBBD(a0, a1, jt) = self.decode();
if self.read_reg(a0).cast::<u64>() != self.read_reg(a1).cast::<u64>() {
self.pc = jt as usize;
}
}
JLT => cond_jump!(self, int, Less),
JGT => cond_jump!(self, int, Greater),
JLTU => cond_jump!(self, sint, Less),
JGTU => cond_jump!(self, sint, Greater),
JLT => self.cond_jmp::<u64>(Ordering::Less),
JGT => self.cond_jmp::<u64>(Ordering::Greater),
JLTU => self.cond_jmp::<i64>(Ordering::Less),
JGTU => self.cond_jmp::<i64>(Ordering::Greater),
ECALL => {
param!(self, ());
self.decode::<()>();
// So we don't get timer interrupt after ECALL
if TIMER_QUOTIENT != 0 {
@@ -347,38 +283,38 @@ impl<'a, PfHandler: HandlePageFault, const TIMER_QUOTIENT: usize>
}
return Ok(VmRunOk::Ecall);
}
ADDF => binary_op!(self, as_f64, ops::Add::add),
SUBF => binary_op!(self, as_f64, ops::Sub::sub),
MULF => binary_op!(self, as_f64, ops::Mul::mul),
ADDF => self.binary_op::<f64>(ops::Add::add),
SUBF => self.binary_op::<f64>(ops::Sub::sub),
MULF => self.binary_op::<f64>(ops::Mul::mul),
DIRF => {
let ParamBBBB(dt, rt, a0, a1) = param!(self, ParamBBBB);
let a0 = self.read_reg(a0).as_f64();
let a1 = self.read_reg(a1).as_f64();
let ParamBBBB(dt, rt, a0, a1) = self.decode();
let a0 = self.read_reg(a0).cast::<f64>();
let a1 = self.read_reg(a1).cast::<f64>();
self.write_reg(dt, a0 / a1);
self.write_reg(rt, a0 % a1);
}
FMAF => {
let ParamBBBB(dt, a0, a1, a2) = param!(self, ParamBBBB);
let ParamBBBB(dt, a0, a1, a2) = self.decode();
self.write_reg(
dt,
self.read_reg(a0).as_f64() * self.read_reg(a1).as_f64()
+ self.read_reg(a2).as_f64(),
self.read_reg(a0).cast::<f64>() * self.read_reg(a1).cast::<f64>()
+ self.read_reg(a2).cast::<f64>(),
);
}
NEGF => {
let ParamBB(dt, a0) = param!(self, ParamBB);
self.write_reg(dt, -self.read_reg(a0).as_f64());
let ParamBB(dt, a0) = self.decode();
self.write_reg(dt, -self.read_reg(a0).cast::<f64>());
}
ITF => {
let ParamBB(dt, a0) = param!(self, ParamBB);
self.write_reg(dt, self.read_reg(a0).as_i64() as f64);
let ParamBB(dt, a0) = self.decode();
self.write_reg(dt, self.read_reg(a0).cast::<i64>() as f64);
}
FTI => {
let ParamBB(dt, a0) = param!(self, ParamBB);
self.write_reg(dt, self.read_reg(a0).as_f64() as i64);
let ParamBB(dt, a0) = self.decode();
self.write_reg(dt, self.read_reg(a0).cast::<f64>() as i64);
}
ADDFI => binary_op_imm!(self, as_f64, ops::Add::add),
MULFI => binary_op_imm!(self, as_f64, ops::Mul::mul),
ADDFI => self.binary_op_imm::<f64>(ops::Add::add),
MULFI => self.binary_op_imm::<f64>(ops::Mul::mul),
op => return Err(VmRunError::InvalidOpcode(op)),
}
}
@@ -392,6 +328,55 @@ impl<'a, PfHandler: HandlePageFault, const TIMER_QUOTIENT: usize>
}
}
/// Decode instruction operands
#[inline]
unsafe fn decode<T: OpParam>(&mut self) -> T {
let data = self.program.as_ptr().add(self.pc + 1).cast::<T>().read();
self.pc += 1 + core::mem::size_of::<T>();
data
}
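A minimal standalone sketch of this pointer-cast decode, assuming a packed operand struct and using SLI (21) from the opcode table above; buffer contents and names are illustrative, not the crate's API:

```rust
// Read a packed operand struct straight out of the byte stream and advance
// pc past the opcode byte plus the operand bytes, as `decode` does above.
#[repr(packed)]
#[derive(Clone, Copy)]
struct ParamBBW(u8, u8, u32);

fn main() {
    // Hypothetical encoding of "SLI r2, r1, 4": opcode 21, then a 6-byte ParamBBW
    // (immediate in native byte order, shown here for a little-endian target).
    let program: &[u8] = &[21, 2, 1, 4, 0, 0, 0];
    let mut pc = 0usize;

    // #[repr(packed)] gives the struct alignment 1, so the plain `read` used by
    // the VM is valid here; `read_unaligned` would be the conservative choice.
    let param = unsafe { program.as_ptr().add(pc + 1).cast::<ParamBBW>().read() };
    pc += 1 + core::mem::size_of::<ParamBBW>();

    let ParamBBW(tg, src, imm) = param;
    println!("tg=r{tg}, src=r{src}, imm={imm}, next pc={pc}");
}
```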
/// Perform binary operation over two registers
#[inline]
unsafe fn binary_op<T: ValueVariant>(&mut self, op: impl Fn(T, T) -> T) {
let ParamBBB(tg, a0, a1) = self.decode();
self.write_reg(
tg,
op(self.read_reg(a0).cast::<T>(), self.read_reg(a1).cast::<T>()),
);
}
/// Perform binary operation over register and immediate
#[inline]
unsafe fn binary_op_imm<T: ValueVariant>(&mut self, op: impl Fn(T, T) -> T) {
let ParamBBD(tg, reg, imm) = self.decode();
self.write_reg(
tg,
op(self.read_reg(reg).cast::<T>(), Value::from(imm).cast::<T>()),
);
}
/// Perform binary operation over register and shift immediate
#[inline]
unsafe fn binary_op_ims<T: ValueVariant>(&mut self, op: impl Fn(T, u32) -> T) {
let ParamBBW(tg, reg, imm) = self.decode();
self.write_reg(tg, op(self.read_reg(reg).cast::<T>(), imm));
}
/// Jump at `#3` if ordering on `#0 <=> #1` is equal to expected
#[inline]
unsafe fn cond_jmp<T: ValueVariant + Ord>(&mut self, expected: Ordering) {
let ParamBBD(a0, a1, ja) = self.decode();
if self
.read_reg(a0)
.cast::<T>()
.cmp(&self.read_reg(a1).cast::<T>())
== expected
{
self.pc = ja as usize;
}
}
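These helpers follow one pattern: decode the operand struct, apply a closure over the register values, write the result back. A reduced standalone sketch of that closure-based replacement for the removed `binary_op!`/`binary_op_imm!` macros (the register file and conversions here are toy stand-ins, not the VM's `Value` machinery):

```rust
// Toy register file demonstrating one generic helper plus closures in place of
// per-type macro arms.
struct Regs([u64; 256]);

impl Regs {
    fn binary_op<T>(&mut self, tg: u8, a0: u8, a1: u8, op: impl Fn(T, T) -> T)
    where
        T: Copy + From<u64> + Into<u64>, // stand-in for Value::cast
    {
        let l = T::from(self.0[a0 as usize]);
        let r = T::from(self.0[a1 as usize]);
        self.0[tg as usize] = op(l, r).into();
    }
}

fn main() {
    let mut regs = Regs([0; 256]);
    regs.0[1] = 7;
    regs.0[2] = 5;
    regs.binary_op::<u64>(3, 1, 2, u64::wrapping_add);                     // ADD-style
    regs.binary_op::<u64>(4, 1, 2, |l, r| u64::wrapping_shl(l, r as u32)); // SL-style
    assert_eq!(regs.0[3], 12);
    assert_eq!(regs.0[4], 7 << 5);
}
```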
/// Read register
#[inline]
unsafe fn read_reg(&self, n: u8) -> Value {

View file

@@ -1,6 +1,6 @@
//! HoleyBytes register value definition
use core::fmt::Debug;
use sealed::sealed;
/// Define [`Value`] union
///
@@ -16,15 +16,6 @@ macro_rules! value_def {
$(pub $ty: $ty),*
}
paste::paste! {
impl Value {$(
#[doc = "Byte-reinterpret [`Value`] as [`" $ty "`]"]
#[inline]
pub fn [<as_ $ty>](&self) -> $ty {
unsafe { self.$ty }
}
)*}
}
$(
impl From<$ty> for Value {
@@ -33,16 +24,41 @@ macro_rules! value_def {
Self { $ty: value }
}
}
static_assertions::const_assert_eq!(
core::mem::size_of::<$ty>(),
core::mem::size_of::<Value>(),
);
#[sealed]
unsafe impl ValueVariant for $ty {}
)*
};
}
impl Value {
#[inline]
pub fn cast<Variant: ValueVariant>(self) -> Variant {
union Transmute<Variant: ValueVariant> {
src: Value,
variant: Variant,
}
unsafe { Transmute { src: self }.variant }
}
}
/// # Safety
/// - N/A, not to be implemented manually
#[sealed]
pub unsafe trait ValueVariant: Copy + Into<Value> {}
value_def!(u64, i64, f64);
static_assertions::const_assert_eq!(core::mem::size_of::<Value>(), 8);
impl Debug for Value {
impl core::fmt::Debug for Value {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
// Print formatted as hexadecimal, unsigned integer
write!(f, "{:x}", self.as_u64())
write!(f, "{:x}", self.cast::<u64>())
}
}
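A standalone sketch of the union-based `cast` introduced above; the `#[sealed]` attribute, the `Into<Value>` bound, and the `value_def!` macro plumbing are left out, so this is illustrative rather than the crate's exact definition:

```rust
// 8-byte value union reinterpreted between its variants via a nested union,
// mirroring the `cast` method shown in the diff.
#[derive(Clone, Copy)]
pub union Value {
    pub u64: u64,
    pub i64: i64,
    pub f64: f64,
}

/// # Safety
/// Only for types with the same size and validity as `Value` (here u64/i64/f64).
pub unsafe trait ValueVariant: Copy {}
unsafe impl ValueVariant for u64 {}
unsafe impl ValueVariant for i64 {}
unsafe impl ValueVariant for f64 {}

impl Value {
    #[inline]
    pub fn cast<V: ValueVariant>(self) -> V {
        // Reinterpret the same 8 bytes as the requested variant.
        union Transmute<V: ValueVariant> {
            src: Value,
            variant: V,
        }
        unsafe { Transmute { src: self }.variant }
    }
}

fn main() {
    let v = Value { i64: -1 };
    assert_eq!(v.cast::<u64>(), u64::MAX); // same bits, unsigned view
    println!("{:x}", v.cast::<u64>());
}
```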

View file

@@ -21,6 +21,7 @@
| BBB | 24 bits |
| BBDH | 96 bits |
| BBD | 80 bits |
| BBW | 48 bits |
| BB | 16 bits |
| BD | 72 bits |
| D | 64 bits |
@@ -99,6 +100,11 @@
| 18 | ANDI | Bitand |
| 19 | ORI | Bitor |
| 20 | XORI | Bitxor |
### Bitshifts
- Type BBW
| Opcode | Name | Action |
|:------:|:----:|:-----------------------:|
| 21 | SLI | Unsigned left bitshift |
| 22 | SRI | Unsigned right bitshift |
| 23 | SRSI | Signed right bitshift |
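A small illustrative sketch of the semantics in this table, using the same wrapping shifts the VM helpers apply (register and immediate values invented for the example):

```rust
// SLI / SRI / SRSI: shift #1 by the 32-bit immediate into #0.
fn main() {
    let r1: u64 = 0b1011;
    let imm: u32 = 4;

    let sli = r1.wrapping_shl(imm);      // #0 ← #1 « imm #2
    let sri = r1.wrapping_shr(1);        // #0 ← #1 » imm #2
    let srsi = (-8i64).wrapping_shr(1);  // #0 ← #1 » imm #2, sign-preserving

    assert_eq!(sli, 0b1011_0000);
    assert_eq!(sri, 0b101);
    assert_eq!(srsi, -4);
    println!("{sli} {sri} {srsi}");
}
```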