Added relaxed relative 16 bit instructions

parent c1905062c4
commit 354aac2d2c
@@ -35,7 +35,7 @@
 30, LD, RRAH, "Load from absolute address" ;
 31, ST, RRAH, "Store to absolute address" ;
 32, LDR, RROH, "Load from relative address" ;
-33, STR, RROH, "Store to absolute address" ;
+33, STR, RROH, "Store to relative address" ;
 34, BMC, RRH, "Copy block of memory" ;
 35, BRC, RRB, "Copy register block" ;
 
@@ -62,3 +62,8 @@
 
 55, ADDFI, RRD, "Floating addition with immediate" ;
 56, MULFI, RRD, "Floating multiplication with immediate";
+
+57, LRA16 , RRP, "Load relative immediate (16 bit)" ;
+58, LDR16 , RRPH, "Load from relative address (16 bit)" ;
+59, STR16 , RRPH, "Store to relative address (16 bit)" ;
+60, JMPR16, P, "Relative jump (16 bit)" ;
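Each row in this table reads as (opcode, mnemonic, operand-format code, description). The four new entries are 16-bit-offset counterparts of the existing relative instructions; the shorter P offset presumably exists so that nearby, pc-relative accesses can be relaxed into a more compact encoding. A rough size comparison, using assumed operand widths (OpR = 1 byte, OpO = 4, OpP = 2, OpH = 2 are illustrative guesses; the authoritative sizes are the Op* definitions in hbbytecode):

    // Assumed operand widths in bytes, for illustration only.
    const RROH_BYTES: usize = 1 + 1 + 4 + 2; // LDR / STR operand tuple
    const RRPH_BYTES: usize = 1 + 1 + 2 + 2; // LDR16 / STR16 operand tuple

    fn main() {
        // One opcode byte plus the operand tuple.
        println!("LDR   encodes to {} bytes", 1 + RROH_BYTES);
        println!("LDR16 encodes to {} bytes", 1 + RRPH_BYTES);
    }

Under those assumptions the 16-bit forms save two bytes per memory access.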
@@ -35,6 +35,7 @@ define_items! {
     OpsRRD  (OpR, OpR, OpD     ),
     OpsRRAH (OpR, OpR, OpA, OpH),
     OpsRROH (OpR, OpR, OpO, OpH),
+    OpsRRPH (OpR, OpR, OpP, OpH),
     OpsRRO  (OpR, OpR, OpO     ),
     OpsRRP  (OpR, OpR, OpP     ),
 }
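OpsRRPH is the operand tuple used by LDR16 and STR16 below: two register indices, a 16-bit relative offset (OpP) and a transfer count (OpH). A minimal decoding sketch, assuming little-endian operands and field widths of u8/u8/u16/u16 (the real layout is whatever define_items! generates):

    #[derive(Clone, Copy, Debug)]
    struct Rrph {
        dst: u8,
        base: u8,
        off: u16,
        count: u16,
    }

    // Hypothetical decoder for an RRPH operand tuple laid out as
    // [dst][base][off lo][off hi][count lo][count hi].
    fn decode_rrph(bytes: &[u8]) -> Option<Rrph> {
        let b: &[u8; 6] = bytes.get(..6)?.try_into().ok()?;
        Some(Rrph {
            dst: b[0],
            base: b[1],
            off: u16::from_le_bytes([b[2], b[3]]),
            count: u16::from_le_bytes([b[4], b[5]]),
        })
    }

    fn main() {
        // dst = r1, base = r2, off = 0x0010, count = 8
        let raw = [1u8, 2, 0x10, 0x00, 0x08, 0x00];
        println!("{:?}", decode_rrph(&raw));
    }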
@@ -42,6 +43,7 @@ define_items! {
 unsafe impl BytecodeItem for OpA {}
 unsafe impl BytecodeItem for OpB {}
 unsafe impl BytecodeItem for OpO {}
+unsafe impl BytecodeItem for OpP {}
 unsafe impl BytecodeItem for () {}
 
 ::with_builtin_macros::with_builtin! {
@@ -12,8 +12,8 @@ use {
     crate::mem::{addr::AddressOp, Address},
     core::{cmp::Ordering, mem::size_of, ops},
     hbbytecode::{
-        BytecodeItem, OpA, OpO, OpsRD, OpsRR, OpsRRAH, OpsRRB, OpsRRD, OpsRRH, OpsRRO, OpsRROH,
-        OpsRRP, OpsRRR, OpsRRRR, OpsRRW,
+        BytecodeItem, OpA, OpO, OpP, OpsRD, OpsRR, OpsRRAH, OpsRRB, OpsRRD, OpsRRH, OpsRRO,
+        OpsRROH, OpsRRP, OpsRRPH, OpsRRR, OpsRRRR, OpsRRW,
     },
 };
 
@@ -163,64 +163,20 @@ where
             LD => {
                 // Load. If loading more than register size, continue on adjacent registers
                 let OpsRRAH(dst, base, off, count) = self.decode();
-                let n: u8 = match dst {
-                    0 => 1,
-                    _ => 0,
-                };
-
-                self.memory.load(
-                    self.ldst_addr_uber(dst, base, off, count, n)?,
-                    self.registers
-                        .as_mut_ptr()
-                        .add(usize::from(dst) + usize::from(n))
-                        .cast(),
-                    usize::from(count).wrapping_sub(n.into()),
-                )?;
+                self.load(dst, base, off, count)?;
             }
             ST => {
                 // Store. Same rules apply as to LD
                 let OpsRRAH(dst, base, off, count) = self.decode();
-                self.memory.store(
-                    self.ldst_addr_uber(dst, base, off, count, 0)?,
-                    self.registers.as_ptr().add(usize::from(dst)).cast(),
-                    count.into(),
-                )?;
+                self.store(dst, base, off, count)?;
             }
             LDR => {
                 let OpsRROH(dst, base, off, count) = self.decode();
-                let n: u8 = match dst {
-                    0 => 1,
-                    _ => 0,
-                };
-
-                self.memory.load(
-                    self.ldst_addr_uber(
-                        dst,
-                        base,
-                        u64::from(off).wrapping_add(self.pc.get()),
-                        count,
-                        n,
-                    )?,
-                    self.registers
-                        .as_mut_ptr()
-                        .add(usize::from(dst) + usize::from(n))
-                        .cast(),
-                    usize::from(count).wrapping_sub(n.into()),
-                )?;
+                self.load(dst, base, u64::from(off).wrapping_add(self.pc.get()), count)?;
             }
             STR => {
                 let OpsRROH(dst, base, off, count) = self.decode();
-                self.memory.store(
-                    self.ldst_addr_uber(
-                        dst,
-                        base,
-                        u64::from(off).wrapping_add(self.pc.get()),
-                        count,
-                        0,
-                    )?,
-                    self.registers.as_ptr().add(usize::from(dst)).cast(),
-                    count.into(),
-                )?;
+                self.store(dst, base, u64::from(off).wrapping_add(self.pc.get()), count)?;
             }
             BMC => {
                 const INS_SIZE: usize = size_of::<OpsRRH>() + 1;
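With this change the four memory-access arms all delegate to the shared load/store helpers added at the end of this commit; the relative forms differ from the absolute ones only in how the effective address is built: the offset is zero-extended and added to the current program counter with wrapping arithmetic. A small worked example of that computation (plain u64 stands in for the VM's Address type, and the offset is shown as u32 for illustration):

    // Mirrors `u64::from(off).wrapping_add(self.pc.get())` from the LDR arm.
    fn rel_addr(pc: u64, off: u32) -> u64 {
        u64::from(off).wrapping_add(pc)
    }

    fn main() {
        assert_eq!(rel_addr(0x1000, 0x20), 0x1020);
        // wrapping_add keeps the arithmetic defined even at the very top of
        // the address space instead of panicking in debug builds.
        assert_eq!(rel_addr(u64::MAX, 1), 0);
    }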
@@ -341,6 +297,19 @@ where
             }
             ADDFI => self.binary_op_imm::<f64>(ops::Add::add),
             MULFI => self.binary_op_imm::<f64>(ops::Mul::mul),
+            LRA16 => {
+                let OpsRRP(tg, reg, imm) = self.decode();
+                self.write_reg(tg, self.rel_addr(reg, imm).get());
+            }
+            LDR16 => {
+                let OpsRRPH(dst, base, off, count) = self.decode();
+                self.load(dst, base, u64::from(off).wrapping_add(self.pc.get()), count)?;
+            }
+            STR16 => {
+                let OpsRRPH(dst, base, off, count) = self.decode();
+                self.store(dst, base, u64::from(off).wrapping_add(self.pc.get()), count)?;
+            }
+            JMPR16 => self.pc = self.pc.wrapping_add(self.decode::<OpP>()),
             op => return Err(VmRunError::InvalidOpcode(op)),
         }
     }
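Of the new arms, LRA16 computes a relative address and writes it into a register, LDR16 and STR16 use the program counter only to form the memory address, and JMPR16 rewrites the program counter itself. A minimal model of the load/jump distinction; u64 stands in for the VM's Address type and the immediate is treated as unsigned here, matching the zero-extension used above (the exact signedness of OpP lives in hbbytecode):

    struct Cpu {
        pc: u64,
    }

    impl Cpu {
        // LDR16 / STR16: pc is only an input to the effective address.
        fn effective_addr(&self, off: u16) -> u64 {
            u64::from(off).wrapping_add(self.pc)
        }

        // JMPR16: pc itself moves by the immediate.
        fn jmpr16(&mut self, imm: u16) {
            self.pc = self.pc.wrapping_add(u64::from(imm));
        }
    }

    fn main() {
        let mut cpu = Cpu { pc: 0x2000 };
        assert_eq!(cpu.effective_addr(0x40), 0x2040);
        cpu.jmpr16(0x10);
        assert_eq!(cpu.pc, 0x2010);
    }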
@@ -363,6 +332,49 @@ where
         data
     }
 
+    /// Load
+    #[inline(always)]
+    unsafe fn load(
+        &mut self,
+        dst: u8,
+        base: u8,
+        offset: u64,
+        count: u16,
+    ) -> Result<(), VmRunError> {
+        let n: u8 = match dst {
+            0 => 1,
+            _ => 0,
+        };
+
+        self.memory.load(
+            self.ldst_addr_uber(dst, base, offset, count, n)?,
+            self.registers
+                .as_mut_ptr()
+                .add(usize::from(dst) + usize::from(n))
+                .cast(),
+            usize::from(count).wrapping_sub(n.into()),
+        )?;
+
+        Ok(())
+    }
+
+    /// Store
+    #[inline(always)]
+    unsafe fn store(
+        &mut self,
+        dst: u8,
+        base: u8,
+        offset: u64,
+        count: u16,
+    ) -> Result<(), VmRunError> {
+        self.memory.store(
+            self.ldst_addr_uber(dst, base, offset, count, 0)?,
+            self.registers.as_ptr().add(usize::from(dst)).cast(),
+            count.into(),
+        )?;
+        Ok(())
+    }
+
     /// Perform binary operation over two registers
     #[inline(always)]
     unsafe fn binary_op<T: ValueVariant>(&mut self, op: impl Fn(T, T) -> T) {
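The extracted load helper keeps the pre-existing special case for register 0: when dst is 0, n becomes 1, the copy starts one register later, and one byte fewer is transferred, so a load never writes register 0. An illustrative reduction of that invariant (not the VM's actual register file, just the idea that writes aimed at r0 are dropped):

    // Illustrative only: a simplified register file where r0 stays zero
    // because writes to it are silently discarded.
    fn write_reg(regs: &mut [u64; 256], idx: u8, value: u64) {
        if idx != 0 {
            regs[usize::from(idx)] = value;
        }
    }

    fn main() {
        let mut regs = [0u64; 256];
        write_reg(&mut regs, 0, 0xdead_beef);
        write_reg(&mut regs, 5, 42);
        assert_eq!(regs[0], 0); // r0 is untouched
        assert_eq!(regs[5], 42);
    }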