Reimplemented memory instructions, deal with it.

Erin 2023-06-19 00:42:57 +02:00 committed by ondra05
parent 4dd2052634
commit 7f2676af91
11 changed files with 588 additions and 515 deletions

Cargo.lock (generated)

@ -73,6 +73,7 @@ dependencies = [
"hashbrown",
"hbbytecode",
"log",
"paste",
"static_assertions",
]


@ -1,28 +0,0 @@
# Math operations
```
MATH_OP
Add
Sub
Mul
Div
Mod
```
```
MATH_TYPE
Unsigned
Signed
FloatingPoint
```
```
MATH_OP_SIDES
Register Constant
Register Register
Constant Constant
Constant Register
```
`[MATH_OP] [MATH_OP_SIDES] [MATH_TYPE] [IMM_LHS] [IMM_RHS] [REG]`


@ -27,7 +27,7 @@ macro_rules! tokendef {
#[regex(
"r[0-9]+",
|lexer| match lexer.slice()[1..].parse() {
Ok(n) if n <= 59 => Some(n),
Ok(n) => Some(n),
_ => None
},
)] Register(u8),
@ -52,11 +52,10 @@ macro_rules! tokendef {
#[rustfmt::skip]
tokendef![
"nop", "add", "sub", "mul", "and", "or", "xor", "sl", "sr", "srs", "cmp", "cmpu", "not", "neg",
"dir", "addf", "mulf", "dirf", "addi", "muli", "andi", "ori",
"xori", "sli", "sri", "srsi", "cmpi", "cmpui", "addfi", "mulfi", "cp", "li", "lb",
"ld", "lq", "lo", "sb", "sd", "sq", "so", "jmp", "jeq", "jne", "jlt", "jgt",
"jltu", "jgtu", "ecall",
"nop", "add", "sub", "mul", "and", "or", "xor", "sl", "sr", "srs", "cmp", "cmpu",
"dir", "neg", "not", "addi", "muli", "andi", "ori", "xori", "sli", "sri", "srsi",
"cmpi", "cmpui", "cp", "swa", "li", "ld", "st", "bmc", "brc", "jmp", "jeq", "jne",
"jlt", "jgt", "jltu", "jgtu", "ecall", "addf", "mulf", "dirf", "addfi", "mulfi",
];
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
@ -113,11 +112,96 @@ pub fn assembly(code: &str, buf: &mut Vec<u8>) -> Result<(), Error> {
self.buf.push(op);
match op {
NOP | ECALL => Ok(()),
DIR | DIRF => self.rrrr(),
ADD..=CMPU | ADDF..=MULF => self.rrr(),
NOT | CP => self.rr(),
LI | JMP => self.ri(),
ADDI..=CMPUI | ADDFI..=MULFI | LB..=SO | JEQ..=JGTU => self.rri(),
DIR | DIRF => {
expect_matches!(
self,
Token::Register(r0),
Token::PSep,
Token::Register(r1),
Token::PSep,
Token::Register(r2),
Token::PSep,
Token::Register(r3),
);
self.buf.extend([r0, r1, r2, r3]);
Ok(())
}
ADD..=CMPU | ADDF..=MULF => {
expect_matches!(
self,
Token::Register(r0),
Token::PSep,
Token::Register(r1),
Token::PSep,
Token::Register(r2),
);
self.buf.extend([r0, r1, r2]);
Ok(())
}
BRC => {
expect_matches!(
self,
Token::Register(r0),
Token::PSep,
Token::Register(r1),
Token::PSep,
Token::Integer(count),
);
self.buf.extend([
r0,
r1,
u8::try_from(count).map_err(|_| ErrorKind::UnexpectedToken)?,
]);
Ok(())
}
NEG..=NOT | CP..=SWA => {
expect_matches!(
self,
Token::Register(r0),
Token::PSep,
Token::Register(r1),
);
self.buf.extend([r0, r1]);
Ok(())
}
LI | JMP => {
expect_matches!(self, Token::Register(r0), Token::PSep);
self.buf.push(r0);
self.insert_imm()?;
Ok(())
}
ADDI..=CMPUI | BMC | JEQ..=JGTU | ADDFI..=MULFI => {
expect_matches!(
self,
Token::Register(r0),
Token::PSep,
Token::Register(r1),
Token::PSep,
);
self.buf.extend([r0, r1]);
self.insert_imm()?;
Ok(())
}
LD..=ST => {
expect_matches!(
self,
Token::Register(r0),
Token::PSep,
Token::Register(r1),
Token::PSep,
Token::Integer(offset),
Token::PSep,
Token::Integer(len),
);
self.buf.extend([r0, r1]);
self.buf.extend(offset.to_le_bytes());
self.buf.extend(
u16::try_from(len)
.map_err(|_| ErrorKind::InvalidToken)?
.to_le_bytes(),
);
Ok(())
}
_ => unreachable!(),
}?;
match self.next() {
@ -154,60 +238,6 @@ pub fn assembly(code: &str, buf: &mut Vec<u8>) -> Result<(), Error> {
Ok(())
}
fn rrr(&mut self) -> Result<(), ErrorKind> {
expect_matches!(
self,
Token::Register(r0),
Token::PSep,
Token::Register(r1),
Token::PSep,
Token::Register(r2)
);
self.buf.extend([r0, r1, r2]);
Ok(())
}
fn rr(&mut self) -> Result<(), ErrorKind> {
expect_matches!(self, Token::Register(r0), Token::PSep, Token::Register(r1),);
self.buf.extend([r0, r1]);
Ok(())
}
fn ri(&mut self) -> Result<(), ErrorKind> {
expect_matches!(self, Token::Register(r0), Token::PSep);
self.buf.push(r0);
self.insert_imm()?;
Ok(())
}
fn rri(&mut self) -> Result<(), ErrorKind> {
expect_matches!(
self,
Token::Register(r0),
Token::PSep,
Token::Register(r1),
Token::PSep,
);
self.buf.extend([r0, r1]);
self.insert_imm()?;
Ok(())
}
fn rrrr(&mut self) -> Result<(), ErrorKind> {
expect_matches!(
self,
Token::Register(r0),
Token::PSep,
Token::Register(r1),
Token::PSep,
Token::Register(r2),
Token::PSep,
Token::Register(r3),
);
self.buf.extend([r0, r1, r2, r3]);
Ok(())
}
fn insert_imm(&mut self) -> Result<(), ErrorKind> {
let imm = match self.next()? {
Token::Integer(i) => i.to_le_bytes(),
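For reference, the new `LD..=ST` arm above serializes its operands into the BBDH layout: opcode, two register bytes, a 64-bit little-endian offset, then a 16-bit little-endian byte count. A hand-rolled sketch of the bytes expected for a hypothetical load with destination r2, base r1, offset 0x10 and length 8 (deliberately bypassing the real assembler):

```rust
fn main() {
    let (ld, dst, base, offset, len) = (31u8, 2u8, 1u8, 0x10u64, 8u16);

    let mut buf = vec![ld, dst, base];  // opcode + two register bytes
    buf.extend(offset.to_le_bytes());   // 64-bit offset (D), little-endian
    buf.extend(len.to_le_bytes());      // 16-bit byte count (H), little-endian

    assert_eq!(buf.len(), 13);          // 1 opcode byte + 12 parameter bytes (BBDH)
    assert_eq!(buf[..3], [31, 2, 1]);
}
```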

hbbytecode/hbbytecode.h (new file)

@ -0,0 +1,60 @@
/* HoleyBytes Bytecode representation in C
* Requires C23 compiler or better
*/
#pragma once
#include <assert.h>
#include <stdint.h>
typedef enum hbbc_Opcode: uint8_t {
hbbc_Op_NOP, hbbc_Op_ADD, hbbc_Op_MUL, hbbc_Op_AND, hbbc_Op_OR, hbbc_Op_XOR, hbbc_Op_SL,
hbbc_Op_SR, hbbc_Op_SRS, hbbc_Op_CMP, hbbc_Op_CMPU, hbbc_Op_DIR, hbbc_Op_NEG, hbbc_Op_NOT,
hbbc_Op_ADDI, hbbc_Op_MULI, hbbc_Op_ANDI, hbbc_Op_ORI, hbbc_Op_XORI, hbbc_Op_SLI, hbbc_Op_SRI,
hbbc_Op_SRSI, hbbc_Op_CMPI, hbbc_Op_CMPUI, hbbc_Op_CP, hbbc_Op_SWA, hbbc_Op_LI, hbbc_Op_LD,
hbbc_Op_ST, hbbc_Op_BMC, hbbc_Op_BRC, hbbc_Op_JMP, hbbc_Op_JEQ, hbbc_Op_JNE, hbbc_Op_JLT,
hbbc_Op_JGT, hbbc_Op_JLTU, hbbc_Op_JGTU, hbbc_Op_ECALL, hbbc_Op_ADDF, hbbc_Op_MULF,
hbbc_Op_DIRF, hbbc_Op_ADDFI, hbbc_Op_MULFI,
} hbbc_Opcode;
static_assert(sizeof(hbbc_Opcode) == 1);
#pragma pack(push, 1)
typedef struct hbbc_ParamBBBB
{ uint8_t _0; uint8_t _1; uint8_t _2; uint8_t _3; }
hbbc_ParamBBBB;
static_assert(sizeof(hbbc_ParamBBBB) == 4);
typedef struct hbbc_ParamBBB
{ uint8_t _0; uint8_t _1; uint8_t _2; }
hbbc_ParamBBB;
static_assert(sizeof(hbbc_ParamBBB) == 3);
typedef struct hbbc_ParamBBDH
{ uint8_t _0; uint8_t _1; uint64_t _2; uint16_t _3; }
hbbc_ParamBBDH;
static_assert(sizeof(hbbc_ParamBBDH) == 12);
typedef struct hbbc_ParamBBDB
{ uint8_t _0; uint8_t _1; uint64_t _2; uint8_t _3; }
hbbc_ParamBBDB;
static_assert(sizeof(hbbc_ParamBBDB) == 11);
typedef struct hbbc_ParamBBD
{ uint8_t _0; uint8_t _1; uint64_t _2; }
hbbc_ParamBBD;
static_assert(sizeof(hbbc_ParamBBD) == 10);
typedef struct hbbc_ParamBB
{ uint8_t _0; uint8_t _1; }
hbbc_ParamBB;
static_assert(sizeof(hbbc_ParamBB) == 2);
typedef struct hbbc_ParamBD
{ uint8_t _0; uint64_t _1; }
hbbc_ParamBD;
static_assert(sizeof(hbbc_ParamBD) == 9);
typedef uint64_t hbbc_ParamD;
static_assert(sizeof(hbbc_ParamD) == 8);
#pragma pack(pop)


@ -20,88 +20,87 @@ constmod!(pub opcode(u8) {
NOP = 0, "N; Do nothing";
ADD = 1, "RRR; #0 ← #1 + #2";
SUB = 2, "RRR; #0 ← #1 - #2";
MUL = 3, "RRR; #0 ← #1 × #2";
AND = 4, "RRR; #0 ← #1 & #2";
OR = 5, "RRR; #0 ← #1 | #2";
XOR = 6, "RRR; #0 ← #1 ^ #2";
SL = 7, "RRR; #0 ← #1 « #2";
SR = 8, "RRR; #0 ← #1 » #2";
SRS = 9, "RRR; #0 ← #1 » #2 (signed)";
CMP = 10, "RRR; #0 ← #1 <=> #2";
CMPU = 11, "RRR; #0 ← #1 <=> #2 (unsigned)";
DIR = 12, "RRRR; #0 ← #2 / #3, #1 ← #2 % #3";
NEG = 13, "RR; #0 ← ~#1";
NOT = 14, "RR; #0 ← !#1";
ADD = 1, "BBB; #0 ← #1 + #2";
SUB = 2, "BBB; #0 ← #1 - #2";
MUL = 3, "BBB; #0 ← #1 × #2";
AND = 4, "BBB; #0 ← #1 & #2";
OR = 5, "BBB; #0 ← #1 | #2";
XOR = 6, "BBB; #0 ← #1 ^ #2";
SL = 7, "BBB; #0 ← #1 « #2";
SR = 8, "BBB; #0 ← #1 » #2";
SRS = 9, "BBB; #0 ← #1 » #2 (signed)";
CMP = 10, "BBB; #0 ← #1 <=> #2";
CMPU = 11, "BBB; #0 ← #1 <=> #2 (unsigned)";
DIR = 12, "BBBB; #0 ← #2 / #3, #1 ← #2 % #3";
NEG = 13, "BB; #0 ← ~#1";
NOT = 14, "BB; #0 ← !#1";
ADDI = 18, "RRI; #0 ← #1 + imm #2";
MULI = 19, "RRI; #0 ← #1 × imm #2";
ANDI = 20, "RRI; #0 ← #1 & imm #2";
ORI = 21, "RRI; #0 ← #1 | imm #2";
XORI = 22, "RRI; #0 ← #1 ^ imm #2";
SLI = 23, "RRI; #0 ← #1 « imm #2";
SRI = 24, "RRI; #0 ← #1 » imm #2";
SRSI = 25, "RRI; #0 ← #1 » imm #2 (signed)";
CMPI = 26, "RRI; #0 ← #1 <=> imm #2";
CMPUI = 27, "RRI; #0 ← #1 <=> imm #2 (unsigned)";
ADDI = 18, "BBD; #0 ← #1 + imm #2";
MULI = 19, "BBD; #0 ← #1 × imm #2";
ANDI = 20, "BBD; #0 ← #1 & imm #2";
ORI = 21, "BBD; #0 ← #1 | imm #2";
XORI = 22, "BBD; #0 ← #1 ^ imm #2";
SLI = 23, "BBD; #0 ← #1 « imm #2";
SRI = 24, "BBD; #0 ← #1 » imm #2";
SRSI = 25, "BBD; #0 ← #1 » imm #2 (signed)";
CMPI = 26, "BBD; #0 ← #1 <=> imm #2";
CMPUI = 27, "BBD; #0 ← #1 <=> imm #2 (unsigned)";
CP = 28, "RR; Copy #0 ← #1";
LI = 29, "RI; Load immediate, #0 ← imm #1";
LB = 30, "RRI; Load byte (8 bits), #0 ← [#1 + imm #2]";
LD = 31, "RRI; Load doublet (16 bits)";
LQ = 32, "RRI; Load quadlet (32 bits)";
LO = 33, "RRI; Load octlet (64 bits)";
SB = 34, "RRI; Store byte, [#1 + imm #2] ← #0";
SD = 35, "RRI; Store doublet";
SQ = 36, "RRI; Store quadlet";
SO = 37, "RRI; Store octlet";
CP = 28, "BB; Copy #0 ← #1";
SWA = 29, "BB; Swap #0 and #1";
LI = 30, "BD; #0 ← imm #1";
LD = 31, "BBDH; #0 ← [#1 + imm #2], imm #3 bytes, overflowing";
ST = 32, "BBDH; [#1 + imm #2] ← #0, imm #3 bytes, overflowing";
BMC = 33, "BBD; [#0] ← [#1], imm #2 bytes";
BRC = 34, "BBB; #0 ← #1, imm #2 registers";
JMP = 38, "RI; Unconditional jump [#0 + imm #1]";
JEQ = 39, "RRI; if #0 = #1 → jump imm #2";
JNE = 40, "RRI; if #0 ≠ #1 → jump imm #2";
JLT = 41, "RRI; if #0 < #1 → jump imm #2";
JGT = 42, "RRI; if #0 > #1 → jump imm #2";
JLTU = 43, "RRI; if #0 < #1 → jump imm #2 (unsigned)";
JGTU = 44, "RRI; if #0 > #1 → jump imm #2 (unsigned)";
ECALL = 45, "N; Issue system call";
JMP = 35, "BD; Unconditional jump [#0 + imm #1]";
JEQ = 36, "BBD; if #0 = #1 → jump imm #2";
JNE = 37, "BBD; if #0 ≠ #1 → jump imm #2";
JLT = 38, "BBD; if #0 < #1 → jump imm #2";
JGT = 39, "BBD; if #0 > #1 → jump imm #2";
JLTU = 40, "BBD; if #0 < #1 → jump imm #2 (unsigned)";
JGTU = 41, "BBD; if #0 > #1 → jump imm #2 (unsigned)";
ECALL = 42, "N; Issue system call";
ADDF = 46, "RRR; #0 ← #1 +. #2";
MULF = 47, "RRR; #0 ← #1 +. #2";
DIRF = 48, "RRRR; #0 ← #2 / #3, #1 ← #2 % #3";
ADDF = 43, "BBB; #0 ← #1 +. #2";
MULF = 44, "BBB; #0 ← #1 *. #2";
DIRF = 45, "BBBB; #0 ← #2 / #3, #1 ← #2 % #3";
ADDFI = 49, "RRI; #0 ← #1 +. imm #2";
MULFI = 50, "RRI; #0 ← #1 *. imm #2";
ADDFI = 46, "BBD; #0 ← #1 +. imm #2";
MULFI = 47, "BBD; #0 ← #1 *. imm #2";
});
/// Register-register-register-register instruction parameter
#[repr(packed)]
pub struct ParamRRRR(pub u8, pub u8, pub u8, pub u8);
pub struct ParamBBBB(pub u8, pub u8, pub u8, pub u8);
/// Register-register-register instruction parameter
#[repr(packed)]
pub struct ParamRRR(pub u8, pub u8, pub u8);
pub struct ParamBBB(pub u8, pub u8, pub u8);
/// Register-register-immediate instruction parameter
#[repr(packed)]
pub struct ParamRRI(pub u8, pub u8, pub u64);
pub struct ParamBBDH(pub u8, pub u8, pub u64, pub u16);
/// Register-register instruction parameter
#[repr(packed)]
pub struct ParamRR(pub u8, pub u8);
pub struct ParamBBDB(pub u8, pub u8, pub u64, pub u8);
/// Register-immediate instruction parameter
#[repr(packed)]
pub struct ParamRI(pub u8, pub u64);
pub struct ParamBBD(pub u8, pub u8, pub u64);
#[repr(packed)]
pub struct ParamBB(pub u8, pub u8);
#[repr(packed)]
pub struct ParamBD(pub u8, pub u64);
/// # Safety
/// Has to be valid to be decoded from bytecode.
pub unsafe trait OpParam {}
unsafe impl OpParam for ParamRRRR {}
unsafe impl OpParam for ParamRRR {}
unsafe impl OpParam for ParamRRI {}
unsafe impl OpParam for ParamRR {}
unsafe impl OpParam for ParamRI {}
unsafe impl OpParam for ParamBBBB {}
unsafe impl OpParam for ParamBBB {}
unsafe impl OpParam for ParamBBDB {}
unsafe impl OpParam for ParamBBDH {}
unsafe impl OpParam for ParamBBD {}
unsafe impl OpParam for ParamBB {}
unsafe impl OpParam for ParamBD {}
unsafe impl OpParam for u64 {}
unsafe impl OpParam for () {}
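Since the `Param*` types above are `#[repr(packed)]`, their sizes should line up byte-for-byte with the `hbbc_Param*` structs asserted in `hbbytecode.h` earlier in this commit. A quick cross-check, assuming the crate exports the types exactly as listed:

```rust
// Mirrors the static_asserts from hbbytecode.h on the Rust side.
use core::mem::size_of;
use hbbytecode::{ParamBB, ParamBBB, ParamBBBB, ParamBBD, ParamBBDB, ParamBBDH, ParamBD};

fn main() {
    assert_eq!(size_of::<ParamBBBB>(), 4);
    assert_eq!(size_of::<ParamBBB>(), 3);
    assert_eq!(size_of::<ParamBBDH>(), 12);
    assert_eq!(size_of::<ParamBBDB>(), 11);
    assert_eq!(size_of::<ParamBBD>(), 10);
    assert_eq!(size_of::<ParamBB>(), 2);
    assert_eq!(size_of::<ParamBD>(), 9);
}
```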


@ -11,4 +11,5 @@ delegate = "0.9"
hashbrown = "0.13"
hbbytecode.path = "../hbbytecode"
log = "0.4"
paste = "1.0"
static_assertions = "1.0"


@ -1,20 +1,8 @@
macro_rules! bail {
($kind:ident, $start:expr, $curr:expr, $offset:expr) => {
return Err(Error {
kind: ErrorKind::$kind,
index: ($curr.as_ptr() as usize) - ($start.as_ptr() as usize) + $offset,
})
};
($kind:ident, $start:expr, $curr:expr) => {
bail!($kind, $start, $curr, 0)
};
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ErrorKind {
InvalidInstruction,
InvalidRegister,
Unimplemented,
RegisterArrayOverflow,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
@ -26,57 +14,39 @@ pub struct Error {
pub fn validate(mut program: &[u8]) -> Result<(), Error> {
use hbbytecode::opcode::*;
#[inline]
fn reg(regs: &[u8]) -> Option<usize> {
regs.iter()
.enumerate()
.filter_map(|(n, &r)| (r > 59).then_some(n))
.next()
}
let start = program;
loop {
program = match program {
[] => return Ok(()),
// N
[NOP | ECALL, rest @ ..] => rest,
// RRRR
[DIR | DIRF, _, _, _, _, rest @ ..] => {
if let Some(n) = reg(&program[1..=4]) {
bail!(InvalidRegister, start, program, n + 1);
}
rest
}
// RRR
[ADD..=CMPU | ADDF..=MULF, _, _, _, rest @ ..] => {
if let Some(n) = reg(&program[1..=3]) {
bail!(InvalidRegister, start, program, n + 1);
}
rest
}
// RR
[NOT | CP, _, _, rest @ ..] => {
if let Some(n) = reg(&program[1..=2]) {
bail!(InvalidRegister, start, program, n + 1);
}
rest
}
// RI
[LI | JMP, reg, _, _, _, _, _, _, _, _, rest @ ..] => {
if *reg > 59 {
bail!(InvalidRegister, start, program, 1);
}
rest
}
// RRI
[ADDI..=CMPUI | ADDFI..=MULFI | LB..=SO | JEQ..=JGTU, _, _, _, _, _, _, _, _, _, _, rest @ ..] =>
[LD..=ST, reg, _, _, _, _, _, _, _, _, _, count, ..]
if usize::from(*reg) * 8 + usize::from(*count) > 2048 =>
{
if let Some(n) = reg(&program[1..=2]) {
bail!(InvalidRegister, start, program, n + 1);
return Err(Error {
kind: ErrorKind::RegisterArrayOverflow,
index: (program.as_ptr() as usize) - (start.as_ptr() as usize),
})
}
rest
[BRC, src, dst, count, ..]
if src.checked_add(*count).is_none() || dst.checked_add(*count).is_none() =>
{
return Err(Error {
kind: ErrorKind::RegisterArrayOverflow,
index: (program.as_ptr() as usize) - (start.as_ptr() as usize),
})
}
[NOP | ECALL, rest @ ..]
| [DIR | DIRF, _, _, _, _, rest @ ..]
| [ADD..=CMPU | BRC | ADDF..=MULF, _, _, _, rest @ ..]
| [NEG..=NOT | CP..=SWA, _, _, rest @ ..]
| [LI | JMP, _, _, _, _, _, _, _, _, _, rest @ ..]
| [ADDI..=CMPUI | BMC | JEQ..=JGTU | ADDFI..=MULFI, _, _, _, _, _, _, _, _, _, _, rest @ ..]
| [LD..=ST, _, _, _, _, _, _, _, _, _, _, _, _, rest @ ..] => rest,
_ => {
return Err(Error {
kind: ErrorKind::InvalidInstruction,
index: (program.as_ptr() as usize) - (start.as_ptr() as usize),
})
}
_ => bail!(InvalidInstruction, start, program),
}
}
}
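The two new guard arms reject programs whose register windows would run past r255 before the catch-all patterns accept them. A minimal sketch of the caller's view (the `hbvm` crate and module paths are assumptions, not shown in this diff):

```rust
use hbvm::validate::validate; // path assumed

fn main() {
    // BRC (opcode 34) names two windows of `count` registers; 250 + 10 overflows
    // a byte, so this should be reported as RegisterArrayOverflow at index 0.
    let bad_brc = [34u8, 250, 0, 10];
    assert!(validate(&bad_brc).is_err());

    // A well-formed four-byte BRC passes validation.
    let ok_brc = [34u8, 2, 10, 3];
    assert!(validate(&ok_brc).is_ok());
}
```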


@ -1,11 +1,7 @@
// HACK: This is a temporary implementation so we can have memory instructions working
mod paging;
use self::paging::{PageTable, Permission, PtEntry};
use alloc::boxed::Box;
use core::mem::MaybeUninit;
use {crate::vm::value::Value, ma_size::MemAccessSize};
#[derive(Clone, Debug)]
pub struct Memory {
@ -56,67 +52,62 @@ impl Memory {
}
/// Load value from an address
pub fn load<S: MemAccessSize>(&self, addr: u64) -> Option<Value> {
let lookup = self.page_lookup(addr)?;
match lookup.perm {
Permission::Empty | Permission::Node => return None,
Permission::Readonly | Permission::Write | Permission::Exec => (),
}
let mut value = MaybeUninit::<Value>::zeroed();
let overflow = (lookup.offset + S::BYTES).saturating_sub(lookup.size - 1);
let normal = S::BYTES - overflow;
unsafe {
core::ptr::copy_nonoverlapping::<u8>(lookup.ptr, value.as_mut_ptr().cast(), normal);
if overflow != 0 {
let lookup = self.page_lookup(lookup.ptr as u64 + lookup.size as u64)?;
match lookup.perm {
Permission::Empty | Permission::Node => return None,
Permission::Readonly | Permission::Write | Permission::Exec => (),
}
core::ptr::copy_nonoverlapping::<u8>(
lookup.ptr,
value.as_mut_ptr().cast::<u8>().add(normal),
overflow,
);
}
Some(value.assume_init())
}
pub unsafe fn load(&self, addr: u64, target: *mut u8, count: usize) -> Result<(), ()> {
self.memory_access(
addr,
target,
count,
|perm| {
matches!(
perm,
Permission::Readonly | Permission::Write | Permission::Exec
)
},
|src, dst, count| core::ptr::copy_nonoverlapping(src, dst, count),
)
}
/// Store value to an address
pub fn store<S: MemAccessSize>(&mut self, addr: u64, value: Value) -> Result<(), ()> {
let lookup = self.page_lookup(addr).ok_or(())?;
if lookup.perm != Permission::Write {
return Err(());
pub unsafe fn store(&mut self, addr: u64, source: *const u8, count: usize) -> Result<(), ()> {
self.memory_access(
addr,
source.cast_mut(),
count,
|perm| perm == Permission::Write,
|dst, src, count| core::ptr::copy_nonoverlapping(src, dst, count),
)
}
let overflow = (lookup.offset + S::BYTES).saturating_sub(lookup.size - 1);
let normal = S::BYTES - overflow;
/// Copy a block of memory
pub unsafe fn block_copy(&mut self, src: u64, dst: u64, count: u64) -> Result<(), ()> {
let count = usize::try_from(count).expect("?conradluget a better CPU");
let mut srcs = PageSplitter::new(src, count, self.root_pt);
let mut dsts = PageSplitter::new(dst, count, self.root_pt);
let mut c_src = srcs.next().ok_or(())?;
let mut c_dst = dsts.next().ok_or(())?;
loop {
let min_size = c_src.size.min(c_dst.size);
unsafe {
core::ptr::copy_nonoverlapping::<u8>(
(&value as *const Value).cast(),
lookup.ptr,
normal,
);
if overflow != 0 {
let lookup = self
.page_lookup(lookup.ptr as u64 + lookup.size as u64)
.ok_or(())?;
core::ptr::copy_nonoverlapping::<u8>(
(&value as *const Value).cast::<u8>().add(normal),
lookup.ptr,
overflow,
);
core::ptr::copy(c_src.ptr, c_dst.ptr, min_size);
}
};
Ok(())
match (
match c_src.size.saturating_sub(min_size) {
0 => srcs.next(),
size => Some(PageSplitResult { size, ..c_src }),
},
match c_dst.size.saturating_sub(min_size) {
0 => dsts.next(),
size => Some(PageSplitResult { size, ..c_dst }),
},
) {
(None, None) => return Ok(()),
(Some(src), Some(dst)) => (c_src, c_dst) = (src, dst),
_ => return Err(()),
}
}
}
#[inline]
@ -129,13 +120,63 @@ impl Memory {
unsafe { &mut *self.root_pt }
}
/// Resolve page and offset from the page
fn page_lookup(&self, addr: u64) -> Option<PageLookupResult> {
let mut current_pt = self.root_pt;
fn memory_access(
&self,
src: u64,
mut dst: *mut u8,
len: usize,
permission_check: impl Fn(Permission) -> bool,
action: impl Fn(*mut u8, *mut u8, usize),
) -> Result<(), ()> {
for PageSplitResult { ptr, size, perm } in PageSplitter::new(src, len, self.root_pt) {
if !permission_check(perm) {
return Err(());
}
action(ptr, dst, size);
dst = unsafe { dst.add(size) };
}
Ok(())
}
}
struct PageSplitResult {
ptr: *mut u8,
size: usize,
perm: Permission,
}
struct PageSplitter {
addr: u64,
size: usize,
pagetable: *const PageTable,
}
impl PageSplitter {
pub const fn new(addr: u64, size: usize, pagetable: *const PageTable) -> Self {
Self {
addr,
size,
pagetable,
}
}
}
impl Iterator for PageSplitter {
type Item = PageSplitResult;
fn next(&mut self) -> Option<Self::Item> {
if self.size == 0 {
return None;
}
let (base, perm, size, offset) = 'a: {
let mut current_pt = self.pagetable;
for lvl in (0..5).rev() {
unsafe {
let entry = (*current_pt).get_unchecked(
usize::try_from((addr >> (lvl * 9 + 12)) & ((1 << 9) - 1))
usize::try_from((self.addr >> (lvl * 9 + 12)) & ((1 << 9) - 1))
.expect("?conradluget a better CPU"),
);
@ -143,57 +184,32 @@ impl Memory {
match entry.permission() {
Permission::Empty => return None,
Permission::Node => current_pt = ptr as _,
_ if lvl > 2 => return None,
perm => {
return Some(PageLookupResult {
break 'a (
ptr as *mut u8,
perm,
ptr: ptr as _,
size: match lvl {
match lvl {
0 => 4096,
1 => 1024_usize.pow(2) * 2,
2 => 1024_usize.pow(3),
_ => unreachable!(),
_ => return None,
},
offset: addr as usize & ((1 << (lvl * 9 + 12)) - 1),
self.addr as usize & ((1 << (lvl * 9 + 12)) - 1),
)
}
}
}
}
return None;
};
let avail = (size - offset).clamp(0, self.size);
self.addr += size as u64;
self.size = self.size.saturating_sub(size);
Some(PageSplitResult {
ptr: unsafe { base.add(offset) },
size: avail,
perm,
})
}
}
}
}
None
}
}
struct PageLookupResult {
perm: Permission,
ptr: *mut u8,
size: usize,
offset: usize,
}
macro_rules! size_markers {
($($name:ident = $size:expr),* $(,)?) => {
pub mod ma_size {
/// # Safety
/// Implementor has to assure that [`MemAccessSize::BYTES`] won't be larger than
/// size of [`Value`]
pub unsafe trait MemAccessSize {
const BYTES: usize;
}
$(
pub struct $name;
unsafe impl MemAccessSize for $name {
const BYTES: usize = $size;
}
)*
}
};
}
size_markers! {
Byte = 1,
Doublet = 2,
Quadlet = 4,
Octlet = 8,
}
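The typed `load::<S>` / `store::<S>` pair and the `ma_size` markers are gone; callers now pass a raw pointer plus a byte count, and `PageSplitter` takes care of accesses that straddle page boundaries. A crate-internal sketch of the new surface (it assumes suitably mapped pages at `addr`; the `mem` module is private, so the path is an assumption):

```rust
use crate::vm::mem::Memory; // module path assumed

/// Reads a 64-bit word from guest memory mapped with at least read permission.
fn read_u64(mem: &Memory, addr: u64) -> Result<u64, ()> {
    let mut word = 0u64;
    // Safety: `word` is 8 writable bytes that do not alias guest memory.
    unsafe { mem.load(addr, (&mut word as *mut u64).cast::<u8>(), 8)? };
    Ok(word) // bytes are copied verbatim, just as LD copies them into registers
}

/// Writes a 64-bit word to guest memory mapped with write permission.
fn write_u64(mem: &mut Memory, addr: u64, value: u64) -> Result<(), ()> {
    let bytes = value.to_le_bytes();
    // Safety: `bytes` is 8 readable bytes.
    unsafe { mem.store(addr, bytes.as_ptr(), bytes.len()) }
}
```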


@ -3,24 +3,22 @@
//! All unsafe code here should be sound, if input bytecode passes validation.
// # General safety notice:
// - Validation has to assure there is 60 registers (r0 - r59)
// - Validation has to assure there are 256 registers (r0 - r255)
// - Instructions have to be valid as specified (values and sizes)
// - Mapped pages should be at least 8 KiB
// - Mapped pages should be at least 4 KiB
// - Yes, I am aware of the UB when jumping into the middle of an instruction where
// the read byte corresponds to an instruction whose length exceeds the
// program size. If you are (rightfully) worried about the UB, for now just
// append 11 zero bytes to your program.
use hbbytecode::ParamRRRR;
mod mem;
mod value;
use {
crate::validate,
core::ops,
hbbytecode::{OpParam, ParamRI, ParamRR, ParamRRI, ParamRRR},
mem::{ma_size, Memory},
hbbytecode::{OpParam, ParamBB, ParamBBB, ParamBBBB, ParamBBD, ParamBBDH, ParamBD},
mem::Memory,
static_assertions::assert_impl_one,
value::Value,
};
@ -41,7 +39,7 @@ macro_rules! param {
macro_rules! binary_op {
($self:expr, $ty:ident, $handler:expr) => {{
let ParamRRR(tg, a0, a1) = param!($self, ParamRRR);
let ParamBBB(tg, a0, a1) = param!($self, ParamBBB);
$self.write_reg(
tg,
$handler(
@ -55,7 +53,7 @@ macro_rules! binary_op {
macro_rules! binary_op_imm {
($self:expr, $ty:ident, $handler:expr) => {{
let ParamRRI(tg, a0, imm) = param!($self, ParamRRI);
let ParamBBD(tg, a0, imm) = param!($self, ParamBBD);
$self.write_reg(
tg,
$handler(Value::$ty(&$self.read_reg(a0)), Value::$ty(&imm.into())).into(),
@ -63,38 +61,10 @@ macro_rules! binary_op_imm {
}};
}
macro_rules! load {
($self:expr, $size:ty) => {{
let ParamRRI(tg, a0, offset) = param!($self, ParamRRI);
$self.write_reg(
tg,
match $self
.memory
.load::<$size>($self.read_reg(a0).int() + offset)
{
Some(x) => x,
None => return HaltReason::LoadAccessEx,
},
);
}};
}
macro_rules! store {
($self:expr, $size:ty) => {{
let ParamRRI(src, a0, offset) = param!($self, ParamRRI);
if let Err(()) = $self
.memory
.store::<$size>($self.read_reg(a0).int() + offset, $self.read_reg(src))
{
return HaltReason::StoreAccessEx;
}
}};
}
macro_rules! cond_jump {
($self:expr, $ty:ident, $expected:ident) => {{
let ParamRRI(a0, a1, jt) = param!($self, ParamRRI);
if core::cmp::Ord::cmp(&$self.read_reg(a0), &$self.read_reg(a1))
let ParamBBD(a0, a1, jt) = param!($self, ParamBBD);
if core::cmp::Ord::cmp(&$self.read_reg(a0).as_u64(), &$self.read_reg(a1).as_u64())
== core::cmp::Ordering::$expected
{
$self.pc = jt as usize;
@ -103,7 +73,7 @@ macro_rules! cond_jump {
}
pub struct Vm<'a> {
pub registers: [Value; 60],
pub registers: [Value; 256],
pub memory: Memory,
pc: usize,
program: &'a [u8],
@ -114,7 +84,7 @@ impl<'a> Vm<'a> {
/// Program code has to be validated
pub unsafe fn new_unchecked(program: &'a [u8]) -> Self {
Self {
registers: [Value::from(0_u64); 60],
registers: [Value::from(0_u64); 256],
memory: Default::default(),
pc: 0,
program,
@ -135,38 +105,40 @@ impl<'a> Vm<'a> {
unsafe {
match opcode {
NOP => param!(self, ()),
ADD => binary_op!(self, int, u64::wrapping_add),
SUB => binary_op!(self, int, u64::wrapping_sub),
MUL => binary_op!(self, int, u64::wrapping_mul),
AND => binary_op!(self, int, ops::BitAnd::bitand),
OR => binary_op!(self, int, ops::BitOr::bitor),
XOR => binary_op!(self, int, ops::BitXor::bitxor),
SL => binary_op!(self, int, ops::Shl::shl),
SR => binary_op!(self, int, ops::Shr::shr),
SRS => binary_op!(self, sint, ops::Shr::shr),
ADD => binary_op!(self, as_u64, u64::wrapping_add),
SUB => binary_op!(self, as_u64, u64::wrapping_sub),
MUL => binary_op!(self, as_u64, u64::wrapping_mul),
AND => binary_op!(self, as_u64, ops::BitAnd::bitand),
OR => binary_op!(self, as_u64, ops::BitOr::bitor),
XOR => binary_op!(self, as_u64, ops::BitXor::bitxor),
SL => binary_op!(self, as_u64, ops::Shl::shl),
SR => binary_op!(self, as_u64, ops::Shr::shr),
SRS => binary_op!(self, as_i64, ops::Shr::shr),
CMP => {
let ParamRRR(tg, a0, a1) = param!(self, ParamRRR);
let ParamBBB(tg, a0, a1) = param!(self, ParamBBB);
self.write_reg(
tg,
(self.read_reg(a0).sint().cmp(&self.read_reg(a1).sint()) as i64).into(),
(self.read_reg(a0).as_i64().cmp(&self.read_reg(a1).as_i64()) as i64)
.into(),
);
}
CMPU => {
let ParamRRR(tg, a0, a1) = param!(self, ParamRRR);
let ParamBBB(tg, a0, a1) = param!(self, ParamBBB);
self.write_reg(
tg,
(self.read_reg(a0).int().cmp(&self.read_reg(a1).int()) as i64).into(),
(self.read_reg(a0).as_u64().cmp(&self.read_reg(a1).as_u64()) as i64)
.into(),
);
}
NOT => {
let param = param!(self, ParamRR);
self.write_reg(param.0, (!self.read_reg(param.1).int()).into());
let param = param!(self, ParamBB);
self.write_reg(param.0, (!self.read_reg(param.1).as_u64()).into());
}
NEG => {
let param = param!(self, ParamRR);
let param = param!(self, ParamBB);
self.write_reg(
param.0,
match self.read_reg(param.1).int() {
match self.read_reg(param.1).as_u64() {
0 => 1_u64,
_ => 0,
}
@ -174,55 +146,112 @@ impl<'a> Vm<'a> {
);
}
DIR => {
let ParamRRRR(dt, rt, a0, a1) = param!(self, ParamRRRR);
let a0 = self.read_reg(a0).int();
let a1 = self.read_reg(a1).int();
let ParamBBBB(dt, rt, a0, a1) = param!(self, ParamBBBB);
let a0 = self.read_reg(a0).as_u64();
let a1 = self.read_reg(a1).as_u64();
self.write_reg(dt, (a0.checked_div(a1).unwrap_or(u64::MAX)).into());
self.write_reg(rt, (a0.checked_rem(a1).unwrap_or(u64::MAX)).into());
}
ADDI => binary_op_imm!(self, int, ops::Add::add),
MULI => binary_op_imm!(self, int, ops::Mul::mul),
ANDI => binary_op_imm!(self, int, ops::BitAnd::bitand),
ORI => binary_op_imm!(self, int, ops::BitOr::bitor),
XORI => binary_op_imm!(self, int, ops::BitXor::bitxor),
SLI => binary_op_imm!(self, int, ops::Shl::shl),
SRI => binary_op_imm!(self, int, ops::Shr::shr),
SRSI => binary_op_imm!(self, sint, ops::Shr::shr),
ADDI => binary_op_imm!(self, as_u64, ops::Add::add),
MULI => binary_op_imm!(self, as_u64, ops::Mul::mul),
ANDI => binary_op_imm!(self, as_u64, ops::BitAnd::bitand),
ORI => binary_op_imm!(self, as_u64, ops::BitOr::bitor),
XORI => binary_op_imm!(self, as_u64, ops::BitXor::bitxor),
SLI => binary_op_imm!(self, as_u64, ops::Shl::shl),
SRI => binary_op_imm!(self, as_u64, ops::Shr::shr),
SRSI => binary_op_imm!(self, as_i64, ops::Shr::shr),
CMPI => {
let ParamRRI(tg, a0, imm) = param!(self, ParamRRI);
let ParamBBD(tg, a0, imm) = param!(self, ParamBBD);
self.write_reg(
tg,
(self.read_reg(a0).sint().cmp(&Value::from(imm).sint()) as i64).into(),
(self.read_reg(a0).as_i64().cmp(&Value::from(imm).as_i64()) as i64)
.into(),
);
}
CMPUI => {
let ParamRRI(tg, a0, imm) = param!(self, ParamRRI);
self.write_reg(tg, (self.read_reg(a0).int().cmp(&imm) as i64).into());
let ParamBBD(tg, a0, imm) = param!(self, ParamBBD);
self.write_reg(tg, (self.read_reg(a0).as_u64().cmp(&imm) as i64).into());
}
CP => {
let param = param!(self, ParamRR);
let param = param!(self, ParamBB);
self.write_reg(param.0, self.read_reg(param.1));
}
SWA => {
let ParamBB(src, dst) = param!(self, ParamBB);
if src + dst != 0 {
core::ptr::swap(
self.registers.get_unchecked_mut(usize::from(src)),
self.registers.get_unchecked_mut(usize::from(dst)),
);
}
}
LI => {
let param = param!(self, ParamRI);
let param = param!(self, ParamBD);
self.write_reg(param.0, param.1.into());
}
LB => load!(self, ma_size::Byte),
LD => load!(self, ma_size::Doublet),
LQ => load!(self, ma_size::Quadlet),
LO => load!(self, ma_size::Octlet),
SB => store!(self, ma_size::Byte),
SD => store!(self, ma_size::Doublet),
SQ => store!(self, ma_size::Quadlet),
SO => store!(self, ma_size::Octlet),
LD => {
let ParamBBDH(dst, base, off, count) = param!(self, ParamBBDH);
let n: usize = match dst {
0 => 1,
_ => 0,
};
if self
.memory
.load(
self.read_reg(base).as_u64() + off + n as u64,
self.registers.as_mut_ptr().add(usize::from(dst) + n).cast(),
usize::from(count).saturating_sub(n),
)
.is_err()
{
return HaltReason::LoadAccessEx;
}
}
ST => {
let ParamBBDH(dst, base, off, count) = param!(self, ParamBBDH);
if self
.memory
.store(
self.read_reg(base).as_u64() + off,
self.registers.as_ptr().add(usize::from(dst)).cast(),
count.into(),
)
.is_err()
{
return HaltReason::LoadAccessEx;
}
}
BMC => {
let ParamBBD(src, dst, count) = param!(self, ParamBBD);
if self
.memory
.block_copy(
self.read_reg(src).as_u64(),
self.read_reg(dst).as_u64(),
count,
)
.is_err()
{
return HaltReason::LoadAccessEx;
}
}
BRC => {
let ParamBBB(src, dst, count) = param!(self, ParamBBB);
core::ptr::copy(
self.registers.get_unchecked(usize::from(src)),
self.registers.get_unchecked_mut(usize::from(dst)),
usize::from(count * 8),
);
}
JMP => {
let ParamRI(reg, offset) = param!(self, ParamRI);
self.pc = (self.read_reg(reg).int() + offset) as usize;
let ParamBD(reg, offset) = param!(self, ParamBD);
self.pc = (self.read_reg(reg).as_u64() + offset) as usize;
}
JEQ => cond_jump!(self, int, Equal),
JNE => {
let ParamRRI(a0, a1, jt) = param!(self, ParamRRI);
if self.read_reg(a0) != self.read_reg(a1) {
let ParamBBD(a0, a1, jt) = param!(self, ParamBBD);
if self.read_reg(a0).as_u64() != self.read_reg(a1).as_u64() {
self.pc = jt as usize;
}
}
@ -234,18 +263,18 @@ impl<'a> Vm<'a> {
param!(self, ());
return HaltReason::Ecall;
}
ADDF => binary_op!(self, float, ops::Add::add),
MULF => binary_op!(self, float, ops::Mul::mul),
ADDF => binary_op!(self, as_f64, ops::Add::add),
MULF => binary_op!(self, as_f64, ops::Mul::mul),
DIRF => {
let ParamRRRR(dt, rt, a0, a1) = param!(self, ParamRRRR);
let a0 = self.read_reg(a0).float();
let a1 = self.read_reg(a1).float();
let ParamBBBB(dt, rt, a0, a1) = param!(self, ParamBBBB);
let a0 = self.read_reg(a0).as_f64();
let a1 = self.read_reg(a1).as_f64();
self.write_reg(dt, (a0 / a1).into());
self.write_reg(rt, (a0 % a1).into());
}
ADDFI => binary_op_imm!(self, float, ops::Add::add),
MULFI => binary_op_imm!(self, float, ops::Mul::mul),
_ => core::hint::unreachable_unchecked(),
ADDFI => binary_op_imm!(self, as_f64, ops::Add::add),
MULFI => binary_op_imm!(self, as_f64, ops::Mul::mul),
_ => return HaltReason::InvalidOpcode,
}
}
}
@ -273,6 +302,7 @@ impl<'a> Vm<'a> {
pub enum HaltReason {
ProgramEnd,
Ecall,
InvalidOpcode,
LoadAccessEx,
StoreAccessEx,
}
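Putting the pieces together, a host embedding might look like the sketch below. The program bytes follow the new numbering (LI = 30, ECALL = 42); the `run()` method name and the crate/module paths are assumptions, since neither appears in this diff:

```rust
use hbvm::{validate::validate, vm::{HaltReason, Vm}}; // paths assumed

fn main() {
    let mut program = vec![30u8, 1];        // LI r1, 69 (BD: opcode, register, imm)
    program.extend(69u64.to_le_bytes());
    program.push(42);                       // ECALL (N: no parameters)
    program.extend([0u8; 11]);              // padding, per the in-source note above

    validate(&program).expect("bytecode should pass validation");
    // Safety: the program was validated right above.
    let mut vm = unsafe { Vm::new_unchecked(&program) };
    assert!(matches!(vm.run(), HaltReason::Ecall)); // `run` name assumed
    assert_eq!(vm.registers[1].as_u64(), 69);
}
```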


@ -1,59 +1,37 @@
use core::fmt::Debug;
/// # Safety
/// The macro invoker shall make sure that byte reinterpret-cast
/// or zero-init won't cause undefined behaviour.
macro_rules! value_def {
($($fname:ident : $fty:ident, $getter:ident);* $(;)?) => {
#[derive(Clone, Copy)]
($($ty:ident),* $(,)?) => {
#[derive(Copy, Clone)]
#[repr(packed)]
pub union Value {
$($fname: $fty),*
$(pub $ty: $ty),*
}
paste::paste! {
impl Value {$(
#[inline]
pub fn $getter(&self) -> $fty {
unsafe { self.$fname }
pub fn [<as_ $ty>](&self) -> $ty {
unsafe { self.$ty }
}
)*}
}
$(impl From<$fty> for Value {
$(
impl From<$ty> for Value {
#[inline]
fn from($fname: $fty) -> Self {
Self { $fname }
fn from(value: $ty) -> Self {
Self { $ty: value }
}
})*
}
)*
};
}
value_def! {
i: u64, int;
s: i64, sint;
f: f64, float;
}
value_def!(u64, i64, f64);
impl Debug for Value {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
self.int().fmt(f)
}
}
impl PartialEq for Value {
fn eq(&self, other: &Self) -> bool {
self.int().eq(&other.int())
}
}
impl Eq for Value {}
impl PartialOrd for Value {
fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
self.int().partial_cmp(&other.int())
}
}
impl Ord for Value {
fn cmp(&self, other: &Self) -> core::cmp::Ordering {
self.int().cmp(&other.int())
self.as_u64().fmt(f)
}
}
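The `value_def!` rewrite drops the named fields and their `int` / `sint` / `float` getters in favour of `paste`-generated `as_u64` / `as_i64` / `as_f64` accessors keyed on the field type. A small crate-internal illustration of the reinterpret-cast semantics (the `value` module is private, so the path is an assumption):

```rust
use crate::vm::value::Value; // module path assumed

#[test]
fn value_reinterprets_the_same_bytes() {
    // Reading a different union field reinterprets the same 8 bytes; it is not
    // a numeric conversion.
    assert_eq!(Value::from(-1i64).as_u64(), u64::MAX);
    assert_eq!(Value::from(1.5f64).as_u64(), 1.5f64.to_bits());
}
```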

spec.md

@ -2,7 +2,7 @@
# Bytecode format
- All numbers are encoded little-endian
- There are 60 registers (0–59); they are represented by a byte
- There are 256 registers; they are represented by a byte
- Immediate values are 64 bit
### Instruction encoding
@ -10,17 +10,20 @@
- [opcode, …parameters…]
### Instruction parameter types
- R = Register
- I = Immediate
- B = Byte
- D = Doubleword (64 bits)
- H = Halfword (16 bits)
| Name | Size |
|:----:|:--------|
| RRRR | 32 bits |
| RRR | 24 bits |
| RRI | 80 bits |
| RR | 16 bits |
| RI | 72 bits |
| I | 64 bits |
| BBBB | 32 bits |
| BBB | 24 bits |
| BBDH | 96 bits |
| BBDB | 88 bits |
| BBD | 80 bits |
| BB | 16 bits |
| BD | 72 bits |
| D | 64 bits |
| N | 0 bits |
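For concreteness, here is how one instruction lays out under this scheme (hypothetical operands; ADDI is opcode 18 and uses the BBD form, per the tables below):

```rust
fn main() {
    // ADDI r1, r2, 1000 → opcode, two register bytes, 64-bit little-endian immediate.
    let mut addi = vec![18u8, 1, 2];    // opcode, #0 = r1, #1 = r2
    addi.extend(1000u64.to_le_bytes()); // imm #2
    assert_eq!(addi.len(), 11);         // 1 opcode byte + BBD (10 bytes)
}
```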
# Instructions
@ -37,7 +40,7 @@
| 0 | NOP | Do nothing |
## Integer binary ops.
- RRR type
- BBB type
- `#0 ← #1 <op> #2`
| Opcode | Name | Action |
@ -66,7 +69,7 @@
| > | 1 |
### Division-remainder
- Type RRRR
- Type BBBB
- If `#3` is zero, the resulting values are all-ones
- `#0 ← #2 ÷ #3`
- `#1 ← #2 % #3`
@ -76,7 +79,7 @@
| 12 | DIR | Divide and remainder combined |
### Negations
- Type RR
- Type BB
- `#0 ← <op> #1`
| Opcode | Name | Action |
@ -85,7 +88,7 @@
| 14 | NOT | Logical negation |
## Integer immediate binary ops.
- Type RRI
- Type BBD
- `#0 ← #1 <op> imm #2`
| Opcode | Name | Action |
@ -110,100 +113,113 @@
## Register value set / copy
### Copy
- Type RR
- Type BB
- `#0 ← #1`
| Opcode | Name | Action |
|:------:|:----:|:------:|
| 28 | CP | Copy |
### Swap
- Type BB
- Swap #0 and #1
| Opcode | Name | Action |
|:------:|:----:|:------:|
| 29 | SWA | Swap |
### Load immediate
- Type RI
- Type BD
- `#0 ← imm #1`
| Opcode | Name | Action |
|:------:|:----:|:--------------:|
| 29 | LI | Load immediate |
| 30 | LI | Load immediate |
## Memory operations
- Type RRI
- Type BBDH
- If the loaded/stored value exceeds one register size, the access continues into the following registers
### Load
- `#0 ← [#1 + imm #2]`
### Load / Store
| Opcode | Name | Action |
|:------:|:----:|:---------------------------------------:|
| 31 | LD | `#0 ← [#1 + imm #2], copy imm #3 bytes` |
| 32 | ST | `[#1 + imm #2] ← #0, copy imm #3 bytes` |
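Since each register holds 8 bytes, the byte count also determines how many registers a single access touches; a small sketch with a hypothetical helper:

```rust
use core::ops::RangeInclusive;

// Registers touched by a load/store of `count` bytes starting at register `dst`
// (count > 0 assumed); 8 bytes per register, rounded up.
fn touched_registers(dst: u8, count: u16) -> RangeInclusive<u16> {
    let regs = (u32::from(count) + 7) / 8;
    u16::from(dst)..=u16::from(dst) + regs as u16 - 1
}

fn main() {
    assert_eq!(touched_registers(2, 16), 2..=3); // a 16-byte LD into r2 also fills r3
    assert_eq!(touched_registers(2, 20), 2..=4); // 20 bytes spill into a third register
}
```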
## Block copy
- Block copy source and target can overlap
### Memory copy
- Type BBD
| Opcode | Name | Action |
|:------:|:----:|:----------------------:|
| 30 | LB | Load byte (8 bits) |
| 31 | LD | Load doublet (16 bits) |
| 32 | LQ | Load quadlet (32 bits) |
| 33 | LO | Load octlet (64 bits) |
|:------:|:----:|:--------------------------------:|
| 33 | BMC | `[#0] ← [#1], copy imm #2 bytes` |
### Store
- `[#1 + imm #2] ← #0`
### Register copy
- Type BBB
- Copy a block of registers to another location (again, overflowing into the following registers)
| Opcode | Name | Action |
|:------:|:----:|:-----------------------:|
| 34 | SB | Store byte (8 bits) |
| 35 | SD | Store doublet (16 bits) |
| 36 | SQ | Store quadlet (32 bits) |
| 37 | SO | Store octlet (64 bits) |
|:------:|:----:|:--------------------------------:|
| 34 | BRC | `#0 ← #1, copy imm #2 registers` |
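For example, a BRC occupies four bytes in total, and both register windows have to stay within r0–r255 (the validator rejects anything else):

```rust
fn main() {
    // BRC with operands (2, 10, 3): two windows of three registers each,
    // r2..=r4 and r10..=r12. A window starting at 254 with count 3 would run
    // past r255 and be rejected as RegisterArrayOverflow.
    let brc = [34u8, 2, 10, 3];
    assert_eq!(brc.len(), 1 + 3); // opcode + BBB
}
```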
## Control flow
### Unconditional jump
- Type RI
- Type BD
| Opcode | Name | Action |
|:------:|:----:|:---------------------:|
| 38 | JMP | Jump to `#0 + imm #1` |
| 35 | JMP | Jump to `#0 + imm #1` |
### Conditional jumps
- Type RRI
- Type BBD
- Jump to `imm #2` if `#0 <op> #1`
| Opcode | Name | Comparison |
|:------:|:----:|:------------:|
| 39 | JEQ | = |
| 40 | JNE | ≠ |
| 41 | JLT | < (signed) |
| 42 | JGT | > (signed) |
| 43 | JLTU | < (unsigned) |
| 44 | JGTU | > (unsigned) |
| 36 | JEQ | = |
| 37 | JNE | ≠ |
| 38 | JLT | < (signed) |
| 39 | JGT | > (signed) |
| 40 | JLTU | < (unsigned) |
| 41 | JGTU | > (unsigned) |
### Environment call
- Type N
| Opcode | Name | Action |
|:------:|:-----:|:-------------------------------------:|
| 45 | ECALL | Cause a trap to the host environment |
| 42 | ECALL | Cause a trap to the host environment |
## Floating point operations
- Type RRR
- Type BBB
- `#0 ← #1 <op> #2`
| Opcode | Name | Action |
|:------:|:----:|:--------------:|
| 46 | ADDF | Addition |
| 47 | MULF | Multiplication |
| 43 | ADDF | Addition |
| 44 | MULF | Multiplication |
### Division-remainder
- Type RRRR
- Type BBBB
| Opcode | Name | Action |
|:------:|:----:|:--------------------------------------:|
| 48 | DIRF | Same flow applies as for integer `DIR` |
| 45 | DIRF | Same flow applies as for integer `DIR` |
## Floating point immediate operations
- Type RRI
- Type BBD
- `#0 ← #1 <op> imm #2`
| Opcode | Name | Action |
|:------:|:-----:|:--------------:|
| 49 | ADDFI | Addition |
| 50 | MULFI | Multiplication |
| 46 | ADDFI | Addition |
| 47 | MULFI | Multiplication |
# Registers
- There are 59 registers + one zero register (with index 0)
- There are 255 registers + one zero register (with index 0)
- Reading from zero register yields zero
- Writing to zero register is a no-op