Compare commits

..

1 commit

Author SHA1 Message Date
Erin be27a84c10 ASUnrbfPWIED FUZBERPGIHBN 2023-08-13 22:37:09 +02:00
28 changed files with 784 additions and 1090 deletions

Cargo.lock (generated, 113 changed lines)

@@ -15,9 +15,9 @@ dependencies = [
 [[package]]
 name = "allocator-api2"
-version = "0.2.16"
+version = "0.2.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5"
+checksum = "56fc6cf8dc8c4158eed8649f9b8b0ea1518eb62b544fe9490d66fa0b349eafe9"

 [[package]]
 name = "ariadne"
@@ -47,6 +47,36 @@ version = "1.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"

+[[package]]
+name = "convert_case"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e"
+
+[[package]]
+name = "delegate"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d358e0ec5c59a5e1603b933def447096886121660fc680dc1e64a0753981fe3c"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
+[[package]]
+name = "derive_more"
+version = "0.99.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321"
+dependencies = [
+ "convert_case",
+ "proc-macro2",
+ "quote",
+ "rustc_version",
+ "syn 1.0.109",
+]
+
 [[package]]
 name = "fnv"
 version = "1.0.7"
@@ -94,9 +124,20 @@ version = "0.1.0"
 name = "hbvm"
 version = "0.1.0"
 dependencies = [
+ "delegate",
+ "derive_more",
  "hbbytecode",
+ "paste",
+ "sealed",
+ "static_assertions",
 ]

+[[package]]
+name = "heck"
+version = "0.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8"
+
 [[package]]
 name = "lasso"
 version = "0.7.2"
@@ -147,7 +188,7 @@ dependencies = [
  "proc-macro2",
  "quote",
  "regex-syntax",
- "syn",
+ "syn 2.0.25",
 ]

 [[package]]
@@ -167,24 +208,24 @@ checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d"
 [[package]]
 name = "paste"
-version = "1.0.14"
+version = "1.0.13"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c"
+checksum = "b4b27ab7be369122c218afc2079489cdcb4b517c0a3fc386ff11e1fedfcc2b35"

 [[package]]
 name = "proc-macro2"
-version = "1.0.66"
+version = "1.0.64"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "18fb31db3f9bddb2ea821cde30a9f70117e3f119938b5ee630b7403aa6e2ead9"
+checksum = "78803b62cbf1f46fde80d7c0e803111524b9877184cfe7c3033659490ac7a7da"
 dependencies = [
  "unicode-ident",
 ]

 [[package]]
 name = "quote"
-version = "1.0.33"
+version = "1.0.29"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae"
+checksum = "573015e8ab27661678357f27dc26460738fd2b6c86e46f386fde94cb5d913105"
 dependencies = [
  "proc-macro2",
 ]
@@ -196,10 +237,54 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"

 [[package]]
-name = "syn"
-version = "2.0.29"
+name = "rustc_version"
+version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c324c494eba9d92503e6f1ef2e6df781e78f6a7705a0202d9801b198807d518a"
+checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366"
+dependencies = [
+ "semver",
+]
+
+[[package]]
+name = "sealed"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f4a8caec23b7800fb97971a1c6ae365b6239aaeddfb934d6265f8505e795699d"
+dependencies = [
+ "heck",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.25",
+]
+
+[[package]]
+name = "semver"
+version = "1.0.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bebd363326d05ec3e2f532ab7660680f3b02130d780c299bca73469d521bc0ed"
+
+[[package]]
+name = "static_assertions"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
+
+[[package]]
+name = "syn"
+version = "1.0.109"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "syn"
+version = "2.0.25"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "15e3fc8c0c74267e2df136e5e5fb656a464158aa57624053375eb9c8c6e25ae2"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -208,9 +293,9 @@ dependencies = [
 [[package]]
 name = "unicode-ident"
-version = "1.0.11"
+version = "1.0.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "301abaae475aa91687eb82514b328ab47a211a533026cb25fc3e519b86adfc3c"
+checksum = "22049a19f4a68748a168c0fc439f9516686aa045927ff767eca0a85101fb6e73"

 [[package]]
 name = "unicode-width"


@@ -1,3 +1,2 @@
 [workspace]
-resolver = "2"
 members = ["hbasm", "hbbytecode", "hbvm"]


@@ -1,29 +0,0 @@
# C ABI (proposal)
## C datatypes
| C Type      | Description              | Size (bits)  |
|:------------|:-------------------------|-------------:|
| char | Character / byte | 8 |
| short | Short integer | 16 |
| int | Integer | 32 |
| long | Long integer | 64 |
| long long | Long long integer | 64 |
| T* | Pointer | 64 |
| float | Single-precision float | 32 |
| double | Double-precision float | 64 |
| long double | Extended-precision float | **Bikeshed** |
## Registers
| Register | ABI Name | Description | Saver |
|:---------|:---------|:---------------|:-------|
| `r0` | — | Zero register | N/A |
| `r1` | `ra` | Return address | Caller |
| `r2` | `sp` | Stack pointer | Callee |
| `r3` | `tp` | Thread pointer | N/A |
**TODO:** Parameters
**TODO:** Saved
**TODO:** Temp
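The sizes above are in bits. For a quick cross-check, here is a hedged Rust sketch (assuming an LP64-style 64-bit target, which is what the proposal's 64-bit `long` implies; the `const` block is mine, not part of the repo):

```rust
// Sketch: sanity-check the proposed C type sizes (in bytes) from Rust.
use core::ffi::{c_char, c_double, c_float, c_int, c_longlong, c_short};
use core::mem::size_of;

const _: () = {
    assert!(size_of::<c_char>() == 1); // char: 8 bits
    assert!(size_of::<c_short>() == 2); // short: 16 bits
    assert!(size_of::<c_int>() == 4); // int: 32 bits
    assert!(size_of::<c_longlong>() == 8); // long long: 64 bits
    assert!(size_of::<*const u8>() == 8); // T*: 64 bits on a 64-bit target
    assert!(size_of::<c_float>() == 4); // float: 32 bits
    assert!(size_of::<c_double>() == 8); // double: 64 bits
};
```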


@@ -1,12 +0,0 @@
-- Add two numbers
-- A + B = C
-- r1 A
li r1, 2
-- r2 Result
li r2, 0
-- B = 4
addi r2, r1, 4
-- terminate execution
tx
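To make the encoding concrete, here is a hedged sketch of how a line like `li r1, 2` would be laid out in bytecode, based on the `ParamBD(u8, u64)` definition in hbbytecode (the helper function and the little-endian immediate layout are my assumptions):

```rust
// Sketch: encode `li r1, 2`. LI takes a ParamBD, i.e. one register
// byte plus a 64-bit immediate, so the instruction is 1 + 1 + 8 = 10 bytes.
fn encode_li(reg: u8, imm: u64) -> [u8; 10] {
    let mut out = [0u8; 10];
    out[0] = hbbytecode::opcode::LI; // opcode byte
    out[1] = reg; // target register
    out[2..].copy_from_slice(&imm.to_le_bytes()); // immediate (assumed little-endian)
    out
}
```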


@@ -1,16 +0,0 @@
-- r1 will be the temp in fahrenheit
-- r2 temp in celsius
-- r3/r4/r5 will be used by constants
-- (f - 32) * 5 / 9
li r1, 100
li r3, 32
li r4, 5
li r5, 9
sub r2, r1, r3
mul r2, r2, r4
dir r2, r0, r2, r5
tx
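Worked through: with r1 = 100 °F, the program computes (100 - 32) * 5 = 340, and the final `dir` divides 340 by 9, writing the quotient 37 into r2 and the remainder 7 into r0, where it is silently discarded (writes to the zero register are no-ops). So r2 holds 37 °C at `tx`.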


@@ -1,14 +1,11 @@
-li r255, 0
-ecall
-
-li r255, 1
-li r254, 1
-li r253, 100
-ecall
-
-li r255, 2
-li r254, 0
-li r253, 0
-ecall
-
-tx
+addi r1, r0, 1024
+addi r2, r1, 1024
+addi r3, r2, 1024
+addi r4, r3, 1024
+addi r5, r4, 1024
+addi r6, r5, 1024
+addi r7, r6, 1024
+addi r8, r7, 1024
+addi r9, r8, 1024
+
+ecall
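The rewritten program reads as a register-seeding test: each `addi` adds 1024 to the previous register, so r1 through r9 hold 1024, 2048, ..., 9216 when the single `ecall` is issued. The old version instead made three `ecall`s parameterized through r253-r255 and ended with `tx`.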


@@ -1,4 +1,3 @@
 li r20, 1010
 st r20, r24, 0, 1
 addi r24, r0, 10
-tx


@@ -13,15 +13,15 @@
 static_assert(CHAR_BIT == 8, "Cursed architectures are not supported");

 enum hbbc_Opcode: uint8_t {
     hbbc_Op_UN , hbbc_Op_TX , hbbc_Op_NOP , hbbc_Op_ADD , hbbc_Op_SUB , hbbc_Op_MUL ,
     hbbc_Op_AND , hbbc_Op_OR , hbbc_Op_XOR , hbbc_Op_SL , hbbc_Op_SR , hbbc_Op_SRS ,
     hbbc_Op_CMP , hbbc_Op_CMPU , hbbc_Op_DIR , hbbc_Op_NEG , hbbc_Op_NOT , hbbc_Op_ADDI ,
     hbbc_Op_MULI , hbbc_Op_ANDI , hbbc_Op_ORI , hbbc_Op_XORI , hbbc_Op_SLI , hbbc_Op_SRI ,
     hbbc_Op_SRSI , hbbc_Op_CMPI , hbbc_Op_CMPUI , hbbc_Op_CP , hbbc_Op_SWA , hbbc_Op_LI ,
-    hbbc_Op_LD , hbbc_Op_ST , hbbc_Op_BMC , hbbc_Op_BRC , hbbc_Op_JMP , hbbc_Op_JAL ,
-    hbbc_Op_JEQ , hbbc_Op_JNE , hbbc_Op_JLT , hbbc_Op_JGT , hbbc_Op_JLTU , hbbc_Op_JGTU ,
-    hbbc_Op_ECALL , hbbc_Op_ADDF , hbbc_Op_SUBF , hbbc_Op_MULF , hbbc_Op_DIRF , hbbc_Op_FMAF ,
-    hbbc_Op_NEGF , hbbc_Op_ITF , hbbc_Op_FTI , hbbc_Op_ADDFI , hbbc_Op_MULFI ,
+    hbbc_Op_LD , hbbc_Op_ST , hbbc_Op_BMC , hbbc_Op_BRC , hbbc_Op_JMP , hbbc_Op_JEQ ,
+    hbbc_Op_JNE , hbbc_Op_JLT , hbbc_Op_JGT , hbbc_Op_JLTU , hbbc_Op_JGTU , hbbc_Op_ECALL ,
+    hbbc_Op_ADDF , hbbc_Op_SUBF , hbbc_Op_MULF , hbbc_Op_DIRF , hbbc_Op_FMAF , hbbc_Op_NEGF ,
+    hbbc_Op_ITF , hbbc_Op_FTI , hbbc_Op_ADDFI , hbbc_Op_MULFI ,
 } typedef hbbc_Opcode;
 static_assert(sizeof(hbbc_Opcode) == 1);
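The renumbering visible here and in the Rust table below comes from dropping one opcode from the jump block: everything after it shifts down by one (ECALL 42 to 41, MULFI 52 to 51). Losing the dedicated plain jump is workable because writes to register 0 are discarded, so a jump-and-link targeting r0 behaves as an unconditional, non-linking jump.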


@@ -35,12 +35,12 @@ macro_rules! constmod {
 /// - I: Immediate
 /// - L: Memory load / store size (u16)
 /// - Other types are identity-mapped
 ///
 /// # BRC special-case
 /// BRC's 3rd operand is a plain byte, not a register. The encoding is the same, but in some cases it may matter.
 ///
 /// Please, if you distinguish between byte and register in your API, special-case this one.
 ///
 /// Sorry for that :(
 #[macro_export]
 macro_rules! invoke_with_def {
@@ -110,54 +110,58 @@ constmod!(pub opcode(u8) {
     BMC = 32, "BBD; [#0] ← [#1], imm #2 bytes";
     BRC = 33, "BBB; #0 ← #1, imm #2 registers";
-    JMP = 34, "D; Unconditional, non-linking absolute jump";
-    JAL = 35, "BD; Copy PC to #0 and unconditional jump [#1 + imm #2]";
-    JEQ = 36, "BBD; if #0 = #1 → jump imm #2";
-    JNE = 37, "BBD; if #0 ≠ #1 → jump imm #2";
-    JLT = 38, "BBD; if #0 < #1 → jump imm #2";
-    JGT = 39, "BBD; if #0 > #1 → jump imm #2";
-    JLTU = 40, "BBD; if #0 < #1 → jump imm #2 (unsigned)";
-    JGTU = 41, "BBD; if #0 > #1 → jump imm #2 (unsigned)";
-    ECALL = 42, "N; Issue system call";
-    ADDF = 43, "BBB; #0 ← #1 +. #2";
-    SUBF = 44, "BBB; #0 ← #1 -. #2";
-    MULF = 45, "BBB; #0 ← #1 *. #2";
-    DIRF = 46, "BBBB; #0 ← #2 / #3, #1 ← #2 % #3";
-    FMAF = 47, "BBBB; #0 ← (#1 * #2) + #3";
-    NEGF = 48, "BB; #0 ← -#1";
-    ITF = 49, "BB; #0 ← #1 as float";
-    FTI = 50, "BB; #0 ← #1 as int";
-    ADDFI = 51, "BBD; #0 ← #1 +. imm #2";
-    MULFI = 52, "BBD; #0 ← #1 *. imm #2";
+    JAL = 34, "BD; Copy PC to #0 and unconditional jump [#1 + imm #2]";
+    JEQ = 35, "BBD; if #0 = #1 → jump imm #2";
+    JNE = 36, "BBD; if #0 ≠ #1 → jump imm #2";
+    JLT = 37, "BBD; if #0 < #1 → jump imm #2";
+    JGT = 38, "BBD; if #0 > #1 → jump imm #2";
+    JLTU = 39, "BBD; if #0 < #1 → jump imm #2 (unsigned)";
+    JGTU = 40, "BBD; if #0 > #1 → jump imm #2 (unsigned)";
+    ECALL = 41, "N; Issue system call";
+    ADDF = 42, "BBB; #0 ← #1 +. #2";
+    SUBF = 43, "BBB; #0 ← #1 -. #2";
+    MULF = 44, "BBB; #0 ← #1 *. #2";
+    DIRF = 45, "BBBB; #0 ← #2 / #3, #1 ← #2 % #3";
+    FMAF = 46, "BBBB; #0 ← (#1 * #2) + #3";
+    NEGF = 47, "BB; #0 ← -#1";
+    ITF = 48, "BB; #0 ← #1 as float";
+    FTI = 49, "BB; #0 ← #1 as int";
+    ADDFI = 50, "BBD; #0 ← #1 +. imm #2";
+    MULFI = 51, "BBD; #0 ← #1 *. imm #2";
 });

 #[repr(packed)]
 pub struct ParamBBBB(pub u8, pub u8, pub u8, pub u8);
 #[repr(packed)]
 pub struct ParamBBB(pub u8, pub u8, pub u8);
 #[repr(packed)]
 pub struct ParamBBDH(pub u8, pub u8, pub u64, pub u16);
 #[repr(packed)]
 pub struct ParamBBD(pub u8, pub u8, pub u64);
 #[repr(packed)]
 pub struct ParamBBW(pub u8, pub u8, pub u32);
 #[repr(packed)]
 pub struct ParamBB(pub u8, pub u8);
 #[repr(packed)]
 pub struct ParamBD(pub u8, pub u64);

 /// # Safety
 /// Has to be valid to be decoded from bytecode.
-pub unsafe trait ProgramVal {}
-unsafe impl ProgramVal for ParamBBBB {}
-unsafe impl ProgramVal for ParamBBB {}
-unsafe impl ProgramVal for ParamBBDH {}
-unsafe impl ProgramVal for ParamBBD {}
-unsafe impl ProgramVal for ParamBBW {}
-unsafe impl ProgramVal for ParamBB {}
-unsafe impl ProgramVal for ParamBD {}
-unsafe impl ProgramVal for u64 {}
-unsafe impl ProgramVal for u8 {} // Opcode
-unsafe impl ProgramVal for () {}
+pub unsafe trait OpParam {}
+unsafe impl OpParam for ParamBBBB {}
+unsafe impl OpParam for ParamBBB {}
+unsafe impl OpParam for ParamBBDH {}
+unsafe impl OpParam for ParamBBD {}
+unsafe impl OpParam for ParamBBW {}
+unsafe impl OpParam for ParamBB {}
+unsafe impl OpParam for ParamBD {}
+unsafe impl OpParam for u64 {}
+unsafe impl OpParam for () {}
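Because every `Param*` struct is `#[repr(packed)]`, operands can be pulled out of the byte stream with a single unaligned read. A hedged sketch of that pattern (the helper below is mine, not part of the crate; it mirrors what the VM's `decode` does):

```rust
use hbbytecode::{OpParam, ParamBBD};

// Sketch: read one instruction's operands; the opcode is the byte at `pc`,
// the packed operand struct immediately follows it.
fn decode_param<T: OpParam>(code: &[u8], pc: usize) -> Option<T> {
    let start = pc.checked_add(1)?; // skip the opcode byte
    let bytes = code.get(start..start.checked_add(core::mem::size_of::<T>())?)?;
    // SAFETY: OpParam promises T is valid when decoded from (validated) bytecode.
    Some(unsafe { bytes.as_ptr().cast::<T>().read_unaligned() })
}

fn example(code: &[u8]) {
    // e.g. a `jal`-style instruction carries ParamBBD: two register bytes and a u64.
    if let Some(ParamBBD(save, reg, offset)) = decode_param::<ParamBBD>(code, 0) {
        let _ = (save, reg, offset);
    }
}
```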


@@ -12,4 +12,9 @@ alloc = []
 nightly = []

 [dependencies]
+delegate = "0.9"
+derive_more = "0.99"
 hbbytecode.path = "../hbbytecode"
+paste = "1.0"
+sealed = "0.5"
+static_assertions = "1.0"

Binary file not shown.

Binary file not shown.

Binary file not shown.


@@ -3,14 +3,11 @@
 use {
     hbbytecode::valider::validate,
     hbvm::{
-        mem::{
-            softpaging::{
-                paging::{PageTable, Permission},
-                HandlePageFault, PageSize, SoftPagedMem,
-            },
-            Address, MemoryAccessReason,
-        },
-        Vm,
+        softpaging::{
+            paging::{PageTable, Permission},
+            HandlePageFault, PageSize, SoftPagedMem,
+        },
+        MemoryAccessReason, Vm,
     },
     libfuzzer_sys::fuzz_target,
 };
@@ -19,13 +16,12 @@ fuzz_target!(|data: &[u8]| {
     if validate(data).is_ok() {
         let mut vm = unsafe {
             Vm::<_, 16384>::new(
-                SoftPagedMem::<_, true> {
+                SoftPagedMem {
                     pf_handler: TestTrapHandler,
                     program: data,
                     root_pt: Box::into_raw(Default::default()),
-                    icache: Default::default(),
                 },
-                Address::new(4),
+                0,
             )
         };
@@ -42,8 +38,6 @@
         for (i, page) in pages.into_iter().enumerate() {
             unmap_and_dealloc(&mut vm.memory, page, i as u64 * 4096);
         }
-
-        let _ = unsafe { Box::from_raw(vm.memory.root_pt) };
     }
 });
@@ -51,14 +45,14 @@ fn alloc_and_map(memory: &mut SoftPagedMem<TestTrapHandler>, at: u64) -> *mut u8
     let ptr = Box::into_raw(Box::<Page>::default()).cast();
     unsafe {
         memory
-            .map(ptr, Address::new(at), Permission::Write, PageSize::Size4K)
+            .map(ptr, at, Permission::Write, PageSize::Size4K)
             .unwrap()
     };
     ptr
 }

 fn unmap_and_dealloc(memory: &mut SoftPagedMem<TestTrapHandler>, ptr: *mut u8, from: u64) {
-    memory.unmap(Address::new(from)).unwrap();
+    memory.unmap(from).unwrap();
     let _ = unsafe { Box::from_raw(ptr.cast::<Page>()) };
 }
@@ -76,7 +70,7 @@ impl HandlePageFault for TestTrapHandler {
         &mut self,
         _: MemoryAccessReason,
         _: &mut PageTable,
-        _: Address,
+        _: u64,
         _: PageSize,
         _: *mut u8,
     ) -> bool {


@@ -1,8 +1,7 @@
 //! Block memory copier state machine

 use {
-    super::{mem::MemoryAccessReason, Memory, VmRunError},
-    crate::mem::Address,
+    super::{Memory, MemoryAccessReason, VmRunError},
     core::{mem::MaybeUninit, task::Poll},
 };
@@ -16,9 +15,9 @@ struct AlignedBuf([MaybeUninit<u8>; BUF_SIZE]);
 /// State for block memory copy
 pub struct BlockCopier {
     /// Source address
-    src: Address,
+    src: u64,
     /// Destination address
-    dst: Address,
+    dst: u64,
     /// How many buffer sizes to copy?
     n_buffers: usize,
     /// …and what remains after?
@@ -28,7 +27,7 @@ pub struct BlockCopier {
 impl BlockCopier {
     /// Construct a new one
     #[inline]
-    pub fn new(src: Address, dst: Address, count: usize) -> Self {
+    pub fn new(src: u64, dst: u64, count: usize) -> Self {
         Self {
             src,
             dst,
@@ -58,8 +57,17 @@
             }

             // Bump source and destination address
-            self.src += BUF_SIZE;
-            self.dst += BUF_SIZE;
+            //
+            // If we are over the address space, bail.
+            match self.src.checked_add(BUF_SIZE as u64) {
+                Some(n) => self.src = n,
+                None => return Poll::Ready(Err(BlkCopyError::OutOfBounds)),
+            };
+
+            match self.dst.checked_add(BUF_SIZE as u64) {
+                Some(n) => self.dst = n,
+                None => return Poll::Ready(Err(BlkCopyError::OutOfBounds)),
+            };

             self.n_buffers -= 1;
@@ -92,15 +100,15 @@
 #[inline]
 unsafe fn act(
     memory: &mut impl Memory,
-    src: Address,
-    dst: Address,
+    src: u64,
+    dst: u64,
     buf: *mut u8,
     count: usize,
 ) -> Result<(), BlkCopyError> {
     // Load to buffer
     memory
         .load(src, buf, count)
-        .map_err(|super::mem::LoadError(addr)| BlkCopyError {
+        .map_err(|super::LoadError(addr)| BlkCopyError::Access {
             access_reason: MemoryAccessReason::Load,
             addr,
         })?;
@@ -108,7 +116,7 @@ unsafe fn act(
     // Store from buffer
     memory
         .store(dst, buf, count)
-        .map_err(|super::mem::StoreError(addr)| BlkCopyError {
+        .map_err(|super::StoreError(addr)| BlkCopyError::Access {
             access_reason: MemoryAccessReason::Store,
             addr,
         })?;
@@ -118,18 +126,30 @@
 /// Error occurred when copying a block of memory
 #[derive(Clone, Copy, Debug, PartialEq, Eq)]
-pub struct BlkCopyError {
-    /// Kind of access
-    access_reason: MemoryAccessReason,
-    /// VM Address
-    addr: Address,
+pub enum BlkCopyError {
+    /// Memory access error
+    Access {
+        /// Kind of access
+        access_reason: MemoryAccessReason,
+        /// VM Address
+        addr: u64,
+    },
+    /// Address out of bounds
+    OutOfBounds,
 }

 impl From<BlkCopyError> for VmRunError {
     fn from(value: BlkCopyError) -> Self {
-        match value.access_reason {
-            MemoryAccessReason::Load => Self::LoadAccessEx(value.addr),
-            MemoryAccessReason::Store => Self::StoreAccessEx(value.addr),
+        match value {
+            BlkCopyError::Access {
+                access_reason: MemoryAccessReason::Load,
+                addr,
+            } => Self::LoadAccessEx(addr),
+            BlkCopyError::Access {
+                access_reason: MemoryAccessReason::Store,
+                addr,
+            } => Self::StoreAccessEx(addr),
+            BlkCopyError::OutOfBounds => Self::AddrOutOfBounds,
        }
    }
 }
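For orientation, `poll` is designed to be re-driven once per VM cycle. A hedged in-crate sketch of the driving loop (`bmc` is a private module, so this mirrors the dispatch loop rather than a public API, and the `poll` signature is inferred from its call sites):

```rust
use core::task::Poll;

// Sketch: drive a BlockCopier to completion in one go.
fn copy_block(mem: &mut impl Memory, src: u64, dst: u64, count: usize) -> Result<(), VmRunError> {
    let mut copier = BlockCopier::new(src, dst, count);
    loop {
        match copier.poll(mem) {
            Poll::Ready(Ok(())) => return Ok(()), // all buffers copied
            Poll::Ready(Err(e)) => return Err(e.into()), // access fault or address overflow
            Poll::Pending => (), // the real VM yields here and re-polls next cycle
        }
    }
}
```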


@@ -12,21 +12,23 @@
 #![no_std]
 #![cfg_attr(feature = "nightly", feature(fn_align))]
-#![warn(missing_docs)]
+#![warn(missing_docs, clippy::missing_docs_in_private_items)]

-use mem::{Memory, Address};
-
 #[cfg(feature = "alloc")]
 extern crate alloc;

-pub mod mem;
+pub mod softpaging;
 pub mod value;

 mod bmc;
-mod vmrun;
-mod utils;

-use {bmc::BlockCopier, value::Value};
+use {
+    bmc::BlockCopier,
+    core::{cmp::Ordering, mem::size_of, ops, slice::SliceIndex},
+    derive_more::Display,
+    hbbytecode::{OpParam, ParamBB, ParamBBB, ParamBBBB, ParamBBD, ParamBBDH, ParamBBW, ParamBD},
+    value::{Value, ValueVariant},
+};

 /// HoleyBytes Virtual Machine
 pub struct Vm<Mem, const TIMER_QUOTIENT: usize> {
@@ -40,7 +42,7 @@ pub struct Vm<Mem, const TIMER_QUOTIENT: usize> {
     pub memory: Mem,

     /// Program counter
-    pub pc: Address,
+    pub pc: usize,

     /// Program timer
     timer: usize,
@@ -57,15 +59,404 @@
     ///
     /// # Safety
     /// Program code has to be validated
-    pub unsafe fn new(memory: Mem, entry: Address) -> Self {
+    pub unsafe fn new(memory: Mem, entry: u64) -> Self {
         Self {
             registers: [Value::from(0_u64); 256],
             memory,
-            pc: entry,
+            pc: entry as _,
             timer: 0,
             copier: None,
         }
     }
/// Execute program
///
/// Program can return [`VmRunError`] if trap handling fails
#[cfg_attr(feature = "nightly", repr(align(4096)))]
pub fn run(&mut self) -> Result<VmRunOk, VmRunError> {
use hbbytecode::opcode::*;
loop {
// Big match
//
// Contribution guide:
// - Zero register shall never be overwritten. Its value always has to be 0.
// - Prefer `Self::read_reg` and `Self::write_reg` functions
// - Extract parameters using `param!` macro
// - Prioritise speed over code size
// - Memory is cheap, CPUs not that much
// - Do not heap allocate at any cost
// - Yes, user-provided trap handler may allocate,
// but that is not our »fault«.
// - Unsafe is kinda must, but be sure you have validated everything
// - Your contributions have to pass sanitizers and Miri
// - Strictly follow the spec
// - The spec does not specify how you perform actions, in what order,
// just that the observable effects have to be performed in order and
// correctly.
// - Yes, we assume you run 64 bit CPU. Else ?conradluget a better CPU
// sorry 8 bit fans, HBVM won't run on your Speccy :(
unsafe {
match *self
.memory
.load_prog(self.pc)
.ok_or(VmRunError::ProgramFetchLoadEx(self.pc as _))?
{
UN => {
self.decode::<()>();
return Err(VmRunError::Unreachable);
}
TX => {
self.decode::<()>();
return Ok(VmRunOk::End);
}
NOP => self.decode::<()>(),
ADD => self.binary_op(u64::wrapping_add),
SUB => self.binary_op(u64::wrapping_sub),
MUL => self.binary_op(u64::wrapping_mul),
AND => self.binary_op::<u64>(ops::BitAnd::bitand),
OR => self.binary_op::<u64>(ops::BitOr::bitor),
XOR => self.binary_op::<u64>(ops::BitXor::bitxor),
SL => self.binary_op(|l, r| u64::wrapping_shl(l, r as u32)),
SR => self.binary_op(|l, r| u64::wrapping_shr(l, r as u32)),
SRS => self.binary_op(|l, r| i64::wrapping_shr(l, r as u32)),
CMP => {
// Compare a0 <=> a1
// < → -1
// > → 1
// = → 0
let ParamBBB(tg, a0, a1) = self.decode();
self.write_reg(
tg,
self.read_reg(a0)
.cast::<i64>()
.cmp(&self.read_reg(a1).cast::<i64>())
as i64,
);
}
CMPU => {
// Unsigned comparison
let ParamBBB(tg, a0, a1) = self.decode();
self.write_reg(
tg,
self.read_reg(a0)
.cast::<u64>()
.cmp(&self.read_reg(a1).cast::<u64>())
as i64,
);
}
NOT => {
// Bitwise negation
let ParamBB(tg, a0) = self.decode();
self.write_reg(tg, !self.read_reg(a0).cast::<u64>());
}
NEG => {
// Logical negation (0 becomes 1, nonzero becomes 0)
let ParamBB(tg, a0) = self.decode();
self.write_reg(
tg,
match self.read_reg(a0).cast::<u64>() {
0 => 1_u64,
_ => 0,
},
);
}
DIR => {
// Fused Division-Remainder
let ParamBBBB(dt, rt, a0, a1) = self.decode();
let a0 = self.read_reg(a0).cast::<u64>();
let a1 = self.read_reg(a1).cast::<u64>();
self.write_reg(dt, a0.checked_div(a1).unwrap_or(u64::MAX));
self.write_reg(rt, a0.checked_rem(a1).unwrap_or(u64::MAX));
}
ADDI => self.binary_op_imm(u64::wrapping_add),
MULI => self.binary_op_imm(u64::wrapping_mul),
ANDI => self.binary_op_imm::<u64>(ops::BitAnd::bitand),
ORI => self.binary_op_imm::<u64>(ops::BitOr::bitor),
XORI => self.binary_op_imm::<u64>(ops::BitXor::bitxor),
SLI => self.binary_op_ims(u64::wrapping_shl),
SRI => self.binary_op_ims(u64::wrapping_shr),
SRSI => self.binary_op_ims(i64::wrapping_shr),
CMPI => {
let ParamBBD(tg, a0, imm) = self.decode();
self.write_reg(
tg,
self.read_reg(a0)
.cast::<i64>()
.cmp(&Value::from(imm).cast::<i64>())
as i64,
);
}
CMPUI => {
let ParamBBD(tg, a0, imm) = self.decode();
self.write_reg(tg, self.read_reg(a0).cast::<u64>().cmp(&imm) as i64);
}
CP => {
let ParamBB(tg, a0) = self.decode();
self.write_reg(tg, self.read_reg(a0));
}
SWA => {
// Swap registers
let ParamBB(r0, r1) = self.decode();
match (r0, r1) {
(0, 0) => (),
(dst, 0) | (0, dst) => self.write_reg(dst, 0_u64),
(r0, r1) => {
core::ptr::swap(
self.registers.get_unchecked_mut(usize::from(r0)),
self.registers.get_unchecked_mut(usize::from(r1)),
);
}
}
}
LI => {
let ParamBD(tg, imm) = self.decode();
self.write_reg(tg, imm);
}
LD => {
// Load. If loading more than register size, continue on adjacent registers
let ParamBBDH(dst, base, off, count) = self.decode();
let n: u8 = match dst {
0 => 1,
_ => 0,
};
self.memory.load(
self.ldst_addr_uber(dst, base, off, count, n)?,
self.registers
.as_mut_ptr()
.add(usize::from(dst) + usize::from(n))
.cast(),
usize::from(count).saturating_sub(n.into()),
)?;
}
ST => {
// Store. Same rules apply as to LD
let ParamBBDH(dst, base, off, count) = self.decode();
self.memory.store(
self.ldst_addr_uber(dst, base, off, count, 0)?,
self.registers.as_ptr().add(usize::from(dst)).cast(),
count.into(),
)?;
}
BMC => {
// Block memory copy
match if let Some(copier) = &mut self.copier {
// There is some copier, poll.
copier.poll(&mut self.memory)
} else {
// There is none, make one!
let ParamBBD(src, dst, count) = self.decode();
// So we are still on BMC on next cycle
self.pc -= size_of::<ParamBBD>() + 1;
self.copier = Some(BlockCopier::new(
self.read_reg(src).cast(),
self.read_reg(dst).cast(),
count as _,
));
self.copier
.as_mut()
.unwrap_unchecked() // SAFETY: We just assigned there
.poll(&mut self.memory)
} {
// We are done, shift program counter
core::task::Poll::Ready(Ok(())) => {
self.copier = None;
self.pc += size_of::<ParamBBD>() + 1;
}
// Error, shift program counter (for consistency)
// and yield error
core::task::Poll::Ready(Err(e)) => {
self.pc += size_of::<ParamBBD>() + 1;
return Err(e.into());
}
// Not done yet, proceed to next cycle
core::task::Poll::Pending => (),
}
}
BRC => {
// Block register copy
let ParamBBB(src, dst, count) = self.decode();
if src.checked_add(count).is_none() || dst.checked_add(count).is_none() {
return Err(VmRunError::RegOutOfBounds);
}
core::ptr::copy(
self.registers.get_unchecked(usize::from(src)),
self.registers.get_unchecked_mut(usize::from(dst)),
usize::from(count),
);
}
JAL => {
// Jump and link. Save PC after this instruction to
// specified register and jump to reg + offset.
let ParamBBD(save, reg, offset) = self.decode();
self.write_reg(save, self.pc as u64);
self.pc =
(self.read_reg(reg).cast::<u64>().saturating_add(offset)) as usize;
}
// Conditional jumps, jump only to immediates
JEQ => self.cond_jmp::<u64>(Ordering::Equal),
JNE => {
let ParamBBD(a0, a1, jt) = self.decode();
if self.read_reg(a0).cast::<u64>() != self.read_reg(a1).cast::<u64>() {
self.pc = jt as usize;
}
}
JLT => self.cond_jmp::<i64>(Ordering::Less),
JGT => self.cond_jmp::<i64>(Ordering::Greater),
JLTU => self.cond_jmp::<u64>(Ordering::Less),
JGTU => self.cond_jmp::<u64>(Ordering::Greater),
ECALL => {
self.decode::<()>();
// So we don't get timer interrupt after ECALL
if TIMER_QUOTIENT != 0 {
self.timer = self.timer.wrapping_add(1);
}
return Ok(VmRunOk::Ecall);
}
ADDF => self.binary_op::<f64>(ops::Add::add),
SUBF => self.binary_op::<f64>(ops::Sub::sub),
MULF => self.binary_op::<f64>(ops::Mul::mul),
DIRF => {
let ParamBBBB(dt, rt, a0, a1) = self.decode();
let a0 = self.read_reg(a0).cast::<f64>();
let a1 = self.read_reg(a1).cast::<f64>();
self.write_reg(dt, a0 / a1);
self.write_reg(rt, a0 % a1);
}
FMAF => {
let ParamBBBB(dt, a0, a1, a2) = self.decode();
self.write_reg(
dt,
self.read_reg(a0).cast::<f64>() * self.read_reg(a1).cast::<f64>()
+ self.read_reg(a2).cast::<f64>(),
);
}
NEGF => {
let ParamBB(dt, a0) = self.decode();
self.write_reg(dt, -self.read_reg(a0).cast::<f64>());
}
ITF => {
let ParamBB(dt, a0) = self.decode();
self.write_reg(dt, self.read_reg(a0).cast::<i64>() as f64);
}
FTI => {
let ParamBB(dt, a0) = self.decode();
self.write_reg(dt, self.read_reg(a0).cast::<f64>() as i64);
}
ADDFI => self.binary_op_imm::<f64>(ops::Add::add),
MULFI => self.binary_op_imm::<f64>(ops::Mul::mul),
op => return Err(VmRunError::InvalidOpcode(op)),
}
}
if TIMER_QUOTIENT != 0 {
self.timer = self.timer.wrapping_add(1);
if self.timer % TIMER_QUOTIENT == 0 {
return Ok(VmRunOk::Timer);
}
}
}
}
/// Decode instruction operands
#[inline(always)]
unsafe fn decode<T: OpParam>(&mut self) -> T {
let pc1 = self.pc + 1;
let data = self
.memory
.load_prog_unchecked(pc1..pc1 + size_of::<T>())
.as_ptr()
.cast::<T>()
.read();
self.pc += 1 + size_of::<T>();
data
}
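For instance, an instruction carrying `ParamBBD` packs into 1 + 1 + 8 = 10 operand bytes, so `decode` reads those 10 bytes starting at `pc + 1` and leaves `pc` advanced by 11, pointing at the next opcode.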
/// Perform a binary operation over two registers
#[inline(always)]
unsafe fn binary_op<T: ValueVariant>(&mut self, op: impl Fn(T, T) -> T) {
let ParamBBB(tg, a0, a1) = self.decode();
self.write_reg(
tg,
op(self.read_reg(a0).cast::<T>(), self.read_reg(a1).cast::<T>()),
);
}
/// Perform binary operation over register and immediate
#[inline(always)]
unsafe fn binary_op_imm<T: ValueVariant>(&mut self, op: impl Fn(T, T) -> T) {
let ParamBBD(tg, reg, imm) = self.decode();
self.write_reg(
tg,
op(self.read_reg(reg).cast::<T>(), Value::from(imm).cast::<T>()),
);
}
/// Perform binary operation over register and shift immediate
#[inline(always)]
unsafe fn binary_op_ims<T: ValueVariant>(&mut self, op: impl Fn(T, u32) -> T) {
let ParamBBW(tg, reg, imm) = self.decode();
self.write_reg(tg, op(self.read_reg(reg).cast::<T>(), imm));
}
/// Jump to `#2` if the ordering of `#0 <=> #1` matches the expected one
#[inline(always)]
unsafe fn cond_jmp<T: ValueVariant + Ord>(&mut self, expected: Ordering) {
let ParamBBD(a0, a1, ja) = self.decode();
if self
.read_reg(a0)
.cast::<T>()
.cmp(&self.read_reg(a1).cast::<T>())
== expected
{
self.pc = ja as usize;
}
}
/// Read register
#[inline(always)]
unsafe fn read_reg(&self, n: u8) -> Value {
*self.registers.get_unchecked(n as usize)
}
/// Write a register.
/// Writing to register 0 is a no-op.
#[inline(always)]
unsafe fn write_reg(&mut self, n: u8, value: impl Into<Value>) {
if n != 0 {
*self.registers.get_unchecked_mut(n as usize) = value.into();
}
}
/// Load / Store Address check-computation überfunction
#[inline(always)]
unsafe fn ldst_addr_uber(
&self,
dst: u8,
base: u8,
offset: u64,
size: u16,
adder: u8,
) -> Result<u64, VmRunError> {
let reg = dst.checked_add(adder).ok_or(VmRunError::RegOutOfBounds)?;
if usize::from(reg) * 8 + usize::from(size) > 2048 {
Err(VmRunError::RegOutOfBounds)
} else {
self.read_reg(base)
.cast::<u64>()
.checked_add(offset)
.and_then(|x| x.checked_add(adder.into()))
.ok_or(VmRunError::AddrOutOfBounds)
}
}
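A worked check of that bound: the register file is 256 × 8 = 2048 bytes, so a load such as `ld r254, r0, 0, 32` yields reg * 8 + size = 254 * 8 + 32 = 2064 > 2048 and is rejected with `RegOutOfBounds` before any memory is touched.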
}

/// Virtual machine halt error
@@ -76,13 +467,13 @@ pub enum VmRunError {
     InvalidOpcode(u8),

     /// Unhandled load access exception
-    LoadAccessEx(Address),
+    LoadAccessEx(u64),

     /// Unhandled instruction load access exception
-    ProgramFetchLoadEx(Address),
+    ProgramFetchLoadEx(u64),

     /// Unhandled store access exception
-    StoreAccessEx(Address),
+    StoreAccessEx(u64),

     /// Register out-of-bounds access
     RegOutOfBounds,
@@ -106,3 +497,88 @@ pub enum VmRunOk {
    /// Environment call
    Ecall,
}
/// Load-store memory access
pub trait Memory {
/// Load data from memory on address
///
/// # Safety
/// - Shall not overrun the buffer
unsafe fn load(&mut self, addr: u64, target: *mut u8, count: usize) -> Result<(), LoadError>;
/// Store data to memory on address
///
/// # Safety
/// - Shall not overrun the buffer
unsafe fn store(
&mut self,
addr: u64,
source: *const u8,
count: usize,
) -> Result<(), StoreError>;
/// Fetch bytes from program section
///
/// # Why?
/// Even though Holey Bytes programs operate within a single
/// address space, the actual implementation may differ, so
/// program fetches go through this separate function.
///
/// Also if your memory implementation differentiates between
/// readable and executable memory, this is the way to distinguish
/// the loads.
///
/// # Notice for implementors
/// This is a hot function. This is called on each opcode fetch
/// and instruction decode. Inlining the implementation is highly
/// recommended!
///
/// If you utilise some more heavy memory implementation, consider
/// performing caching as HBVM does not do that for you.
///
/// Has to return all the requested data. If it cannot fetch data of the
/// requested length, return [`None`].
fn load_prog<I>(&mut self, index: I) -> Option<&I::Output>
where
I: SliceIndex<[u8]>;
/// Fetch bytes from program section, unchecked.
///
/// # Safety
/// You really have to be sure you get the bytes, got me?
unsafe fn load_prog_unchecked<I>(&mut self, index: I) -> &I::Output
where
I: SliceIndex<[u8]>;
}
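To illustrate the contract, here is a hedged sketch of a minimal implementation over flat buffers. The method signatures come from the trait above; `FlatMemory` itself is a hypothetical type for illustration, not part of the crate:

```rust
use core::slice::SliceIndex;
use hbvm::{LoadError, Memory, StoreError};

/// Hypothetical flat memory: a Vec as RAM, program bytes kept separate.
struct FlatMemory {
    ram: Vec<u8>,
    program: Vec<u8>,
}

impl Memory for FlatMemory {
    unsafe fn load(&mut self, addr: u64, target: *mut u8, count: usize) -> Result<(), LoadError> {
        let start = addr as usize;
        let end = start.checked_add(count).ok_or(LoadError(addr))?;
        let src = self.ram.get(start..end).ok_or(LoadError(addr))?;
        core::ptr::copy_nonoverlapping(src.as_ptr(), target, count);
        Ok(())
    }

    unsafe fn store(&mut self, addr: u64, source: *const u8, count: usize) -> Result<(), StoreError> {
        let start = addr as usize;
        let end = start.checked_add(count).ok_or(StoreError(addr))?;
        let dst = self.ram.get_mut(start..end).ok_or(StoreError(addr))?;
        core::ptr::copy_nonoverlapping(source, dst.as_mut_ptr(), count);
        Ok(())
    }

    // Hot path: called on every opcode fetch, hence the aggressive inlining.
    #[inline(always)]
    fn load_prog<I: SliceIndex<[u8]>>(&mut self, index: I) -> Option<&I::Output> {
        self.program.get(index)
    }

    #[inline(always)]
    unsafe fn load_prog_unchecked<I: SliceIndex<[u8]>>(&mut self, index: I) -> &I::Output {
        self.program.get_unchecked(index)
    }
}
```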
/// Unhandled load access trap
#[derive(Clone, Copy, Display, Debug, PartialEq, Eq)]
#[display(fmt = "Load access error at address {_0:#x}")]
pub struct LoadError(pub u64);
/// Unhandled store access trap
#[derive(Clone, Copy, Display, Debug, PartialEq, Eq)]
#[display(fmt = "Store access error at address {_0:#x}")]
pub struct StoreError(pub u64);
/// Reason to access memory
#[derive(Clone, Copy, Display, Debug, PartialEq, Eq)]
pub enum MemoryAccessReason {
/// Memory was accessed for load (read)
Load,
/// Memory was accessed for store (write)
Store,
}
impl From<LoadError> for VmRunError {
fn from(value: LoadError) -> Self {
Self::LoadAccessEx(value.0)
}
}
impl From<StoreError> for VmRunError {
fn from(value: StoreError) -> Self {
Self::StoreAccessEx(value.0)
}
}


@@ -1,13 +1,8 @@
-use hbvm::mem::Address;
-
 use {
     hbbytecode::valider::validate,
     hbvm::{
-        mem::{
-            softpaging::{paging::PageTable, HandlePageFault, PageSize, SoftPagedMem},
-            MemoryAccessReason,
-        },
-        Vm,
+        softpaging::{paging::PageTable, HandlePageFault, PageSize, SoftPagedMem},
+        MemoryAccessReason, Vm,
     },
     std::io::{stdin, Read},
 };
@@ -22,13 +17,12 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
     } else {
         unsafe {
             let mut vm = Vm::<_, 0>::new(
-                SoftPagedMem::<_, true> {
+                SoftPagedMem {
                     pf_handler: TestTrapHandler,
                     program: &prog,
                     root_pt: Box::into_raw(Default::default()),
-                    icache: Default::default(),
                 },
-                Address::new(4),
+                4,
             );
             let data = {
                 let ptr = std::alloc::alloc_zeroed(std::alloc::Layout::from_size_align_unchecked(
@@ -43,8 +37,8 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
             vm.memory
                 .map(
                     data,
-                    Address::new(8192),
-                    hbvm::mem::softpaging::paging::Permission::Write,
+                    8192,
+                    hbvm::softpaging::paging::Permission::Write,
                     PageSize::Size4K,
                 )
                 .unwrap();
@@ -52,12 +46,12 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
             println!("Program interrupt: {:?}", vm.run());
             println!("{:?}", vm.registers);
+            println!("{:?}", core::slice::from_raw_parts(data, 4096));
             std::alloc::dealloc(
                 data,
                 std::alloc::Layout::from_size_align_unchecked(4096, 4096),
             );
-            vm.memory.unmap(Address::new(8192)).unwrap();
-            let _ = Box::from_raw(vm.memory.root_pt);
+            vm.memory.unmap(8192).unwrap();
         }
     }
     Ok(())
@@ -74,7 +68,7 @@ impl HandlePageFault for TestTrapHandler {
         &mut self,
         _: MemoryAccessReason,
         _: &mut PageTable,
-        _: Address,
+        _: u64,
         _: PageSize,
         _: *mut u8,
     ) -> bool {


@@ -1,110 +0,0 @@
//! Virtual(?) memory address
use core::{fmt::Debug, ops};
use crate::utils::impl_display;
/// Memory address
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Address(u64);
impl Address {
/// A null address
pub const NULL: Self = Self(0);
/// Saturating integer addition. Computes self + rhs, saturating at the numeric bounds instead of overflowing.
#[inline]
pub fn saturating_add<T: AddressOp>(self, rhs: T) -> Self {
Self(self.0.saturating_add(rhs.cast_u64()))
}
/// Saturating integer subtraction. Computes self - rhs, saturating at the numeric bounds instead of overflowing.
#[inline]
pub fn saturating_sub<T: AddressOp>(self, rhs: T) -> Self {
Self(self.0.saturating_sub(rhs.cast_u64()))
}
/// Cast or if smaller, truncate to [`usize`]
pub fn truncate_usize(self) -> usize {
self.0 as _
}
/// Get inner value
#[inline(always)]
pub fn get(self) -> u64 {
self.0
}
/// Construct new address
#[inline(always)]
pub fn new(val: u64) -> Self {
Self(val)
}
/// Do something with inner value
#[inline(always)]
pub fn map(self, f: impl Fn(u64) -> u64) -> Self {
Self(f(self.0))
}
}
impl_display!(for Address =>
|Address(a)| "{a:0x}"
);
impl<T: AddressOp> ops::Add<T> for Address {
type Output = Self;
#[inline]
fn add(self, rhs: T) -> Self::Output {
Self(self.0.wrapping_add(rhs.cast_u64()))
}
}
impl<T: AddressOp> ops::Sub<T> for Address {
type Output = Self;
#[inline]
fn sub(self, rhs: T) -> Self::Output {
Self(self.0.wrapping_sub(rhs.cast_u64()))
}
}
impl<T: AddressOp> ops::AddAssign<T> for Address {
fn add_assign(&mut self, rhs: T) {
self.0 = self.0.wrapping_add(rhs.cast_u64())
}
}
impl<T: AddressOp> ops::SubAssign<T> for Address {
fn sub_assign(&mut self, rhs: T) {
self.0 = self.0.wrapping_sub(rhs.cast_u64())
}
}
impl From<Address> for u64 {
#[inline(always)]
fn from(value: Address) -> Self {
value.0
}
}
impl Debug for Address {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
write!(f, "[{:0x}]", self.0)
}
}
/// Can perform address operations with
pub trait AddressOp {
/// Cast to u64, truncating or extending
fn cast_u64(self) -> u64;
}
macro_rules! impl_address_ops(($($ty:ty),* $(,)?) => {
$(impl AddressOp for $ty {
#[inline(always)]
fn cast_u64(self) -> u64 { self as _ }
})*
});
impl_address_ops!(u8, u16, u32, u64, usize);
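For the record, the removed wrapper's semantics: the `ops` impls wrap around the 64-bit space while the named methods saturate. A hedged sketch of how it was used (`demo` is mine, written against the old `hbvm::mem::Address` API that this commit deletes):

```rust
// Sketch against the removed API (old `hbvm::mem::Address`).
fn demo() {
    use hbvm::mem::Address;

    let end = Address::new(u64::MAX);
    assert_eq!((end + 1u8).get(), 0); // `+` wraps (wrapping_add)
    assert_eq!(end.saturating_add(1u8).get(), u64::MAX); // named method saturates
}
```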


@@ -1,86 +0,0 @@
//! Memory implementations
pub mod softpaging;
mod addr;
pub use addr::Address;
use {crate::utils::impl_display, hbbytecode::ProgramVal};
/// Load-store memory access
pub trait Memory {
/// Load data from memory on address
///
/// # Safety
/// - Shall not overrun the buffer
unsafe fn load(
&mut self,
addr: Address,
target: *mut u8,
count: usize,
) -> Result<(), LoadError>;
/// Store data to memory on address
///
/// # Safety
/// - Shall not overrun the buffer
unsafe fn store(
&mut self,
addr: Address,
source: *const u8,
count: usize,
) -> Result<(), StoreError>;
/// Read from program memory to execute
///
/// # Safety
/// - Data read have to be valid
unsafe fn prog_read<T: ProgramVal>(&mut self, addr: Address) -> Option<T>;
/// Read from program memory to execute
///
/// # Safety
/// - You have to be really sure that these bytes are there, understand?
unsafe fn prog_read_unchecked<T: ProgramVal>(&mut self, addr: Address) -> T;
}
/// Unhandled load access trap
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct LoadError(pub Address);
impl_display!(for LoadError =>
|LoadError(a)| "Load access error at address {a}",
);
/// Unhandled store access trap
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct StoreError(pub Address);
impl_display!(for StoreError =>
|StoreError(a)| "Store access error at address {a}",
);
/// Reason to access memory
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum MemoryAccessReason {
/// Memory was accessed for load (read)
Load,
/// Memory was accessed for store (write)
Store,
}
impl_display!(for MemoryAccessReason => match {
Self::Load => const "Load";
Self::Store => const "Store";
});
impl From<LoadError> for crate::VmRunError {
fn from(value: LoadError) -> Self {
Self::LoadAccessEx(value.0)
}
}
impl From<StoreError> for crate::VmRunError {
fn from(value: StoreError) -> Self {
Self::StoreAccessEx(value.0)
}
}


@@ -1,109 +0,0 @@
//! Program instruction cache
use crate::mem::Address;
use {
super::{lookup::AddrPageLookuper, paging::PageTable, PageSize},
core::{
mem::{size_of, MaybeUninit},
ptr::{copy_nonoverlapping, NonNull},
},
};
/// Instruction cache
#[derive(Clone, Debug)]
pub struct ICache {
/// Current page address base
base: Address,
/// Current page pointer
data: Option<NonNull<u8>>,
/// Current page size
size: PageSize,
/// Address mask
mask: u64,
}
impl Default for ICache {
fn default() -> Self {
Self {
base: Address::NULL,
data: Default::default(),
size: PageSize::Size4K,
mask: Default::default(),
}
}
}
impl ICache {
/// Fetch instruction from cache
///
/// # Safety
/// `T` should be valid to read from instruction memory
pub(super) unsafe fn fetch<T>(
&mut self,
addr: Address,
root_pt: *const PageTable,
) -> Option<T> {
let mut ret = MaybeUninit::<T>::uninit();
let pbase = self
.data
.or_else(|| self.fetch_page(self.base + self.size, root_pt))?;
// Get address base
let base = addr.map(|x| x & self.mask);
// Base not matching, fetch anew
if base != self.base {
self.fetch_page(base, root_pt)?;
};
let offset = addr.get() & !self.mask;
let requ_size = size_of::<T>();
// Page overflow
let rem = (offset as usize)
.saturating_add(requ_size)
.saturating_sub(self.size as _);
let first_copy = requ_size.saturating_sub(rem);
// Copy non-overflowing part
copy_nonoverlapping(pbase.as_ptr(), ret.as_mut_ptr().cast::<u8>(), first_copy);
// Copy overflow
if rem != 0 {
let pbase = self.fetch_page(self.base + self.size, root_pt)?;
// Unlikely, unsupported scenario
if rem > self.size as _ {
return None;
}
copy_nonoverlapping(
pbase.as_ptr(),
ret.as_mut_ptr().cast::<u8>().add(first_copy),
rem,
);
}
Some(ret.assume_init())
}
/// Fetch a page
unsafe fn fetch_page(&mut self, addr: Address, pt: *const PageTable) -> Option<NonNull<u8>> {
let res = AddrPageLookuper::new(addr, 0, pt).next()?.ok()?;
if !super::perm_check::executable(res.perm) {
return None;
}
(self.size, self.mask) = match res.size {
4096 => (PageSize::Size4K, !((1 << 8) - 1)),
2097152 => (PageSize::Size2M, !((1 << (8 * 2)) - 1)),
1073741824 => (PageSize::Size1G, !((1 << (8 * 3)) - 1)),
_ => return None,
};
self.data = Some(NonNull::new(res.ptr)?);
self.base = addr.map(|x| x & self.mask);
self.data
}
}


@@ -1,7 +1,5 @@
 //! Address lookup

-use crate::mem::addr::Address;
-
 use super::{
     addr_extract_index,
     paging::{PageTable, Permission},
@@ -11,7 +9,7 @@ use super::{
 /// Good result from address split
 pub struct AddrPageLookupOk {
     /// Virtual address
-    pub vaddr: Address,
+    pub vaddr: u64,

     /// Pointer to the start for performing the operation
     pub ptr: *mut u8,
@@ -26,7 +24,7 @@ pub struct AddrPageLookupOk {
 /// Erroneous address split result
 pub struct AddrPageLookupError {
     /// Address of failure
-    pub addr: Address,
+    pub addr: u64,

     /// Requested page size
     pub size: PageSize,
@@ -35,7 +33,7 @@ pub struct AddrPageLookupError {
 /// Address splitter into pages
 pub struct AddrPageLookuper {
     /// Current address
-    addr: Address,
+    addr: u64,

     /// Size left
     size: usize,
@@ -47,7 +45,7 @@ pub struct AddrPageLookuper {
 impl AddrPageLookuper {
     /// Create a new page lookuper
     #[inline]
-    pub const fn new(addr: Address, size: usize, pagetable: *const PageTable) -> Self {
+    pub const fn new(addr: u64, size: usize, pagetable: *const PageTable) -> Self {
         Self {
             addr,
             size,
@@ -57,7 +55,7 @@ impl AddrPageLookuper {
     /// Bump address by size X
     pub fn bump(&mut self, page_size: PageSize) {
-        self.addr += page_size;
+        self.addr += page_size as u64;
         self.size = self.size.saturating_sub(page_size as _);
     }
 }
@@ -113,7 +111,7 @@ impl Iterator for AddrPageLookuper {
     };

     // Get available byte count in the selected page with offset
-    let avail = (size as usize).saturating_sub(offset).clamp(0, self.size);
+    let avail = (size as usize - offset).clamp(0, self.size);
     self.bump(size);

     Some(Ok(AddrPageLookupOk {


@@ -1,7 +1,5 @@
 //! Automatic memory mapping

-use crate::{mem::addr::Address, utils::impl_display};
-
 use {
     super::{
         addr_extract_index,
@@ -9,9 +7,10 @@ use {
         PageSize, SoftPagedMem,
     },
     alloc::boxed::Box,
+    derive_more::Display,
 };

-impl<'p, A, const OUT_PROG_EXEC: bool> SoftPagedMem<'p, A, OUT_PROG_EXEC> {
+impl<'p, A> SoftPagedMem<'p, A> {
     /// Maps host's memory into VM's memory
     ///
     /// # Safety
@@ -21,7 +20,7 @@ impl<'p, A, const OUT_PROG_EXEC: bool> SoftPagedMem<'p, A, OUT_PROG_EXEC> {
     pub unsafe fn map(
         &mut self,
         host: *mut u8,
-        target: Address,
+        target: u64,
         perm: Permission,
         pagesize: PageSize,
     ) -> Result<(), MapError> {
@@ -83,7 +82,7 @@ impl<'p, A, const OUT_PROG_EXEC: bool> SoftPagedMem<'p, A, OUT_PROG_EXEC> {
     ///
     /// If this errors, it only means there is no entry to unmap; in most cases
     /// it should just be ignored.
-    pub fn unmap(&mut self, addr: Address) -> Result<(), NothingToUnmap> {
+    pub fn unmap(&mut self, addr: u64) -> Result<(), NothingToUnmap> {
         let mut current_pt = self.root_pt;
         let mut page_tables = [core::ptr::null_mut(); 5];
@@ -142,25 +141,22 @@
 }

 /// Error mapping
-#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+#[derive(Clone, Copy, Display, Debug, PartialEq, Eq)]
 pub enum MapError {
     /// Entry was already mapped
+    #[display(fmt = "There is already a page mapped on specified address")]
     AlreadyMapped,
     /// When walking, a page entry was
     /// encountered.
+    #[display(fmt = "There was a page mapped on the way instead of node")]
     PageOnNode,
 }

-impl_display!(for MapError => match {
-    Self::AlreadyMapped => "There is already a page mapped on specified address";
-    Self::PageOnNode => "There was a page mapped on the way instead of node";
-});
-
 /// There was no entry in page table to unmap
 ///
 /// No worry, don't panic, nothing bad has happened,
 /// but if you are 120% sure there should be something,
 /// double-check your addresses.
-#[derive(Clone, Copy, Debug)]
+#[derive(Clone, Copy, Display, Debug)]
+#[display(fmt = "There was no entry to unmap")]
 pub struct NothingToUnmap;
-
-impl_display!(for NothingToUnmap => "There is no entry to unmap");


@@ -1,6 +1,5 @@
 //! Platform independent, software paged memory implementation

-pub mod icache;
 pub mod lookup;
 pub mod paging;
@@ -8,43 +7,29 @@ pub mod paging;
 pub mod mapping;

 use {
-    super::{addr::Address, LoadError, Memory, MemoryAccessReason, StoreError},
-    core::mem::size_of,
-    icache::ICache,
-    lookup::{AddrPageLookupError, AddrPageLookupOk, AddrPageLookuper},
+    self::lookup::{AddrPageLookupError, AddrPageLookupOk, AddrPageLookuper},
+    super::{LoadError, Memory, MemoryAccessReason, StoreError},
+    core::slice::SliceIndex,
     paging::{PageTable, Permission},
 };

 /// HoleyBytes software paged memory
-///
-/// - `OUT_PROG_EXEC`: set to `false` to disable executing program
-///   code that is not contained in the initially provided program,
-///   even if the pages are executable
 #[derive(Clone, Debug)]
-pub struct SoftPagedMem<'p, PfH, const OUT_PROG_EXEC: bool = true> {
+pub struct SoftPagedMem<'p, PfH> {
     /// Root page table
     pub root_pt: *mut PageTable,
     /// Page fault handler
     pub pf_handler: PfH,
     /// Program memory segment
     pub program: &'p [u8],
-    /// Program instruction cache
-    pub icache: ICache,
 }

-impl<'p, PfH: HandlePageFault, const OUT_PROG_EXEC: bool> Memory
-    for SoftPagedMem<'p, PfH, OUT_PROG_EXEC>
-{
+impl<'p, PfH: HandlePageFault> Memory for SoftPagedMem<'p, PfH> {
     /// Load value from an address
     ///
     /// # Safety
     /// Applies the same conditions as for [`core::ptr::copy_nonoverlapping`]
-    unsafe fn load(
-        &mut self,
-        addr: Address,
-        target: *mut u8,
-        count: usize,
-    ) -> Result<(), LoadError> {
+    unsafe fn load(&mut self, addr: u64, target: *mut u8, count: usize) -> Result<(), LoadError> {
         self.memory_access(
             MemoryAccessReason::Load,
             addr,
@@ -62,7 +47,7 @@
     /// Applies the same conditions as for [`core::ptr::copy_nonoverlapping`]
     unsafe fn store(
         &mut self,
-        addr: Address,
+        addr: u64,
         source: *const u8,
         count: usize,
     ) -> Result<(), StoreError> {
@@ -77,36 +62,26 @@
             .map_err(StoreError)
     }

-    /// Fetch slice from program memory section
     #[inline(always)]
-    unsafe fn prog_read<T>(&mut self, addr: Address) -> Option<T> {
-        if OUT_PROG_EXEC && addr.truncate_usize() > self.program.len() {
-            return self.icache.fetch::<T>(addr, self.root_pt);
-        }
-
-        let addr = addr.truncate_usize();
-        self.program
-            .get(addr..addr + size_of::<T>())
-            .map(|x| x.as_ptr().cast::<T>().read())
+    fn load_prog<I>(&mut self, index: I) -> Option<&I::Output>
+    where
+        I: SliceIndex<[u8]>,
+    {
+        self.program.get(index)
     }

-    /// Fetch slice from program memory section, unchecked!
     #[inline(always)]
-    unsafe fn prog_read_unchecked<T>(&mut self, addr: Address) -> T {
-        if OUT_PROG_EXEC && addr.truncate_usize() > self.program.len() {
-            return self
-                .icache
-                .fetch::<T>(addr, self.root_pt)
-                .unwrap_or_else(|| core::mem::zeroed());
-        }
-
-        self.program
-            .as_ptr()
-            .add(addr.truncate_usize())
-            .cast::<T>()
-            .read()
+    unsafe fn load_prog_unchecked<I>(&mut self, index: I) -> &I::Output
+    where
+        I: SliceIndex<[u8]>,
+    {
+        self.program.get_unchecked(index)
     }
 }

-impl<'p, PfH: HandlePageFault, const OUT_PROG_EXEC: bool> SoftPagedMem<'p, PfH, OUT_PROG_EXEC> {
+impl<'p, PfH: HandlePageFault> SoftPagedMem<'p, PfH> {
     // Everyone behold, the holy function, the god of HBVM memory accesses!

     /// Split address to pages, check their permissions and feed pointers with offset
@@ -117,32 +92,32 @@
     fn memory_access(
         &mut self,
         reason: MemoryAccessReason,
-        src: Address,
+        src: u64,
         mut dst: *mut u8,
         len: usize,
         permission_check: fn(Permission) -> bool,
         action: fn(*mut u8, *mut u8, usize),
-    ) -> Result<(), Address> {
+    ) -> Result<(), u64> {
         // Memory load from program section
-        let (src, len) = if src.truncate_usize() < self.program.len() as _ {
+        let (src, len) = if src < self.program.len() as _ {
             // Allow only loads
             if reason != MemoryAccessReason::Load {
                 return Err(src);
             }

             // Determine how much data to copy from here
-            let to_copy = len.clamp(0, self.program.len().saturating_sub(src.truncate_usize()));
+            let to_copy = len.clamp(0, self.program.len().saturating_sub(src as _));

             // Perform action
             action(
-                unsafe { self.program.as_ptr().add(src.truncate_usize()).cast_mut() },
+                unsafe { self.program.as_ptr().add(src as _).cast_mut() },
                 dst,
                 to_copy,
             );

             // Return shifted from what we've already copied
             (
-                src.saturating_add(to_copy as u64),
+                src.saturating_add(to_copy as _),
                 len.saturating_sub(to_copy),
             )
         } else {
@@ -203,9 +178,8 @@
 ///
 /// The level shall not be larger than 4, otherwise
 /// the output of the function is unspecified (yes, it can also panic :)
-pub fn addr_extract_index(addr: Address, lvl: u8) -> usize {
+pub fn addr_extract_index(addr: u64, lvl: u8) -> usize {
     debug_assert!(lvl <= 4);
-    let addr = addr.get();
     usize::try_from((addr >> (lvl * 8 + 12)) & ((1 << 8) - 1)).expect("?conradluget a better CPU")
 }
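Worked example: page tables here have 256 entries and the page offset is 12 bits, so for addr = 0x2000 and lvl = 0 the index is (0x2000 >> 12) & 0xFF = 2, while at lvl = 1 the shift grows to 20 bits and the index is 0.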
@@ -234,22 +208,6 @@
     }
 }

-impl core::ops::Add<PageSize> for Address {
-    type Output = Self;
-
-    #[inline(always)]
-    fn add(self, rhs: PageSize) -> Self::Output {
-        self + (rhs as u64)
-    }
-}
-
-impl core::ops::AddAssign<PageSize> for Address {
-    #[inline(always)]
-    fn add_assign(&mut self, rhs: PageSize) {
-        *self = Self::new(self.get().wrapping_add(rhs as u64));
-    }
-}
-
 /// Permission checks
 pub mod perm_check {
     use super::paging::Permission;
@@ -287,7 +245,7 @@ pub trait HandlePageFault {
         &mut self,
         reason: MemoryAccessReason,
         pagetable: &mut PageTable,
-        vaddr: Address,
+        vaddr: u64,
         size: PageSize,
         dataptr: *mut u8,
     ) -> bool


@@ -1,53 +0,0 @@
macro_rules! impl_display {
(for $ty:ty => $(|$selfty:pat_param|)? $fmt:literal $(, $($param:expr),+)? $(,)?) => {
impl ::core::fmt::Display for $ty {
fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
$(let $selfty = self;)?
write!(f, $fmt, $($param),*)
}
}
};
(for $ty:ty => $str:literal) => {
impl ::core::fmt::Display for $ty {
fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
f.write_str($str)
}
}
};
(for $ty:ty => match {$(
$bind:pat => $($const:ident)? $fmt:literal $(,$($params:tt)*)?;
)*}) => {
impl ::core::fmt::Display for $ty {
fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
match self {
$(
$bind => $crate::utils::internal::impl_display_match_fragment!($($const,)? f, $fmt $(, $($params)*)?)
),*
}
}
}
}
}
#[doc(hidden)]
pub(crate) mod internal {
macro_rules! impl_display_match_fragment {
(const, $f:expr, $lit:literal) => {
$f.write_str($lit)
};
($f:expr, $fmt:literal $(, $($params:tt)*)?) => {
write!($f, $fmt, $($($params)*)?)
};
}
pub(crate) use impl_display_match_fragment;
}
macro_rules! static_assert_eq(($l:expr, $r:expr $(,)?) => {
const _: [(); ($l != $r) as usize] = [];
});
pub(crate) use {impl_display, static_assert_eq};
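These helpers are dropped in favour of the `derive_more` crate added in Cargo.lock. A hedged before/after sketch (the `Timeout` type is hypothetical, chosen only to show the shape of the migration):

```rust
use derive_more::Display;

// Before (the macro's second arm): impl_display!(for Timeout => "VM timed out");
// After, using derive_more 0.99's attribute syntax:
#[derive(Display)]
#[display(fmt = "VM timed out")]
struct Timeout;
```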

View file

@ -1,5 +1,7 @@
//! HoleyBytes register value definition //! HoleyBytes register value definition
use sealed::sealed;
/// Define [`Value`] union /// Define [`Value`] union
/// ///
/// # Safety /// # Safety
@ -26,12 +28,12 @@ macro_rules! value_def {
} }
} }
crate::utils::static_assert_eq!( static_assertions::const_assert_eq!(
core::mem::size_of::<$ty>(), core::mem::size_of::<$ty>(),
core::mem::size_of::<Value>(), core::mem::size_of::<Value>(),
); );
impl private::Sealed for $ty {} #[sealed]
unsafe impl ValueVariant for $ty {} unsafe impl ValueVariant for $ty {}
)* )*
}; };
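Per type, the macro now emits crate-provided pieces instead of the homegrown ones. For `u64`, the tail of the expansion looks roughly like the following (a sketch assembled from the hunk above, not a verbatim expansion):

```rust
// Size must match the union so `cast` stays sound
static_assertions::const_assert_eq!(
    core::mem::size_of::<u64>(),
    core::mem::size_of::<Value>(),
);

// `#[sealed]` (from the `sealed` crate) generates the private-module
// trick that the removed `mod private { pub trait Sealed {} }` did by hand.
#[sealed]
unsafe impl ValueVariant for u64 {}
```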
@ -42,13 +44,13 @@ impl Value {
#[inline] #[inline]
pub fn cast<V: ValueVariant>(self) -> V { pub fn cast<V: ValueVariant>(self) -> V {
/// Evil. /// Evil.
/// ///
/// Transmute cannot be performed with generic type /// Transmute cannot be performed with generic type
/// as size is unknown, so union is used. /// as size is unknown, so union is used.
/// ///
/// # Safety /// # Safety
/// If [`ValueVariant`] implemented correctly, it's fine :) /// If [`ValueVariant`] implemented correctly, it's fine :)
/// ///
/// :ferrisClueless: /// :ferrisClueless:
union Transmute<Variant: ValueVariant> { union Transmute<Variant: ValueVariant> {
/// Self /// Self
@ -63,14 +65,11 @@ impl Value {
/// # Safety /// # Safety
/// - N/A, not to be implemented manually /// - N/A, not to be implemented manually
pub unsafe trait ValueVariant: private::Sealed + Copy + Into<Value> {} #[sealed]
pub unsafe trait ValueVariant: Copy + Into<Value> {}
mod private {
pub trait Sealed {}
}
value_def!(u64, i64, f64); value_def!(u64, i64, f64);
crate::utils::static_assert_eq!(core::mem::size_of::<Value>(), 8); static_assertions::const_assert_eq!(core::mem::size_of::<Value>(), 8);
impl core::fmt::Debug for Value { impl core::fmt::Debug for Value {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {

View file

@ -1,409 +0,0 @@
//! Welcome to the land of The Great Dispatch Loop
//!
//! Have fun
use crate::mem::Address;
use {
super::{
bmc::BlockCopier,
mem::Memory,
value::{Value, ValueVariant},
Vm, VmRunError, VmRunOk,
},
core::{cmp::Ordering, mem::size_of, ops},
hbbytecode::{
ParamBB, ParamBBB, ParamBBBB, ParamBBD, ParamBBDH, ParamBBW, ParamBD, ProgramVal,
},
};
impl<Mem, const TIMER_QUOTIENT: usize> Vm<Mem, TIMER_QUOTIENT>
where
Mem: Memory,
{
/// Execute program
///
/// Program can return [`VmRunError`] if a trap handling failed
#[cfg_attr(feature = "nightly", repr(align(4096)))]
pub fn run(&mut self) -> Result<VmRunOk, VmRunError> {
use hbbytecode::opcode::*;
loop {
// Big match
//
// Contribution guide:
// - Zero register shall never be overwritten. Its value has to always be 0.
// - Prefer `Self::read_reg` and `Self::write_reg` functions
// - Extract parameters using `param!` macro
// - Prioritise speed over code size
// - Memory is cheap, CPUs not that much
// - Do not heap allocate at any cost
// - Yes, user-provided trap handler may allocate,
// but that is not our »fault«.
// - Unsafe is kinda must, but be sure you have validated everything
// - Your contributions have to pass sanitizers and Miri
// - Strictly follow the spec
// - The spec does not specify how you perform actions, in what order,
// just that the observable effects have to be performed in order and
// correctly.
// - Yes, we assume you run a 64-bit CPU. Else ?conradluget a better CPU
// sorry 8 bit fans, HBVM won't run on your Speccy :(
unsafe {
match self
.memory
.prog_read::<u8>(self.pc as _)
.ok_or(VmRunError::ProgramFetchLoadEx(self.pc as _))?
{
UN => {
self.decode::<()>();
return Err(VmRunError::Unreachable);
}
TX => {
self.decode::<()>();
return Ok(VmRunOk::End);
}
NOP => self.decode::<()>(),
ADD => self.binary_op(u64::wrapping_add),
SUB => self.binary_op(u64::wrapping_sub),
MUL => self.binary_op(u64::wrapping_mul),
AND => self.binary_op::<u64>(ops::BitAnd::bitand),
OR => self.binary_op::<u64>(ops::BitOr::bitor),
XOR => self.binary_op::<u64>(ops::BitXor::bitxor),
SL => self.binary_op(|l, r| u64::wrapping_shl(l, r as u32)),
SR => self.binary_op(|l, r| u64::wrapping_shr(l, r as u32)),
SRS => self.binary_op(|l, r| i64::wrapping_shr(l, r as u32)),
CMP => {
// Compare a0 <=> a1
// < → 0
// = → 1
// > → 2
let ParamBBB(tg, a0, a1) = self.decode();
self.write_reg(
tg,
self.read_reg(a0)
.cast::<i64>()
.cmp(&self.read_reg(a1).cast::<i64>())
as i64
+ 1,
);
}
CMPU => {
// Unsigned comparison
let ParamBBB(tg, a0, a1) = self.decode();
self.write_reg(
tg,
self.read_reg(a0)
.cast::<u64>()
.cmp(&self.read_reg(a1).cast::<u64>())
as i64
+ 1,
);
}
NOT => {
// Bitwise negation (one's complement)
let ParamBB(tg, a0) = self.decode();
self.write_reg(tg, !self.read_reg(a0).cast::<u64>());
}
NEG => {
// Logical negation (zero → 1, non-zero → 0)
let ParamBB(tg, a0) = self.decode();
self.write_reg(
tg,
match self.read_reg(a0).cast::<u64>() {
0 => 1_u64,
_ => 0,
},
);
}
DIR => {
// Fused Division-Remainder
let ParamBBBB(dt, rt, a0, a1) = self.decode();
let a0 = self.read_reg(a0).cast::<u64>();
let a1 = self.read_reg(a1).cast::<u64>();
self.write_reg(dt, a0.checked_div(a1).unwrap_or(u64::MAX));
self.write_reg(rt, a0.checked_rem(a1).unwrap_or(u64::MAX));
}
ADDI => self.binary_op_imm(u64::wrapping_add),
MULI => self.binary_op_imm(u64::wrapping_mul),
ANDI => self.binary_op_imm::<u64>(ops::BitAnd::bitand),
ORI => self.binary_op_imm::<u64>(ops::BitOr::bitor),
XORI => self.binary_op_imm::<u64>(ops::BitXor::bitxor),
SLI => self.binary_op_ims(u64::wrapping_shl),
SRI => self.binary_op_ims(u64::wrapping_shr),
SRSI => self.binary_op_ims(i64::wrapping_shr),
CMPI => {
let ParamBBD(tg, a0, imm) = self.decode();
self.write_reg(
tg,
self.read_reg(a0)
.cast::<i64>()
.cmp(&Value::from(imm).cast::<i64>())
as i64,
);
}
CMPUI => {
let ParamBBD(tg, a0, imm) = self.decode();
self.write_reg(tg, self.read_reg(a0).cast::<u64>().cmp(&imm) as i64);
}
CP => {
let ParamBB(tg, a0) = self.decode();
self.write_reg(tg, self.read_reg(a0));
}
SWA => {
// Swap registers
let ParamBB(r0, r1) = self.decode();
match (r0, r1) {
(0, 0) => (),
(dst, 0) | (0, dst) => self.write_reg(dst, 0_u64),
(r0, r1) => {
core::ptr::swap(
self.registers.get_unchecked_mut(usize::from(r0)),
self.registers.get_unchecked_mut(usize::from(r1)),
);
}
}
}
LI => {
let ParamBD(tg, imm) = self.decode();
self.write_reg(tg, imm);
}
LD => {
// Load. If loading more than register size, continue on adjacent registers
let ParamBBDH(dst, base, off, count) = self.decode();
let n: u8 = match dst {
0 => 1,
_ => 0,
};
self.memory.load(
self.ldst_addr_uber(dst, base, off, count, n)?,
self.registers
.as_mut_ptr()
.add(usize::from(dst) + usize::from(n))
.cast(),
usize::from(count).saturating_sub(n.into()),
)?;
}
ST => {
// Store. Same rules apply as to LD
let ParamBBDH(dst, base, off, count) = self.decode();
self.memory.store(
self.ldst_addr_uber(dst, base, off, count, 0)?,
self.registers.as_ptr().add(usize::from(dst)).cast(),
count.into(),
)?;
}
BMC => {
// Block memory copy
match if let Some(copier) = &mut self.copier {
// There is some copier, poll.
copier.poll(&mut self.memory)
} else {
// There is none, make one!
let ParamBBD(src, dst, count) = self.decode();
// So we are still on BMC on next cycle
self.pc -= size_of::<ParamBBD>() + 1;
self.copier = Some(BlockCopier::new(
Address::new(self.read_reg(src).cast()),
Address::new(self.read_reg(dst).cast()),
count as _,
));
self.copier
.as_mut()
.unwrap_unchecked() // SAFETY: We just assigned there
.poll(&mut self.memory)
} {
// We are done, shift program counter
core::task::Poll::Ready(Ok(())) => {
self.copier = None;
self.pc += size_of::<ParamBBD>() + 1;
}
// Error, shift program counter (for consistency)
// and yield error
core::task::Poll::Ready(Err(e)) => {
self.pc += size_of::<ParamBBD>() + 1;
return Err(e.into());
}
// Not done yet, proceed to next cycle
core::task::Poll::Pending => (),
}
}
BRC => {
// Block register copy
let ParamBBB(src, dst, count) = self.decode();
if src.checked_add(count).is_none() || dst.checked_add(count).is_none() {
return Err(VmRunError::RegOutOfBounds);
}
core::ptr::copy(
self.registers.get_unchecked(usize::from(src)),
self.registers.get_unchecked_mut(usize::from(dst)),
usize::from(count),
);
}
JAL => {
// Jump and link. Save PC after this instruction to
// specified register and jump to reg + offset.
let ParamBBD(save, reg, offset) = self.decode();
self.write_reg(save, self.pc.get());
self.pc =
Address::new(self.read_reg(reg).cast::<u64>().saturating_add(offset));
}
// Conditional jumps, jump only to immediates
JEQ => self.cond_jmp::<u64>(Ordering::Equal),
JNE => {
let ParamBBD(a0, a1, jt) = self.decode();
if self.read_reg(a0).cast::<u64>() != self.read_reg(a1).cast::<u64>() {
self.pc = Address::new(jt);
}
}
JLT => self.cond_jmp::<i64>(Ordering::Less),
JGT => self.cond_jmp::<i64>(Ordering::Greater),
JLTU => self.cond_jmp::<u64>(Ordering::Less),
JGTU => self.cond_jmp::<u64>(Ordering::Greater),
ECALL => {
self.decode::<()>();
// So we don't get a timer interrupt right after ECALL
if TIMER_QUOTIENT != 0 {
self.timer = self.timer.wrapping_add(1);
}
return Ok(VmRunOk::Ecall);
}
ADDF => self.binary_op::<f64>(ops::Add::add),
SUBF => self.binary_op::<f64>(ops::Sub::sub),
MULF => self.binary_op::<f64>(ops::Mul::mul),
DIRF => {
let ParamBBBB(dt, rt, a0, a1) = self.decode();
let a0 = self.read_reg(a0).cast::<f64>();
let a1 = self.read_reg(a1).cast::<f64>();
self.write_reg(dt, a0 / a1);
self.write_reg(rt, a0 % a1);
}
FMAF => {
let ParamBBBB(dt, a0, a1, a2) = self.decode();
self.write_reg(
dt,
self.read_reg(a0).cast::<f64>() * self.read_reg(a1).cast::<f64>()
+ self.read_reg(a2).cast::<f64>(),
);
}
NEGF => {
let ParamBB(dt, a0) = self.decode();
self.write_reg(dt, -self.read_reg(a0).cast::<f64>());
}
ITF => {
let ParamBB(dt, a0) = self.decode();
self.write_reg(dt, self.read_reg(a0).cast::<i64>() as f64);
}
FTI => {
let ParamBB(dt, a0) = self.decode();
self.write_reg(dt, self.read_reg(a0).cast::<f64>() as i64);
}
ADDFI => self.binary_op_imm::<f64>(ops::Add::add),
MULFI => self.binary_op_imm::<f64>(ops::Mul::mul),
op => return Err(VmRunError::InvalidOpcode(op)),
}
}
if TIMER_QUOTIENT != 0 {
self.timer = self.timer.wrapping_add(1);
if self.timer % TIMER_QUOTIENT == 0 {
return Ok(VmRunOk::Timer);
}
}
}
}
/// Decode instruction operands
#[inline(always)]
unsafe fn decode<T: ProgramVal>(&mut self) -> T {
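// PC sits on the opcode byte; operands begin one byte after it.
// Read them unchecked, then advance PC past opcode + operands.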
let pc1 = self.pc + 1_u64;
let data = self.memory.prog_read_unchecked::<T>(pc1 as _);
self.pc += 1 + size_of::<T>();
data
}
/// Perform binary operating over two registers
#[inline(always)]
unsafe fn binary_op<T: ValueVariant>(&mut self, op: impl Fn(T, T) -> T) {
let ParamBBB(tg, a0, a1) = self.decode();
self.write_reg(
tg,
op(self.read_reg(a0).cast::<T>(), self.read_reg(a1).cast::<T>()),
);
}
/// Perform binary operation over register and immediate
#[inline(always)]
unsafe fn binary_op_imm<T: ValueVariant>(&mut self, op: impl Fn(T, T) -> T) {
let ParamBBD(tg, reg, imm) = self.decode();
self.write_reg(
tg,
op(self.read_reg(reg).cast::<T>(), Value::from(imm).cast::<T>()),
);
}
/// Perform binary operation over register and shift immediate
#[inline(always)]
unsafe fn binary_op_ims<T: ValueVariant>(&mut self, op: impl Fn(T, u32) -> T) {
let ParamBBW(tg, reg, imm) = self.decode();
self.write_reg(tg, op(self.read_reg(reg).cast::<T>(), imm));
}
/// Jump at `#2` if ordering on `#0 <=> #1` matches the expected one
#[inline(always)]
unsafe fn cond_jmp<T: ValueVariant + Ord>(&mut self, expected: Ordering) {
let ParamBBD(a0, a1, ja) = self.decode();
if self
.read_reg(a0)
.cast::<T>()
.cmp(&self.read_reg(a1).cast::<T>())
== expected
{
self.pc = Address::new(ja);
}
}
/// Read register
#[inline(always)]
unsafe fn read_reg(&self, n: u8) -> Value {
*self.registers.get_unchecked(n as usize)
}
/// Write a register.
/// Writing to register 0 is no-op.
#[inline(always)]
unsafe fn write_reg(&mut self, n: u8, value: impl Into<Value>) {
if n != 0 {
*self.registers.get_unchecked_mut(n as usize) = value.into();
}
}
/// Load / Store Address check-computation überfunction
#[inline(always)]
unsafe fn ldst_addr_uber(
&self,
dst: u8,
base: u8,
offset: u64,
size: u16,
adder: u8,
) -> Result<Address, VmRunError> {
let reg = dst.checked_add(adder).ok_or(VmRunError::RegOutOfBounds)?;
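// 256 registers × 8 bytes = 2048-byte register file; the first written
// register plus the span of the access must stay inside it.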
if usize::from(reg) * 8 + usize::from(size) > 2048 {
Err(VmRunError::RegOutOfBounds)
} else {
self.read_reg(base)
.cast::<u64>()
.checked_add(offset)
.and_then(|x| x.checked_add(adder.into()))
.ok_or(VmRunError::AddrOutOfBounds)
.map(Address::new)
}
}
}
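For orientation, a hedged sketch of how a host embedding might drive this loop. Only `run`, `VmRunOk`, and `VmRunError` appear in this file; the `Vm` construction API is not shown here, so `vm` is assumed to already exist:

```rust
fn drive<Mem: Memory>(vm: &mut Vm<Mem, 4096>) -> Result<(), VmRunError> {
    loop {
        match vm.run()? {
            VmRunOk::End => return Ok(()),           // TX reached
            VmRunOk::Ecall => { /* service the environment call */ }
            VmRunOk::Timer => { /* timer quotient elapsed */ }
        }
    }
}
```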

50
spec.md
View file

@ -56,7 +56,7 @@
| 7 | OR | Bitor | | 7 | OR | Bitor |
| 8 | XOR | Bitxor | | 8 | XOR | Bitxor |
| 9 | SL | Unsigned left bitshift | | 9 | SL | Unsigned left bitshift |
| 10 | SR | Unsigned right bitshift | | 10 | SR | Unsigned right bitshift |
| 11 | SRS | Signed right bitshift | | 11 | SRS | Signed right bitshift |
### Comparison ### Comparison
@ -68,9 +68,9 @@
#### Comparison table #### Comparison table
| #1 *op* #2 | Result | | #1 *op* #2 | Result |
|:----------:|:------:| |:----------:|:------:|
| < | 0 | | < | -1 |
| = | 1 | | = | 0 |
| > | 2 | | > | 1 |
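The new encoding matches Rust's `core::cmp::Ordering` cast straight to an integer, which is presumably why it was changed; a quick illustrative check:

```rust
use core::cmp::Ordering;

assert_eq!(Ordering::Less as i64, -1);
assert_eq!(Ordering::Equal as i64, 0);
assert_eq!(Ordering::Greater as i64, 1);
```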
### Division-remainder ### Division-remainder
- Type BBBB - Type BBBB
@ -179,17 +179,11 @@
## Control flow ## Control flow
### Unconditional jump ### Unconditional jump
- Type D
| Opcode | Name | Action |
|:------:|:----:|:-------------------------------:|
| 34 | JMP | Unconditional, non-linking jump |
### Unconditional linking jump
- Type BBD - Type BBD
| Opcode | Name | Action | | Opcode | Name | Action |
|:------:|:----:|:--------------------------------------------------:| |:------:|:----:|:--------------------------------------------------:|
| 35 | JAL | Save PC past JAL to `#0` and jump at `#1 + imm #2` | | 34 | JAL | Save PC past JAL to `#0` and jump at `#1 + imm #2` |
### Conditional jumps ### Conditional jumps
- Type BBD - Type BBD
@ -197,19 +191,19 @@
| Opcode | Name | Comparison | | Opcode | Name | Comparison |
|:------:|:----:|:------------:| |:------:|:----:|:------------:|
| 36 | JEQ | = | | 35 | JEQ | = |
| 37 | JNE | ≠ | | 36 | JNE | ≠ |
| 38 | JLT | < (signed) | | 37 | JLT | < (signed) |
| 39 | JGT | > (signed) | | 38 | JGT | > (signed) |
| 40 | JLTU | < (unsigned) | | 39 | JLTU | < (unsigned) |
| 41 | JGTU | > (unsigned) | | 40 | JGTU | > (unsigned) |
### Environment call ### Environment call
- Type N - Type N
| Opcode | Name | Action | | Opcode | Name | Action |
|:------:|:-----:|:-------------------------------------:| |:------:|:-----:|:-------------------------------------:|
| 42 | ECALL | Cause a trap to the host environment | | 41 | ECALL | Cause a trap to the host environment |
## Floating point operations ## Floating point operations
- Type BBB - Type BBB
@ -217,29 +211,29 @@
| Opcode | Name | Action | | Opcode | Name | Action |
|:------:|:----:|:--------------:| |:------:|:----:|:--------------:|
| 43 | ADDF | Addition | | 42 | ADDF | Addition |
| 44 | SUBF | Subtraction | | 43 | SUBF | Subtraction |
| 45 | MULF | Multiplication | | 44 | MULF | Multiplication |
### Division-remainder ### Division-remainder
- Type BBBB - Type BBBB
| Opcode | Name | Action | | Opcode | Name | Action |
|:------:|:----:|:-------------------------:| |:------:|:----:|:-------------------------:|
| 46 | DIRF | Same as for integer `DIR` | | 45 | DIRF | Same as for integer `DIR` |
### Fused Multiply-Add ### Fused Multiply-Add
- Type BBBB - Type BBBB
| Opcode | Name | Action | | Opcode | Name | Action |
|:------:|:----:|:---------------------:| |:------:|:----:|:---------------------:|
| 47 | FMAF | `#0 ← (#1 * #2) + #3` | | 46 | FMAF | `#0 ← (#1 * #2) + #3` |
### Negation ### Negation
- Type BB - Type BB
| Opcode | Name | Action | | Opcode | Name | Action |
|:------:|:----:|:----------:| |:------:|:----:|:----------:|
| 48 | NEGF | `#0 ← -#1` | | 47 | NEGF | `#0 ← -#1` |
### Conversion ### Conversion
- Type BB - Type BB
@ -248,8 +242,8 @@
| Opcode | Name | Action | | Opcode | Name | Action |
|:------:|:----:|:------------:| |:------:|:----:|:------------:|
| 49 | ITF | Int to Float | | 48 | ITF | Int to Float |
| 50 | FTI | Float to Int | | 49 | FTI | Float to Int |
## Floating point immediate operations ## Floating point immediate operations
- Type BBD - Type BBD
@ -257,8 +251,8 @@
| Opcode | Name | Action | | Opcode | Name | Action |
|:------:|:-----:|:--------------:| |:------:|:-----:|:--------------:|
| 51 | ADDFI | Addition | | 50 | ADDFI | Addition |
| 52 | MULFI | Multiplication | | 51 | MULFI | Multiplication |
# Registers # Registers
- There are 255 registers + one zero register (with index 0) - There are 255 registers + one zero register (with index 0)