From 6268c9677666ebca54d041ca7238af9887bd4b4b Mon Sep 17 00:00:00 2001
From: Erin
Date: Wed, 9 Aug 2023 02:33:03 +0200
Subject: [PATCH 1/9] Von-Neumann?

---
 hbvm/fuzz/Cargo.toml         |   3 +
 hbvm/fuzz/fuzz_targets/vm.rs |  34 +++++++----
 hbvm/src/lib.rs              | 107 +++++++++++++++++++++--------------
 hbvm/src/main.rs             |  15 +++--
 hbvm/src/softpaging/mod.rs   |  43 ++++++++------
 5 files changed, 126 insertions(+), 76 deletions(-)

diff --git a/hbvm/fuzz/Cargo.toml b/hbvm/fuzz/Cargo.toml
index f6bc616..42566bd 100644
--- a/hbvm/fuzz/Cargo.toml
+++ b/hbvm/fuzz/Cargo.toml
@@ -13,6 +13,9 @@ libfuzzer-sys = "0.4"
 [dependencies.hbvm]
 path = ".."
 
+[dependencies.hbbytecode]
+path = "../../hbbytecode"
+
 # Prevent this from interfering with workspaces
 [workspace]
 members = ["."]
diff --git a/hbvm/fuzz/fuzz_targets/vm.rs b/hbvm/fuzz/fuzz_targets/vm.rs
index e1d4266..b10c6d9 100644
--- a/hbvm/fuzz/fuzz_targets/vm.rs
+++ b/hbvm/fuzz/fuzz_targets/vm.rs
@@ -1,15 +1,30 @@
 #![no_main]
 
 use {
+    hbbytecode::valider::validate,
     hbvm::{
-        mem::{HandlePageFault, Memory, MemoryAccessReason, PageSize},
-        Vm,
+        softpaging::{
+            paging::{PageTable, Permission},
+            HandlePageFault, PageSize, SoftPagedMem,
+        },
+        MemoryAccessReason, Vm,
     },
     libfuzzer_sys::fuzz_target,
 };
 
 fuzz_target!(|data: &[u8]| {
-    if let Ok(mut vm) = Vm::<_, 16384>::new_validated(data, TestTrapHandler, Default::default()) {
+    if validate(data).is_ok() {
+        let mut vm = unsafe {
+            Vm::<_, 16384>::new(
+                SoftPagedMem {
+                    pf_handler: TestTrapHandler,
+                    program: data,
+                    root_pt: Box::into_raw(Default::default()),
+                },
+                0,
+            )
+        };
+
         // Alloc and map some memory
         let pages = [
             alloc_and_map(&mut vm.memory, 0),
@@ -26,22 +41,17 @@ fuzz_target!(|data: &[u8]| {
     }
 });
 
-fn alloc_and_map(memory: &mut Memory, at: u64) -> *mut u8 {
+fn alloc_and_map(memory: &mut SoftPagedMem<TestTrapHandler>, at: u64) -> *mut u8 {
     let ptr = Box::into_raw(Box::<Page>::default()).cast();
     unsafe {
         memory
-            .map(
-                ptr,
-                at,
-                hbvm::mem::paging::Permission::Write,
-                PageSize::Size4K,
-            )
+            .map(ptr, at, Permission::Write, PageSize::Size4K)
             .unwrap()
     };
     ptr
 }
 
-fn unmap_and_dealloc(memory: &mut Memory, ptr: *mut u8, from: u64) {
+fn unmap_and_dealloc(memory: &mut SoftPagedMem<TestTrapHandler>, ptr: *mut u8, from: u64) {
     memory.unmap(from).unwrap();
     let _ = unsafe { Box::from_raw(ptr.cast::<Page>()) };
 }
@@ -59,7 +69,7 @@ impl HandlePageFault for TestTrapHandler {
     fn page_fault(
         &mut self,
         _: MemoryAccessReason,
-        _: &mut Memory,
+        _: &mut PageTable,
         _: u64,
         _: PageSize,
         _: *mut u8,
diff --git a/hbvm/src/lib.rs b/hbvm/src/lib.rs
index 0cad1cd..752bf51 100644
--- a/hbvm/src/lib.rs
+++ b/hbvm/src/lib.rs
@@ -14,8 +14,6 @@
 #![cfg_attr(feature = "nightly", feature(fn_align))]
 #![warn(missing_docs, clippy::missing_docs_in_private_items)]
 
-use core::marker::PhantomData;
-
 #[cfg(feature = "alloc")]
 extern crate alloc;
 
@@ -26,16 +24,14 @@ mod bmc;
 
 use {
     bmc::BlockCopier,
-    core::{cmp::Ordering, mem::size_of, ops},
+    core::{cmp::Ordering, mem::size_of, ops, slice::SliceIndex},
     derive_more::Display,
-    hbbytecode::{
-        valider, OpParam, ParamBB, ParamBBB, ParamBBBB, ParamBBD, ParamBBDH, ParamBBW, ParamBD,
-    },
+    hbbytecode::{OpParam, ParamBB, ParamBBB, ParamBBBB, ParamBBD, ParamBBDH, ParamBBW, ParamBD},
     value::{Value, ValueVariant},
 };
 
 /// HoleyBytes Virtual Machine
-pub struct Vm<'a, Mem, const TIMER_QUOTIENT: usize> {
+pub struct Vm<Mem, const TIMER_QUOTIENT: usize> {
     /// Holds 256 registers
     ///
     /// Writing to register 0 is considered undefined behaviour
     pub registers: [Value; 256],
 
     /// Memory implementation
     pub memory: Mem,
 
     /// Program counter
     pub pc: usize,
 
-    /// Program
-    program: *const u8,
-
-    /// Cached program length (without unreachable end)
-    program_len: usize,
-
-    /// Program lifetime
-    _program_lt: PhantomData<&'a [u8]>,
-
     /// Program timer
     timer: usize,
 
@@ -64,7 +51,7 @@ pub struct Vm<'a, Mem, const TIMER_QUOTIENT: usize>
     copier: Option<BlockCopier>,
 }
 
-impl<'a, Mem, const TIMER_QUOTIENT: usize> Vm<'a, Mem, TIMER_QUOTIENT>
+impl<Mem, const TIMER_QUOTIENT: usize> Vm<Mem, TIMER_QUOTIENT>
 where
     Mem: Memory,
 {
@@ -72,25 +59,16 @@ where
     ///
     /// # Safety
    /// Program code has to be validated
-    pub unsafe fn new_unchecked(program: &'a [u8], memory: Mem) -> Self {
+    pub unsafe fn new(memory: Mem, entry: u64) -> Self {
         Self {
             registers: [Value::from(0_u64); 256],
             memory,
-            pc: 0,
-            program_len: program.len() - 12,
-            program: program[4..].as_ptr(),
-            _program_lt: Default::default(),
+            pc: entry as _,
             timer: 0,
             copier: None,
         }
     }
 
-    /// Create a new VM with program and trap handler only if it passes validation
-    pub fn new_validated(program: &'a [u8], memory: Mem) -> Result<Self, valider::Error> {
-        valider::validate(program)?;
-        Ok(unsafe { Self::new_unchecked(program, memory) })
-    }
-
     /// Execute program
     ///
     /// Program can return [`VmRunError`] if a trap handling failed
@@ -98,11 +76,6 @@ where
     pub fn run(&mut self) -> Result<VmRunOk, VmRunError> {
         use hbbytecode::opcode::*;
         loop {
-            // Check instruction boundary
-            if self.pc >= self.program_len {
-                return Err(VmRunError::AddrOutOfBounds);
-            }
-
             // Big match
             //
             // Contribution guide:
@@ -123,7 +96,11 @@ where
             //   - Yes, we assume you run 64 bit CPU. Else ?conradluget a better CPU
             //     sorry 8 bit fans, HBVM won't run on your Speccy :(
             unsafe {
-                match *self.program.add(self.pc) {
+                match *self
+                    .memory
+                    .load_prog(self.pc)
+                    .ok_or(VmRunError::ProgramFetchLoadEx(self.pc as _))?
+                {
                     UN => {
                         self.decode::<()>();
                         return Err(VmRunError::Unreachable);
@@ -388,15 +365,22 @@ where
     }
 
     /// Decode instruction operands
-    #[inline]
+    #[inline(always)]
     unsafe fn decode<T: OpParam>(&mut self) -> T {
-        let data = self.program.add(self.pc + 1).cast::<T>().read();
+        let pc1 = self.pc + 1;
+        let data = self
+            .memory
+            .load_prog_unchecked(pc1..pc1 + size_of::<T>())
+            .as_ptr()
+            .cast::<T>()
+            .read();
+
         self.pc += 1 + size_of::<T>();
         data
     }
 
     /// Perform binary operating over two registers
-    #[inline]
+    #[inline(always)]
     unsafe fn binary_op<T: ValueVariant>(&mut self, op: impl Fn(T, T) -> T) {
         let ParamBBB(tg, a0, a1) = self.decode();
         self.write_reg(
             tg,
             op(self.read_reg(a0).cast::<T>(), self.read_reg(a1).cast::<T>()),
         );
     }
 
     /// Perform binary operation over register and immediate
-    #[inline]
+    #[inline(always)]
     unsafe fn binary_op_imm<T: ValueVariant>(&mut self, op: impl Fn(T, T) -> T) {
         let ParamBBD(tg, reg, imm) = self.decode();
         self.write_reg(
             tg,
             op(self.read_reg(reg).cast::<T>(), Value::from(imm).cast::<T>()),
         );
     }
 
     /// Perform binary operation over register and shift immediate
-    #[inline]
+    #[inline(always)]
     unsafe fn binary_op_ims<T: ValueVariant>(&mut self, op: impl Fn(T, u32) -> T) {
         let ParamBBW(tg, reg, imm) = self.decode();
         self.write_reg(tg, op(self.read_reg(reg).cast::<T>(), imm));
     }
 
     /// Jump at `#3` if ordering on `#0 <=> #1` is equal to expected
-    #[inline]
+    #[inline(always)]
     unsafe fn cond_jmp<T: ValueVariant + Ord>(&mut self, expected: Ordering) {
         let ParamBBD(a0, a1, ja) = self.decode();
         if self
             .read_reg(a0)
             .cast::<T>()
             .cmp(&self.read_reg(a1).cast::<T>())
             == expected
         {
             self.pc = ja as usize;
         }
     }
 
     /// Read register
-    #[inline]
+    #[inline(always)]
     unsafe fn read_reg(&self, n: u8) -> Value {
         *self.registers.get_unchecked(n as usize)
     }
 
     /// Write a register.
     /// Writing to register 0 is no-op.
-    #[inline]
+    #[inline(always)]
     unsafe fn write_reg(&mut self, n: u8, value: impl Into<Value>) {
         if n != 0 {
             *self.registers.get_unchecked_mut(n as usize) = value.into();
         }
     }
 
     /// Load / Store Address check-computation überfunction
-    #[inline]
+    #[inline(always)]
     unsafe fn ldst_addr_uber(
         &self,
         dst: u8,
@@ -485,6 +469,9 @@ pub enum VmRunError {
     /// Unhandled load access exception
     LoadAccessEx(u64),
 
+    /// Unhandled instruction load access exception
+    ProgramFetchLoadEx(u64),
+
     /// Unhandled store access exception
     StoreAccessEx(u64),
 
@@ -529,6 +516,40 @@ pub trait Memory {
         source: *const u8,
         count: usize,
     ) -> Result<(), StoreError>;
+
+    /// Fetch bytes from program section
+    ///
+    /// # Why?
+    /// Even though Holey Bytes programs operate with a
+    /// single address space, the actual implementation
+    /// may differ, so for these reasons there is a
+    /// separate function.
+    ///
+    /// Also, if your memory implementation differentiates between
+    /// readable and executable memory, this is the way to distinguish
+    /// the loads.
+    ///
+    /// # Notice for implementors
+    /// This is a hot function. It is called on each opcode fetch
+    /// and instruction decode. Inlining the implementation is highly
+    /// recommended!
+    ///
+    /// If you utilise some heavier memory implementation, consider
+    /// performing caching, as HBVM does not do that for you.
+    ///
+    /// Has to return all the requested data. If it cannot fetch data of
+    /// the requested length, it shall return [`None`].
+    fn load_prog<I>(&mut self, index: I) -> Option<&I::Output>
+    where
+        I: SliceIndex<[u8]>;
+
+    /// Fetch bytes from program section, unchecked.
+    ///
+    /// # Safety
+    /// You really have to be sure you get the bytes, got me?
+    unsafe fn load_prog_unchecked<I>(&mut self, index: I) -> &I::Output
+    where
+        I: SliceIndex<[u8]>;
 }
 
 /// Unhandled load access trap
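[Illustration — not part of the series] The `load_prog` contract above is easiest to see on a flat memory. The sketch below assumes a hypothetical `FlatMem` type holding the program in a plain `Vec<u8>`; its checked fetch returns `None` whenever the requested range is not fully inside the program section, exactly as the trait documentation demands (load/store and the unchecked variant are analogous and omitted here).

    use core::slice::SliceIndex;

    /// Hypothetical flat memory: the whole program in one contiguous buffer.
    struct FlatMem {
        program: Vec<u8>,
    }

    impl FlatMem {
        /// Checked program fetch: all-or-nothing, per the `Memory` docs.
        #[inline(always)] // hot path: called on every opcode fetch
        fn load_prog<I>(&mut self, index: I) -> Option<&I::Output>
        where
            I: SliceIndex<[u8]>,
        {
            self.program.get(index)
        }
    }

    fn main() {
        let mut mem = FlatMem { program: vec![0xAB; 16] };
        assert!(mem.load_prog(0..16).is_some()); // fully inside: served
        assert!(mem.load_prog(10..20).is_none()); // runs past the end: refused
    }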
diff --git a/hbvm/src/main.rs b/hbvm/src/main.rs
index a2325b2..11e08dd 100644
--- a/hbvm/src/main.rs
+++ b/hbvm/src/main.rs
@@ -10,15 +10,20 @@ use {
 fn main() -> Result<(), Box<dyn std::error::Error>> {
     let mut prog = vec![];
     stdin().read_to_end(&mut prog)?;
-    println!("{prog:?}");
 
     if let Err(e) = validate(&prog) {
         eprintln!("Program validation error: {e:?}");
         return Ok(());
     } else {
         unsafe {
-            let mut vm =
-                Vm::<_, 0>::new_unchecked(&prog, SoftPagedMem::<TestTrapHandler>::default());
+            let mut vm = Vm::<_, 0>::new(
+                SoftPagedMem {
+                    pf_handler: TestTrapHandler,
+                    program: &prog,
+                    root_pt: Box::into_raw(Default::default()),
+                },
+                0,
+            );
             let data = {
                 let ptr = std::alloc::alloc_zeroed(std::alloc::Layout::from_size_align_unchecked(
                     4096, 4096,
@@ -32,7 +37,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
             vm.memory
                 .map(
                     data,
-                    0,
+                    8192,
                     hbvm::softpaging::paging::Permission::Write,
                     PageSize::Size4K,
                 )
@@ -46,7 +51,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
                 data,
                 std::alloc::Layout::from_size_align_unchecked(4096, 4096),
             );
-            vm.memory.unmap(0).unwrap();
+            vm.memory.unmap(8192).unwrap();
         }
     }
     Ok(())
diff --git a/hbvm/src/softpaging/mod.rs b/hbvm/src/softpaging/mod.rs
index 96005a4..ca2d3cd 100644
--- a/hbvm/src/softpaging/mod.rs
+++ b/hbvm/src/softpaging/mod.rs
@@ -4,6 +4,7 @@ pub mod paging;
 
 use {
     super::{LoadError, Memory, MemoryAccessReason, StoreError},
+    core::slice::SliceIndex,
     derive_more::Display,
     paging::{PageTable, Permission},
 };
@@ -13,14 +14,16 @@ use {alloc::boxed::Box, paging::PtEntry};
 
 /// HoleyBytes software paged memory
 #[derive(Clone, Debug)]
-pub struct SoftPagedMem<PfHandler> {
+pub struct SoftPagedMem<'p, PfH> {
     /// Root page table
     pub root_pt: *mut PageTable,
     /// Page fault handler
-    pub pf_handler: PfHandler,
+    pub pf_handler: PfH,
+    /// Program memory segment
+    pub program: &'p [u8],
 }
 
-impl<PfHandler: HandlePageFault> Memory for SoftPagedMem<PfHandler> {
+impl<'p, PfH: HandlePageFault> Memory for SoftPagedMem<'p, PfH> {
     /// Load value from an address
     ///
     /// # Safety
@@ -57,9 +60,27 @@ impl<PfHandler: HandlePageFault> Memory for SoftPagedMem<PfHandler> {
         )
         .map_err(StoreError)
     }
+
+    /// Fetch slice from program memory section
+    #[inline(always)]
+    fn load_prog<I>(&mut self, index: I) -> Option<&I::Output>
+    where
+        I: SliceIndex<[u8]>,
+    {
+        self.program.get(index)
+    }
+
+    /// Fetch slice from program memory section, unchecked!
+    #[inline(always)]
+    unsafe fn load_prog_unchecked<I>(&mut self, index: I) -> &I::Output
+    where
+        I: SliceIndex<[u8]>,
+    {
+        self.program.get_unchecked(index)
+    }
 }
 
-impl<PfHandler: HandlePageFault> SoftPagedMem<PfHandler> {
+impl<'p, PfH: HandlePageFault> SoftPagedMem<'p, PfH> {
     // Everyone behold, the holy function, the god of HBVM memory accesses!
 
     /// Split address to pages, check their permissions and feed pointers with offset
@@ -239,24 +260,14 @@ impl Iterator for AddrPageLookuper {
     }
 }
 
 #[cfg(feature = "alloc")]
-impl<PfHandler: Default> Default for SoftPagedMem<PfHandler> {
-    fn default() -> Self {
-        Self {
-            root_pt: Box::into_raw(Default::default()),
-            pf_handler: Default::default(),
-        }
-    }
-}
-
-#[cfg(feature = "alloc")]
-impl<PfHandler> Drop for SoftPagedMem<PfHandler> {
+impl<'p, A> Drop for SoftPagedMem<'p, A> {
     fn drop(&mut self) {
         let _ = unsafe { Box::from_raw(self.root_pt) };
     }
 }
 
 #[cfg(feature = "alloc")]
-impl<PfHandler> SoftPagedMem<PfHandler> {
+impl<'p, A> SoftPagedMem<'p, A> {
     /// Maps host's memory into VM's memory
     ///
     /// # Safety

From 06ce899e71577083ff5f87134083916c9fbe019b Mon Sep 17 00:00:00 2001
From: Erin
Date: Wed, 9 Aug 2023 02:53:55 +0200
Subject: [PATCH 2/9] Now finally, leaving Harvard!

---
 hbvm/src/main.rs           |  2 +-
 hbvm/src/softpaging/mod.rs | 20 ++++++++++++++++++++
 2 files changed, 21 insertions(+), 1 deletion(-)

diff --git a/hbvm/src/main.rs b/hbvm/src/main.rs
index 11e08dd..7420271 100644
--- a/hbvm/src/main.rs
+++ b/hbvm/src/main.rs
@@ -22,7 +22,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
                     program: &prog,
                     root_pt: Box::into_raw(Default::default()),
                 },
-                0,
+                4,
             );
diff --git a/hbvm/src/softpaging/mod.rs b/hbvm/src/softpaging/mod.rs
index ca2d3cd..3db6248 100644
--- a/hbvm/src/softpaging/mod.rs
+++ b/hbvm/src/softpaging/mod.rs
@@ -97,6 +97,26 @@ impl<'p, PfH: HandlePageFault> SoftPagedMem<'p, PfH> {
         permission_check: fn(Permission) -> bool,
         action: fn(*mut u8, *mut u8, usize),
     ) -> Result<(), u64> {
+        let (src, len) = if src < self.program.len() as _ {
+            let to_copy = len.clamp(0, self.program.len().saturating_sub(src as _));
+            action(
+                unsafe { self.program.as_ptr().add(src as _).cast_mut() },
+                dst,
+                to_copy,
+            );
+
+            (
+                src.saturating_add(to_copy as _),
+                len.saturating_sub(to_copy),
+            )
+        } else {
+            (src, len)
+        };
+
+        if len == 0 {
+            return Ok(());
+        }
+
         // Create new splitter
         let mut pspl = AddrPageLookuper::new(src, len, self.root_pt);
 
         loop {

From afdcee9bd641858419963e5772ea014724da10f3 Mon Sep 17 00:00:00 2001
From: Erin
Date: Wed, 9 Aug 2023 02:57:25 +0200
Subject: [PATCH 3/9] Forbid store

---
 hbvm/src/softpaging/mod.rs | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/hbvm/src/softpaging/mod.rs b/hbvm/src/softpaging/mod.rs
index 3db6248..bf35ff1 100644
--- a/hbvm/src/softpaging/mod.rs
+++ b/hbvm/src/softpaging/mod.rs
@@ -98,6 +98,10 @@ impl<'p, PfH: HandlePageFault> SoftPagedMem<'p, PfH> {
         action: fn(*mut u8, *mut u8, usize),
     ) -> Result<(), u64> {
         let (src, len) = if src < self.program.len() as _ {
+            if reason != MemoryAccessReason::Load {
+                return Err(src);
+            }
+
             let to_copy = len.clamp(0, self.program.len().saturating_sub(src as _));
             action(
                 unsafe { self.program.as_ptr().add(src as _).cast_mut() },

From b955b756e34d1710452db5b25e982c68f42d1149 Mon Sep 17 00:00:00 2001
From: Erin
Date: Wed, 9 Aug 2023 02:59:11 +0200
Subject: [PATCH 4/9] Comments

---
 hbvm/src/softpaging/mod.rs | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/hbvm/src/softpaging/mod.rs b/hbvm/src/softpaging/mod.rs
index bf35ff1..c33bc11 100644
--- a/hbvm/src/softpaging/mod.rs
+++ b/hbvm/src/softpaging/mod.rs
@@ -97,26 +97,33 @@ impl<'p, PfH: HandlePageFault> SoftPagedMem<'p, PfH> {
         permission_check: fn(Permission) -> bool,
         action: fn(*mut u8, *mut u8, usize),
     ) -> Result<(), u64> {
+        // Memory load from program section
         let (src, len) = if src < self.program.len() as _ {
+            // Allow only loads
             if reason != MemoryAccessReason::Load {
                 return Err(src);
             }
 
+            // Determine how much data to copy from here
             let to_copy = len.clamp(0, self.program.len().saturating_sub(src as _));
+
+            // Perform action
             action(
                 unsafe { self.program.as_ptr().add(src as _).cast_mut() },
                 dst,
                 to_copy,
             );
 
+            // Return shifted from what we've already copied
             (
                 src.saturating_add(to_copy as _),
                 len.saturating_sub(to_copy),
             )
         } else {
-            (src, len)
+            (src, len) // Nothing weird!
         };
 
+        // Nothing to copy? Don't bother doing anything, bail.
         if len == 0 {
             return Ok(());
         }
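[Illustration — not part of the series] Patches 2–4 make addresses below `program.len()` read-only views of the program itself, and patch 3 turns any store into that range into an error that surfaces as a store access trap. A sketch of the resulting semantics, assuming a `vm` built exactly as in hbvm/src/main.rs above; the `probe` helper and its preconditions are hypothetical:

    use hbvm::{
        softpaging::{HandlePageFault, SoftPagedMem},
        Memory, Vm,
    };

    /// Assumes a program of at least 4 bytes loaded at address 0.
    unsafe fn probe<PfH: HandlePageFault>(vm: &mut Vm<SoftPagedMem<PfH>, 0>) {
        let mut buf = [0u8; 4];
        // Loads from the program section are served from program bytes...
        vm.memory.load(0, buf.as_mut_ptr(), buf.len()).unwrap();
        // ...while stores into it are refused outright.
        assert!(vm.memory.store(0, buf.as_ptr(), buf.len()).is_err());
    }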
From 340ee8bcf358be87be5cdc84bdfe134ee54ab3e0 Mon Sep 17 00:00:00 2001
From: Erin
Date: Wed, 9 Aug 2023 03:01:42 +0200
Subject: [PATCH 5/9] Edit 0x0

---
 spec.md | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/spec.md b/spec.md
index 138ac84..b9aad2a 100644
--- a/spec.md
+++ b/spec.md
@@ -262,8 +262,9 @@ # Memory
 - Addresses are 64 bit
 - Program should be in the same address space as all other data
-- Address `0x0` is invalid and acessing it traps
 - Memory implementation is arbitrary
+  - Address `0x0` may or may not be valid. Expect compilers
+    to consider it invalid!
 - In case of accessing invalid address:
   - Program shall trap (LoadAccessEx, StoreAccessEx) with parameter of accessed address
   - Value of register when trapped is undefined

From 12bde3a87598384968f0092e38cc8d82bd2db7b0 Mon Sep 17 00:00:00 2001
From: Erin
Date: Wed, 9 Aug 2023 03:12:09 +0200
Subject: [PATCH 6/9] bai

---
 Cargo.lock      | 7 -------
 hbvm/Cargo.toml | 1 -
 2 files changed, 8 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 4cb5d25..56e88bb 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -127,7 +127,6 @@ dependencies = [
  "delegate",
  "derive_more",
  "hbbytecode",
- "log",
  "paste",
  "sealed",
  "static_assertions",
@@ -169,12 +168,6 @@ dependencies = [
  "proc-macro2",
 ]
 
-[[package]]
-name = "log"
-version = "0.4.19"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4"
-
 [[package]]
 name = "logos"
 version = "0.13.0"
diff --git a/hbvm/Cargo.toml b/hbvm/Cargo.toml
index 98d8051..ce3b565 100644
--- a/hbvm/Cargo.toml
+++ b/hbvm/Cargo.toml
@@ -15,7 +15,6 @@ nightly = []
 delegate = "0.9"
 derive_more = "0.99"
 hbbytecode.path = "../hbbytecode"
-log = "0.4"
 paste = "1.0"
 sealed = "0.5"
 static_assertions = "1.0"

From 34a82b55dc1bb3a446d0a2bbb12077e22a32d188 Mon Sep 17 00:00:00 2001
From: Erin
Date: Wed, 9 Aug 2023 20:19:12 +0200
Subject: [PATCH 7/9] executable

---
 hbvm/src/softpaging/mod.rs | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/hbvm/src/softpaging/mod.rs b/hbvm/src/softpaging/mod.rs
index c33bc11..07e05a5 100644
--- a/hbvm/src/softpaging/mod.rs
+++ b/hbvm/src/softpaging/mod.rs
@@ -501,6 +501,12 @@ pub mod perm_check {
     pub const fn writable(perm: Permission) -> bool {
         matches!(perm, Permission::Write)
     }
+
+    /// Page is executable
+    #[inline(always)]
+    pub const fn executable(perm: Permission) -> bool {
+        matches!(perm, Permission::Exec)
+    }
 }
 
 /// Handle VM traps
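[Illustration — not part of the series] Unmapped accesses in the soft-paged implementation funnel into the `HandlePageFault` hook; the `-> bool` return and the exact parameter meanings are assumed here from the fuzz target's stub. A handler that declines every fault — which makes the VM raise the LoadAccessEx/StoreAccessEx traps the spec describes — could look like:

    use hbvm::{
        softpaging::{paging::PageTable, HandlePageFault, PageSize},
        MemoryAccessReason,
    };

    struct DenyAll;

    impl HandlePageFault for DenyAll {
        fn page_fault(
            &mut self,
            _reason: MemoryAccessReason,
            _pagetable: &mut PageTable,
            vaddr: u64,
            _size: PageSize,
            _dst: *mut u8,
        ) -> bool {
            eprintln!("unresolved page fault at {vaddr:#x}");
            false // not handled: the access traps
        }
    }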
From 4c38b1ffb5ccd5b787e18039770cd6680e3c5b66 Mon Sep 17 00:00:00 2001
From: Erin
Date: Thu, 10 Aug 2023 12:39:03 +0200
Subject: [PATCH 8/9] move

---
 hbvm/src/softpaging/lookup.rs  | 124 ++++++++++++++
 hbvm/src/softpaging/mapping.rs | 163 +++++++++++++++++++
 hbvm/src/softpaging/mod.rs     | 286 +--------------------------------
 3 files changed, 293 insertions(+), 280 deletions(-)
 create mode 100644 hbvm/src/softpaging/lookup.rs
 create mode 100644 hbvm/src/softpaging/mapping.rs

diff --git a/hbvm/src/softpaging/lookup.rs b/hbvm/src/softpaging/lookup.rs
new file mode 100644
index 0000000..9111390
--- /dev/null
+++ b/hbvm/src/softpaging/lookup.rs
@@ -0,0 +1,124 @@
+//! Address lookup

use super::{
    addr_extract_index,
    paging::{PageTable, Permission},
    PageSize,
};

/// Good result from address split
pub struct AddrPageLookupOk {
    /// Virtual address
    pub vaddr: u64,

    /// Pointer to the start to perform the operation on
    pub ptr: *mut u8,

    /// Size to the end of page / end of desired size
    pub size: usize,

    /// Page permission
    pub perm: Permission,
}

/// Erroneous address split result
pub struct AddrPageLookupError {
    /// Address of failure
    pub addr: u64,

    /// Requested page size
    pub size: PageSize,
}

/// Address splitter into pages
pub struct AddrPageLookuper {
    /// Current address
    addr: u64,

    /// Size left
    size: usize,

    /// Page table
    pagetable: *const PageTable,
}

impl AddrPageLookuper {
    /// Create a new page lookuper
    #[inline]
    pub const fn new(addr: u64, size: usize, pagetable: *const PageTable) -> Self {
        Self {
            addr,
            size,
            pagetable,
        }
    }

    /// Bump address by size X
    pub fn bump(&mut self, page_size: PageSize) {
        self.addr += page_size as u64;
        self.size = self.size.saturating_sub(page_size as _);
    }
}

impl Iterator for AddrPageLookuper {
    type Item = Result<AddrPageLookupOk, AddrPageLookupError>;

    fn next(&mut self) -> Option<Self::Item> {
        // The end, everything is fine
        if self.size == 0 {
            return None;
        }

        let (base, perm, size, offset) = 'a: {
            let mut current_pt = self.pagetable;

            // Walk the page table
            for lvl in (0..5).rev() {
                // Get an entry
                unsafe {
                    let entry = (*current_pt)
                        .table
                        .get_unchecked(addr_extract_index(self.addr, lvl));

                    let ptr = entry.ptr();
                    match entry.permission() {
                        // No page → page fault
                        Permission::Empty => {
                            return Some(Err(AddrPageLookupError {
                                addr: self.addr,
                                size: PageSize::from_lvl(lvl)?,
                            }))
                        }

                        // Node → proceed walking
                        Permission::Node => current_pt = ptr as _,

                        // Leaf → return relevant data
                        perm => {
                            break 'a (
                                // Pointer in host memory
                                ptr as *mut u8,
                                perm,
                                PageSize::from_lvl(lvl)?,
                                // In-page offset
                                addr_extract_index(self.addr, lvl),
                            );
                        }
                    }
                }
            }
            return None; // Reached the end (should not happen)
        };

        // Get available byte count in the selected page with offset
        let avail = (size as usize - offset).clamp(0, self.size);
        self.bump(size);

        Some(Ok(AddrPageLookupOk {
            vaddr: self.addr,
            ptr: unsafe { base.add(offset) }, // Return pointer to the start of region
            size: avail,
            perm,
        }))
    }
}
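[Illustration — not part of the series] The moved `AddrPageLookuper` is an iterator over the page-sized pieces of a virtual range, which is exactly how `memory_access` consumes it. A hypothetical helper built on the now-public module — counting how many bytes of a range are actually backed by mapped pages, stopping at the first hole:

    use hbvm::softpaging::{
        lookup::{AddrPageLookupOk, AddrPageLookuper},
        paging::PageTable,
    };

    /// How many bytes of `[addr, addr + len)` are backed by mapped pages?
    fn mapped_bytes(root_pt: *const PageTable, addr: u64, len: usize) -> usize {
        let mut total = 0;
        for region in AddrPageLookuper::new(addr, len, root_pt) {
            match region {
                Ok(AddrPageLookupOk { size, .. }) => total += size,
                Err(_) => break, // first unmapped page ends the walk
            }
        }
        total
    }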
diff --git a/hbvm/src/softpaging/mapping.rs b/hbvm/src/softpaging/mapping.rs
new file mode 100644
index 0000000..9c556de
--- /dev/null
+++ b/hbvm/src/softpaging/mapping.rs
@@ -0,0 +1,163 @@
+//! Automatic memory mapping

use {
    super::{
        addr_extract_index,
        paging::{PageTable, Permission, PtEntry, PtPointedData},
        PageSize, SoftPagedMem,
    },
    alloc::boxed::Box,
    derive_more::Display,
};

#[cfg(feature = "alloc")]
impl<'p, A> SoftPagedMem<'p, A> {
    /// Maps host's memory into VM's memory
    ///
    /// # Safety
    /// - Your faith in the gods of UB
    ///   - Addr-san claims it's fine but who knows if she isn't lying :ferrisSus:
    ///   - Alright, Miri-sama is also fine with this, who knows why
    pub unsafe fn map(
        &mut self,
        host: *mut u8,
        target: u64,
        perm: Permission,
        pagesize: PageSize,
    ) -> Result<(), MapError> {
        let mut current_pt = self.root_pt;

        // Decide what level depth we are going to
        let lookup_depth = match pagesize {
            PageSize::Size4K => 0,
            PageSize::Size2M => 1,
            PageSize::Size1G => 2,
        };

        // Walk pagetable levels
        for lvl in (lookup_depth + 1..5).rev() {
            let entry = (*current_pt)
                .table
                .get_unchecked_mut(addr_extract_index(target, lvl));

            let ptr = entry.ptr();
            match entry.permission() {
                // Still not on target and already seeing empty entry?
                // No worries! Let's create one (allocates).
                Permission::Empty => {
                    // Increase children count
                    (*current_pt).childen += 1;

                    let table = Box::into_raw(Box::new(PtPointedData {
                        pt: PageTable::default(),
                    }));

                    core::ptr::write(entry, PtEntry::new(table, Permission::Node));
                    current_pt = table as _;
                }
                // Continue walking
                Permission::Node => current_pt = ptr as _,

                // There is some entry on place of node
                _ => return Err(MapError::PageOnNode),
            }
        }

        let node = (*current_pt)
            .table
            .get_unchecked_mut(addr_extract_index(target, lookup_depth));

        // Check if node is not mapped
        if node.permission() != Permission::Empty {
            return Err(MapError::AlreadyMapped);
        }

        // Write entry
        (*current_pt).childen += 1;
        core::ptr::write(node, PtEntry::new(host.cast(), perm));

        Ok(())
    }

    /// Unmaps pages from VM's memory
    ///
    /// If this errors, it only means there is no entry to unmap and in most
    /// cases it should just be ignored.
    pub fn unmap(&mut self, addr: u64) -> Result<(), NothingToUnmap> {
        let mut current_pt = self.root_pt;
        let mut page_tables = [core::ptr::null_mut::<PtEntry>(); 5];

        // Walk page table in reverse
        for lvl in (0..5).rev() {
            let entry = unsafe {
                (*current_pt)
                    .table
                    .get_unchecked_mut(addr_extract_index(addr, lvl))
            };

            let ptr = entry.ptr();
            match entry.permission() {
                // Nothing is there, throw an error, not critical!
                Permission::Empty => return Err(NothingToUnmap),
                // Node – Save to visited pagetables and continue walking
                Permission::Node => {
                    page_tables[lvl as usize] = entry;
                    current_pt = ptr as _
                }
                // Page entry – zero it out!
                // Zero page entry is completely valid entry with
                // empty permission - no UB here!
                _ => unsafe {
                    core::ptr::write_bytes(entry, 0, 1);
                    break;
                },
            }
        }

        // Now walk the visited page tables in order
        for entry in page_tables.into_iter() {
            // Level not visited, skip.
            if entry.is_null() {
                continue;
            }

            unsafe {
                let children = &mut (*(*entry).ptr()).pt.childen;
                *children -= 1; // Decrease children count

                // If there are no children, deallocate.
                if *children == 0 {
                    let _ = Box::from_raw((*entry).ptr() as *mut PageTable);

                    // Zero visited entry
                    core::ptr::write_bytes(entry, 0, 1);
                } else {
                    break;
                }
            }
        }

        Ok(())
    }
}

/// Error mapping
#[derive(Clone, Copy, Display, Debug, PartialEq, Eq)]
pub enum MapError {
    /// Entry was already mapped
    #[display(fmt = "There is already a page mapped on specified address")]
    AlreadyMapped,
    /// While walking, a page entry was
    /// encountered.
    #[display(fmt = "There was a page mapped on the way instead of node")]
    PageOnNode,
}

/// There was no entry in page table to unmap
///
/// No worry, don't panic, nothing bad has happened,
/// but if you are 120% sure there should be something,
/// double-check your addresses.
#[derive(Clone, Copy, Display, Debug)]
#[display(fmt = "There was no entry to unmap")]
pub struct NothingToUnmap;
diff --git a/hbvm/src/softpaging/mod.rs b/hbvm/src/softpaging/mod.rs
index 07e05a5..67c030c 100644
--- a/hbvm/src/softpaging/mod.rs
+++ b/hbvm/src/softpaging/mod.rs
@@ -1,17 +1,19 @@
 //! Platform independent, software paged memory implementation
 
+use self::lookup::{AddrPageLookupError, AddrPageLookupOk, AddrPageLookuper};
+
+pub mod lookup;
 pub mod paging;
 
+#[cfg(feature = "alloc")]
+pub mod mapping;
+
 use {
     super::{LoadError, Memory, MemoryAccessReason, StoreError},
     core::slice::SliceIndex,
-    derive_more::Display,
     paging::{PageTable, Permission},
 };
 
-#[cfg(feature = "alloc")]
-use {alloc::boxed::Box, paging::PtEntry};
-
 /// HoleyBytes software paged memory
 #[derive(Clone, Debug)]
 pub struct SoftPagedMem<'p, PfH> {
@@ -173,261 +175,6 @@ impl<'p, PfH: HandlePageFault> SoftPagedMem<'p, PfH> {
     }
 }
 
-/// Good result from address split
-struct AddrPageLookupOk {
-    /// Virtual address
-    vaddr: u64,
-
-    /// Pointer to the start for perform operation
-    ptr: *mut u8,
-
-    /// Size to the end of page / end of desired size
-    size: usize,
-
-    /// Page permission
-    perm: Permission,
-}
-
-/// Errornous address split result
-struct AddrPageLookupError {
-    /// Address of failure
-    addr: u64,
-
-    /// Requested page size
-    size: PageSize,
-}
-
-/// Address splitter into pages
-struct AddrPageLookuper {
-    /// Current address
-    addr: u64,
-
-    /// Size left
-    size: usize,
-
-    /// Page table
-    pagetable: *const PageTable,
-}
-
-impl AddrPageLookuper {
-    /// Create a new page lookuper
-    #[inline]
-    pub const fn new(addr: u64, size: usize, pagetable: *const PageTable) -> Self {
-        Self {
-            addr,
-            size,
-            pagetable,
-        }
-    }
-
-    /// Bump address by size X
-    fn bump(&mut self, page_size: PageSize) {
-        self.addr += page_size as u64;
-        self.size = self.size.saturating_sub(page_size as _);
-    }
-}
-
-impl Iterator for AddrPageLookuper {
-    type Item = Result<AddrPageLookupOk, AddrPageLookupError>;
-
-    fn next(&mut self) -> Option<Self::Item> {
-        // The end, everything is fine
-        if self.size == 0 {
-            return None;
-        }
-
-        let (base, perm, size, offset) = 'a: {
-            let mut current_pt = self.pagetable;
-
-            // Walk the page table
-            for lvl in (0..5).rev() {
-                // Get an entry
-                unsafe {
-                    let entry = (*current_pt)
-                        .table
-                        .get_unchecked(addr_extract_index(self.addr, lvl));
-
-                    let ptr = entry.ptr();
-                    match entry.permission() {
-                        // No page → page fault
-                        Permission::Empty => {
-                            return Some(Err(AddrPageLookupError {
-                                addr: self.addr,
-                                size: PageSize::from_lvl(lvl)?,
-                            }))
-                        }
-
-                        // Node → proceed waking
-                        Permission::Node => current_pt = ptr as _,
-
-                        // Leaf → return relevant data
-                        perm => {
-                            break 'a (
-                                // Pointer in host memory
-                                ptr as *mut u8,
-                                perm,
-                                PageSize::from_lvl(lvl)?,
-                                // In-page offset
-                                addr_extract_index(self.addr, lvl),
-                            );
-                        }
-                    }
-                }
-            }
-            return None; // Reached the end (should not happen)
-        };
-
-        // Get available byte count in the selected page with offset
-        let avail = (size as usize - offset).clamp(0, self.size);
-        self.bump(size);
-
-        Some(Ok(AddrPageLookupOk {
-            vaddr: self.addr,
-            ptr: unsafe { base.add(offset) }, // Return pointer to the start of region
-            size: avail,
-            perm,
-        }))
-    }
-}
-
-#[cfg(feature = "alloc")]
-impl<'p, A> Drop for SoftPagedMem<'p, A> {
-    fn drop(&mut self) {
-        let _ = unsafe { Box::from_raw(self.root_pt) };
-    }
-}
-
-#[cfg(feature = "alloc")]
-impl<'p, A> SoftPagedMem<'p, A> {
-    /// Maps host's memory into VM's memory
-    ///
-    /// # Safety
-    /// - Your faith in the gods of UB
-    ///   - Addr-san claims it's fine but who knows is she isn't lying :ferrisSus:
-    ///   - Alright, Miri-sama is also fine with this, who knows why
-    pub unsafe fn map(
-        &mut self,
-        host: *mut u8,
-        target: u64,
-        perm: Permission,
-        pagesize: PageSize,
-    ) -> Result<(), MapError> {
-        let mut current_pt = self.root_pt;
-
-        // Decide on what level depth are we going
-        let lookup_depth = match pagesize {
-            PageSize::Size4K => 0,
-            PageSize::Size2M => 1,
-            PageSize::Size1G => 2,
-        };
-
-        // Walk pagetable levels
-        for lvl in (lookup_depth + 1..5).rev() {
-            let entry = (*current_pt)
-                .table
-                .get_unchecked_mut(addr_extract_index(target, lvl));
-
-            let ptr = entry.ptr();
-            match entry.permission() {
-                // Still not on target and already seeing empty entry?
-                // No worries! Let's create one (allocates).
-                Permission::Empty => {
-                    // Increase children count
-                    (*current_pt).childen += 1;
-
-                    let table = Box::into_raw(Box::new(paging::PtPointedData {
-                        pt: PageTable::default(),
-                    }));
-
-                    core::ptr::write(entry, PtEntry::new(table, Permission::Node));
-                    current_pt = table as _;
-                }
-                // Continue walking
-                Permission::Node => current_pt = ptr as _,
-
-                // There is some entry on place of node
-                _ => return Err(MapError::PageOnNode),
-            }
-        }
-
-        let node = (*current_pt)
-            .table
-            .get_unchecked_mut(addr_extract_index(target, lookup_depth));
-
-        // Check if node is not mapped
-        if node.permission() != Permission::Empty {
-            return Err(MapError::AlreadyMapped);
-        }
-
-        // Write entry
-        (*current_pt).childen += 1;
-        core::ptr::write(node, PtEntry::new(host.cast(), perm));
-
-        Ok(())
-    }
-
-    /// Unmaps pages from VM's memory
-    ///
-    /// If errors, it only means there is no entry to unmap and in most cases
-    /// just should be ignored.
-    pub fn unmap(&mut self, addr: u64) -> Result<(), NothingToUnmap> {
-        let mut current_pt = self.root_pt;
-        let mut page_tables = [core::ptr::null_mut::<PtEntry>(); 5];
-
-        // Walk page table in reverse
-        for lvl in (0..5).rev() {
-            let entry = unsafe {
-                (*current_pt)
-                    .table
-                    .get_unchecked_mut(addr_extract_index(addr, lvl))
-            };
-
-            let ptr = entry.ptr();
-            match entry.permission() {
-                // Nothing is there, throw an error, not critical!
-                Permission::Empty => return Err(NothingToUnmap),
-                // Node – Save to visited pagetables and continue walking
-                Permission::Node => {
-                    page_tables[lvl as usize] = entry;
-                    current_pt = ptr as _
-                }
-                // Page entry – zero it out!
-                // Zero page entry is completely valid entry with
-                // empty permission - no UB here!
-                _ => unsafe {
-                    core::ptr::write_bytes(entry, 0, 1);
-                    break;
-                },
-            }
-        }
-
-        // Now walk in order visited page tables
-        for entry in page_tables.into_iter() {
-            // Level not visited, skip.
-            if entry.is_null() {
-                continue;
-            }
-
-            unsafe {
-                let children = &mut (*(*entry).ptr()).pt.childen;
-                *children -= 1; // Decrease children count
-
-                // If there are no children, deallocate.
-                if *children == 0 {
-                    let _ = Box::from_raw((*entry).ptr() as *mut PageTable);
-
-                    // Zero visited entry
-                    core::ptr::write_bytes(entry, 0, 1);
-                } else {
-                    break;
-                }
-            }
-        }
-
-        Ok(())
-    }
-}
 
 /// Extract index in page table on specified level
 ///
 /// The level shall not be larger than 4, otherwise
@@ -262,27 +209,6 @@ impl PageSize {
     }
 }
 
-/// Error mapping
-#[derive(Clone, Copy, Display, Debug, PartialEq, Eq)]
-pub enum MapError {
-    /// Entry was already mapped
-    #[display(fmt = "There is already a page mapped on specified address")]
-    AlreadyMapped,
-    /// When walking a page entry was
-    /// encounterd.
-    #[display(fmt = "There was a page mapped on the way instead of node")]
-    PageOnNode,
-}
-
-/// There was no entry in page table to unmap
-///
-/// No worry, don't panic, nothing bad has happened,
-/// but if you are 120% sure there should be something,
-/// double-check your addresses.
-#[derive(Clone, Copy, Display, Debug)]
-#[display(fmt = "There was no entry to unmap")]
-pub struct NothingToUnmap;
 
 /// Permisison checks
 pub mod perm_check {
     use super::paging::Permission;

From 96c5b07cfb76b8d3fae846653fbbe72ec4660d71 Mon Sep 17 00:00:00 2001
From: Erin
Date: Thu, 10 Aug 2023 12:39:18 +0200
Subject: [PATCH 9/9] h

---
 hbvm/src/softpaging/mapping.rs | 1 -
 1 file changed, 1 deletion(-)

diff --git a/hbvm/src/softpaging/mapping.rs b/hbvm/src/softpaging/mapping.rs
index 9c556de..0d6858e 100644
--- a/hbvm/src/softpaging/mapping.rs
+++ b/hbvm/src/softpaging/mapping.rs
@@ -10,7 +10,6 @@ use {
     derive_more::Display,
 };
 
-#[cfg(feature = "alloc")]
 impl<'p, A> SoftPagedMem<'p, A> {
     /// Maps host's memory into VM's memory
     ///