Some merges

Authored by Erin on 2023-08-11 02:19:26 +02:00; committed by ondra05
commit 7dc8c6cca4
10 changed files with 456 additions and 363 deletions

Cargo.lock (generated)

@@ -127,7 +127,6 @@ dependencies = [
  "delegate",
  "derive_more",
  "hbbytecode",
- "log",
  "paste",
  "sealed",
  "static_assertions",
@@ -169,12 +168,6 @@ dependencies = [
  "proc-macro2",
 ]
 
-[[package]]
-name = "log"
-version = "0.4.19"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b06a4cde4c0f271a446782e3eff8de789548ce57dbc8eca9292c27f4a42004b4"
-
 [[package]]
 name = "logos"
 version = "0.13.0"


@@ -15,7 +15,6 @@ nightly = []
 delegate = "0.9"
 derive_more = "0.99"
 hbbytecode.path = "../hbbytecode"
-log = "0.4"
 paste = "1.0"
 sealed = "0.5"
 static_assertions = "1.0"


@@ -13,6 +13,9 @@ libfuzzer-sys = "0.4"
 [dependencies.hbvm]
 path = ".."
 
+[dependencies.hbbytecode]
+path = "../../hbbytecode"
+
 # Prevent this from interfering with workspaces
 [workspace]
 members = ["."]


@@ -1,15 +1,30 @@
 #![no_main]
 
 use {
+    hbbytecode::valider::validate,
     hbvm::{
-        mem::{HandlePageFault, Memory, MemoryAccessReason, PageSize},
-        Vm,
+        softpaging::{
+            paging::{PageTable, Permission},
+            HandlePageFault, PageSize, SoftPagedMem,
+        },
+        MemoryAccessReason, Vm,
     },
     libfuzzer_sys::fuzz_target,
 };
 
 fuzz_target!(|data: &[u8]| {
-    if let Ok(mut vm) = Vm::<_, 16384>::new_validated(data, TestTrapHandler, Default::default()) {
+    if validate(data).is_ok() {
+        let mut vm = unsafe {
+            Vm::<_, 16384>::new(
+                SoftPagedMem {
+                    pf_handler: TestTrapHandler,
+                    program: data,
+                    root_pt: Box::into_raw(Default::default()),
+                },
+                0,
+            )
+        };
+
         // Alloc and map some memory
         let pages = [
             alloc_and_map(&mut vm.memory, 0),
@@ -26,22 +41,17 @@ fuzz_target!(|data: &[u8]| {
     }
 });
 
-fn alloc_and_map(memory: &mut Memory, at: u64) -> *mut u8 {
+fn alloc_and_map(memory: &mut SoftPagedMem<TestTrapHandler>, at: u64) -> *mut u8 {
     let ptr = Box::into_raw(Box::<Page>::default()).cast();
     unsafe {
         memory
-            .map(
-                ptr,
-                at,
-                hbvm::mem::paging::Permission::Write,
-                PageSize::Size4K,
-            )
+            .map(ptr, at, Permission::Write, PageSize::Size4K)
             .unwrap()
     };
     ptr
 }
 
-fn unmap_and_dealloc(memory: &mut Memory, ptr: *mut u8, from: u64) {
+fn unmap_and_dealloc(memory: &mut SoftPagedMem<TestTrapHandler>, ptr: *mut u8, from: u64) {
     memory.unmap(from).unwrap();
     let _ = unsafe { Box::from_raw(ptr.cast::<Page>()) };
 }
@@ -59,7 +69,7 @@ impl HandlePageFault for TestTrapHandler {
     fn page_fault(
         &mut self,
         _: MemoryAccessReason,
-        _: &mut Memory,
+        _: &mut PageTable,
         _: u64,
         _: PageSize,
         _: *mut u8,


@@ -14,8 +14,6 @@
 #![cfg_attr(feature = "nightly", feature(fn_align))]
 #![warn(missing_docs, clippy::missing_docs_in_private_items)]
 
-use core::marker::PhantomData;
-
 #[cfg(feature = "alloc")]
 extern crate alloc;
@@ -26,16 +24,14 @@ mod bmc;
 use {
     bmc::BlockCopier,
-    core::{cmp::Ordering, mem::size_of, ops},
+    core::{cmp::Ordering, mem::size_of, ops, slice::SliceIndex},
     derive_more::Display,
-    hbbytecode::{
-        valider, OpParam, ParamBB, ParamBBB, ParamBBBB, ParamBBD, ParamBBDH, ParamBBW, ParamBD,
-    },
+    hbbytecode::{OpParam, ParamBB, ParamBBB, ParamBBBB, ParamBBD, ParamBBDH, ParamBBW, ParamBD},
     value::{Value, ValueVariant},
 };
 
 /// HoleyBytes Virtual Machine
-pub struct Vm<'a, Mem, const TIMER_QUOTIENT: usize> {
+pub struct Vm<Mem, const TIMER_QUOTIENT: usize> {
     /// Holds 256 registers
     ///
     /// Writing to register 0 is considered undefined behaviour
@@ -48,15 +44,6 @@ pub struct Vm<'a, Mem, const TIMER_QUOTIENT: usize> {
     /// Program counter
     pub pc: usize,
 
-    /// Program
-    program: *const u8,
-
-    /// Cached program length (without unreachable end)
-    program_len: usize,
-
-    /// Program lifetime
-    _program_lt: PhantomData<&'a [u8]>,
-
     /// Program timer
     timer: usize,
@@ -64,7 +51,7 @@ pub struct Vm<'a, Mem, const TIMER_QUOTIENT: usize> {
     copier: Option<BlockCopier>,
 }
 
-impl<'a, Mem, const TIMER_QUOTIENT: usize> Vm<'a, Mem, TIMER_QUOTIENT>
+impl<Mem, const TIMER_QUOTIENT: usize> Vm<Mem, TIMER_QUOTIENT>
 where
     Mem: Memory,
 {
@@ -72,25 +59,16 @@ where
     ///
     /// # Safety
     /// Program code has to be validated
-    pub unsafe fn new_unchecked(program: &'a [u8], memory: Mem) -> Self {
+    pub unsafe fn new(memory: Mem, entry: u64) -> Self {
         Self {
             registers: [Value::from(0_u64); 256],
             memory,
-            pc: 0,
-            program_len: program.len() - 12,
-            program: program[4..].as_ptr(),
-            _program_lt: Default::default(),
+            pc: entry as _,
             timer: 0,
             copier: None,
         }
     }
 
-    /// Create a new VM with program and trap handler only if it passes validation
-    pub fn new_validated(program: &'a [u8], memory: Mem) -> Result<Self, valider::Error> {
-        valider::validate(program)?;
-        Ok(unsafe { Self::new_unchecked(program, memory) })
-    }
-
     /// Execute program
     ///
     /// Program can return [`VmRunError`] if a trap handling failed
@@ -98,11 +76,6 @@ where
     pub fn run(&mut self) -> Result<VmRunOk, VmRunError> {
         use hbbytecode::opcode::*;
         loop {
-            // Check instruction boundary
-            if self.pc >= self.program_len {
-                return Err(VmRunError::AddrOutOfBounds);
-            }
-
             // Big match
             //
             // Contribution guide:
@@ -123,7 +96,11 @@ where
             // - Yes, we assume you run 64 bit CPU. Else ?conradluget a better CPU
             //   sorry 8 bit fans, HBVM won't run on your Speccy :(
             unsafe {
-                match *self.program.add(self.pc) {
+                match *self
+                    .memory
+                    .load_prog(self.pc)
+                    .ok_or(VmRunError::ProgramFetchLoadEx(self.pc as _))?
+                {
                     UN => {
                         self.decode::<()>();
                         return Err(VmRunError::Unreachable);
@@ -388,15 +365,22 @@ where
     }
 
     /// Decode instruction operands
-    #[inline]
+    #[inline(always)]
     unsafe fn decode<T: OpParam>(&mut self) -> T {
-        let data = self.program.add(self.pc + 1).cast::<T>().read();
+        let pc1 = self.pc + 1;
+        let data = self
+            .memory
+            .load_prog_unchecked(pc1..pc1 + size_of::<T>())
+            .as_ptr()
+            .cast::<T>()
+            .read();
+
         self.pc += 1 + size_of::<T>();
         data
     }
 
     /// Perform binary operating over two registers
-    #[inline]
+    #[inline(always)]
     unsafe fn binary_op<T: ValueVariant>(&mut self, op: impl Fn(T, T) -> T) {
         let ParamBBB(tg, a0, a1) = self.decode();
         self.write_reg(
@@ -406,7 +390,7 @@ where
     }
 
     /// Perform binary operation over register and immediate
-    #[inline]
+    #[inline(always)]
     unsafe fn binary_op_imm<T: ValueVariant>(&mut self, op: impl Fn(T, T) -> T) {
         let ParamBBD(tg, reg, imm) = self.decode();
         self.write_reg(
@@ -416,14 +400,14 @@ where
     }
 
     /// Perform binary operation over register and shift immediate
-    #[inline]
+    #[inline(always)]
     unsafe fn binary_op_ims<T: ValueVariant>(&mut self, op: impl Fn(T, u32) -> T) {
         let ParamBBW(tg, reg, imm) = self.decode();
         self.write_reg(tg, op(self.read_reg(reg).cast::<T>(), imm));
     }
 
     /// Jump at `#3` if ordering on `#0 <=> #1` is equal to expected
-    #[inline]
+    #[inline(always)]
     unsafe fn cond_jmp<T: ValueVariant + Ord>(&mut self, expected: Ordering) {
         let ParamBBD(a0, a1, ja) = self.decode();
         if self
@@ -437,14 +421,14 @@ where
     }
 
     /// Read register
-    #[inline]
+    #[inline(always)]
     unsafe fn read_reg(&self, n: u8) -> Value {
         *self.registers.get_unchecked(n as usize)
     }
 
     /// Write a register.
     /// Writing to register 0 is no-op.
-    #[inline]
+    #[inline(always)]
     unsafe fn write_reg(&mut self, n: u8, value: impl Into<Value>) {
         if n != 0 {
             *self.registers.get_unchecked_mut(n as usize) = value.into();
@@ -452,7 +436,7 @@ where
     }
 
     /// Load / Store Address check-computation überfunction
-    #[inline]
+    #[inline(always)]
     unsafe fn ldst_addr_uber(
         &self,
         dst: u8,
@@ -485,6 +469,9 @@ pub enum VmRunError {
     /// Unhandled load access exception
     LoadAccessEx(u64),
 
+    /// Unhandled instruction load access exception
+    ProgramFetchLoadEx(u64),
+
     /// Unhandled store access exception
     StoreAccessEx(u64),
@ -529,6 +516,40 @@ pub trait Memory {
source: *const u8, source: *const u8,
count: usize, count: usize,
) -> Result<(), StoreError>; ) -> Result<(), StoreError>;
/// Fetch bytes from program section
///
/// # Why?
/// Even Holey Bytes programs operate with
/// single address space, the actual implementation
/// may be different, so for these reasons there is a
/// separate function.
///
/// Also if your memory implementation differentiates between
/// readable and executable memory, this is the way to distinguish
/// the loads.
///
/// # Notice for implementors
/// This is a hot function. This is called on each opcode fetch
/// and instruction decode. Inlining the implementation is highly
/// recommended!
///
/// If you utilise some more heavy memory implementation, consider
/// performing caching as HBVM does not do that for you.
///
/// Has to return all the requested data. If cannot fetch data of requested
/// length, return [`None`].
fn load_prog<I>(&mut self, index: I) -> Option<&I::Output>
where
I: SliceIndex<[u8]>;
/// Fetch bytes from program section, unchecked.
///
/// # Safety
/// You really have to be sure you get the bytes, got me?
unsafe fn load_prog_unchecked<I>(&mut self, index: I) -> &I::Output
where
I: SliceIndex<[u8]>;
} }
/// Unhandled load access trap /// Unhandled load access trap
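The new trait methods boil down to "hand back a borrowed slice of the program section, or `None` if the range is out of bounds". A minimal sketch of an implementor, assuming nothing beyond the signatures above (`FlatProg` is hypothetical and not part of hbvm; `SoftPagedMem` implements the real thing further down in this commit):

```rust
use core::slice::SliceIndex;

/// Hypothetical flat program holder, only to illustrate the contract.
struct FlatProg {
    program: Vec<u8>,
}

impl FlatProg {
    // Same shape as `Memory::load_prog`: return all requested bytes or `None`.
    fn load_prog<I>(&mut self, index: I) -> Option<&I::Output>
    where
        I: SliceIndex<[u8]>,
    {
        self.program.get(index)
    }
}

fn main() {
    let mut m = FlatProg { program: vec![0x01, 0x02, 0x03] };
    // Single-byte fetch, as `run` does for the opcode at `pc`.
    assert_eq!(m.load_prog(0).copied(), Some(0x01));
    // Ranged fetch, as `decode` does for operands; a short read yields `None`.
    assert!(m.load_prog(1..8).is_none());
}
```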


@@ -10,15 +10,20 @@ use {
 fn main() -> Result<(), Box<dyn std::error::Error>> {
     let mut prog = vec![];
     stdin().read_to_end(&mut prog)?;
+    println!("{prog:?}");
 
     if let Err(e) = validate(&prog) {
         eprintln!("Program validation error: {e:?}");
         return Ok(());
     } else {
         unsafe {
-            let mut vm =
-                Vm::<_, 0>::new_unchecked(&prog, SoftPagedMem::<TestTrapHandler>::default());
+            let mut vm = Vm::<_, 0>::new(
+                SoftPagedMem {
+                    pf_handler: TestTrapHandler,
+                    program: &prog,
+                    root_pt: Box::into_raw(Default::default()),
+                },
+                4,
+            );
             let data = {
                 let ptr = std::alloc::alloc_zeroed(std::alloc::Layout::from_size_align_unchecked(
                     4096, 4096,
@@ -32,7 +37,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
             vm.memory
                 .map(
                     data,
-                    0,
+                    8192,
                     hbvm::softpaging::paging::Permission::Write,
                     PageSize::Size4K,
                 )
@@ -46,7 +51,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
                 data,
                 std::alloc::Layout::from_size_align_unchecked(4096, 4096),
             );
-            vm.memory.unmap(0).unwrap();
+            vm.memory.unmap(8192).unwrap();
         }
     }
     Ok(())


@@ -0,0 +1,124 @@
//! Address lookup
use super::{
addr_extract_index,
paging::{PageTable, Permission},
PageSize,
};
/// Good result from address split
pub struct AddrPageLookupOk {
/// Virtual address
pub vaddr: u64,
/// Pointer to the start for performing the operation
pub ptr: *mut u8,
/// Size to the end of page / end of desired size
pub size: usize,
/// Page permission
pub perm: Permission,
}
/// Erroneous address split result
pub struct AddrPageLookupError {
/// Address of failure
pub addr: u64,
/// Requested page size
pub size: PageSize,
}
/// Address splitter into pages
pub struct AddrPageLookuper {
/// Current address
addr: u64,
/// Size left
size: usize,
/// Page table
pagetable: *const PageTable,
}
impl AddrPageLookuper {
/// Create a new page lookuper
#[inline]
pub const fn new(addr: u64, size: usize, pagetable: *const PageTable) -> Self {
Self {
addr,
size,
pagetable,
}
}
/// Bump address by size X
pub fn bump(&mut self, page_size: PageSize) {
self.addr += page_size as u64;
self.size = self.size.saturating_sub(page_size as _);
}
}
impl Iterator for AddrPageLookuper {
type Item = Result<AddrPageLookupOk, AddrPageLookupError>;
fn next(&mut self) -> Option<Self::Item> {
// The end, everything is fine
if self.size == 0 {
return None;
}
let (base, perm, size, offset) = 'a: {
let mut current_pt = self.pagetable;
// Walk the page table
for lvl in (0..5).rev() {
// Get an entry
unsafe {
let entry = (*current_pt)
.table
.get_unchecked(addr_extract_index(self.addr, lvl));
let ptr = entry.ptr();
match entry.permission() {
// No page → page fault
Permission::Empty => {
return Some(Err(AddrPageLookupError {
addr: self.addr,
size: PageSize::from_lvl(lvl)?,
}))
}
// Node → proceed walking
Permission::Node => current_pt = ptr as _,
// Leaf → return relevant data
perm => {
break 'a (
// Pointer in host memory
ptr as *mut u8,
perm,
PageSize::from_lvl(lvl)?,
// In-page offset
addr_extract_index(self.addr, lvl),
);
}
}
}
}
return None; // Reached the end (should not happen)
};
// Get available byte count in the selected page with offset
let avail = (size as usize - offset).clamp(0, self.size);
self.bump(size);
Some(Ok(AddrPageLookupOk {
vaddr: self.addr,
ptr: unsafe { base.add(offset) }, // Return pointer to the start of region
size: avail,
perm,
}))
}
}
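The iterator above yields one pointer-plus-size chunk per page crossed. For intuition, here is the splitting arithmetic in isolation — a rough sketch assuming plain 4 KiB pages, not hbvm's actual lookup code, which additionally walks the page table and reports permissions:

```rust
/// Illustrative only: split an (addr, size) request into per-page chunks.
fn split_into_pages(mut addr: u64, mut size: usize) -> Vec<(u64, usize)> {
    const PAGE: usize = 4096;
    let mut chunks = Vec::new();
    while size > 0 {
        let offset = (addr as usize) % PAGE;   // in-page offset
        let avail = (PAGE - offset).min(size); // bytes until end of page / end of request
        chunks.push((addr, avail));
        addr += avail as u64;                  // bump to the next page boundary
        size -= avail;
    }
    chunks
}

fn main() {
    // A 6000-byte access starting 256 bytes into a page splits into two chunks.
    assert_eq!(
        split_into_pages(0x1100, 6000),
        vec![(0x1100, 3840), (0x2000, 2160)]
    );
}
```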


@@ -0,0 +1,162 @@
//! Automatic memory mapping
use {
super::{
addr_extract_index,
paging::{PageTable, Permission, PtEntry, PtPointedData},
PageSize, SoftPagedMem,
},
alloc::boxed::Box,
derive_more::Display,
};
impl<'p, A> SoftPagedMem<'p, A> {
/// Maps host's memory into VM's memory
///
/// # Safety
/// - Your faith in the gods of UB
/// - Addr-san claims it's fine but who knows if she isn't lying :ferrisSus:
/// - Alright, Miri-sama is also fine with this, who knows why
pub unsafe fn map(
&mut self,
host: *mut u8,
target: u64,
perm: Permission,
pagesize: PageSize,
) -> Result<(), MapError> {
let mut current_pt = self.root_pt;
// Decide on what level depth are we going
let lookup_depth = match pagesize {
PageSize::Size4K => 0,
PageSize::Size2M => 1,
PageSize::Size1G => 2,
};
// Walk pagetable levels
for lvl in (lookup_depth + 1..5).rev() {
let entry = (*current_pt)
.table
.get_unchecked_mut(addr_extract_index(target, lvl));
let ptr = entry.ptr();
match entry.permission() {
// Still not on target and already seeing empty entry?
// No worries! Let's create one (allocates).
Permission::Empty => {
// Increase children count
(*current_pt).childen += 1;
let table = Box::into_raw(Box::new(PtPointedData {
pt: PageTable::default(),
}));
core::ptr::write(entry, PtEntry::new(table, Permission::Node));
current_pt = table as _;
}
// Continue walking
Permission::Node => current_pt = ptr as _,
// There is some entry on place of node
_ => return Err(MapError::PageOnNode),
}
}
let node = (*current_pt)
.table
.get_unchecked_mut(addr_extract_index(target, lookup_depth));
// Check if node is not mapped
if node.permission() != Permission::Empty {
return Err(MapError::AlreadyMapped);
}
// Write entry
(*current_pt).childen += 1;
core::ptr::write(node, PtEntry::new(host.cast(), perm));
Ok(())
}
/// Unmaps pages from VM's memory
///
/// If this errors, it only means there is no entry to unmap and in most cases
/// just should be ignored.
pub fn unmap(&mut self, addr: u64) -> Result<(), NothingToUnmap> {
let mut current_pt = self.root_pt;
let mut page_tables = [core::ptr::null_mut(); 5];
// Walk page table in reverse
for lvl in (0..5).rev() {
let entry = unsafe {
(*current_pt)
.table
.get_unchecked_mut(addr_extract_index(addr, lvl))
};
let ptr = entry.ptr();
match entry.permission() {
// Nothing is there, throw an error, not critical!
Permission::Empty => return Err(NothingToUnmap),
// Node → save to visited page tables and continue walking
Permission::Node => {
page_tables[lvl as usize] = entry;
current_pt = ptr as _
}
// Page entry → zero it out!
// Zero page entry is completely valid entry with
// empty permission - no UB here!
_ => unsafe {
core::ptr::write_bytes(entry, 0, 1);
break;
},
}
}
// Now walk in order visited page tables
for entry in page_tables.into_iter() {
// Level not visited, skip.
if entry.is_null() {
continue;
}
unsafe {
let children = &mut (*(*entry).ptr()).pt.childen;
*children -= 1; // Decrease children count
// If there are no children, deallocate.
if *children == 0 {
let _ = Box::from_raw((*entry).ptr() as *mut PageTable);
// Zero visited entry
core::ptr::write_bytes(entry, 0, 1);
} else {
break;
}
}
}
Ok(())
}
}
/// Error mapping
#[derive(Clone, Copy, Display, Debug, PartialEq, Eq)]
pub enum MapError {
/// Entry was already mapped
#[display(fmt = "There is already a page mapped on specified address")]
AlreadyMapped,
/// When walking, a page entry was
/// encountered.
#[display(fmt = "There was a page mapped on the way instead of node")]
PageOnNode,
}
/// There was no entry in page table to unmap
///
/// No worry, don't panic, nothing bad has happened,
/// but if you are 120% sure there should be something,
/// double-check your addresses.
#[derive(Clone, Copy, Display, Debug)]
#[display(fmt = "There was no entry to unmap")]
pub struct NothingToUnmap;


@@ -1,26 +1,31 @@
 //! Platform independent, software paged memory implementation
 
+use self::lookup::{AddrPageLookupError, AddrPageLookupOk, AddrPageLookuper};
+
+pub mod lookup;
 pub mod paging;
 
+#[cfg(feature = "alloc")]
+pub mod mapping;
+
 use {
     super::{LoadError, Memory, MemoryAccessReason, StoreError},
-    derive_more::Display,
+    core::slice::SliceIndex,
     paging::{PageTable, Permission},
 };
 
-#[cfg(feature = "alloc")]
-use {alloc::boxed::Box, paging::PtEntry};
-
 /// HoleyBytes software paged memory
 #[derive(Clone, Debug)]
-pub struct SoftPagedMem<PfHandler> {
+pub struct SoftPagedMem<'p, PfH> {
     /// Root page table
     pub root_pt: *mut PageTable,
     /// Page fault handler
-    pub pf_handler: PfHandler,
+    pub pf_handler: PfH,
+    /// Program memory segment
+    pub program: &'p [u8],
 }
 
-impl<PfHandler: HandlePageFault> Memory for SoftPagedMem<PfHandler> {
+impl<'p, PfH: HandlePageFault> Memory for SoftPagedMem<'p, PfH> {
     /// Load value from an address
     ///
     /// # Safety
@@ -57,9 +62,27 @@ impl<PfHandler: HandlePageFault> Memory for SoftPagedMem<PfHandler> {
             )
             .map_err(StoreError)
     }
 
+    /// Fetch slice from program memory section
+    #[inline(always)]
+    fn load_prog<I>(&mut self, index: I) -> Option<&I::Output>
+    where
+        I: SliceIndex<[u8]>,
+    {
+        self.program.get(index)
+    }
+
+    /// Fetch slice from program memory section, unchecked!
+    #[inline(always)]
+    unsafe fn load_prog_unchecked<I>(&mut self, index: I) -> &I::Output
+    where
+        I: SliceIndex<[u8]>,
+    {
+        self.program.get_unchecked(index)
+    }
 }
 
-impl<PfHandler: HandlePageFault> SoftPagedMem<PfHandler> {
+impl<'p, PfH: HandlePageFault> SoftPagedMem<'p, PfH> {
     // Everyone behold, the holy function, the god of HBVM memory accesses!
 
     /// Split address to pages, check their permissions and feed pointers with offset
@@ -76,6 +99,37 @@ impl<PfHandler: HandlePageFault> SoftPagedMem<PfHandler> {
         permission_check: fn(Permission) -> bool,
         action: fn(*mut u8, *mut u8, usize),
     ) -> Result<(), u64> {
+        // Memory load from program section
+        let (src, len) = if src < self.program.len() as _ {
+            // Allow only loads
+            if reason != MemoryAccessReason::Load {
+                return Err(src);
+            }
+
+            // Determine how much data to copy from here
+            let to_copy = len.clamp(0, self.program.len().saturating_sub(src as _));
+
+            // Perform action
+            action(
+                unsafe { self.program.as_ptr().add(src as _).cast_mut() },
+                dst,
+                to_copy,
+            );
+
+            // Return shifted from what we've already copied
+            (
+                src.saturating_add(to_copy as _),
+                len.saturating_sub(to_copy),
+            )
+        } else {
+            (src, len) // Nothing weird!
+        };
+
+        // Nothing to copy? Don't bother doing anything, bail.
+        if len == 0 {
+            return Ok(());
+        }
+
         // Create new splitter
         let mut pspl = AddrPageLookuper::new(src, len, self.root_pt);
 
         loop {
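To make the program-section short circuit above easier to follow, here is the same split rule restated as a standalone function (an illustrative sketch, not hbvm's exact code); it also hints at why main.rs now maps its scratch page at `8192` rather than `0`, out of the program's way:

```rust
/// Illustrative only: how much of a load is served by the program section,
/// and what remains for the paged memory behind it.
fn split_off_program(src: u64, len: usize, program_len: usize) -> (u64, usize) {
    if src < program_len as u64 {
        let to_copy = len.min(program_len - src as usize); // bytes taken from the program
        (src + to_copy as u64, len - to_copy)              // shifted remainder for paging
    } else {
        (src, len) // untouched: the access never enters the program section
    }
}

fn main() {
    // With a 5000-byte program, an access at 8192 is pure paged memory…
    assert_eq!(split_off_program(8192, 100, 5000), (8192, 100));
    // …while an access at 4096 takes 904 bytes from the program first.
    assert_eq!(split_off_program(4096, 1000, 5000), (5000, 96));
}
```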
@@ -121,271 +175,6 @@ impl<PfHandler: HandlePageFault> SoftPagedMem<PfHandler> {
     }
 }
/// Good result from address split
struct AddrPageLookupOk {
/// Virtual address
vaddr: u64,
/// Pointer to the start for perform operation
ptr: *mut u8,
/// Size to the end of page / end of desired size
size: usize,
/// Page permission
perm: Permission,
}
/// Errornous address split result
struct AddrPageLookupError {
/// Address of failure
addr: u64,
/// Requested page size
size: PageSize,
}
/// Address splitter into pages
struct AddrPageLookuper {
/// Current address
addr: u64,
/// Size left
size: usize,
/// Page table
pagetable: *const PageTable,
}
impl AddrPageLookuper {
/// Create a new page lookuper
#[inline]
pub const fn new(addr: u64, size: usize, pagetable: *const PageTable) -> Self {
Self {
addr,
size,
pagetable,
}
}
/// Bump address by size X
fn bump(&mut self, page_size: PageSize) {
self.addr += page_size as u64;
self.size = self.size.saturating_sub(page_size as _);
}
}
impl Iterator for AddrPageLookuper {
type Item = Result<AddrPageLookupOk, AddrPageLookupError>;
fn next(&mut self) -> Option<Self::Item> {
// The end, everything is fine
if self.size == 0 {
return None;
}
let (base, perm, size, offset) = 'a: {
let mut current_pt = self.pagetable;
// Walk the page table
for lvl in (0..5).rev() {
// Get an entry
unsafe {
let entry = (*current_pt)
.table
.get_unchecked(addr_extract_index(self.addr, lvl));
let ptr = entry.ptr();
match entry.permission() {
// No page → page fault
Permission::Empty => {
return Some(Err(AddrPageLookupError {
addr: self.addr,
size: PageSize::from_lvl(lvl)?,
}))
}
// Node → proceed waking
Permission::Node => current_pt = ptr as _,
// Leaf → return relevant data
perm => {
break 'a (
// Pointer in host memory
ptr as *mut u8,
perm,
PageSize::from_lvl(lvl)?,
// In-page offset
addr_extract_index(self.addr, lvl),
);
}
}
}
}
return None; // Reached the end (should not happen)
};
// Get available byte count in the selected page with offset
let avail = (size as usize - offset).clamp(0, self.size);
self.bump(size);
Some(Ok(AddrPageLookupOk {
vaddr: self.addr,
ptr: unsafe { base.add(offset) }, // Return pointer to the start of region
size: avail,
perm,
}))
}
}
#[cfg(feature = "alloc")]
impl<PfHandler: Default> Default for SoftPagedMem<PfHandler> {
fn default() -> Self {
Self {
root_pt: Box::into_raw(Default::default()),
pf_handler: Default::default(),
}
}
}
#[cfg(feature = "alloc")]
impl<A> Drop for SoftPagedMem<A> {
fn drop(&mut self) {
let _ = unsafe { Box::from_raw(self.root_pt) };
}
}
#[cfg(feature = "alloc")]
impl<A> SoftPagedMem<A> {
/// Maps host's memory into VM's memory
///
/// # Safety
/// - Your faith in the gods of UB
/// - Addr-san claims it's fine but who knows is she isn't lying :ferrisSus:
/// - Alright, Miri-sama is also fine with this, who knows why
pub unsafe fn map(
&mut self,
host: *mut u8,
target: u64,
perm: Permission,
pagesize: PageSize,
) -> Result<(), MapError> {
let mut current_pt = self.root_pt;
// Decide on what level depth are we going
let lookup_depth = match pagesize {
PageSize::Size4K => 0,
PageSize::Size2M => 1,
PageSize::Size1G => 2,
};
// Walk pagetable levels
for lvl in (lookup_depth + 1..5).rev() {
let entry = (*current_pt)
.table
.get_unchecked_mut(addr_extract_index(target, lvl));
let ptr = entry.ptr();
match entry.permission() {
// Still not on target and already seeing empty entry?
// No worries! Let's create one (allocates).
Permission::Empty => {
// Increase children count
(*current_pt).childen += 1;
let table = Box::into_raw(Box::new(paging::PtPointedData {
pt: PageTable::default(),
}));
core::ptr::write(entry, PtEntry::new(table, Permission::Node));
current_pt = table as _;
}
// Continue walking
Permission::Node => current_pt = ptr as _,
// There is some entry on place of node
_ => return Err(MapError::PageOnNode),
}
}
let node = (*current_pt)
.table
.get_unchecked_mut(addr_extract_index(target, lookup_depth));
// Check if node is not mapped
if node.permission() != Permission::Empty {
return Err(MapError::AlreadyMapped);
}
// Write entry
(*current_pt).childen += 1;
core::ptr::write(node, PtEntry::new(host.cast(), perm));
Ok(())
}
/// Unmaps pages from VM's memory
///
/// If errors, it only means there is no entry to unmap and in most cases
/// just should be ignored.
pub fn unmap(&mut self, addr: u64) -> Result<(), NothingToUnmap> {
let mut current_pt = self.root_pt;
let mut page_tables = [core::ptr::null_mut(); 5];
// Walk page table in reverse
for lvl in (0..5).rev() {
let entry = unsafe {
(*current_pt)
.table
.get_unchecked_mut(addr_extract_index(addr, lvl))
};
let ptr = entry.ptr();
match entry.permission() {
// Nothing is there, throw an error, not critical!
Permission::Empty => return Err(NothingToUnmap),
// Node Save to visited pagetables and continue walking
Permission::Node => {
page_tables[lvl as usize] = entry;
current_pt = ptr as _
}
// Page entry zero it out!
// Zero page entry is completely valid entry with
// empty permission - no UB here!
_ => unsafe {
core::ptr::write_bytes(entry, 0, 1);
break;
},
}
}
// Now walk in order visited page tables
for entry in page_tables.into_iter() {
// Level not visited, skip.
if entry.is_null() {
continue;
}
unsafe {
let children = &mut (*(*entry).ptr()).pt.childen;
*children -= 1; // Decrease children count
// If there are no children, deallocate.
if *children == 0 {
let _ = Box::from_raw((*entry).ptr() as *mut PageTable);
// Zero visited entry
core::ptr::write_bytes(entry, 0, 1);
} else {
break;
}
}
}
Ok(())
}
}
 /// Extract index in page table on specified level
 ///
 /// The level shall not be larger than 4, otherwise
@@ -420,27 +209,6 @@ impl PageSize {
     }
 }
/// Error mapping
#[derive(Clone, Copy, Display, Debug, PartialEq, Eq)]
pub enum MapError {
/// Entry was already mapped
#[display(fmt = "There is already a page mapped on specified address")]
AlreadyMapped,
/// When walking a page entry was
/// encounterd.
#[display(fmt = "There was a page mapped on the way instead of node")]
PageOnNode,
}
/// There was no entry in page table to unmap
///
/// No worry, don't panic, nothing bad has happened,
/// but if you are 120% sure there should be something,
/// double-check your addresses.
#[derive(Clone, Copy, Display, Debug)]
#[display(fmt = "There was no entry to unmap")]
pub struct NothingToUnmap;
 /// Permisison checks
 pub mod perm_check {
     use super::paging::Permission;
@@ -459,6 +227,12 @@ pub mod perm_check {
     pub const fn writable(perm: Permission) -> bool {
         matches!(perm, Permission::Write)
     }
 
+    /// Page is executable
+    #[inline(always)]
+    pub const fn executable(perm: Permission) -> bool {
+        matches!(perm, Permission::Exec)
+    }
 }
 
 /// Handle VM traps


@@ -261,8 +261,10 @@
 # Memory
 - Addresses are 64 bit
-- Address `0x0` is invalid and acessing it traps
+- Program should be in the same address space as all other data
 - Memory implementation is arbitrary
+- Address `0x0` may or may not be valid. Count on compilers
+  considering it invalid!
 - In case of accessing invalid address:
   - Program shall trap (LoadAccessEx, StoreAccessEx) with parameter of accessed address
   - Value of register when trapped is undefined
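As a consumer-side illustration of the trap rule above (hedged: it assumes the `VmRunError` variants added in this commit and a host embedding the hbvm crate), the faulting address arrives as the variant's payload:

```rust
use hbvm::VmRunError;

/// Sketch of how an embedder might report a trap that the page-fault handler
/// did not resolve; the accessed address is carried in the error variant.
fn report(err: VmRunError) {
    match err {
        VmRunError::LoadAccessEx(addr) => eprintln!("unhandled load fault at {addr:#x}"),
        VmRunError::StoreAccessEx(addr) => eprintln!("unhandled store fault at {addr:#x}"),
        VmRunError::ProgramFetchLoadEx(addr) => eprintln!("instruction fetch fault at {addr:#x}"),
        _ => eprintln!("vm stopped for another reason"),
    }
}

fn main() {
    report(VmRunError::LoadAccessEx(0xdead_beef));
}
```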