Changed memory interfacing

soft-float
Erin 2023-08-08 03:14:19 +02:00
commit d74b32a38d
9 changed files with 321 additions and 305 deletions

View File

@ -17,6 +17,7 @@ macro_rules! constmod {
};
}
#[allow(rustdoc::invalid_rust_codeblocks)]
/// Invoke macro with bytecode definition
/// # Input syntax
/// ```no_run
@@ -31,7 +32,7 @@ macro_rules! constmod {
/// - For each instruction, opcode-specific functions calling the generic ones will be generated
/// - Operand types
/// - R: Register (u8)
/// - I: Immediate (implements [`crate::Imm`] trait)
/// - I: Immediate
/// - L: Memory load / store size (u16)
/// - Other types are identity-mapped
///
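/// A decoding sketch under this mapping (the instruction is hypothetical,
/// the `Param*` types are the crate's own): an instruction declared with
/// operands `R, R, I, L` - a load, say - decodes as
/// `ParamBBDH(u8, u8, u64, u16)`: register, register, immediate, size.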

View File

@@ -1,19 +1,18 @@
//! Block memory copier state machine
use {
super::MemoryAccessReason,
crate::{
mem::{perm_check, HandlePageFault, Memory},
VmRunError,
},
super::{Memory, MemoryAccessReason, VmRunError},
core::{mem::MaybeUninit, task::Poll},
};
// Buffer size (defaults to 4 KiB, the smallest page size on most platforms)
/// Buffer size (defaults to 4 KiB, the smallest page size on most platforms)
const BUF_SIZE: usize = 4096;
// This should be equal to `BUF_SIZE`
/// Buffer of possibly uninitialised bytes, aligned to [`BUF_SIZE`]
#[repr(align(4096))]
struct AlignedBuf([MaybeUninit<u8>; BUF_SIZE]);
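// A compile-time guard one could add (a sketch, not part of this commit):
// tie the `repr(align)` literal above to `BUF_SIZE` so the two cannot
// silently drift apart.
const _: () = assert!(core::mem::align_of::<AlignedBuf>() == BUF_SIZE);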
/// State for block memory copy
pub struct BlockCopier {
/// Source address
src: u64,
@@ -26,6 +25,7 @@ pub struct BlockCopier {
}
impl BlockCopier {
/// Construct a new block copier
#[inline]
pub fn new(src: u64, dst: u64, count: usize) -> Self {
Self {
@@ -40,11 +40,7 @@ impl BlockCopier {
///
/// # Safety
/// - Same as for [`Memory::load`] and [`Memory::store`]
pub unsafe fn poll(
&mut self,
memory: &mut Memory,
traph: &mut impl HandlePageFault,
) -> Poll<Result<(), BlkCopyError>> {
pub unsafe fn poll(&mut self, memory: &mut impl Memory) -> Poll<Result<(), BlkCopyError>> {
// Safety: Assuming an uninit array of MaybeUninit is sound
let mut buf = AlignedBuf(MaybeUninit::uninit().assume_init());
@@ -56,7 +52,6 @@ impl BlockCopier {
self.dst,
buf.0.as_mut_ptr().cast(),
BUF_SIZE,
traph,
) {
return Poll::Ready(Err(e));
}
@@ -68,7 +63,7 @@ impl BlockCopier {
Some(n) => self.src = n,
None => return Poll::Ready(Err(BlkCopyError::OutOfBounds)),
};
match self.dst.checked_add(BUF_SIZE as u64) {
Some(n) => self.dst = n,
None => return Poll::Ready(Err(BlkCopyError::OutOfBounds)),
@@ -92,7 +87,6 @@ impl BlockCopier {
self.dst,
buf.0.as_mut_ptr().cast(),
self.rem,
traph,
) {
return Poll::Ready(Err(e));
}
@@ -102,43 +96,27 @@ impl BlockCopier {
}
}
/// Load to buffer and store from buffer
#[inline]
unsafe fn act(
memory: &mut Memory,
memory: &mut impl Memory,
src: u64,
dst: u64,
buf: *mut u8,
count: usize,
traph: &mut impl HandlePageFault,
) -> Result<(), BlkCopyError> {
// Load to buffer
memory
.memory_access(
MemoryAccessReason::Load,
src,
buf,
count,
perm_check::readable,
|src, dst, count| core::ptr::copy(src, dst, count),
traph,
)
.map_err(|addr| BlkCopyError::Access {
.load(src, buf, count)
.map_err(|super::LoadError(addr)| BlkCopyError::Access {
access_reason: MemoryAccessReason::Load,
addr,
})?;
// Store from buffer
memory
.memory_access(
MemoryAccessReason::Store,
dst,
buf,
count,
perm_check::writable,
|dst, src, count| core::ptr::copy(src, dst, count),
traph,
)
.map_err(|addr| BlkCopyError::Access {
.store(dst, buf, count)
.map_err(|super::StoreError(addr)| BlkCopyError::Access {
access_reason: MemoryAccessReason::Store,
addr,
})?;
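// Standalone usage sketch (`copy_all` is illustrative, not in this file):
// drive the state machine to completion. Each `poll` moves at most one
// `BUF_SIZE` chunk, which is what lets the interpreter interleave polls
// with instruction dispatch.
unsafe fn copy_all(
    memory: &mut impl Memory,
    src: u64,
    dst: u64,
    count: usize,
) -> Result<(), BlkCopyError> {
    let mut copier = BlockCopier::new(src, dst, count);
    loop {
        // `Pending` means "one chunk done, more remain"
        if let Poll::Ready(res) = copier.poll(memory) {
            return res;
        }
    }
}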

View File

@@ -2,8 +2,8 @@
//!
//! # Alloc feature
//! - Enabled by default
//! - Provides [`mem::Memory`] mapping / unmapping, as well as
//! [`Default`] and [`Drop`] implementation
//! - Provides mapping / unmapping, as well as [`Default`] and [`Drop`]
//! implementations for the soft-paged memory
// # General safety notice:
// - Validation has to assure there are 256 registers (r0 - r255)
@@ -11,26 +11,31 @@
// - Mapped pages should be at least 4 KiB
#![no_std]
#![cfg_attr(feature = "nightly", feature(fn_align))]
#![warn(missing_docs, clippy::missing_docs_in_private_items)]
use core::marker::PhantomData;
#[cfg(feature = "alloc")]
extern crate alloc;
pub mod mem;
pub mod softpaging;
pub mod value;
mod bmc;
use {
bmc::BlockCopier,
core::{cmp::Ordering, mem::size_of, ops},
derive_more::Display,
hbbytecode::{
valider, OpParam, ParamBB, ParamBBB, ParamBBBB, ParamBBD, ParamBBDH, ParamBBW, ParamBD,
},
mem::{bmc::BlockCopier, HandlePageFault, Memory},
value::{Value, ValueVariant},
};
/// HoleyBytes Virtual Machine
pub struct Vm<'a, PfHandler, const TIMER_QUOTIENT: usize> {
pub struct Vm<'a, Mem, const TIMER_QUOTIENT: usize> {
/// Holds 256 registers
///
/// Writing to register 0 is considered undefined behaviour
@@ -38,20 +43,20 @@ pub struct Vm<'a, PfHandler, const TIMER_QUOTIENT: usize> {
pub registers: [Value; 256],
/// Memory implementation
pub memory: Memory,
/// Trap handler
pub pfhandler: PfHandler,
pub memory: Mem,
/// Program counter
pub pc: usize,
/// Program
program: &'a [u8],
program: *const u8,
/// Cached program length (without unreachable end)
program_len: usize,
/// Program lifetime
_program_lt: PhantomData<&'a [u8]>,
/// Program timer
timer: usize,
@@ -59,34 +64,31 @@ pub struct Vm<'a, PfHandler, const TIMER_QUOTIENT: usize> {
copier: Option<BlockCopier>,
}
impl<'a, PfHandler: HandlePageFault, const TIMER_QUOTIENT: usize>
Vm<'a, PfHandler, TIMER_QUOTIENT>
impl<'a, Mem, const TIMER_QUOTIENT: usize> Vm<'a, Mem, TIMER_QUOTIENT>
where
Mem: Memory,
{
/// Create a new VM with program and memory
///
/// # Safety
/// Program code has to be validated
pub unsafe fn new_unchecked(program: &'a [u8], traph: PfHandler, memory: Memory) -> Self {
pub unsafe fn new_unchecked(program: &'a [u8], memory: Mem) -> Self {
Self {
registers: [Value::from(0_u64); 256],
memory,
pfhandler: traph,
pc: 0,
program_len: program.len() - 12,
program: &program[4..],
program: program[4..].as_ptr(),
_program_lt: Default::default(),
timer: 0,
copier: None,
}
}
/// Create a new VM with program and memory, but only if the program passes validation
pub fn new_validated(
program: &'a [u8],
traph: PfHandler,
memory: Memory,
) -> Result<Self, valider::Error> {
pub fn new_validated(program: &'a [u8], memory: Mem) -> Result<Self, valider::Error> {
valider::validate(program)?;
Ok(unsafe { Self::new_unchecked(program, traph, memory) })
Ok(unsafe { Self::new_unchecked(program, memory) })
}
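// Construction sketch: validation runs first, which is exactly what makes
// the `unsafe` constructor sound to call afterwards (`prog` and the
// handler type `MyHandler` are assumed, not defined in this commit):
//
//     let mut vm = Vm::<_, 0>::new_validated(&prog, SoftPagedMem::<MyHandler>::default())?;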
/// Execute program
@@ -121,7 +123,7 @@ impl<'a, PfHandler: HandlePageFault, const TIMER_QUOTIENT: usize>
// - Yes, we assume you run a 64 bit CPU. Else, get a better CPU
// sorry 8 bit fans, HBVM won't run on your Speccy :(
unsafe {
match *self.program.get_unchecked(self.pc) {
match *self.program.add(self.pc) {
UN => {
self.decode::<()>();
return Err(VmRunError::Unreachable);
@@ -245,7 +247,6 @@ impl<'a, PfHandler: HandlePageFault, const TIMER_QUOTIENT: usize>
.add(usize::from(dst) + usize::from(n))
.cast(),
usize::from(count).saturating_sub(n.into()),
&mut self.pfhandler,
)?;
}
ST => {
@@ -255,14 +256,13 @@ impl<'a, PfHandler: HandlePageFault, const TIMER_QUOTIENT: usize>
self.ldst_addr_uber(dst, base, off, count, 0)?,
self.registers.as_ptr().add(usize::from(dst)).cast(),
count.into(),
&mut self.pfhandler,
)?;
}
BMC => {
// Block memory copy
match if let Some(copier) = &mut self.copier {
// There is some copier, poll.
copier.poll(&mut self.memory, &mut self.pfhandler)
copier.poll(&mut self.memory)
} else {
// There is none, make one!
let ParamBBD(src, dst, count) = self.decode();
@@ -279,7 +279,7 @@ impl<'a, PfHandler: HandlePageFault, const TIMER_QUOTIENT: usize>
self.copier
.as_mut()
.unwrap_unchecked() // SAFETY: We just assigned there
.poll(&mut self.memory, &mut self.pfhandler)
.poll(&mut self.memory)
} {
// We are done, shift program counter
core::task::Poll::Ready(Ok(())) => {
@@ -386,7 +386,7 @@ impl<'a, PfHandler: HandlePageFault, const TIMER_QUOTIENT: usize>
/// Decode instruction operands
#[inline]
unsafe fn decode<T: OpParam>(&mut self) -> T {
let data = self.program.as_ptr().add(self.pc + 1).cast::<T>().read();
let data = self.program.add(self.pc + 1).cast::<T>().read();
self.pc += 1 + size_of::<T>();
data
}
@@ -506,3 +506,54 @@ pub enum VmRunOk {
/// Environment call
Ecall,
}
/// Load-store memory access
pub trait Memory {
/// Load data from memory at address
///
/// # Safety
/// - Shall not overrun the buffer
unsafe fn load(&mut self, addr: u64, target: *mut u8, count: usize) -> Result<(), LoadError>;
/// Store data to memory at address
///
/// # Safety
/// - Shall not overrun the buffer
unsafe fn store(
&mut self,
addr: u64,
source: *const u8,
count: usize,
) -> Result<(), StoreError>;
}
/// Unhandled load access trap
#[derive(Clone, Copy, Display, Debug, PartialEq, Eq)]
#[display(fmt = "Load access error at address {_0:#x}")]
pub struct LoadError(pub u64);
/// Unhandled store access trap
#[derive(Clone, Copy, Display, Debug, PartialEq, Eq)]
#[display(fmt = "Store access error at address {_0:#x}")]
pub struct StoreError(pub u64);
/// Reason to access memory
#[derive(Clone, Copy, Display, Debug, PartialEq, Eq)]
pub enum MemoryAccessReason {
/// Memory was accessed for load (read)
Load,
/// Memory was accessed for store (write)
Store,
}
impl From<LoadError> for VmRunError {
fn from(value: LoadError) -> Self {
Self::LoadAccessEx(value.0)
}
}
impl From<StoreError> for VmRunError {
fn from(value: StoreError) -> Self {
Self::StoreAccessEx(value.0)
}
}
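// A minimal sketch of a non-paged implementor of the trait above, backed
// by a flat host buffer (`FlatMem` is illustrative and assumes the `alloc`
// feature); address 0x0 is rejected per the spec's recommendation.
struct FlatMem(alloc::vec::Vec<u8>);

impl Memory for FlatMem {
    unsafe fn load(&mut self, addr: u64, target: *mut u8, count: usize) -> Result<(), LoadError> {
        match (addr as usize).checked_add(count) {
            // Bounds-checked copy out of the backing buffer
            Some(end) if addr != 0 && end <= self.0.len() => {
                core::ptr::copy_nonoverlapping(self.0.as_ptr().add(addr as usize), target, count);
                Ok(())
            }
            _ => Err(LoadError(addr)),
        }
    }

    unsafe fn store(
        &mut self,
        addr: u64,
        source: *const u8,
        count: usize,
    ) -> Result<(), StoreError> {
        match (addr as usize).checked_add(count) {
            // Bounds-checked copy into the backing buffer
            Some(end) if addr != 0 && end <= self.0.len() => {
                core::ptr::copy_nonoverlapping(source, self.0.as_mut_ptr().add(addr as usize), count);
                Ok(())
            }
            _ => Err(StoreError(addr)),
        }
    }
}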

View File

@@ -1,8 +1,8 @@
use {
hbbytecode::valider::validate,
hbvm::{
mem::{HandlePageFault, Memory, MemoryAccessReason, PageSize},
Vm,
softpaging::{paging::PageTable, HandlePageFault, PageSize, SoftPagedMem},
MemoryAccessReason, Vm,
},
std::io::{stdin, Read},
};
@@ -16,7 +16,8 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
return Ok(());
} else {
unsafe {
let mut vm = Vm::<_, 0>::new_unchecked(&prog, TestTrapHandler, Default::default());
let mut vm =
Vm::<_, 0>::new_unchecked(&prog, SoftPagedMem::<TestTrapHandler>::default());
let data = {
let ptr = std::alloc::alloc_zeroed(std::alloc::Layout::from_size_align_unchecked(
4096, 4096,
@@ -31,7 +32,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
.map(
data,
0,
hbvm::mem::paging::Permission::Write,
hbvm::softpaging::paging::Permission::Write,
PageSize::Size4K,
)
.unwrap();
@@ -54,12 +55,13 @@ pub fn time() -> u32 {
9
}
#[derive(Default)]
struct TestTrapHandler;
impl HandlePageFault for TestTrapHandler {
fn page_fault(
&mut self,
_: MemoryAccessReason,
_: &mut Memory,
_: &mut PageTable,
_: u64,
_: PageSize,
_: *mut u8,

View File

@@ -1,20 +0,0 @@
//! Program trap handling interfaces
use super::{Memory, MemoryAccessReason, PageSize};
/// Handle VM traps
pub trait HandlePageFault {
/// Handle page fault
///
/// Return true if handling was successful,
/// otherwise the program will be interrupted and will
/// yield an error.
fn page_fault(
&mut self,
reason: MemoryAccessReason,
memory: &mut Memory,
vaddr: u64,
size: PageSize,
dataptr: *mut u8,
) -> bool;
}

View File

@@ -1,14 +1,9 @@
//! Program memory implementation
//! Platform-independent, software-paged memory implementation
pub mod bmc;
pub mod paging;
mod pfhandler;
pub use pfhandler::HandlePageFault;
use {
super::VmRunError,
super::{LoadError, Memory, MemoryAccessReason, StoreError},
derive_more::Display,
paging::{PageTable, Permission},
};
@@ -16,171 +11,21 @@ use {
#[cfg(feature = "alloc")]
use {alloc::boxed::Box, paging::PtEntry};
/// HoleyBytes virtual memory
/// HoleyBytes software paged memory
#[derive(Clone, Debug)]
pub struct Memory {
pub struct SoftPagedMem<PfHandler> {
/// Root page table
pub root_pt: *mut PageTable,
pub root_pt: *mut PageTable,
/// Page fault handler
pub pf_handler: PfHandler,
}
#[cfg(feature = "alloc")]
impl Default for Memory {
fn default() -> Self {
Self {
root_pt: Box::into_raw(Default::default()),
}
}
}
#[cfg(feature = "alloc")]
impl Drop for Memory {
fn drop(&mut self) {
let _ = unsafe { Box::from_raw(self.root_pt) };
}
}
impl Memory {
/// Maps host's memory into VM's memory
///
/// # Safety
/// - Your faith in the gods of UB
/// - Addr-san claims it's fine but who knows if she isn't lying :ferrisSus:
/// - Alright, Miri-sama is also fine with this, who knows why
#[cfg(feature = "alloc")]
pub unsafe fn map(
&mut self,
host: *mut u8,
target: u64,
perm: Permission,
pagesize: PageSize,
) -> Result<(), MapError> {
let mut current_pt = self.root_pt;
// Decide what level depth we are going to
let lookup_depth = match pagesize {
PageSize::Size4K => 0,
PageSize::Size2M => 1,
PageSize::Size1G => 2,
};
// Walk pagetable levels
for lvl in (lookup_depth + 1..5).rev() {
let entry = (*current_pt)
.table
.get_unchecked_mut(addr_extract_index(target, lvl));
let ptr = entry.ptr();
match entry.permission() {
// Still not on target and already seeing empty entry?
// No worries! Let's create one (allocates).
Permission::Empty => {
// Increase children count
(*current_pt).childen += 1;
let table = Box::into_raw(Box::new(paging::PtPointedData {
pt: PageTable::default(),
}));
core::ptr::write(entry, PtEntry::new(table, Permission::Node));
current_pt = table as _;
}
// Continue walking
Permission::Node => current_pt = ptr as _,
// There is some entry in place of a node
_ => return Err(MapError::PageOnNode),
}
}
let node = (*current_pt)
.table
.get_unchecked_mut(addr_extract_index(target, lookup_depth));
// Check if node is not mapped
if node.permission() != Permission::Empty {
return Err(MapError::AlreadyMapped);
}
// Write entry
(*current_pt).childen += 1;
core::ptr::write(node, PtEntry::new(host.cast(), perm));
Ok(())
}
/// Unmaps pages from VM's memory
///
/// If this errors, it only means there is no entry to unmap and in most
/// cases it should just be ignored.
#[cfg(feature = "alloc")]
pub fn unmap(&mut self, addr: u64) -> Result<(), NothingToUnmap> {
let mut current_pt = self.root_pt;
let mut page_tables = [core::ptr::null_mut(); 5];
// Walk page table in reverse
for lvl in (0..5).rev() {
let entry = unsafe {
(*current_pt)
.table
.get_unchecked_mut(addr_extract_index(addr, lvl))
};
let ptr = entry.ptr();
match entry.permission() {
// Nothing is there, throw an error, not critical!
Permission::Empty => return Err(NothingToUnmap),
// Node? Save to visited page tables and continue walking
Permission::Node => {
page_tables[lvl as usize] = entry;
current_pt = ptr as _
}
// Page entry? Zero it out!
// A zeroed page entry is a completely valid entry with
// empty permission - no UB here!
_ => unsafe {
core::ptr::write_bytes(entry, 0, 1);
break;
},
}
}
// Now walk the visited page tables in order
for entry in page_tables.into_iter() {
// Level not visited, skip.
if entry.is_null() {
continue;
}
unsafe {
let children = &mut (*(*entry).ptr()).pt.childen;
*children -= 1; // Decrease children count
// If there are no children, deallocate.
if *children == 0 {
let _ = Box::from_raw((*entry).ptr() as *mut PageTable);
// Zero visited entry
core::ptr::write_bytes(entry, 0, 1);
} else {
break;
}
}
}
Ok(())
}
impl<PfHandler: HandlePageFault> Memory for SoftPagedMem<PfHandler> {
/// Load value from an address
///
/// # Safety
/// Applies same conditions as for [`core::ptr::copy_nonoverlapping`]
pub unsafe fn load(
&mut self,
addr: u64,
target: *mut u8,
count: usize,
traph: &mut impl HandlePageFault,
) -> Result<(), LoadError> {
unsafe fn load(&mut self, addr: u64, target: *mut u8, count: usize) -> Result<(), LoadError> {
self.memory_access(
MemoryAccessReason::Load,
addr,
@@ -188,7 +33,6 @@ impl Memory {
count,
perm_check::readable,
|src, dst, count| core::ptr::copy_nonoverlapping(src, dst, count),
traph,
)
.map_err(LoadError)
}
@@ -197,12 +41,11 @@ impl Memory {
///
/// # Safety
/// Applies same conditions as for [`core::ptr::copy_nonoverlapping`]
pub unsafe fn store(
unsafe fn store(
&mut self,
addr: u64,
source: *const u8,
count: usize,
traph: &mut impl HandlePageFault,
) -> Result<(), StoreError> {
self.memory_access(
MemoryAccessReason::Store,
@@ -211,11 +54,12 @@ impl Memory {
count,
perm_check::writable,
|dst, src, count| core::ptr::copy_nonoverlapping(src, dst, count),
traph,
)
.map_err(StoreError)
}
}
impl<PfHandler: HandlePageFault> SoftPagedMem<PfHandler> {
// Everyone behold, the holy function, the god of HBVM memory accesses!
/// Split address into pages, check their permissions, and feed pointers with offsets
@@ -231,7 +75,6 @@ impl Memory {
len: usize,
permission_check: fn(Permission) -> bool,
action: fn(*mut u8, *mut u8, usize),
traph: &mut impl HandlePageFault,
) -> Result<(), u64> {
// Create new splitter
let mut pspl = AddrPageLookuper::new(src, len, self.root_pt);
@@ -255,7 +98,13 @@ impl Memory {
// No page found
Some(Err(AddrPageLookupError { addr, size })) => {
// Attempt to execute page fault handler
if traph.page_fault(reason, self, addr, size, dst) {
if self.pf_handler.page_fault(
reason,
unsafe { &mut *self.root_pt },
addr,
size,
dst,
) {
// Shift the splitter address
pspl.bump(size);
@@ -389,6 +238,154 @@ impl Iterator for AddrPageLookuper {
}
}
#[cfg(feature = "alloc")]
impl<PfHandler: Default> Default for SoftPagedMem<PfHandler> {
fn default() -> Self {
Self {
root_pt: Box::into_raw(Default::default()),
pf_handler: Default::default(),
}
}
}
#[cfg(feature = "alloc")]
impl<A> Drop for SoftPagedMem<A> {
fn drop(&mut self) {
let _ = unsafe { Box::from_raw(self.root_pt) };
}
}
#[cfg(feature = "alloc")]
impl<A> SoftPagedMem<A> {
/// Maps host's memory into VM's memory
///
/// # Safety
/// - Your faith in the gods of UB
/// - Addr-san claims it's fine but who knows if she isn't lying :ferrisSus:
/// - Alright, Miri-sama is also fine with this, who knows why
pub unsafe fn map(
&mut self,
host: *mut u8,
target: u64,
perm: Permission,
pagesize: PageSize,
) -> Result<(), MapError> {
let mut current_pt = self.root_pt;
// Decide what level depth we are going to
let lookup_depth = match pagesize {
PageSize::Size4K => 0,
PageSize::Size2M => 1,
PageSize::Size1G => 2,
};
// Walk pagetable levels
for lvl in (lookup_depth + 1..5).rev() {
let entry = (*current_pt)
.table
.get_unchecked_mut(addr_extract_index(target, lvl));
let ptr = entry.ptr();
match entry.permission() {
// Still not on target and already seeing empty entry?
// No worries! Let's create one (allocates).
Permission::Empty => {
// Increase children count
(*current_pt).childen += 1;
let table = Box::into_raw(Box::new(paging::PtPointedData {
pt: PageTable::default(),
}));
core::ptr::write(entry, PtEntry::new(table, Permission::Node));
current_pt = table as _;
}
// Continue walking
Permission::Node => current_pt = ptr as _,
// There is some entry in place of a node
_ => return Err(MapError::PageOnNode),
}
}
let node = (*current_pt)
.table
.get_unchecked_mut(addr_extract_index(target, lookup_depth));
// Check if node is not mapped
if node.permission() != Permission::Empty {
return Err(MapError::AlreadyMapped);
}
// Write entry
(*current_pt).childen += 1;
core::ptr::write(node, PtEntry::new(host.cast(), perm));
Ok(())
}
/// Unmaps pages from VM's memory
///
/// If this errors, it only means there is no entry to unmap and in most
/// cases it should just be ignored.
pub fn unmap(&mut self, addr: u64) -> Result<(), NothingToUnmap> {
let mut current_pt = self.root_pt;
let mut page_tables = [core::ptr::null_mut(); 5];
// Walk page table in reverse
for lvl in (0..5).rev() {
let entry = unsafe {
(*current_pt)
.table
.get_unchecked_mut(addr_extract_index(addr, lvl))
};
let ptr = entry.ptr();
match entry.permission() {
// Nothing is there, throw an error, not critical!
Permission::Empty => return Err(NothingToUnmap),
// Node? Save to visited page tables and continue walking
Permission::Node => {
page_tables[lvl as usize] = entry;
current_pt = ptr as _
}
// Page entry? Zero it out!
// A zeroed page entry is a completely valid entry with
// empty permission - no UB here!
_ => unsafe {
core::ptr::write_bytes(entry, 0, 1);
break;
},
}
}
// Now walk the visited page tables in order
for entry in page_tables.into_iter() {
// Level not visited, skip.
if entry.is_null() {
continue;
}
unsafe {
let children = &mut (*(*entry).ptr()).pt.childen;
*children -= 1; // Decrease children count
// If there are no children, deallocate.
if *children == 0 {
let _ = Box::from_raw((*entry).ptr() as *mut PageTable);
// Zero visited entry
core::ptr::write_bytes(entry, 0, 1);
} else {
break;
}
}
}
Ok(())
}
}
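// Usage sketch mirroring the example binary (`mem`, `page_ptr` and the
// target address are assumptions): lend a host page to the VM, later take
// it back.
//
//     unsafe { mem.map(page_ptr, 0x1000, Permission::Write, PageSize::Size4K)? };
//     let _ = mem.unmap(0x1000); // an Err only means nothing was mapped there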
/// Extract index in page table on specified level
///
/// The level shall not be larger than 4, otherwise
@@ -423,44 +420,6 @@ impl PageSize {
}
}
/// Unhandled load access trap
#[derive(Clone, Copy, Display, Debug, PartialEq, Eq)]
#[display(fmt = "Load access error at address {_0:#x}")]
pub struct LoadError(u64);
/// Unhandled store access trap
#[derive(Clone, Copy, Display, Debug, PartialEq, Eq)]
#[display(fmt = "Store access error at address {_0:#x}")]
pub struct StoreError(u64);
/// There was no entry in page table to unmap
///
/// No worry, don't panic, nothing bad has happened,
/// but if you are 120% sure there should be something,
/// double-check your addresses.
#[derive(Clone, Copy, Display, Debug)]
#[display(fmt = "There was no entry to unmap")]
pub struct NothingToUnmap;
/// Reason to access memory
#[derive(Clone, Copy, Display, Debug, PartialEq, Eq)]
pub enum MemoryAccessReason {
Load,
Store,
}
impl From<LoadError> for VmRunError {
fn from(value: LoadError) -> Self {
Self::LoadAccessEx(value.0)
}
}
impl From<StoreError> for VmRunError {
fn from(value: StoreError) -> Self {
Self::StoreAccessEx(value.0)
}
}
/// Error mapping
#[derive(Clone, Copy, Display, Debug, PartialEq, Eq)]
pub enum MapError {
@@ -473,13 +432,22 @@ pub enum MapError {
PageOnNode,
}
/// There was no entry in page table to unmap
///
/// No worry, don't panic, nothing bad has happened,
/// but if you are 120% sure there should be something,
/// double-check your addresses.
#[derive(Clone, Copy, Display, Debug)]
#[display(fmt = "There was no entry to unmap")]
pub struct NothingToUnmap;
/// Permission checks
pub mod perm_check {
use super::paging::Permission;
/// Page is readable
#[inline(always)]
pub fn readable(perm: Permission) -> bool {
pub const fn readable(perm: Permission) -> bool {
matches!(
perm,
Permission::Readonly | Permission::Write | Permission::Exec
@@ -488,7 +456,26 @@ pub mod perm_check {
/// Page is writable
#[inline(always)]
pub fn writable(perm: Permission) -> bool {
perm == Permission::Write
pub const fn writable(perm: Permission) -> bool {
matches!(perm, Permission::Write)
}
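// Compile-time sanity sketch (possible now that both predicates are
// `const fn`); spells out the permission table the two checks encode.
const _: () = {
    assert!(readable(Permission::Readonly));
    assert!(readable(Permission::Exec));
    assert!(writable(Permission::Write));
    assert!(!writable(Permission::Readonly));
};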
}
/// Handle VM traps
pub trait HandlePageFault {
/// Handle page fault
///
/// Return true if handling was successful,
/// otherwise the program will be interrupted and will
/// yield an error.
fn page_fault(
&mut self,
reason: MemoryAccessReason,
pagetable: &mut PageTable,
vaddr: u64,
size: PageSize,
dataptr: *mut u8,
) -> bool
where
Self: Sized;
}
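// A minimal sketch of implementing the relocated trait (`NeverFault` is
// illustrative): refuse every fault, so any unmapped access surfaces as
// a load/store access trap.
struct NeverFault;

impl HandlePageFault for NeverFault {
    fn page_fault(
        &mut self,
        _reason: MemoryAccessReason,
        _pagetable: &mut PageTable,
        _vaddr: u64,
        _size: PageSize,
        _dataptr: *mut u8,
    ) -> bool {
        false // unhandled: the VM yields Load/StoreAccessEx to its caller
    }
}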

View File

@@ -59,7 +59,9 @@ impl Debug for PtEntry {
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(align(4096))]
pub struct PageTable {
/// How many entries are in use
pub childen: u8,
/// Entries
pub table: [PtEntry; 256],
}

View File

@@ -13,7 +13,10 @@ macro_rules! value_def {
#[derive(Copy, Clone)]
#[repr(packed)]
pub union Value {
$(pub $ty: $ty),*
$(
#[doc = concat!(stringify!($ty), " type")]
pub $ty: $ty
),*
}
@@ -37,10 +40,22 @@ macro_rules! value_def {
}
impl Value {
/// Byte reinterpret value to target variant
#[inline]
pub fn cast<Variant: ValueVariant>(self) -> Variant {
pub fn cast<V: ValueVariant>(self) -> V {
/// Evil.
///
/// Transmute cannot be performed with a generic type
/// as its size is unknown, so a union is used.
///
/// # Safety
/// If [`ValueVariant`] is implemented correctly, it's fine :)
///
/// :ferrisClueless:
union Transmute<Variant: ValueVariant> {
/// Self
src: Value,
/// Target variant
variant: Variant,
}
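// The hunk ends here; the body presumably finishes by reading the other
// union field, e.g. `unsafe { Transmute { src: self }.variant }`. Usage
// sketch:
//
//     let raw: u64 = Value::from(0_u64).cast::<u64>();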

View File

@@ -260,13 +260,13 @@
# Memory
- Addresses are 64 bit
- Address `0x0` is invalid and accessing it traps
- Memory implementation is arbitrary
- In case of accessing an invalid address:
- Program shall trap (LoadAccessEx, StoreAccessEx) with the accessed address as a parameter (see the sketch at the end of this section)
- Value of register when trapped is undefined
## Recommendations
- Leave address `0x0` as invalid
- If paging is used:
- Leave first page invalid
- Pages should be at least 4 KiB
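
A conforming implementation surfaces these traps through its error values. A minimal check against the hbvm crate's types (a sketch; `mem` is assumed to be an empty soft-paged memory whose fault handler refuses the access):

    let err = unsafe { mem.load(0x0, buf.as_mut_ptr(), 1) }.unwrap_err();
    assert_eq!(err, LoadError(0x0)); // the VM reports this as VmRunError::LoadAccessEx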