Abstraction of memory

Erin 2023-08-08 02:48:47 +02:00, committed by ondra05
parent 2aad3a1002
commit 1e92797775
7 changed files with 508 additions and 511 deletions

View file

@@ -26,12 +26,12 @@ use {
     hbbytecode::{
         valider, OpParam, ParamBB, ParamBBB, ParamBBBB, ParamBBD, ParamBBDH, ParamBBW, ParamBD,
     },
-    mem::{bmc::BlockCopier, HandlePageFault, Memory},
+    mem::bmc::BlockCopier,
     value::{Value, ValueVariant},
 };
 
 /// HoleyBytes Virtual Machine
-pub struct Vm<'a, PfHandler, const TIMER_QUOTIENT: usize> {
+pub struct Vm<'a, Memory, const TIMER_QUOTIENT: usize> {
     /// Holds 256 registers
     ///
     /// Writing to register 0 is considered undefined behaviour
@@ -41,9 +41,6 @@ pub struct Vm<'a, PfHandler, const TIMER_QUOTIENT: usize> {
     /// Memory implementation
     pub memory: Memory,
 
-    /// Trap handler
-    pub pfhandler: PfHandler,
-
     /// Program counter
     pub pc: usize,
 
@@ -63,18 +60,18 @@ pub struct Vm<'a, PfHandler, const TIMER_QUOTIENT: usize> {
     copier: Option<BlockCopier>,
 }
 
-impl<'a, PfHandler: HandlePageFault, const TIMER_QUOTIENT: usize>
-    Vm<'a, PfHandler, TIMER_QUOTIENT>
+impl<'a, Memory, const TIMER_QUOTIENT: usize> Vm<'a, Memory, TIMER_QUOTIENT>
+where
+    Memory: mem::Memory,
 {
     /// Create a new VM with program and trap handler
     ///
     /// # Safety
     /// Program code has to be validated
-    pub unsafe fn new_unchecked(program: &'a [u8], traph: PfHandler, memory: Memory) -> Self {
+    pub unsafe fn new_unchecked(program: &'a [u8], memory: Memory) -> Self {
         Self {
             registers: [Value::from(0_u64); 256],
             memory,
-            pfhandler: traph,
             pc: 0,
             program_len: program.len() - 12,
             program: program[4..].as_ptr(),
@@ -85,13 +82,9 @@ impl<'a, PfHandler: HandlePageFault, const TIMER_QUOTIENT: usize>
     }
 
     /// Create a new VM with program and trap handler only if it passes validation
-    pub fn new_validated(
-        program: &'a [u8],
-        traph: PfHandler,
-        memory: Memory,
-    ) -> Result<Self, valider::Error> {
+    pub fn new_validated(program: &'a [u8], memory: Memory) -> Result<Self, valider::Error> {
         valider::validate(program)?;
-        Ok(unsafe { Self::new_unchecked(program, traph, memory) })
+        Ok(unsafe { Self::new_unchecked(program, memory) })
     }
 
     /// Execute program
@@ -250,7 +243,6 @@ impl<'a, PfHandler: HandlePageFault, const TIMER_QUOTIENT: usize>
                         .add(usize::from(dst) + usize::from(n))
                         .cast(),
                     usize::from(count).saturating_sub(n.into()),
-                    &mut self.pfhandler,
                 )?;
             }
             ST => {
@@ -260,14 +252,13 @@ impl<'a, PfHandler: HandlePageFault, const TIMER_QUOTIENT: usize>
                     self.ldst_addr_uber(dst, base, off, count, 0)?,
                     self.registers.as_ptr().add(usize::from(dst)).cast(),
                     count.into(),
-                    &mut self.pfhandler,
                 )?;
             }
             BMC => {
                 // Block memory copy
                 match if let Some(copier) = &mut self.copier {
                     // There is some copier, poll.
-                    copier.poll(&mut self.memory, &mut self.pfhandler)
+                    copier.poll(&mut self.memory)
                 } else {
                     // There is none, make one!
                     let ParamBBD(src, dst, count) = self.decode();
@@ -284,7 +275,7 @@ impl<'a, PfHandler: HandlePageFault, const TIMER_QUOTIENT: usize>
                     self.copier
                         .as_mut()
                         .unwrap_unchecked() // SAFETY: We just assigned there
-                        .poll(&mut self.memory, &mut self.pfhandler)
+                        .poll(&mut self.memory)
                 } {
                     // We are done, shift program counter
                     core::task::Poll::Ready(Ok(())) => {
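
With the handler folded into the memory type, both constructors drop the separate `traph` argument. A minimal sketch of the new construction flow (assuming `prog` holds bytecode and using the `SoftPagedMem`/`TestTrapHandler` types from the files below; `0` is the `TIMER_QUOTIENT`):

    // The memory implementation now owns its page-fault handler.
    let mem = SoftPagedMem::<TestTrapHandler>::default();
    let mut vm = Vm::<_, 0>::new_validated(&prog, mem)?;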

View file

@@ -1,9 +1,8 @@
+use hbvm::mem::softpaged::{HandlePageFault, PageSize, SoftPagedMem};
 use {
     hbbytecode::valider::validate,
-    hbvm::{
-        mem::{HandlePageFault, Memory, MemoryAccessReason, PageSize},
-        Vm,
-    },
+    hbvm::{mem::MemoryAccessReason, Vm},
     std::io::{stdin, Read},
 };
@@ -16,7 +15,8 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
         return Ok(());
     } else {
         unsafe {
-            let mut vm = Vm::<_, 0>::new_unchecked(&prog, TestTrapHandler, Default::default());
+            let mut vm =
+                Vm::<_, 0>::new_unchecked(&prog, SoftPagedMem::<TestTrapHandler>::default());
             let data = {
                 let ptr = std::alloc::alloc_zeroed(std::alloc::Layout::from_size_align_unchecked(
                     4096, 4096,
@@ -31,7 +31,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
            .map(
                data,
                0,
-               hbvm::mem::paging::Permission::Write,
+               hbvm::mem::softpaged::paging::Permission::Write,
                PageSize::Size4K,
            )
            .unwrap();
@@ -54,12 +54,13 @@ pub fn time() -> u32 {
     9
 }
 
+#[derive(Default)]
 struct TestTrapHandler;
 
 impl HandlePageFault for TestTrapHandler {
     fn page_fault(
         &mut self,
         _: MemoryAccessReason,
-        _: &mut Memory,
+        //_: &mut Memory,
         _: u64,
         _: PageSize,
         _: *mut u8,
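
The `map` call above pairs with `unmap` on the same memory value. A sketch of the round trip under the `alloc` feature, assuming the receiver is the VM's public `memory` field (`data` is the zeroed 4 KiB host allocation from the listing; paths as in this diff):

    unsafe {
        vm.memory
            .map(
                data,
                0,
                hbvm::mem::softpaged::paging::Permission::Write,
                PageSize::Size4K,
            )
            .unwrap();
    }
    // Later: errs with NothingToUnmap only if nothing was mapped there.
    let _ = vm.memory.unmap(0);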

View file

@@ -1,9 +1,6 @@
 use {
     super::MemoryAccessReason,
-    crate::{
-        mem::{HandlePageFault, Memory},
-        VmRunError,
-    },
+    crate::{mem::Memory, VmRunError},
     core::{mem::MaybeUninit, task::Poll},
 };
@@ -40,11 +37,7 @@ impl BlockCopier {
     ///
     /// # Safety
     /// - Same as for [`Memory::load`] and [`Memory::store`]
-    pub unsafe fn poll(
-        &mut self,
-        memory: &mut Memory,
-        traph: &mut impl HandlePageFault,
-    ) -> Poll<Result<(), BlkCopyError>> {
+    pub unsafe fn poll(&mut self, memory: &mut impl Memory) -> Poll<Result<(), BlkCopyError>> {
         // Safety: Assuming uninit of array of MaybeUninit is sound
         let mut buf = AlignedBuf(MaybeUninit::uninit().assume_init());
@@ -56,7 +49,6 @@ impl BlockCopier {
                 self.dst,
                 buf.0.as_mut_ptr().cast(),
                 BUF_SIZE,
-                traph,
             ) {
                 return Poll::Ready(Err(e));
             }
@@ -92,7 +84,6 @@ impl BlockCopier {
                 self.dst,
                 buf.0.as_mut_ptr().cast(),
                 self.rem,
-                traph,
             ) {
                 return Poll::Ready(Err(e));
             }
@@ -104,16 +95,15 @@ impl BlockCopier {
 #[inline]
 unsafe fn act(
-    memory: &mut Memory,
+    memory: &mut impl Memory,
     src: u64,
     dst: u64,
     buf: *mut u8,
     count: usize,
-    traph: &mut impl HandlePageFault,
 ) -> Result<(), BlkCopyError> {
     // Load to buffer
     memory
-        .load(src, buf, count, traph)
+        .load(src, buf, count)
         .map_err(|super::LoadError(addr)| BlkCopyError::Access {
             access_reason: MemoryAccessReason::Load,
             addr,
@@ -121,7 +111,7 @@ unsafe fn act(
 
     // Store from buffer
     memory
-        .store(dst, buf, count, traph)
+        .store(dst, buf, count)
         .map_err(|super::StoreError(addr)| BlkCopyError::Access {
             access_reason: MemoryAccessReason::Store,
             addr,
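
`poll` moves one block per call and reports progress through `Poll`. A sketch of driving a copier to completion against any backend, written in this module's namespace (the `BlockCopier` constructor is not shown in this diff, so `copier` is assumed to already exist):

    use core::task::Poll;

    /// Run a copier until it finishes or hits an access error.
    /// Safety: same contract as `BlockCopier::poll` itself.
    unsafe fn copy_all(
        copier: &mut BlockCopier,
        mem: &mut impl Memory,
    ) -> Result<(), BlkCopyError> {
        loop {
            match copier.poll(mem) {
                Poll::Ready(res) => return res, // done, or an Access { .. } error
                Poll::Pending => {}             // one block moved; keep going
            }
        }
    }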

View file

@@ -1,426 +1,27 @@
 //! Program memory implementation
 
 pub mod bmc;
-pub mod paging;
-
-mod pfhandler;
-
-pub use pfhandler::HandlePageFault;
+pub mod softpaged;
 
-use {
-    super::VmRunError,
-    derive_more::Display,
-    paging::{PageTable, Permission},
-};
-
-#[cfg(feature = "alloc")]
-use {alloc::boxed::Box, paging::PtEntry};
-
-/// HoleyBytes virtual memory
-#[derive(Clone, Debug)]
-pub struct Memory {
-    /// Root page table
-    pub root_pt: *mut PageTable,
-}
+use {super::VmRunError, derive_more::Display};
 
-#[cfg(feature = "alloc")]
-impl Default for Memory {
-    fn default() -> Self {
-        Self {
-            root_pt: Box::into_raw(Default::default()),
-        }
-    }
-}
+pub trait Memory {
+    /// Load data from memory on address
+    ///
+    /// # Safety
+    /// - Shall not overrun the buffer
+    unsafe fn load(&mut self, addr: u64, target: *mut u8, count: usize) -> Result<(), LoadError>;
 
-#[cfg(feature = "alloc")]
-impl Drop for Memory {
-    fn drop(&mut self) {
-        let _ = unsafe { Box::from_raw(self.root_pt) };
-    }
-}
+    /// Store data to memory on address
+    ///
+    /// # Safety
+    /// - Shall not overrun the buffer
+    unsafe fn store(
+        &mut self,
+        addr: u64,
+        source: *const u8,
+        count: usize,
+    ) -> Result<(), StoreError>;
+}
 
-impl Memory {
-    /// Maps host's memory into VM's memory
-    ///
-    /// # Safety
-    /// - Your faith in the gods of UB
-    /// - Addr-san claims it's fine but who knows if she isn't lying :ferrisSus:
-    /// - Alright, Miri-sama is also fine with this, who knows why
-    #[cfg(feature = "alloc")]
-    pub unsafe fn map(
-        &mut self,
-        host: *mut u8,
-        target: u64,
-        perm: Permission,
-        pagesize: PageSize,
-    ) -> Result<(), MapError> {
-        let mut current_pt = self.root_pt;
-
-        // Decide on what level depth we are going
-        let lookup_depth = match pagesize {
-            PageSize::Size4K => 0,
-            PageSize::Size2M => 1,
-            PageSize::Size1G => 2,
-        };
-
-        // Walk pagetable levels
-        for lvl in (lookup_depth + 1..5).rev() {
-            let entry = (*current_pt)
-                .table
-                .get_unchecked_mut(addr_extract_index(target, lvl));
-
-            let ptr = entry.ptr();
-            match entry.permission() {
-                // Still not on target and already seeing empty entry?
-                // No worries! Let's create one (allocates).
-                Permission::Empty => {
-                    // Increase children count
-                    (*current_pt).childen += 1;
-
-                    let table = Box::into_raw(Box::new(paging::PtPointedData {
-                        pt: PageTable::default(),
-                    }));
-
-                    core::ptr::write(entry, PtEntry::new(table, Permission::Node));
-                    current_pt = table as _;
-                }
-                // Continue walking
-                Permission::Node => current_pt = ptr as _,
-
-                // There is some entry on place of node
-                _ => return Err(MapError::PageOnNode),
-            }
-        }
-
-        let node = (*current_pt)
-            .table
-            .get_unchecked_mut(addr_extract_index(target, lookup_depth));
-
-        // Check if node is not mapped
-        if node.permission() != Permission::Empty {
-            return Err(MapError::AlreadyMapped);
-        }
-
-        // Write entry
-        (*current_pt).childen += 1;
-        core::ptr::write(node, PtEntry::new(host.cast(), perm));
-
-        Ok(())
-    }
-
-    /// Unmaps pages from VM's memory
-    ///
-    /// If it errors, it only means there is no entry to unmap and in most cases
-    /// it should just be ignored.
-    #[cfg(feature = "alloc")]
-    pub fn unmap(&mut self, addr: u64) -> Result<(), NothingToUnmap> {
-        let mut current_pt = self.root_pt;
-        let mut page_tables = [core::ptr::null_mut(); 5];
-
-        // Walk page table in reverse
-        for lvl in (0..5).rev() {
-            let entry = unsafe {
-                (*current_pt)
-                    .table
-                    .get_unchecked_mut(addr_extract_index(addr, lvl))
-            };
-
-            let ptr = entry.ptr();
-            match entry.permission() {
-                // Nothing is there, throw an error, not critical!
-                Permission::Empty => return Err(NothingToUnmap),
-                // Node? Save to visited pagetables and continue walking
-                Permission::Node => {
-                    page_tables[lvl as usize] = entry;
-                    current_pt = ptr as _
-                }
-                // Page entry? Zero it out!
-                // Zero page entry is completely valid entry with
-                // empty permission - no UB here!
-                _ => unsafe {
-                    core::ptr::write_bytes(entry, 0, 1);
-                    break;
-                },
-            }
-        }
-
-        // Now walk in order visited page tables
-        for entry in page_tables.into_iter() {
-            // Level not visited, skip.
-            if entry.is_null() {
-                continue;
-            }
-
-            unsafe {
-                let children = &mut (*(*entry).ptr()).pt.childen;
-                *children -= 1; // Decrease children count
-
-                // If there are no children, deallocate.
-                if *children == 0 {
-                    let _ = Box::from_raw((*entry).ptr() as *mut PageTable);
-
-                    // Zero visited entry
-                    core::ptr::write_bytes(entry, 0, 1);
-                } else {
-                    break;
-                }
-            }
-        }
-
-        Ok(())
-    }
-
-    /// Load value from an address
-    ///
-    /// # Safety
-    /// Applies same conditions as for [`core::ptr::copy_nonoverlapping`]
-    pub unsafe fn load(
-        &mut self,
-        addr: u64,
-        target: *mut u8,
-        count: usize,
-        traph: &mut impl HandlePageFault,
-    ) -> Result<(), LoadError> {
-        self.memory_access(
-            MemoryAccessReason::Load,
-            addr,
-            target,
-            count,
-            perm_check::readable,
-            |src, dst, count| core::ptr::copy_nonoverlapping(src, dst, count),
-            traph,
-        )
-        .map_err(LoadError)
-    }
-
-    /// Store value to an address
-    ///
-    /// # Safety
-    /// Applies same conditions as for [`core::ptr::copy_nonoverlapping`]
-    pub unsafe fn store(
-        &mut self,
-        addr: u64,
-        source: *const u8,
-        count: usize,
-        traph: &mut impl HandlePageFault,
-    ) -> Result<(), StoreError> {
-        self.memory_access(
-            MemoryAccessReason::Store,
-            addr,
-            source.cast_mut(),
-            count,
-            perm_check::writable,
-            |dst, src, count| core::ptr::copy_nonoverlapping(src, dst, count),
-            traph,
-        )
-        .map_err(StoreError)
-    }
-
-    // Everyone behold, the holy function, the god of HBVM memory accesses!
-
-    /// Split address to pages, check their permissions and feed pointers with offset
-    /// to a specified function.
-    ///
-    /// If page is not found, execute page fault trap handler.
-    #[allow(clippy::too_many_arguments)] // Silence peasant
-    fn memory_access(
-        &mut self,
-        reason: MemoryAccessReason,
-        src: u64,
-        mut dst: *mut u8,
-        len: usize,
-        permission_check: fn(Permission) -> bool,
-        action: fn(*mut u8, *mut u8, usize),
-        traph: &mut impl HandlePageFault,
-    ) -> Result<(), u64> {
-        // Create new splitter
-        let mut pspl = AddrPageLookuper::new(src, len, self.root_pt);
-        loop {
-            match pspl.next() {
-                // Page is found
-                Some(Ok(AddrPageLookupOk {
-                    vaddr,
-                    ptr,
-                    size,
-                    perm,
-                })) => {
-                    if !permission_check(perm) {
-                        return Err(vaddr);
-                    }
-
-                    // Perform specified memory action and bump destination pointer
-                    action(ptr, dst, size);
-                    dst = unsafe { dst.add(size) };
-                }
-                // No page found
-                Some(Err(AddrPageLookupError { addr, size })) => {
-                    // Attempt to execute page fault handler
-                    if traph.page_fault(reason, self, addr, size, dst) {
-                        // Shift the splitter address
-                        pspl.bump(size);
-
-                        // Bump dst pointer
-                        dst = unsafe { dst.add(size as _) };
-                    } else {
-                        return Err(addr); // Unhandleable, VM will yield.
-                    }
-                }
-                // No remaining pages, we are done!
-                None => return Ok(()),
-            }
-        }
-    }
-}
-
-/// Good result from address split
-struct AddrPageLookupOk {
-    /// Virtual address
-    vaddr: u64,
-
-    /// Pointer to the start for performing the operation
-    ptr: *mut u8,
-
-    /// Size to the end of page / end of desired size
-    size: usize,
-
-    /// Page permission
-    perm: Permission,
-}
-
-/// Erroneous address split result
-struct AddrPageLookupError {
-    /// Address of failure
-    addr: u64,
-
-    /// Requested page size
-    size: PageSize,
-}
-
-/// Address splitter into pages
-struct AddrPageLookuper {
-    /// Current address
-    addr: u64,
-
-    /// Size left
-    size: usize,
-
-    /// Page table
-    pagetable: *const PageTable,
-}
-
-impl AddrPageLookuper {
-    /// Create a new page lookuper
-    #[inline]
-    pub const fn new(addr: u64, size: usize, pagetable: *const PageTable) -> Self {
-        Self {
-            addr,
-            size,
-            pagetable,
-        }
-    }
-
-    /// Bump address by size X
-    fn bump(&mut self, page_size: PageSize) {
-        self.addr += page_size as u64;
-        self.size = self.size.saturating_sub(page_size as _);
-    }
-}
-
-impl Iterator for AddrPageLookuper {
-    type Item = Result<AddrPageLookupOk, AddrPageLookupError>;
-
-    fn next(&mut self) -> Option<Self::Item> {
-        // The end, everything is fine
-        if self.size == 0 {
-            return None;
-        }
-
-        let (base, perm, size, offset) = 'a: {
-            let mut current_pt = self.pagetable;
-
-            // Walk the page table
-            for lvl in (0..5).rev() {
-                // Get an entry
-                unsafe {
-                    let entry = (*current_pt)
-                        .table
-                        .get_unchecked(addr_extract_index(self.addr, lvl));
-
-                    let ptr = entry.ptr();
-                    match entry.permission() {
-                        // No page → page fault
-                        Permission::Empty => {
-                            return Some(Err(AddrPageLookupError {
-                                addr: self.addr,
-                                size: PageSize::from_lvl(lvl)?,
-                            }))
-                        }
-                        // Node → proceed walking
-                        Permission::Node => current_pt = ptr as _,
-                        // Leaf → return relevant data
-                        perm => {
-                            break 'a (
-                                // Pointer in host memory
-                                ptr as *mut u8,
-                                perm,
-                                PageSize::from_lvl(lvl)?,
-                                // In-page offset
-                                addr_extract_index(self.addr, lvl),
-                            );
-                        }
-                    }
-                }
-            }
-            return None; // Reached the end (should not happen)
-        };
-
-        // Get available byte count in the selected page with offset
-        let avail = (size as usize - offset).clamp(0, self.size);
-        self.bump(size);
-
-        Some(Ok(AddrPageLookupOk {
-            vaddr: self.addr,
-            ptr: unsafe { base.add(offset) }, // Return pointer to the start of region
-            size: avail,
-            perm,
-        }))
-    }
-}
-
-/// Extract index in page table on specified level
-///
-/// The level shall not be larger than 4, otherwise
-/// the output of the function is unspecified (yes, it can also panic :)
-pub fn addr_extract_index(addr: u64, lvl: u8) -> usize {
-    debug_assert!(lvl <= 4);
-    usize::try_from((addr >> (lvl * 8 + 12)) & ((1 << 8) - 1)).expect("?conradluget a better CPU")
-}
-
-/// Page size
-#[derive(Clone, Copy, Debug, PartialEq, Eq)]
-pub enum PageSize {
-    /// 4 KiB page (on level 0)
-    Size4K = 4096,
-
-    /// 2 MiB page (on level 1)
-    Size2M = 1024 * 1024 * 2,
-
-    /// 1 GiB page (on level 2)
-    Size1G = 1024 * 1024 * 1024,
-}
-
-impl PageSize {
-    /// Convert page table level to size of page
-    const fn from_lvl(lvl: u8) -> Option<Self> {
-        match lvl {
-            0 => Some(PageSize::Size4K),
-            1 => Some(PageSize::Size2M),
-            2 => Some(PageSize::Size1G),
-            _ => None,
-        }
-    }
-}
 
 /// Unhandled load access trap
@@ -433,15 +34,6 @@ pub struct LoadError(pub u64);
 #[display(fmt = "Store access error at address {_0:#x}")]
 pub struct StoreError(pub u64);
 
-/// There was no entry in page table to unmap
-///
-/// No worry, don't panic, nothing bad has happened,
-/// but if you are 120% sure there should be something,
-/// double-check your addresses.
-#[derive(Clone, Copy, Display, Debug)]
-#[display(fmt = "There was no entry to unmap")]
-pub struct NothingToUnmap;
-
 /// Reason to access memory
 #[derive(Clone, Copy, Display, Debug, PartialEq, Eq)]
 pub enum MemoryAccessReason {
@@ -460,35 +52,3 @@ impl From<StoreError> for VmRunError {
         Self::StoreAccessEx(value.0)
     }
 }
-
-/// Error mapping
-#[derive(Clone, Copy, Display, Debug, PartialEq, Eq)]
-pub enum MapError {
-    /// Entry was already mapped
-    #[display(fmt = "There is already a page mapped on specified address")]
-    AlreadyMapped,
-    /// When walking, a page entry was
-    /// encountered.
-    #[display(fmt = "There was a page mapped on the way instead of node")]
-    PageOnNode,
-}
-
-/// Permission checks
-pub mod perm_check {
-    use super::paging::Permission;
-
-    /// Page is readable
-    #[inline(always)]
-    pub const fn readable(perm: Permission) -> bool {
-        matches!(
-            perm,
-            Permission::Readonly | Permission::Write | Permission::Exec
-        )
-    }
-
-    /// Page is writable
-    #[inline(always)]
-    pub const fn writable(perm: Permission) -> bool {
-        matches!(perm, Permission::Write)
-    }
-}
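
With `Memory` reduced to this two-method trait, backends other than software paging can plug into the VM. A hypothetical flat, bounds-checked backend as a sketch (`FlatMem` is not part of this commit; `LoadError`/`StoreError` paths assumed as defined above):

    /// Hypothetical backend: one contiguous buffer, no paging, no page faults.
    struct FlatMem(Vec<u8>);

    impl Memory for FlatMem {
        unsafe fn load(&mut self, addr: u64, target: *mut u8, count: usize) -> Result<(), LoadError> {
            // Overflow-safe bounds check; out of range becomes a load access trap.
            let Some(end) = (addr as usize).checked_add(count) else {
                return Err(LoadError(addr));
            };
            match self.0.get(addr as usize..end) {
                Some(src) => Ok(core::ptr::copy_nonoverlapping(src.as_ptr(), target, count)),
                None => Err(LoadError(addr)),
            }
        }

        unsafe fn store(&mut self, addr: u64, source: *const u8, count: usize) -> Result<(), StoreError> {
            let Some(end) = (addr as usize).checked_add(count) else {
                return Err(StoreError(addr));
            };
            match self.0.get_mut(addr as usize..end) {
                Some(dst) => Ok(core::ptr::copy_nonoverlapping(source, dst.as_mut_ptr(), count)),
                None => Err(StoreError(addr)),
            }
        }
    }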

View file

@@ -1,20 +0,0 @@
-//! Program trap handling interfaces
-
-use super::{Memory, MemoryAccessReason, PageSize};
-
-/// Handle VM traps
-pub trait HandlePageFault {
-    /// Handle page fault
-    ///
-    /// Return true if handling was successful,
-    /// otherwise the program will be interrupted and will
-    /// yield an error.
-    fn page_fault(
-        &mut self,
-        reason: MemoryAccessReason,
-        memory: &mut Memory,
-        vaddr: u64,
-        size: PageSize,
-        dataptr: *mut u8,
-    ) -> bool;
-}

View file

@@ -0,0 +1,475 @@
+//! Platform independent, software paged memory implementation
+
+pub mod paging;
+
+use {
+    super::{LoadError, Memory, MemoryAccessReason, StoreError},
+    derive_more::Display,
+    paging::{PageTable, Permission},
+};
+
+#[cfg(feature = "alloc")]
+use {alloc::boxed::Box, paging::PtEntry};
+
+/// HoleyBytes software paged memory
+#[derive(Clone, Debug)]
+pub struct SoftPagedMem<PfHandler> {
+    /// Root page table
+    pub root_pt: *mut PageTable,
+    /// Page fault handler
+    pub pf_handler: PfHandler,
+}
+
+impl<PfHandler: HandlePageFault> Memory for SoftPagedMem<PfHandler> {
+    /// Load value from an address
+    ///
+    /// # Safety
+    /// Applies same conditions as for [`core::ptr::copy_nonoverlapping`]
+    unsafe fn load(&mut self, addr: u64, target: *mut u8, count: usize) -> Result<(), LoadError> {
+        self.memory_access(
+            MemoryAccessReason::Load,
+            addr,
+            target,
+            count,
+            perm_check::readable,
+            |src, dst, count| core::ptr::copy_nonoverlapping(src, dst, count),
+        )
+        .map_err(LoadError)
+    }
+
+    /// Store value to an address
+    ///
+    /// # Safety
+    /// Applies same conditions as for [`core::ptr::copy_nonoverlapping`]
+    unsafe fn store(
+        &mut self,
+        addr: u64,
+        source: *const u8,
+        count: usize,
+    ) -> Result<(), StoreError> {
+        self.memory_access(
+            MemoryAccessReason::Store,
+            addr,
+            source.cast_mut(),
+            count,
+            perm_check::writable,
+            |dst, src, count| core::ptr::copy_nonoverlapping(src, dst, count),
+        )
+        .map_err(StoreError)
+    }
+}
+
+impl<PfHandler: HandlePageFault> SoftPagedMem<PfHandler> {
+    // Everyone behold, the holy function, the god of HBVM memory accesses!
+
+    /// Split address to pages, check their permissions and feed pointers with offset
+    /// to a specified function.
+    ///
+    /// If page is not found, execute page fault trap handler.
+    #[allow(clippy::too_many_arguments)] // Silence peasant
+    fn memory_access(
+        &mut self,
+        reason: MemoryAccessReason,
+        src: u64,
+        mut dst: *mut u8,
+        len: usize,
+        permission_check: fn(Permission) -> bool,
+        action: fn(*mut u8, *mut u8, usize),
+    ) -> Result<(), u64> {
+        // Create new splitter
+        let mut pspl = AddrPageLookuper::new(src, len, self.root_pt);
+        loop {
+            match pspl.next() {
+                // Page is found
+                Some(Ok(AddrPageLookupOk {
+                    vaddr,
+                    ptr,
+                    size,
+                    perm,
+                })) => {
+                    if !permission_check(perm) {
+                        return Err(vaddr);
+                    }
+
+                    // Perform specified memory action and bump destination pointer
+                    action(ptr, dst, size);
+                    dst = unsafe { dst.add(size) };
+                }
+                // No page found
+                Some(Err(AddrPageLookupError { addr, size })) => {
+                    // Attempt to execute page fault handler
+                    if self.pf_handler.page_fault(reason, addr, size, dst) {
+                        // Shift the splitter address
+                        pspl.bump(size);
+
+                        // Bump dst pointer
+                        dst = unsafe { dst.add(size as _) };
+                    } else {
+                        return Err(addr); // Unhandleable, VM will yield.
+                    }
+                }
+                // No remaining pages, we are done!
+                None => return Ok(()),
+            }
+        }
+    }
+}
+
+/// Good result from address split
+struct AddrPageLookupOk {
+    /// Virtual address
+    vaddr: u64,
+
+    /// Pointer to the start for performing the operation
+    ptr: *mut u8,
+
+    /// Size to the end of page / end of desired size
+    size: usize,
+
+    /// Page permission
+    perm: Permission,
+}
+
+/// Erroneous address split result
+struct AddrPageLookupError {
+    /// Address of failure
+    addr: u64,
+
+    /// Requested page size
+    size: PageSize,
+}
+
+/// Address splitter into pages
+struct AddrPageLookuper {
+    /// Current address
+    addr: u64,
+
+    /// Size left
+    size: usize,
+
+    /// Page table
+    pagetable: *const PageTable,
+}
+
+impl AddrPageLookuper {
+    /// Create a new page lookuper
+    #[inline]
+    pub const fn new(addr: u64, size: usize, pagetable: *const PageTable) -> Self {
+        Self {
+            addr,
+            size,
+            pagetable,
+        }
+    }
+
+    /// Bump address by size X
+    fn bump(&mut self, page_size: PageSize) {
+        self.addr += page_size as u64;
+        self.size = self.size.saturating_sub(page_size as _);
+    }
+}
+
+impl Iterator for AddrPageLookuper {
+    type Item = Result<AddrPageLookupOk, AddrPageLookupError>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        // The end, everything is fine
+        if self.size == 0 {
+            return None;
+        }
+
+        let (base, perm, size, offset) = 'a: {
+            let mut current_pt = self.pagetable;
+
+            // Walk the page table
+            for lvl in (0..5).rev() {
+                // Get an entry
+                unsafe {
+                    let entry = (*current_pt)
+                        .table
+                        .get_unchecked(addr_extract_index(self.addr, lvl));
+
+                    let ptr = entry.ptr();
+                    match entry.permission() {
+                        // No page → page fault
+                        Permission::Empty => {
+                            return Some(Err(AddrPageLookupError {
+                                addr: self.addr,
+                                size: PageSize::from_lvl(lvl)?,
+                            }))
+                        }
+                        // Node → proceed walking
+                        Permission::Node => current_pt = ptr as _,
+                        // Leaf → return relevant data
+                        perm => {
+                            break 'a (
+                                // Pointer in host memory
+                                ptr as *mut u8,
+                                perm,
+                                PageSize::from_lvl(lvl)?,
+                                // In-page offset
+                                addr_extract_index(self.addr, lvl),
+                            );
+                        }
+                    }
+                }
+            }
+            return None; // Reached the end (should not happen)
+        };
+
+        // Get available byte count in the selected page with offset
+        let avail = (size as usize - offset).clamp(0, self.size);
+        self.bump(size);
+
+        Some(Ok(AddrPageLookupOk {
+            vaddr: self.addr,
+            ptr: unsafe { base.add(offset) }, // Return pointer to the start of region
+            size: avail,
+            perm,
+        }))
+    }
+}
+
+#[cfg(feature = "alloc")]
+impl<PfHandler: Default> Default for SoftPagedMem<PfHandler> {
+    fn default() -> Self {
+        Self {
+            root_pt: Box::into_raw(Default::default()),
+            pf_handler: Default::default(),
+        }
+    }
+}
+
+#[cfg(feature = "alloc")]
+impl<A> Drop for SoftPagedMem<A> {
+    fn drop(&mut self) {
+        let _ = unsafe { Box::from_raw(self.root_pt) };
+    }
+}
+
+#[cfg(feature = "alloc")]
+impl<A> SoftPagedMem<A> {
+    /// Maps host's memory into VM's memory
+    ///
+    /// # Safety
+    /// - Your faith in the gods of UB
+    /// - Addr-san claims it's fine but who knows if she isn't lying :ferrisSus:
+    /// - Alright, Miri-sama is also fine with this, who knows why
+    pub unsafe fn map(
+        &mut self,
+        host: *mut u8,
+        target: u64,
+        perm: Permission,
+        pagesize: PageSize,
+    ) -> Result<(), MapError> {
+        let mut current_pt = self.root_pt;
+
+        // Decide on what level depth we are going
+        let lookup_depth = match pagesize {
+            PageSize::Size4K => 0,
+            PageSize::Size2M => 1,
+            PageSize::Size1G => 2,
+        };
+
+        // Walk pagetable levels
+        for lvl in (lookup_depth + 1..5).rev() {
+            let entry = (*current_pt)
+                .table
+                .get_unchecked_mut(addr_extract_index(target, lvl));
+
+            let ptr = entry.ptr();
+            match entry.permission() {
+                // Still not on target and already seeing empty entry?
+                // No worries! Let's create one (allocates).
+                Permission::Empty => {
+                    // Increase children count
+                    (*current_pt).childen += 1;
+
+                    let table = Box::into_raw(Box::new(paging::PtPointedData {
+                        pt: PageTable::default(),
+                    }));
+
+                    core::ptr::write(entry, PtEntry::new(table, Permission::Node));
+                    current_pt = table as _;
+                }
+                // Continue walking
+                Permission::Node => current_pt = ptr as _,
+
+                // There is some entry on place of node
+                _ => return Err(MapError::PageOnNode),
+            }
+        }
+
+        let node = (*current_pt)
+            .table
+            .get_unchecked_mut(addr_extract_index(target, lookup_depth));
+
+        // Check if node is not mapped
+        if node.permission() != Permission::Empty {
+            return Err(MapError::AlreadyMapped);
+        }
+
+        // Write entry
+        (*current_pt).childen += 1;
+        core::ptr::write(node, PtEntry::new(host.cast(), perm));
+
+        Ok(())
+    }
+
+    /// Unmaps pages from VM's memory
+    ///
+    /// If it errors, it only means there is no entry to unmap and in most cases
+    /// it should just be ignored.
+    pub fn unmap(&mut self, addr: u64) -> Result<(), NothingToUnmap> {
+        let mut current_pt = self.root_pt;
+        let mut page_tables = [core::ptr::null_mut(); 5];
+
+        // Walk page table in reverse
+        for lvl in (0..5).rev() {
+            let entry = unsafe {
+                (*current_pt)
+                    .table
+                    .get_unchecked_mut(addr_extract_index(addr, lvl))
+            };
+
+            let ptr = entry.ptr();
+            match entry.permission() {
+                // Nothing is there, throw an error, not critical!
+                Permission::Empty => return Err(NothingToUnmap),
+                // Node? Save to visited pagetables and continue walking
+                Permission::Node => {
+                    page_tables[lvl as usize] = entry;
+                    current_pt = ptr as _
+                }
+                // Page entry? Zero it out!
+                // Zero page entry is completely valid entry with
+                // empty permission - no UB here!
+                _ => unsafe {
+                    core::ptr::write_bytes(entry, 0, 1);
+                    break;
+                },
+            }
+        }
+
+        // Now walk in order visited page tables
+        for entry in page_tables.into_iter() {
+            // Level not visited, skip.
+            if entry.is_null() {
+                continue;
+            }
+
+            unsafe {
+                let children = &mut (*(*entry).ptr()).pt.childen;
+                *children -= 1; // Decrease children count
+
+                // If there are no children, deallocate.
+                if *children == 0 {
+                    let _ = Box::from_raw((*entry).ptr() as *mut PageTable);
+
+                    // Zero visited entry
+                    core::ptr::write_bytes(entry, 0, 1);
+                } else {
+                    break;
+                }
+            }
+        }
+
+        Ok(())
+    }
+}
+
+/// Extract index in page table on specified level
+///
+/// The level shall not be larger than 4, otherwise
+/// the output of the function is unspecified (yes, it can also panic :)
+pub fn addr_extract_index(addr: u64, lvl: u8) -> usize {
+    debug_assert!(lvl <= 4);
+    usize::try_from((addr >> (lvl * 8 + 12)) & ((1 << 8) - 1)).expect("?conradluget a better CPU")
+}
+
+/// Page size
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+pub enum PageSize {
+    /// 4 KiB page (on level 0)
+    Size4K = 4096,
+
+    /// 2 MiB page (on level 1)
+    Size2M = 1024 * 1024 * 2,
+
+    /// 1 GiB page (on level 2)
+    Size1G = 1024 * 1024 * 1024,
+}
+
+impl PageSize {
+    /// Convert page table level to size of page
+    const fn from_lvl(lvl: u8) -> Option<Self> {
+        match lvl {
+            0 => Some(PageSize::Size4K),
+            1 => Some(PageSize::Size2M),
+            2 => Some(PageSize::Size1G),
+            _ => None,
+        }
+    }
+}
+
+/// Error mapping
+#[derive(Clone, Copy, Display, Debug, PartialEq, Eq)]
+pub enum MapError {
+    /// Entry was already mapped
+    #[display(fmt = "There is already a page mapped on specified address")]
+    AlreadyMapped,
+    /// When walking, a page entry was
+    /// encountered.
+    #[display(fmt = "There was a page mapped on the way instead of node")]
+    PageOnNode,
+}
+
+/// There was no entry in page table to unmap
+///
+/// No worry, don't panic, nothing bad has happened,
+/// but if you are 120% sure there should be something,
+/// double-check your addresses.
+#[derive(Clone, Copy, Display, Debug)]
+#[display(fmt = "There was no entry to unmap")]
+pub struct NothingToUnmap;
+
+/// Permission checks
+pub mod perm_check {
+    use super::paging::Permission;
+
+    /// Page is readable
+    #[inline(always)]
+    pub const fn readable(perm: Permission) -> bool {
+        matches!(
+            perm,
+            Permission::Readonly | Permission::Write | Permission::Exec
+        )
+    }
+
+    /// Page is writable
+    #[inline(always)]
+    pub const fn writable(perm: Permission) -> bool {
+        matches!(perm, Permission::Write)
+    }
+}
+
+/// Handle VM traps
+pub trait HandlePageFault {
+    /// Handle page fault
+    ///
+    /// Return true if handling was successful,
+    /// otherwise the program will be interrupted and will
+    /// yield an error.
+    fn page_fault(
+        &mut self,
+        reason: MemoryAccessReason,
+        // memory: &mut SoftPagedMem<Self>, TODO: Make work
+        vaddr: u64,
+        size: PageSize,
+        dataptr: *mut u8,
+    ) -> bool
+    where
+        Self: Sized;
+}