// ableos/hbvm/src/mem/softpaging/mod.rs
//! Platform independent, software paged memory implementation
use core::mem::size_of;
pub mod lookup;
pub mod paging;
#[cfg(feature = "alloc")]
pub mod mapping;
use {
crate::{LoadError, Memory, MemoryAccessReason, StoreError},
lookup::{AddrPageLookupError, AddrPageLookupOk, AddrPageLookuper},
paging::{PageTable, Permission},
};
/// HoleyBytes software paged memory
///
/// Walks a multi-level [`PageTable`] in software to translate virtual
/// addresses; loads that fall inside the `program` slice are served
/// directly from it instead of going through the page table.
#[derive(Clone, Debug)]
pub struct SoftPagedMem<'p, PfH> {
    /// Root page table
    ///
    /// Raw pointer: ownership/lifetime of the table is managed outside
    /// this struct (dereferenced only inside `unsafe` blocks).
    pub root_pt: *mut PageTable,
    /// Page fault handler, invoked when address lookup finds no page
    pub pf_handler: PfH,
    /// Program memory segment (read-only; stores into it are rejected)
    pub program: &'p [u8],
}
impl<'p, PfH: HandlePageFault> Memory for SoftPagedMem<'p, PfH> {
2023-08-07 19:48:47 -05:00
/// Load value from an address
///
/// # Safety
/// Applies same conditions as for [`core::ptr::copy_nonoverlapping`]
unsafe fn load(&mut self, addr: u64, target: *mut u8, count: usize) -> Result<(), LoadError> {
self.memory_access(
MemoryAccessReason::Load,
addr,
target,
count,
perm_check::readable,
|src, dst, count| core::ptr::copy_nonoverlapping(src, dst, count),
)
.map_err(LoadError)
}
/// Store value to an address
///
/// # Safety
/// Applies same conditions as for [`core::ptr::copy_nonoverlapping`]
unsafe fn store(
&mut self,
addr: u64,
source: *const u8,
count: usize,
) -> Result<(), StoreError> {
self.memory_access(
MemoryAccessReason::Store,
addr,
source.cast_mut(),
count,
perm_check::writable,
|dst, src, count| core::ptr::copy_nonoverlapping(src, dst, count),
)
.map_err(StoreError)
}
2023-08-08 19:33:03 -05:00
2023-08-15 10:21:55 -05:00
#[inline(always)]
unsafe fn prog_read<T>(&mut self, addr: u64) -> Option<T> {
let addr = addr as usize;
self.program
.get(addr..addr + size_of::<T>())
.map(|x| x.as_ptr().cast::<T>().read())
2023-08-08 19:33:03 -05:00
}
2023-08-15 10:21:55 -05:00
#[inline(always)]
unsafe fn prog_read_unchecked<T>(&mut self, addr: u64) -> T {
self.program.as_ptr().add(addr as _).cast::<T>().read()
2023-08-08 19:33:03 -05:00
}
2023-08-07 19:48:47 -05:00
}
impl<'p, PfH: HandlePageFault> SoftPagedMem<'p, PfH> {
2023-08-07 19:48:47 -05:00
// Everyone behold, the holy function, the god of HBVM memory accesses!
/// Split address to pages, check their permissions and feed pointers with offset
/// to a specified function.
///
/// If page is not found, execute page fault trap handler.
#[allow(clippy::too_many_arguments)] // Silence peasant
fn memory_access(
&mut self,
reason: MemoryAccessReason,
src: u64,
mut dst: *mut u8,
len: usize,
permission_check: fn(Permission) -> bool,
action: fn(*mut u8, *mut u8, usize),
) -> Result<(), u64> {
2023-08-08 19:59:11 -05:00
// Memory load from program section
2023-08-08 19:53:55 -05:00
let (src, len) = if src < self.program.len() as _ {
2023-08-08 19:59:11 -05:00
// Allow only loads
2023-08-08 19:57:25 -05:00
if reason != MemoryAccessReason::Load {
return Err(src);
}
2023-08-08 19:59:11 -05:00
// Determine how much data to copy from here
2023-08-08 19:53:55 -05:00
let to_copy = len.clamp(0, self.program.len().saturating_sub(src as _));
2023-08-08 19:59:11 -05:00
// Perform action
2023-08-08 19:53:55 -05:00
action(
unsafe { self.program.as_ptr().add(src as _).cast_mut() },
dst,
to_copy,
);
2023-08-08 19:59:11 -05:00
// Return shifted from what we've already copied
2023-08-08 19:53:55 -05:00
(
src.saturating_add(to_copy as _),
len.saturating_sub(to_copy),
)
} else {
2023-08-08 19:59:11 -05:00
(src, len) // Nothing weird!
2023-08-08 19:53:55 -05:00
};
2023-08-08 19:59:11 -05:00
// Nothing to copy? Don't bother doing anything, bail.
2023-08-08 19:53:55 -05:00
if len == 0 {
return Ok(());
}
2023-08-07 19:48:47 -05:00
// Create new splitter
let mut pspl = AddrPageLookuper::new(src, len, self.root_pt);
loop {
match pspl.next() {
// Page is found
Some(Ok(AddrPageLookupOk {
vaddr,
ptr,
size,
perm,
})) => {
if !permission_check(perm) {
return Err(vaddr);
}
// Perform specified memory action and bump destination pointer
action(ptr, dst, size);
dst = unsafe { dst.add(size) };
}
// No page found
Some(Err(AddrPageLookupError { addr, size })) => {
// Attempt to execute page fault handler
2023-08-07 20:10:11 -05:00
if self.pf_handler.page_fault(
reason,
unsafe { &mut *self.root_pt },
addr,
size,
dst,
) {
2023-08-07 19:48:47 -05:00
// Shift the splitter address
pspl.bump(size);
// Bump dst pointer
dst = unsafe { dst.add(size as _) };
} else {
return Err(addr); // Unhandleable, VM will yield.
}
}
// No remaining pages, we are done!
None => return Ok(()),
}
}
}
}
/// Extract index in page table on specified level
///
/// The level shall not be larger than 4, otherwise
/// the output of the function is unspecified (yes, it can also panic :)
pub fn addr_extract_index(addr: u64, lvl: u8) -> usize {
debug_assert!(lvl <= 4);
usize::try_from((addr >> (lvl * 8 + 12)) & ((1 << 8) - 1)).expect("?conradluget a better CPU")
}
/// Page size
///
/// Discriminants equal the page size in bytes.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum PageSize {
    /// 4 KiB page (on level 0)
    Size4K = 4096,
    /// 2 MiB page (on level 1)
    Size2M = 1024 * 1024 * 2,
    /// 1 GiB page (on level 2)
    Size1G = 1024 * 1024 * 1024,
}

impl PageSize {
    /// Convert page table level to size of page
    ///
    /// Levels above 2 have no associated page size.
    const fn from_lvl(lvl: u8) -> Option<Self> {
        Some(match lvl {
            0 => Self::Size4K,
            1 => Self::Size2M,
            2 => Self::Size1G,
            _ => return None,
        })
    }
}
/// Permission checks
///
/// Predicates over page [`Permission`] values, used by `memory_access`
/// to validate each page an access touches.
pub mod perm_check {
    use super::paging::Permission;

    /// Page is readable
    ///
    /// Read-only, writable and executable pages can all be read from.
    #[inline(always)]
    pub const fn readable(perm: Permission) -> bool {
        matches!(
            perm,
            Permission::Readonly | Permission::Write | Permission::Exec
        )
    }

    /// Page is writable
    ///
    /// Only pages with explicit write permission may be stored to.
    #[inline(always)]
    pub const fn writable(perm: Permission) -> bool {
        matches!(perm, Permission::Write)
    }

    /// Page is executable
    #[inline(always)]
    pub const fn executable(perm: Permission) -> bool {
        matches!(perm, Permission::Exec)
    }
}
/// Handle VM traps
pub trait HandlePageFault {
    /// Handle page fault
    ///
    /// Return true if handling was successful,
    /// otherwise the program will be interrupted and will
    /// yield an error.
    ///
    /// Parameters:
    /// - `reason`: kind of access that faulted (load or store)
    /// - `pagetable`: root page table; the handler may map new pages into it
    /// - `vaddr`: faulting virtual address
    /// - `size`: size of the page expected at `vaddr`
    /// - `dataptr`: host-side pointer of the in-progress access
    ///   (the current `dst` of `memory_access`)
    fn page_fault(
        &mut self,
        reason: MemoryAccessReason,
        pagetable: &mut PageTable,
        vaddr: u64,
        size: PageSize,
        dataptr: *mut u8,
    ) -> bool
    where
        // Not object-safe: implementations are used as concrete types only.
        Self: Sized;
}