ableos/hbvm/src/mem/softpaging/mod.rs

//! Platform independent, software paged memory implementation
pub mod icache;
pub mod lookup;
pub mod paging;
#[cfg(feature = "alloc")]
pub mod mapping;
use {
    super::{addr::Address, LoadError, Memory, MemoryAccessReason, StoreError},
    core::mem::size_of,
    icache::ICache,
    lookup::{AddrPageLookupError, AddrPageLookupOk, AddrPageLookuper},
    paging::{PageTable, Permission},
};

/// HoleyBytes software paged memory
///
/// - `OUT_PROG_EXEC`: set to `false` to disable executing code that is not
///   contained in the initially provided program, even if the pages are
///   marked executable
#[derive(Clone, Debug)]
pub struct SoftPagedMem<'p, PfH, const OUT_PROG_EXEC: bool = true> {
    /// Root page table
    pub root_pt: *mut PageTable,
    /// Page fault handler
    pub pf_handler: PfH,
    /// Program memory segment
    pub program: &'p [u8],
    /// Program instruction cache
    pub icache: ICache,
}
impl<'p, PfH: HandlePageFault, const OUT_PROG_EXEC: bool> Memory
    for SoftPagedMem<'p, PfH, OUT_PROG_EXEC>
{
    /// Load value from an address
    ///
    /// # Safety
    /// Applies the same conditions as for [`core::ptr::copy_nonoverlapping`]
    unsafe fn load(
        &mut self,
        addr: Address,
        target: *mut u8,
        count: usize,
    ) -> Result<(), LoadError> {
        self.memory_access(
            MemoryAccessReason::Load,
            addr,
            target,
            count,
            perm_check::readable,
            |src, dst, count| unsafe { core::ptr::copy_nonoverlapping(src, dst, count) },
        )
        .map_err(LoadError)
    }

    /// Store value to an address
    ///
    /// # Safety
    /// Applies the same conditions as for [`core::ptr::copy_nonoverlapping`]
    unsafe fn store(
        &mut self,
        addr: Address,
        source: *const u8,
        count: usize,
    ) -> Result<(), StoreError> {
        self.memory_access(
            MemoryAccessReason::Store,
            addr,
            source.cast_mut(),
            count,
            perm_check::writable,
            |dst, src, count| unsafe { core::ptr::copy_nonoverlapping(src, dst, count) },
        )
        .map_err(StoreError)
    }

    #[inline(always)]
    unsafe fn prog_read<T>(&mut self, addr: Address) -> T {
        // If out-of-program execution is allowed, reads past the initially
        // provided program go through the instruction cache (and thus the
        // page tables)
        if OUT_PROG_EXEC && addr.truncate_usize() > self.program.len() {
            return unsafe { self.icache.fetch::<T>(addr, self.root_pt) }
                .unwrap_or_else(|| unsafe { core::mem::zeroed() });
        }

        // Otherwise read directly from the program slice; out-of-bounds
        // reads yield zeroed memory
        let addr = addr.truncate_usize();
        self.program
            .get(addr..addr + size_of::<T>())
            .map(|x| unsafe { x.as_ptr().cast::<T>().read() })
            .unwrap_or_else(|| unsafe { core::mem::zeroed() })
    }
}
impl<'p, PfH: HandlePageFault, const OUT_PROG_EXEC: bool> SoftPagedMem<'p, PfH, OUT_PROG_EXEC> {
2023-08-07 19:48:47 -05:00
// Everyone behold, the holy function, the god of HBVM memory accesses!
/// Split address to pages, check their permissions and feed pointers with offset
/// to a specified function.
///
/// If page is not found, execute page fault trap handler.
#[allow(clippy::too_many_arguments)] // Silence peasant
fn memory_access(
&mut self,
reason: MemoryAccessReason,
src: Address,
2023-08-07 19:48:47 -05:00
mut dst: *mut u8,
len: usize,
permission_check: fn(Permission) -> bool,
action: fn(*mut u8, *mut u8, usize),
) -> Result<(), Address> {
        // Memory load from program section
        let (src, len) = if src.truncate_usize() < self.program.len() as _ {
            // Allow only loads
            if reason != MemoryAccessReason::Load {
                return Err(src);
            }

            // Determine how much data to copy from here
            let to_copy = len.clamp(0, self.program.len().saturating_sub(src.truncate_usize()));

            // Perform action
            action(
                unsafe { self.program.as_ptr().add(src.truncate_usize()).cast_mut() },
                dst,
                to_copy,
            );

            // Return shifted from what we've already copied
            (
                src.saturating_add(to_copy as u64),
                len.saturating_sub(to_copy),
            )
        } else {
            (src, len) // Nothing weird!
        };

        // Nothing to copy? Don't bother doing anything, bail.
        if len == 0 {
            return Ok(());
        }

        // Create new splitter
        let mut pspl = AddrPageLookuper::new(src, len, self.root_pt);
        loop {
            match pspl.next() {
                // Page is found
                Some(Ok(AddrPageLookupOk {
                    vaddr,
                    ptr,
                    size,
                    perm,
                })) => {
                    if !permission_check(perm) {
                        return Err(vaddr);
                    }

                    // Perform specified memory action and bump destination pointer
                    action(ptr, dst, size);
                    dst = unsafe { dst.add(size) };
                }
                // No page found
                Some(Err(AddrPageLookupError { addr, size })) => {
                    // Attempt to execute page fault handler
                    if self.pf_handler.page_fault(
                        reason,
                        unsafe { &mut *self.root_pt },
                        addr,
                        size,
                        dst,
                    ) {
                        // Shift the splitter address
                        pspl.bump(size);

                        // Bump dst pointer
                        dst = unsafe { dst.add(size as _) };
                    } else {
                        return Err(addr); // Unhandleable, VM will yield.
                    }
                }
                // No remaining pages, we are done!
                None => return Ok(()),
            }
        }
    }
}

/// Extract index in page table on specified level
///
/// The level shall not be larger than 4, otherwise
/// the output of the function is unspecified (yes, it can also panic :)
pub fn addr_extract_index(addr: Address, lvl: u8) -> usize {
    debug_assert!(lvl <= 4);
    let addr = addr.get();
    usize::try_from((addr >> (lvl * 8 + 12)) & ((1 << 8) - 1)).expect("?conradluget a better CPU")
}
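
// Worked example (added for illustration, not in the original source): with an
// 8-bit index per level and a 12-bit page offset, the shift for level `lvl` is
// `lvl * 8 + 12`. For `addr = 0x0000_0123_4567_89AB`:
//
//   lvl 0: (addr >> 12) & 0xFF = 0x78
//   lvl 1: (addr >> 20) & 0xFF = 0x56
//   lvl 2: (addr >> 28) & 0xFF = 0x34
//   lvl 3: (addr >> 36) & 0xFF = 0x12
//   lvl 4: (addr >> 44) & 0xFF = 0x00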

/// Page size
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum PageSize {
    /// 4 KiB page (on level 0)
    Size4K = 4096,

    /// 2 MiB page (on level 1)
    Size2M = 1024 * 1024 * 2,

    /// 1 GiB page (on level 2)
    Size1G = 1024 * 1024 * 1024,
}

impl PageSize {
    /// Convert page table level to size of page
    const fn from_lvl(lvl: u8) -> Option<Self> {
        match lvl {
            0 => Some(PageSize::Size4K),
            1 => Some(PageSize::Size2M),
            2 => Some(PageSize::Size1G),
            _ => None,
        }
    }
}

impl core::ops::Add<PageSize> for Address {
    type Output = Self;

    #[inline(always)]
    fn add(self, rhs: PageSize) -> Self::Output {
        self + (rhs as u64)
    }
}

impl core::ops::AddAssign<PageSize> for Address {
    #[inline(always)]
    fn add_assign(&mut self, rhs: PageSize) {
        *self = Self::new(self.get().wrapping_add(rhs as u64));
    }
}

/// Permission checks
pub mod perm_check {
    use super::paging::Permission;

    /// Page is readable
    #[inline(always)]
    pub const fn readable(perm: Permission) -> bool {
        matches!(
            perm,
            Permission::Readonly | Permission::Write | Permission::Exec
        )
    }

    /// Page is writable
    #[inline(always)]
    pub const fn writable(perm: Permission) -> bool {
        matches!(perm, Permission::Write)
    }

    /// Page is executable
    #[inline(always)]
    pub const fn executable(perm: Permission) -> bool {
        matches!(perm, Permission::Exec)
    }
}
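
// Note (added for illustration): `load` above gates pages on
// `perm_check::readable` and `store` on `perm_check::writable`, so under these
// checks an `Exec` page can be read but never written, while stores succeed
// only on `Write` pages.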

/// Handle VM traps
pub trait HandlePageFault {
    /// Handle page fault
    ///
    /// Return true if handling was successful,
    /// otherwise the program will be interrupted and will
    /// yield an error.
    fn page_fault(
        &mut self,
        reason: MemoryAccessReason,
        pagetable: &mut PageTable,
        vaddr: Address,
        size: PageSize,
        dataptr: *mut u8,
    ) -> bool
    where
        Self: Sized;
}
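
// Example (added for illustration, not part of the original file): a minimal
// `HandlePageFault` implementation that refuses every fault, so any access to
// an unmapped page makes `memory_access` return the faulting address and the
// VM yields an error. The name `NoPageFaults` is ours; a real handler would
// typically map a page into `pagetable` and return `true` instead.
pub struct NoPageFaults;

impl HandlePageFault for NoPageFaults {
    fn page_fault(
        &mut self,
        _reason: MemoryAccessReason,
        _pagetable: &mut PageTable,
        _vaddr: Address,
        _size: PageSize,
        _dataptr: *mut u8,
    ) -> bool {
        // Report the fault as unhandled; the VM will surface it as an error.
        false
    }
}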