// holey-bytes/hbvm/src/mem/mod.rs

//! Program memory implementation

pub mod bmc;
pub mod paging;

mod pfhandler;

pub use pfhandler::HandlePageFault;

use {
    super::VmRunError,
    derive_more::Display,
    paging::{PageTable, Permission},
};

#[cfg(feature = "alloc")]
use {alloc::boxed::Box, paging::PtEntry};

/// HoleyBytes virtual memory
#[derive(Clone, Debug)]
pub struct Memory {
    /// Root page table
    pub root_pt: *mut PageTable,
}

#[cfg(feature = "alloc")]
impl Default for Memory {
    fn default() -> Self {
        Self {
            root_pt: Box::into_raw(Default::default()),
        }
    }
}

#[cfg(feature = "alloc")]
impl Drop for Memory {
    fn drop(&mut self) {
        let _ = unsafe { Box::from_raw(self.root_pt) };
    }
}

impl Memory {
    /// Maps host's memory into VM's memory
    ///
    /// # Safety
    /// - Your faith in the gods of UB
    /// - Addr-san claims it's fine but who knows if she isn't lying :ferrisSus:
    /// - Alright, Miri-sama is also fine with this, who knows why
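    ///
    /// # Example
    /// A minimal usage sketch (hypothetical, not from the original docs); it
    /// assumes the items are reachable under these paths and maps one 4 KiB
    /// host page at guest address 0x1000:
    /// ```ignore
    /// use hbvm::mem::{paging::Permission, Memory, PageSize};
    ///
    /// let mut memory = Memory::default();
    /// // Leak the buffer so the host allocation outlives the mapping.
    /// let host_page = Box::leak(Box::new([0u8; 4096]));
    /// unsafe {
    ///     memory
    ///         .map(
    ///             host_page.as_mut_ptr(),
    ///             0x1000,
    ///             Permission::Write,
    ///             PageSize::Size4K,
    ///         )
    ///         .expect("mapping failed");
    /// }
    /// ```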
    #[cfg(feature = "alloc")]
    pub unsafe fn map(
        &mut self,
        host: *mut u8,
        target: u64,
        perm: Permission,
        pagesize: PageSize,
    ) -> Result<(), MapError> {
        let mut current_pt = self.root_pt;

        // Decide how deep the lookup goes based on the page size
        let lookup_depth = match pagesize {
            PageSize::Size4K => 0,
            PageSize::Size2M => 1,
            PageSize::Size1G => 2,
        };

        // Walk page table levels
        for lvl in (lookup_depth + 1..5).rev() {
            let entry = (*current_pt)
                .table
                .get_unchecked_mut(addr_extract_index(target, lvl));

            let ptr = entry.ptr();
            match entry.permission() {
                // Still not on target and already seeing an empty entry?
                // No worries! Let's create one (allocates).
                Permission::Empty => {
                    // Increase children count
                    (*current_pt).childen += 1;

                    let table = Box::into_raw(Box::new(paging::PtPointedData {
                        pt: PageTable::default(),
                    }));

                    core::ptr::write(entry, PtEntry::new(table, Permission::Node));
                    current_pt = table as _;
                }
                // Continue walking
                Permission::Node => current_pt = ptr as _,

                // There is a page entry in place of a node
                _ => return Err(MapError::PageOnNode),
            }
        }

        let node = (*current_pt)
            .table
            .get_unchecked_mut(addr_extract_index(target, lookup_depth));

        // Check that the target entry is not mapped yet
        if node.permission() != Permission::Empty {
            return Err(MapError::AlreadyMapped);
        }

        // Write entry
        (*current_pt).childen += 1;
        core::ptr::write(node, PtEntry::new(host.cast(), perm));

        Ok(())
    }

    /// Unmaps pages from VM's memory
    ///
    /// An error here only means there was no entry to unmap;
    /// in most cases it can simply be ignored.
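    ///
    /// # Example
    /// A small sketch (hypothetical usage): unmap an address that may or may
    /// not be mapped, ignoring the "nothing there" case:
    /// ```ignore
    /// // NothingToUnmap is not fatal; discard it if absence is acceptable.
    /// let _ = memory.unmap(0x1000);
    /// ```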
    #[cfg(feature = "alloc")]
    pub fn unmap(&mut self, addr: u64) -> Result<(), NothingToUnmap> {
        let mut current_pt = self.root_pt;
        let mut page_tables = [core::ptr::null_mut(); 5];

        // Walk page table in reverse
        for lvl in (0..5).rev() {
            let entry = unsafe {
                (*current_pt)
                    .table
                    .get_unchecked_mut(addr_extract_index(addr, lvl))
            };

            let ptr = entry.ptr();
            match entry.permission() {
                // Nothing is there, throw an error, not critical!
                Permission::Empty => return Err(NothingToUnmap),

                // Node: save it to the visited page tables and continue walking
                Permission::Node => {
                    page_tables[lvl as usize] = entry;
                    current_pt = ptr as _
                }

                // Page entry: zero it out!
                // A zeroed page entry is a completely valid entry with
                // empty permission - no UB here!
                _ => unsafe {
                    core::ptr::write_bytes(entry, 0, 1);
                    break;
                },
            }
        }

        // Now walk the visited page tables in order
        for entry in page_tables.into_iter() {
            // Level not visited, skip.
            if entry.is_null() {
                continue;
            }

            unsafe {
                let children = &mut (*(*entry).ptr()).pt.childen;
                *children -= 1; // Decrease children count

                // If there are no children, deallocate.
                if *children == 0 {
                    let _ = Box::from_raw((*entry).ptr() as *mut PageTable);

                    // Zero the visited entry
                    core::ptr::write_bytes(entry, 0, 1);
                } else {
                    break;
                }
            }
        }

        Ok(())
    }

    /// Load value from an address
    ///
    /// # Safety
    /// Applies same conditions as for [`core::ptr::copy_nonoverlapping`]
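    ///
    /// # Example
    /// A sketch (hypothetical usage; `memory` has a mapped readable page at
    /// 0x1000 and `handler` is any [`HandlePageFault`] implementor): read
    /// 4 bytes into a local buffer:
    /// ```ignore
    /// let mut buf = [0u8; 4];
    /// unsafe {
    ///     memory
    ///         .load(0x1000, buf.as_mut_ptr(), buf.len(), &mut handler)
    ///         .expect("load access error");
    /// }
    /// ```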
    pub unsafe fn load(
        &mut self,
        addr: u64,
        target: *mut u8,
        count: usize,
        traph: &mut impl HandlePageFault,
    ) -> Result<(), LoadError> {
        self.memory_access(
            MemoryAccessReason::Load,
            addr,
            target,
            count,
            perm_check::readable,
            |src, dst, count| core::ptr::copy_nonoverlapping(src, dst, count),
            traph,
        )
        .map_err(LoadError)
    }

    /// Store value to an address
    ///
    /// # Safety
    /// Applies same conditions as for [`core::ptr::copy_nonoverlapping`]
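    ///
    /// # Example
    /// A sketch (hypothetical usage) mirroring [`Self::load`] with the copy
    /// direction reversed: write 4 bytes to guest address 0x1000:
    /// ```ignore
    /// let data = [1u8, 2, 3, 4];
    /// unsafe {
    ///     memory
    ///         .store(0x1000, data.as_ptr(), data.len(), &mut handler)
    ///         .expect("store access error");
    /// }
    /// ```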
    pub unsafe fn store(
        &mut self,
        addr: u64,
        source: *const u8,
        count: usize,
        traph: &mut impl HandlePageFault,
    ) -> Result<(), StoreError> {
        self.memory_access(
            MemoryAccessReason::Store,
            addr,
            source.cast_mut(),
            count,
            perm_check::writable,
            |dst, src, count| core::ptr::copy_nonoverlapping(src, dst, count),
            traph,
        )
        .map_err(StoreError)
    }

    // Everyone behold, the holy function, the god of HBVM memory accesses!

    /// Split an address into pages, check their permissions and feed pointers
    /// with offset to a specified function.
    ///
    /// If a page is not found, execute the page fault trap handler.
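    ///
    /// For instance (hypothetical numbers), a 6000-byte access starting at
    /// in-page offset 0xF00 of a 4 KiB page reaches `action` in three steps:
    /// 256 bytes (to the end of the first page), 4096 bytes (one full page),
    /// then the remaining 1648 bytes.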
    #[allow(clippy::too_many_arguments)] // Silence peasant
    fn memory_access(
        &mut self,
        reason: MemoryAccessReason,
        src: u64,
        mut dst: *mut u8,
        len: usize,
        permission_check: fn(Permission) -> bool,
        action: fn(*mut u8, *mut u8, usize),
        traph: &mut impl HandlePageFault,
    ) -> Result<(), u64> {
        // Create a new splitter
        let mut pspl = AddrPageLookuper::new(src, len, self.root_pt);
        loop {
            match pspl.next() {
                // Page is found
                Some(Ok(AddrPageLookupOk {
                    vaddr,
                    ptr,
                    size,
                    perm,
                })) => {
                    if !permission_check(perm) {
                        return Err(vaddr);
                    }

                    // Perform specified memory action and bump destination pointer
                    action(ptr, dst, size);
                    dst = unsafe { dst.add(size) };
                }
                // No page found
                Some(Err(AddrPageLookupError { addr, size })) => {
                    // Attempt to execute the page fault handler
                    if traph.page_fault(reason, self, addr, size, dst) {
                        // Shift the splitter address
                        pspl.bump(size);

                        // Bump dst pointer
                        dst = unsafe { dst.add(size as _) };
                    } else {
                        return Err(addr); // Unhandleable, VM will yield.
                    }
                }
                // No remaining pages, we are done!
                None => return Ok(()),
            }
        }
    }
}

/// Successful result of an address split
struct AddrPageLookupOk {
    /// Virtual address
    vaddr: u64,

    /// Pointer to the start of the region to perform the operation on
    ptr: *mut u8,

    /// Size to the end of page / end of desired size
    size: usize,

    /// Page permission
    perm: Permission,
}

/// Erroneous address split result
struct AddrPageLookupError {
    /// Address of failure
    addr: u64,

    /// Requested page size
    size: PageSize,
}

/// Address splitter into pages
struct AddrPageLookuper {
    /// Current address
    addr: u64,

    /// Size left
    size: usize,

    /// Page table
    pagetable: *const PageTable,
}

impl AddrPageLookuper {
    /// Create a new page lookuper
    #[inline]
    pub const fn new(addr: u64, size: usize, pagetable: *const PageTable) -> Self {
        Self {
            addr,
            size,
            pagetable,
        }
    }

    /// Bump the address by the given page size
    fn bump(&mut self, page_size: PageSize) {
        self.addr += page_size as u64;
        self.size = self.size.saturating_sub(page_size as _);
    }
}

impl Iterator for AddrPageLookuper {
    type Item = Result<AddrPageLookupOk, AddrPageLookupError>;

    fn next(&mut self) -> Option<Self::Item> {
        // The end, everything is fine
        if self.size == 0 {
            return None;
        }

        let (base, perm, size, offset) = 'a: {
            let mut current_pt = self.pagetable;

            // Walk the page table
            for lvl in (0..5).rev() {
                // Get an entry
                unsafe {
                    let entry = (*current_pt)
                        .table
                        .get_unchecked(addr_extract_index(self.addr, lvl));

                    let ptr = entry.ptr();
                    match entry.permission() {
                        // No page → page fault
                        Permission::Empty => {
                            return Some(Err(AddrPageLookupError {
                                addr: self.addr,
                                size: PageSize::from_lvl(lvl)?,
                            }))
                        }
                        // Node → proceed walking
                        Permission::Node => current_pt = ptr as _,

                        // Leaf → return relevant data
                        perm => {
                            break 'a (
                                // Pointer in host memory
                                ptr as *mut u8,
                                perm,
                                PageSize::from_lvl(lvl)?,
                                // In-page offset
                                addr_extract_index(self.addr, lvl),
                            );
                        }
                    }
                }
            }

            return None; // Reached the end (should not happen)
        };

        // Get available byte count in the selected page with offset
        let avail = (size as usize - offset).clamp(0, self.size);
        self.bump(size);

        Some(Ok(AddrPageLookupOk {
            vaddr: self.addr,
            ptr: unsafe { base.add(offset) }, // Pointer to the start of the region
            size: avail,
            perm,
        }))
    }
}

/// Extract index in page table on specified level
///
/// The level shall not be larger than 4, otherwise
/// the output of the function is unspecified (yes, it can also panic :)
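///
/// For example, level 0 selects bits 12..20 of the address:
/// `addr_extract_index(0x12_3000, 0) == 0x23`, while level 1 selects
/// bits 20..28: `addr_extract_index(0x12_3000, 1) == 0x1`.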
pub fn addr_extract_index(addr: u64, lvl: u8) -> usize {
    debug_assert!(lvl <= 4);
    usize::try_from((addr >> (lvl * 8 + 12)) & ((1 << 8) - 1)).expect("?conradluget a better CPU")
}

/// Page size
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum PageSize {
    /// 4 KiB page (on level 0)
    Size4K = 4096,

    /// 2 MiB page (on level 1)
    Size2M = 1024 * 1024 * 2,

    /// 1 GiB page (on level 2)
    Size1G = 1024 * 1024 * 1024,
}

impl PageSize {
    /// Convert page table level to size of page
    const fn from_lvl(lvl: u8) -> Option<Self> {
        match lvl {
            0 => Some(PageSize::Size4K),
            1 => Some(PageSize::Size2M),
            2 => Some(PageSize::Size1G),
            _ => None,
        }
    }
}

/// Unhandled load access trap
#[derive(Clone, Copy, Display, Debug, PartialEq, Eq)]
#[display(fmt = "Load access error at address {_0:#x}")]
pub struct LoadError(u64);

/// Unhandled store access trap
#[derive(Clone, Copy, Display, Debug, PartialEq, Eq)]
#[display(fmt = "Store access error at address {_0:#x}")]
pub struct StoreError(u64);

/// There was no entry in page table to unmap
///
/// No worry, don't panic, nothing bad has happened,
/// but if you are 120% sure there should be something,
/// double-check your addresses.
#[derive(Clone, Copy, Display, Debug)]
#[display(fmt = "There was no entry to unmap")]
pub struct NothingToUnmap;

/// Reason to access memory
#[derive(Clone, Copy, Display, Debug, PartialEq, Eq)]
pub enum MemoryAccessReason {
    Load,
    Store,
}

impl From<LoadError> for VmRunError {
    fn from(value: LoadError) -> Self {
        Self::LoadAccessEx(value.0)
    }
}

impl From<StoreError> for VmRunError {
    fn from(value: StoreError) -> Self {
        Self::StoreAccessEx(value.0)
    }
}

/// Mapping error
#[derive(Clone, Copy, Display, Debug, PartialEq, Eq)]
pub enum MapError {
    /// Entry was already mapped
    #[display(fmt = "There is already a page mapped on specified address")]
    AlreadyMapped,

    /// While walking the tables, a leaf page entry was
    /// encountered where a node was expected.
    #[display(fmt = "There was a page mapped on the way instead of node")]
    PageOnNode,
}

/// Permission checks
pub mod perm_check {
    use super::paging::Permission;

    /// Page is readable
    #[inline(always)]
    pub fn readable(perm: Permission) -> bool {
        matches!(
            perm,
            Permission::Readonly | Permission::Write | Permission::Exec
        )
    }

    /// Page is writable
    #[inline(always)]
    pub fn writable(perm: Permission) -> bool {
        perm == Permission::Write
    }
}
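
// A hedged sketch of a map → store → load round trip (not part of the
// original file). The `HandlePageFault` impl signature below is inferred
// from the call site in `memory_access` and may not match the actual trait
// in `pfhandler`; it also assumes the `alloc` feature is enabled. Treat it
// as an illustration, not a drop-in test.
#[cfg(test)]
mod round_trip_sketch {
    use super::*;

    /// Handler that refuses to resolve any page fault.
    struct NoTrap;

    impl HandlePageFault for NoTrap {
        fn page_fault(
            &mut self,
            _reason: MemoryAccessReason,
            _memory: &mut Memory,
            _vaddr: u64,
            _size: PageSize,
            _dataptr: *mut u8,
        ) -> bool {
            false // Unhandled: the access returns an error instead.
        }
    }

    #[test]
    fn map_store_load() {
        let mut memory = Memory::default();
        // Leak the page so the host allocation outlives the mapping.
        let host_page = Box::leak(Box::new([0u8; 4096]));

        unsafe {
            // Map a single writable 4 KiB page at guest address 0x1000.
            memory
                .map(
                    host_page.as_mut_ptr(),
                    0x1000,
                    Permission::Write,
                    PageSize::Size4K,
                )
                .expect("map failed");

            // Store four bytes, then load them back.
            let data = [0xAB_u8, 0xCD, 0xEF, 0x01];
            memory
                .store(0x1000, data.as_ptr(), data.len(), &mut NoTrap)
                .expect("store failed");

            let mut readback = [0u8; 4];
            memory
                .load(0x1000, readback.as_mut_ptr(), readback.len(), &mut NoTrap)
                .expect("load failed");

            assert_eq!(readback, data);
            memory.unmap(0x1000).expect("unmap failed");
        }
    }
}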