//! Program memory implementation
pub mod paging;
use self::paging::{PageTable, Permission, PtEntry};
use super::{trap::HandleTrap, VmRunError};
use alloc::boxed::Box;
use core::mem::MaybeUninit;
use derive_more::Display;
/// HoleyBytes virtual memory
#[derive(Clone, Debug)]
pub struct Memory {
/// Root page table
root_pt: *mut PageTable,
}
impl Default for Memory {
fn default() -> Self {
Self {
root_pt: Box::into_raw(Box::default()),
}
}
}
impl Drop for Memory {
fn drop(&mut self) {
let _ = unsafe { Box::from_raw(self.root_pt) };
}
}
impl Memory {
    // HACK: Just for allocation testing, will be removed when proper memory
    // interfaces are implemented.
pub fn insert_test_page(&mut self) {
unsafe {
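            // Allocate a zeroed 4 KiB page and mark its first 10 bytes with the
            // value 69 so test reads through the VM hit a recognizable pattern.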
let mut entry = PtEntry::new(
{
let layout = alloc::alloc::Layout::from_size_align_unchecked(4096, 4096);
let ptr = alloc::alloc::alloc_zeroed(layout);
if ptr.is_null() {
alloc::alloc::handle_alloc_error(layout);
}
core::ptr::write_bytes(ptr, 69, 10);
ptr.cast()
},
Permission::Write,
);
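            // Wrap the Write leaf in four levels of Node entries (index 0 each),
            // so the five-level walk from `root_pt` reaches the test page at level 0.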
for _ in 0..4 {
let mut pt = Box::<PageTable>::default();
pt[0] = entry;
entry = PtEntry::new(Box::into_raw(pt) as _, Permission::Node);
}
(*self.root_pt)[0] = entry;
}
}
/// Load value from an address
    ///
    /// # Safety
    /// The same conditions as for [`core::ptr::copy_nonoverlapping`] apply
pub unsafe fn load(
&mut self,
addr: u64,
target: *mut u8,
count: usize,
traph: &mut impl HandleTrap,
) -> Result<(), LoadError> {
self.memory_access(
MemoryAccessReason::Load,
addr,
target,
count,
|perm| {
matches!(
perm,
Permission::Readonly | Permission::Write | Permission::Exec
)
},
|src, dst, count| core::ptr::copy_nonoverlapping(src, dst, count),
traph,
)
.map_err(LoadError)
}
/// Store value to an address
    ///
    /// # Safety
    /// The same conditions as for [`core::ptr::copy_nonoverlapping`] apply
pub unsafe fn store(
&mut self,
addr: u64,
source: *const u8,
count: usize,
traph: &mut impl HandleTrap,
) -> Result<(), StoreError> {
self.memory_access(
MemoryAccessReason::Store,
addr,
source.cast_mut(),
count,
|perm| perm == Permission::Write,
|dst, src, count| core::ptr::copy_nonoverlapping(src, dst, count),
traph,
)
.map_err(StoreError)
}
/// Copy a block of memory
    ///
    /// # Safety
    /// - Same as for [`Self::load`] and [`Self::store`]
    /// - Your faith in the gods of UB
    /// - Addr-san claims it's fine but who knows if she isn't lying :ferrisSus:
pub unsafe fn block_copy(
&mut self,
src: u64,
dst: u64,
count: usize,
traph: &mut impl HandleTrap,
) -> Result<(), BlkCopyError> {
        // Yeah, I know it is possible to do this more efficiently, but I am too lazy.
        const STACK_BUFFER_SIZE: usize = 512;

        // Decide whether to use a stack-allocated buffer or to heap-allocate.
        // Deallocation is likewise decided by size at the end of the function.
let mut buf = MaybeUninit::<[u8; STACK_BUFFER_SIZE]>::uninit();
let buf = if count <= STACK_BUFFER_SIZE {
buf.as_mut_ptr().cast()
} else {
unsafe {
let layout = core::alloc::Layout::from_size_align_unchecked(count, 1);
let ptr = alloc::alloc::alloc(layout);
if ptr.is_null() {
alloc::alloc::handle_alloc_error(layout);
}
ptr
}
};
// Perform memory block transfer
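        // The transfer runs inside a closure so that an early `?` return still
        // falls through to the deallocation check below.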
let status = (|| {
// Load to buffer
self.memory_access(
MemoryAccessReason::Load,
src,
buf,
count,
|perm| {
matches!(
perm,
Permission::Readonly | Permission::Write | Permission::Exec
)
},
|src, dst, count| core::ptr::copy(src, dst, count),
traph,
)
.map_err(|addr| BlkCopyError {
access_reason: MemoryAccessReason::Load,
addr,
})?;
// Store from buffer
self.memory_access(
MemoryAccessReason::Store,
dst,
buf,
count,
|perm| perm == Permission::Write,
|dst, src, count| core::ptr::copy(src, dst, count),
traph,
)
.map_err(|addr| BlkCopyError {
access_reason: MemoryAccessReason::Store,
addr,
})?;
Ok::<_, BlkCopyError>(())
})();
        // Deallocate the buffer if it was heap-allocated
if count > STACK_BUFFER_SIZE {
alloc::alloc::dealloc(
buf,
core::alloc::Layout::from_size_align_unchecked(count, 1),
);
}
status
}
    /// Split the address range into pages, check their permissions, and feed
    /// pointers with offsets to the specified function.
    ///
    /// If a page is not found, execute the page fault trap handler.
#[allow(clippy::too_many_arguments)] // Silence peasant
fn memory_access(
&mut self,
reason: MemoryAccessReason,
src: u64,
mut dst: *mut u8,
len: usize,
permission_check: fn(Permission) -> bool,
action: fn(*mut u8, *mut u8, usize),
traph: &mut impl HandleTrap,
) -> Result<(), u64> {
let mut pspl = AddrSplitter::new(src, len, self.root_pt);
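        // Iterate over the range page by page: check the permission of each mapped
        // chunk and run `action` on it; on an unmapped page, defer to the trap
        // handler and skip past the page if it reports the fault as handled.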
loop {
match pspl.next() {
// Page found
Some(Ok(AddrSplitOk {
vaddr,
ptr,
size,
perm,
})) => {
if !permission_check(perm) {
return Err(vaddr);
}
// Perform memory action and bump dst pointer
action(ptr, dst, size);
dst = unsafe { dst.add(size) };
}
Some(Err(AddrSplitError { addr, size })) => {
// Execute page fault handler
if traph.page_fault(reason, self, addr, size, dst) {
// Shift the splitter address
pspl.bump(size);
// Bump dst pointer
dst = unsafe { dst.add(size as _) };
} else {
return Err(addr); // Unhandleable
}
}
None => return Ok(()),
}
}
}
}
/// Result from address split
struct AddrSplitOk {
/// Virtual address
vaddr: u64,
    /// Pointer to the start of the region on which to perform the operation
ptr: *mut u8,
    /// Size to the end of the page, or to the end of the requested range, whichever is smaller
size: usize,
/// Page permission
perm: Permission,
}
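/// Error from address split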
struct AddrSplitError {
/// Address of failure
addr: u64,
/// Requested page size
size: PageSize,
}
/// Address splitter into pages
struct AddrSplitter {
/// Current address
addr: u64,
/// Size left
size: usize,
/// Page table
pagetable: *const PageTable,
}
impl AddrSplitter {
/// Create a new page splitter
pub const fn new(addr: u64, size: usize, pagetable: *const PageTable) -> Self {
Self {
addr,
size,
pagetable,
}
}
    /// Bump the address by the given page size
fn bump(&mut self, page_size: PageSize) {
self.addr += page_size as u64;
self.size = self.size.saturating_sub(page_size as _);
}
}
impl Iterator for AddrSplitter {
type Item = Result<AddrSplitOk, AddrSplitError>;
fn next(&mut self) -> Option<Self::Item> {
// The end, everything is fine
if self.size == 0 {
return None;
}
let (base, perm, size, offset) = 'a: {
let mut current_pt = self.pagetable;
// Walk the page table
for lvl in (0..5).rev() {
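                // Each level consumes 9 bits of the virtual address above the
                // 12-bit in-page offset, so level `lvl` is indexed by bits
                // `lvl * 9 + 12 .. lvl * 9 + 21` of the address.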
// Get an entry
unsafe {
let entry = (*current_pt).get_unchecked(
usize::try_from((self.addr >> (lvl * 9 + 12)) & ((1 << 9) - 1))
.expect("?conradluget a better CPU"),
);
let ptr = entry.ptr();
match entry.permission() {
// No page → page fault
Permission::Empty => {
return Some(Err(AddrSplitError {
addr: self.addr,
size: PageSize::from_lvl(lvl)?,
}))
}
                        // Node → proceed walking
                        Permission::Node => current_pt = ptr as _,
                        // Leaf → return relevant data
perm => {
break 'a (
// Pointer in host memory
ptr as *mut u8,
perm,
PageSize::from_lvl(lvl)?,
// In-page offset
self.addr as usize & ((1 << (lvl * 9 + 12)) - 1),
);
}
}
}
}
return None; // Reached the end (should not happen)
};
        // Get the available byte count in the selected page, starting from the offset
        let avail = (size as usize - offset).clamp(0, self.size);
        let vaddr = self.addr;
        self.bump(size);
        Some(Ok(AddrSplitOk {
            vaddr,
ptr: unsafe { base.add(offset) }, // Return pointer to the start of region
size: avail,
perm,
}))
}
}
/// Page size
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum PageSize {
/// 4 KiB page (on level 0)
Size4K = 4096,
/// 2 MiB page (on level 1)
Size2M = 1024 * 1024 * 2,
/// 1 GiB page (on level 2)
Size1G = 1024 * 1024 * 1024,
}
impl PageSize {
/// Convert page table level to size of page
fn from_lvl(lvl: u8) -> Option<Self> {
match lvl {
0 => Some(PageSize::Size4K),
1 => Some(PageSize::Size2M),
2 => Some(PageSize::Size1G),
_ => None,
}
}
}
/// Unhandled load access trap
#[derive(Clone, Copy, Display, Debug, PartialEq, Eq)]
pub struct LoadError(u64);
/// Unhandled store access trap
#[derive(Clone, Copy, Display, Debug, PartialEq, Eq)]
pub struct StoreError(u64);
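/// Reason for a memory access, passed to page fault traps and used when
/// converting access errors into [`VmRunError`]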
#[derive(Clone, Copy, Display, Debug, PartialEq, Eq)]
pub enum MemoryAccessReason {
Load,
Store,
}
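/// Error from an unhandled block copy access, carrying the failing access
/// reason and address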
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct BlkCopyError {
access_reason: MemoryAccessReason,
addr: u64,
}
impl From<BlkCopyError> for VmRunError {
fn from(value: BlkCopyError) -> Self {
match value.access_reason {
MemoryAccessReason::Load => Self::LoadAccessEx(value.addr),
MemoryAccessReason::Store => Self::StoreAccessEx(value.addr),
}
}
}
impl From<LoadError> for VmRunError {
fn from(value: LoadError) -> Self {
Self::LoadAccessEx(value.0)
}
}
impl From<StoreError> for VmRunError {
fn from(value: StoreError) -> Self {
Self::StoreAccessEx(value.0)
}
}