//! holey-bytes/hbvm/src/vm/mem/mod.rs — page-table-backed VM memory.
// HACK: This is temporary implementation so we can have memory instructions working
mod paging;
use self::paging::{PageTable, Permission, PtEntry};
use alloc::boxed::Box;
use core::mem::MaybeUninit;
use {crate::vm::value::Value, ma_size::MemAccessSize};
/// Virtual memory of the VM, backed by a 5-level page table.
// NOTE(review): `Clone` is derived but `Drop` frees `root_pt`, so cloning a
// `Memory` and dropping both copies double-frees the root page table (and the
// clone shares all child tables). Confirm whether `Clone` is actually needed.
#[derive(Clone, Debug)]
pub struct Memory {
    // Owned pointer to the root page table; allocated in `Default::default`
    // via `Box::into_raw` and reclaimed in `Drop`.
    root_pt: *mut PageTable,
}
impl Default for Memory {
fn default() -> Self {
Self {
root_pt: Box::into_raw(Box::default()),
}
}
}
impl Drop for Memory {
    /// Frees the root page table.
    // NOTE(review): only the root table itself is reclaimed here — child
    // tables/pages reachable through its entries are presumably handled (or
    // leaked) elsewhere; confirm.
    fn drop(&mut self) {
        // SAFETY: `root_pt` was produced by `Box::into_raw` in `default`
        // and is never freed elsewhere, so reconstituting the box here is
        // sound and frees it exactly once.
        unsafe {
            drop(Box::from_raw(self.root_pt));
        }
    }
}
impl Memory {
// HACK: Just for allocation testing, will be removed when proper memory interfaces
// implemented.
pub fn insert_test_page(&mut self) {
unsafe {
let mut entry = PtEntry::new(
{
let layout = alloc::alloc::Layout::from_size_align_unchecked(4096, 4096);
let ptr = alloc::alloc::alloc(layout);
if ptr.is_null() {
alloc::alloc::handle_alloc_error(layout);
}
core::ptr::write_bytes(ptr, 69, 10);
ptr.cast()
},
Permission::Write,
);
for _ in 0..4 {
let mut pt = Box::<PageTable>::default();
pt[0] = entry;
entry = PtEntry::new(Box::into_raw(pt) as _, Permission::Node);
}
self.root_pt_mut()[0] = entry;
}
}
/// Load value from an address
pub fn load<S: MemAccessSize>(&self, addr: u64) -> Option<Value> {
2023-06-11 06:47:33 -05:00
let lookup = self.page_lookup(addr)?;
match lookup.perm {
Permission::Empty | Permission::Node => None,
Permission::Readonly | Permission::Write | Permission::Exec => {
let mut value = MaybeUninit::<Value>::zeroed();
unsafe {
core::ptr::copy_nonoverlapping::<u8>(lookup.ptr, value.as_mut_ptr().cast(), 1);
Some(value.assume_init())
}
}
}
}
/// Store value to an address
pub fn store<S: MemAccessSize>(&mut self, addr: u64, value: Value) -> Result<(), ()> {
let lookup = self.page_lookup(addr).ok_or(())?;
match lookup.perm {
Permission::Write => {
unsafe {
core::ptr::copy_nonoverlapping::<u8>(
(&value as *const Value).cast(),
lookup.ptr,
1,
)
};
Ok(())
}
_ => Err(()),
}
}
#[inline]
pub fn root_pt(&self) -> &PageTable {
unsafe { &*self.root_pt }
}
#[inline]
pub fn root_pt_mut(&mut self) -> &mut PageTable {
unsafe { &mut *self.root_pt }
}
/// Resolve page and offset from the page
fn page_lookup(&self, addr: u64) -> Option<PageLookupResult> {
2023-06-11 06:26:16 -05:00
let mut current_pt = self.root_pt;
for lvl in (0..5).rev() {
unsafe {
let entry = (*current_pt).get_unchecked(
usize::try_from((addr >> (lvl * 9 + 12)) & ((1 << 9) - 1))
.expect("?conradluget a better CPU"),
);
let ptr = entry.ptr();
match entry.permission() {
Permission::Empty => return None,
Permission::Node => current_pt = ptr as _,
2023-06-11 06:47:33 -05:00
_ if lvl > 2 => return None,
perm => {
return Some(PageLookupResult {
perm,
ptr: ptr as _,
size: match lvl {
0 => 4096,
1 => 1024_usize.pow(2) * 2,
2 => 4096_usize.pow(3),
_ => unreachable!(),
},
offset: addr as usize & ((1 << 12) - 1),
})
2023-06-11 06:26:16 -05:00
}
}
}
}
None
}
2023-06-11 06:47:33 -05:00
}
2023-06-11 06:26:16 -05:00
2023-06-11 06:47:33 -05:00
struct PageLookupResult {
perm: Permission,
ptr: *mut u8,
size: usize,
offset: usize,
2023-06-11 06:26:16 -05:00
}
// Generates the `ma_size` module: one zero-sized marker type per memory
// access width, each implementing `MemAccessSize` with the given byte count.
macro_rules! size_markers {
    ($($name:ident = $size:expr),* $(,)?) => {
        pub mod ma_size {
            /// # Safety
            /// Implementor has to assure that [`MemAccessSize::BYTES`] won't be larger than
            /// size of [`Value`]
            pub unsafe trait MemAccessSize {
                const BYTES: usize;
            }
            $(
                // Zero-sized marker selecting a `$size`-byte access.
                pub struct $name;
                unsafe impl MemAccessSize for $name {
                    const BYTES: usize = $size;
                }
            )*
        }
    };
}
// Access-width markers used by `Memory::load`/`Memory::store`:
// 1, 2, 4, and 8 bytes.
size_markers! {
    Byte = 1,
    Doublet = 2,
    Quadlet = 4,
    Octlet = 8,
}