// HACK: This is temporary implementation so we can have memory instructions working
use {
|
|
|
|
crate::vm::value::Value, alloc::boxed::Box, core::mem::MaybeUninit, hashbrown::HashMap,
|
|
|
|
ma_size::MemAccessSize,
|
|
|
|
};
|
|
|
|
|
/// Size in bytes of a single memory page (8 KiB — a power of two, so
/// addresses split into page index and offset with shift/mask).
pub const PAGE_SIZE: usize = 8192;
|
2023-06-07 17:25:38 -05:00
|
|
|
|
|
|
|
/// Sparse, paged guest memory backing the VM's memory instructions.
///
/// Only pages that have been inserted into the map are addressable:
/// `load` returns `None` and `store` returns `Err(())` for unmapped pages.
#[derive(Clone, Debug, Default)]
pub struct Memory {
    // Page table: page index (address >> page-offset bits) -> boxed page data.
    pages: HashMap<u64, Box<[u8; PAGE_SIZE]>>,
}
|
|
|
|
|
|
|
|
impl Memory {
|
|
|
|
// HACK: Just for allocation testing, will be removed when proper memory interfaces
|
|
|
|
// implemented.
|
|
|
|
pub fn insert_test_page(&mut self) {
|
|
|
|
self.pages.insert(0, unsafe {
|
2023-06-08 16:23:23 -05:00
|
|
|
use alloc::alloc::{alloc_zeroed, handle_alloc_error, Layout};
|
|
|
|
let layout = Layout::new::<[u8; PAGE_SIZE]>();
|
2023-06-07 17:25:38 -05:00
|
|
|
let ptr = alloc_zeroed(layout);
|
|
|
|
if ptr.is_null() {
|
|
|
|
handle_alloc_error(layout);
|
|
|
|
}
|
|
|
|
Box::from_raw(ptr.cast())
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn load<S: MemAccessSize>(&self, addr: u64) -> Option<Value> {
|
|
|
|
let (page, offset) = split_addr(addr);
|
2023-06-08 16:23:23 -05:00
|
|
|
if offset + S::BYTES <= PAGE_SIZE - 1 {
|
2023-06-07 17:25:38 -05:00
|
|
|
let mut value = MaybeUninit::<Value>::zeroed();
|
|
|
|
unsafe {
|
|
|
|
core::ptr::copy_nonoverlapping(
|
2023-06-08 16:23:23 -05:00
|
|
|
self.pages.get(&page)?.as_ptr().add(offset),
|
2023-06-07 17:25:38 -05:00
|
|
|
value.as_mut_ptr().cast(),
|
2023-06-08 16:23:23 -05:00
|
|
|
S::BYTES,
|
2023-06-07 17:25:38 -05:00
|
|
|
);
|
|
|
|
Some(value.assume_init())
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn store<S: MemAccessSize>(&mut self, addr: u64, value: Value) -> Result<(), ()> {
|
|
|
|
let (page, offset) = split_addr(addr);
|
2023-06-08 16:23:23 -05:00
|
|
|
if offset + S::BYTES <= PAGE_SIZE - 1 {
|
2023-06-07 17:25:38 -05:00
|
|
|
unsafe {
|
|
|
|
core::ptr::copy_nonoverlapping(
|
|
|
|
(&value as *const Value).cast::<u8>(),
|
|
|
|
self.pages
|
|
|
|
.get_mut(&page)
|
|
|
|
.ok_or(())?
|
|
|
|
.as_mut_ptr()
|
2023-06-08 16:23:23 -05:00
|
|
|
.add(offset),
|
|
|
|
S::BYTES,
|
2023-06-07 17:25:38 -05:00
|
|
|
)
|
|
|
|
};
|
|
|
|
Ok(())
|
|
|
|
} else {
|
|
|
|
Err(())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#[inline]
|
2023-06-08 16:23:23 -05:00
|
|
|
pub const fn split_addr(addr: u64) -> (u64, usize) {
|
|
|
|
(addr >> PAGE_SIZE.count_ones(), (addr as usize & PAGE_SIZE))
|
2023-06-07 17:25:38 -05:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Generates the `ma_size` module: one zero-sized marker type per access
/// width, each implementing [`ma_size::MemAccessSize`].
macro_rules! size_markers {
    // Accepts a comma-separated list of `Name = byte_count` pairs,
    // with an optional trailing comma.
    ($($name:ident = $size:expr),* $(,)?) => {
        pub mod ma_size {
            /// # Safety
            /// Implementor has to assure that [`MemAccessSize::BYTES`] won't be larger than
            /// size of [`Value`]
            pub unsafe trait MemAccessSize {
                const BYTES: usize;
            }

            $(
                /// Zero-sized marker selecting a fixed memory-access width.
                pub struct $name;
                // SAFETY: upheld by the macro caller — the sizes passed in
                // must not exceed the size of `Value` (see trait contract).
                unsafe impl MemAccessSize for $name {
                    const BYTES: usize = $size;
                }
            )*
        }
    };
}
|
|
|
|
|
|
|
|
// Access-width markers used as the `S` parameter of `Memory::load` /
// `Memory::store`: Byte = 1, Doublet = 2, Quadlet = 4, Octlet = 8 bytes.
size_markers! {
    Byte = 1,
    Doublet = 2,
    Quadlet = 4,
    Octlet = 8,
}
|