//! Program memory implementation

pub mod paging;

mod pfhandler;

pub use pfhandler::HandlePageFault;

use {
    self::paging::{PageTable, Permission, PtEntry},
    super::VmRunError,
    alloc::boxed::Box,
    core::mem::MaybeUninit,
    derive_more::Display,
};

/// HoleyBytes virtual memory
#[derive(Debug)]
pub struct Memory {
    /// Root page table
    root_pt: *mut PageTable,
}

impl Default for Memory {
    fn default() -> Self {
        Self {
            root_pt: Box::into_raw(Box::default()),
        }
    }
}

impl Drop for Memory {
    fn drop(&mut self) {
        let _ = unsafe { Box::from_raw(self.root_pt) };
    }
}

impl Memory {
    // HACK: Just for allocation testing, will be removed when proper memory
    // interfaces are implemented.
    pub fn insert_test_page(&mut self) {
        unsafe {
            let mut entry = PtEntry::new(
                {
                    let layout = alloc::alloc::Layout::from_size_align_unchecked(4096, 4096);
                    let ptr = alloc::alloc::alloc_zeroed(layout);
                    if ptr.is_null() {
                        alloc::alloc::handle_alloc_error(layout);
                    }

                    core::ptr::write_bytes(ptr, 69, 10);
                    ptr.cast()
                },
                Permission::Write,
            );

            for _ in 0..4 {
                let mut pt = Box::<PageTable>::default();
                pt.table[0] = entry;
                entry = PtEntry::new(Box::into_raw(pt) as _, Permission::Node);
            }

            (*self.root_pt).table[0] = entry;
        }
    }

    /// Maps host's memory into VM's memory
    ///
    /// # Safety
    /// `host` must point to a region of host memory that is at least
    /// `pagesize` bytes large and stays valid for as long as the
    /// mapping exists.
    pub unsafe fn map(
        &mut self,
        host: *mut u8,
        target: u64,
        perm: Permission,
        pagesize: PageSize,
    ) -> Result<(), MapError> {
        let mut current_pt = self.root_pt;

        // Page table level the leaf entry will be written to
        let target_lvl = match pagesize {
            PageSize::Size4K => 0,
            PageSize::Size2M => 1,
            PageSize::Size1G => 2,
        };

        // Walk the page table down from the root (level 4) to the target
        // level, allocating intermediate nodes as needed
        for lvl in (target_lvl + 1..5).rev() {
            let entry = (*current_pt)
                .table
                .get_unchecked_mut(addr_extract_index(target, lvl));

            let ptr = entry.ptr();
            match entry.permission() {
                // Empty entry → allocate a new page table node
                Permission::Empty => {
                    (*current_pt).childen += 1;
                    let table = Box::into_raw(Box::new(paging::PtPointedData {
                        pt: PageTable::default(),
                    }));

                    core::ptr::write(entry, PtEntry::new(table, Permission::Node));
                    current_pt = table as _;
                }
                // Node → descend
                Permission::Node => current_pt = ptr as _,
                // Anything else is a leaf already occupying the path
                _ => return Err(MapError::AlreadyMapped),
            }
        }

        // Write the leaf entry
        (*current_pt).childen += 1;
        core::ptr::write(
            (*current_pt)
                .table
                .get_unchecked_mut(addr_extract_index(target, target_lvl)),
            PtEntry::new(host.cast(), perm),
        );

        Ok(())
    }

    /// Unmaps pages from VM's memory
    pub fn unmap(&mut self, addr: u64) -> Result<(), NothingToUnmap> {
        let mut current_pt = self.root_pt;
        let mut page_tables = [core::ptr::null_mut(); 5];

        // Walk down, remembering the node entries visited on the way,
        // and erase the leaf
        for lvl in (0..5).rev() {
            let entry = unsafe {
                (*current_pt)
                    .table
                    .get_unchecked_mut(addr_extract_index(addr, lvl))
            };

            let ptr = entry.ptr();
            match entry.permission() {
                Permission::Empty => return Err(NothingToUnmap),
                Permission::Node => {
                    page_tables[lvl as usize] = entry;
                    current_pt = ptr as _
                }
                // Leaf → erase it and stop descending
                _ => unsafe {
                    core::ptr::write(entry, Default::default());
                    break;
                },
            }
        }

        // Walk back up, decrementing child counts and freeing page tables
        // that became empty
        for entry in page_tables.into_iter() {
            if entry.is_null() {
                continue;
            }

            unsafe {
                let children = &mut (*(*entry).ptr()).pt.childen;
                *children -= 1;

                if *children == 0 {
                    core::mem::drop(Box::from_raw((*entry).ptr() as *mut PageTable));
                    core::ptr::write(entry, Default::default());
                } else {
                    // Table is still referenced, so nothing above it changes
                    break;
                }
            }
        }

        Ok(())
    }

    /// Load value from an address
    ///
    /// # Safety
    /// Applies same conditions as for [`core::ptr::copy_nonoverlapping`]
    pub unsafe fn load(
        &mut self,
        addr: u64,
        target: *mut u8,
        count: usize,
        traph: &mut impl HandlePageFault,
    ) -> Result<(), LoadError> {
        self.memory_access(
            MemoryAccessReason::Load,
            addr,
            target,
            count,
            |perm| {
                matches!(
                    perm,
                    Permission::Readonly | Permission::Write | Permission::Exec
                )
            },
            |src, dst, count| core::ptr::copy_nonoverlapping(src, dst, count),
            traph,
        )
        .map_err(LoadError)
    }

    /// Store value to an address
    ///
    /// # Safety
    /// Applies same conditions as for [`core::ptr::copy_nonoverlapping`]
    pub unsafe fn store(
        &mut self,
        addr: u64,
        source: *const u8,
        count: usize,
        traph: &mut impl HandlePageFault,
    ) -> Result<(), StoreError> {
        self.memory_access(
            MemoryAccessReason::Store,
            addr,
            source.cast_mut(),
            count,
            |perm| perm == Permission::Write,
            |dst, src, count| core::ptr::copy_nonoverlapping(src, dst, count),
            traph,
        )
        .map_err(StoreError)
    }

    /// Copy a block of memory
    ///
    /// # Safety
    /// - Same as for [`Self::load`] and [`Self::store`]
    /// - Your faith in the gods of UB
    /// - Addr-san claims it's fine but who knows if she isn't lying :ferrisSus:
    pub unsafe fn block_copy(
        &mut self,
        src: u64,
        dst: u64,
        count: usize,
        traph: &mut impl HandlePageFault,
    ) -> Result<(), BlkCopyError> {
        // Yea, I know it is possible to do this more efficiently, but I am too lazy.

        const STACK_BUFFER_SIZE: usize = 512;

        // Decide whether to use a stack-allocated buffer or to heap-allocate one.
        // Deallocation is likewise decided by size at the end of the function.
        let mut buf = MaybeUninit::<[u8; STACK_BUFFER_SIZE]>::uninit();
        let buf = if count <= STACK_BUFFER_SIZE {
            buf.as_mut_ptr().cast()
        } else {
            unsafe {
                let layout = core::alloc::Layout::from_size_align_unchecked(count, 1);
                let ptr = alloc::alloc::alloc(layout);
                if ptr.is_null() {
                    alloc::alloc::handle_alloc_error(layout);
                }

                ptr
            }
        };

        // Perform memory block transfer
        let status = (|| {
            // Load to buffer
            self.memory_access(
                MemoryAccessReason::Load,
                src,
                buf,
                count,
                |perm| {
                    matches!(
                        perm,
                        Permission::Readonly | Permission::Write | Permission::Exec
                    )
                },
                |src, dst, count| core::ptr::copy(src, dst, count),
                traph,
            )
            .map_err(|addr| BlkCopyError {
                access_reason: MemoryAccessReason::Load,
                addr,
            })?;

            // Store from buffer
            self.memory_access(
                MemoryAccessReason::Store,
                dst,
                buf,
                count,
                |perm| perm == Permission::Write,
                |dst, src, count| core::ptr::copy(src, dst, count),
                traph,
            )
            .map_err(|addr| BlkCopyError {
                access_reason: MemoryAccessReason::Store,
                addr,
            })?;

            Ok::<_, BlkCopyError>(())
        })();

        // Deallocate the buffer if it was heap-allocated
        if count > STACK_BUFFER_SIZE {
            alloc::alloc::dealloc(
                buf,
                core::alloc::Layout::from_size_align_unchecked(count, 1),
            );
        }

        status
    }

    /// Split an address range into pages, check their permissions and feed
    /// the per-page pointers (with offset applied) to a specified function.
    ///
    /// If a page is not found, the page fault trap handler is executed.
    #[allow(clippy::too_many_arguments)] // Silence peasant
    fn memory_access(
        &mut self,
        reason: MemoryAccessReason,
        src: u64,
        mut dst: *mut u8,
        len: usize,
        permission_check: fn(Permission) -> bool,
        action: fn(*mut u8, *mut u8, usize),
        traph: &mut impl HandlePageFault,
    ) -> Result<(), u64> {
        let mut pspl = AddrPageLookuper::new(src, len, self.root_pt);
        loop {
            match pspl.next() {
                // Page found
                Some(Ok(AddrPageLookupOk {
                    vaddr,
                    ptr,
                    size,
                    perm,
                })) => {
                    if !permission_check(perm) {
                        return Err(vaddr);
                    }

                    // Perform memory action and bump dst pointer
                    action(ptr, dst, size);
                    dst = unsafe { dst.add(size) };
                }
                Some(Err(AddrPageLookupError { addr, size })) => {
                    // Execute page fault handler
                    if traph.page_fault(reason, self, addr, size, dst) {
                        // Shift the lookuper address
                        pspl.bump(size);

                        // Bump dst pointer
                        dst = unsafe { dst.add(size as _) };
                    } else {
                        return Err(addr); // Unhandleable
                    }
                }
                None => return Ok(()),
            }
        }
    }
}
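
// A minimal, hypothetical smoke test of the map → store → load → unmap
// round-trip; it is a sketch, not part of the original suite. It assumes
// the `HandlePageFault` trait has exactly the shape used by
// `memory_access` above and that tests run on a hosted target.
#[cfg(test)]
mod tests {
    use super::*;

    /// Page fault handler that refuses to handle anything
    struct NoTrap;
    impl HandlePageFault for NoTrap {
        fn page_fault(
            &mut self,
            _: MemoryAccessReason,
            _: &mut Memory,
            _: u64,
            _: PageSize,
            _: *mut u8,
        ) -> bool {
            false
        }
    }

    #[test]
    fn map_store_load_unmap_4k() {
        let mut memory = Memory::default();
        let mut page = [0u8; 4096];

        unsafe {
            // Map one writable 4 KiB page at VM address 0x2000
            memory
                .map(page.as_mut_ptr(), 0x2000, Permission::Write, PageSize::Size4K)
                .expect("mapping a fresh page should succeed");

            // Store into VM memory, then load the bytes back
            let src = [1u8, 2, 3, 4];
            let mut dst = [0u8; 4];
            memory
                .store(0x2000, src.as_ptr(), src.len(), &mut NoTrap)
                .expect("store to a writable page");
            memory
                .load(0x2000, dst.as_mut_ptr(), dst.len(), &mut NoTrap)
                .expect("load from a mapped page");
            assert_eq!(src, dst);

            // Unmapping twice must fail the second time
            memory.unmap(0x2000).expect("unmap a mapped page");
            assert!(memory.unmap(0x2000).is_err());
        }
    }
}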

/// Result of a successful address lookup
struct AddrPageLookupOk {
    /// Virtual address
    vaddr: u64,

    /// Pointer to the start of the region to operate on
    ptr: *mut u8,

    /// Size until the end of the page or of the requested region,
    /// whichever comes first
    size: usize,

    /// Page permission
    perm: Permission,
}

/// Failed address lookup: page not present
struct AddrPageLookupError {
    /// Address of failure
    addr: u64,

    /// Requested page size
    size: PageSize,
}

/// Splits an address range into pages
struct AddrPageLookuper {
    /// Current address
    addr: u64,

    /// Size left
    size: usize,

    /// Page table
    pagetable: *const PageTable,
}

impl AddrPageLookuper {
    /// Create a new page lookuper
    pub const fn new(addr: u64, size: usize, pagetable: *const PageTable) -> Self {
        Self {
            addr,
            size,
            pagetable,
        }
    }

    /// Bump the address by one page of the given size
    fn bump(&mut self, page_size: PageSize) {
        self.addr += page_size as u64;
        self.size = self.size.saturating_sub(page_size as _);
    }
}

impl Iterator for AddrPageLookuper {
    type Item = Result<AddrPageLookupOk, AddrPageLookupError>;

    fn next(&mut self) -> Option<Self::Item> {
        // The end, everything is fine
        if self.size == 0 {
            return None;
        }

        let (base, perm, size, offset) = 'a: {
            let mut current_pt = self.pagetable;

            // Walk the page table
            for lvl in (0..5).rev() {
                // Get an entry
                unsafe {
                    let entry = (*current_pt)
                        .table
                        .get_unchecked(addr_extract_index(self.addr, lvl));

                    let ptr = entry.ptr();
                    match entry.permission() {
                        // No page → page fault
                        Permission::Empty => {
                            return Some(Err(AddrPageLookupError {
                                addr: self.addr,
                                size: PageSize::from_lvl(lvl)?,
                            }))
                        }

                        // Node → proceed walking
                        Permission::Node => current_pt = ptr as _,

                        // Leaf → return relevant data
                        perm => {
                            break 'a (
                                // Pointer in host memory
                                ptr as *mut u8,
                                perm,
                                PageSize::from_lvl(lvl)?,
                                // In-page offset
                                self.addr as usize & ((1 << (lvl * 9 + 12)) - 1),
                            );
                        }
                    }
                }
            }

            // Reached the bottom without finding a leaf (malformed page table)
            return None;
        };

        // Get available byte count in the selected page with offset
        let avail = (size as usize - offset).clamp(0, self.size);

        // Remember the virtual address before bumping past this page
        let vaddr = self.addr;
        self.bump(size);

        Some(Ok(AddrPageLookupOk {
            vaddr,
            ptr: unsafe { base.add(offset) }, // Return pointer to the start of region
            size: avail,
            perm,
        }))
    }
}

/// Extract the 9-bit page-table index for level `lvl` from a virtual address
fn addr_extract_index(addr: u64, lvl: u8) -> usize {
    debug_assert!(lvl <= 4);
    usize::try_from((addr >> (lvl * 9 + 12)) & ((1 << 9) - 1)).expect("?conradluget a better CPU")
}
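
// Quick sanity check of the index math above, assuming the 9-bits-per-level,
// 12-bit page offset layout used throughout this module; a hypothetical
// test, not from the original suite.
#[cfg(test)]
mod addr_index_tests {
    use super::addr_extract_index;

    #[test]
    fn extracts_each_level() {
        // Index 1 at level 0 (bits 12..21), index 3 at level 4 (bits 48..57)
        let addr = (1u64 << 12) | (3u64 << (4 * 9 + 12));
        assert_eq!(addr_extract_index(addr, 0), 1);
        assert_eq!(addr_extract_index(addr, 4), 3);
        // Levels in between were never set
        assert_eq!(addr_extract_index(addr, 2), 0);
    }
}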

/// Page size
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum PageSize {
    /// 4 KiB page (on level 0)
    Size4K = 4096,

    /// 2 MiB page (on level 1)
    Size2M = 1024 * 1024 * 2,

    /// 1 GiB page (on level 2)
    Size1G = 1024 * 1024 * 1024,
}

impl PageSize {
    /// Convert page table level to size of page
    const fn from_lvl(lvl: u8) -> Option<Self> {
        match lvl {
            0 => Some(PageSize::Size4K),
            1 => Some(PageSize::Size2M),
            2 => Some(PageSize::Size1G),
            _ => None,
        }
    }
}
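
// Hypothetical compile-time sanity check relating each page size to its
// level: one entry covers 9 bits of address space per level above the
// 12-bit page offset. A sketch for documentation purposes.
const _: () = {
    assert!(PageSize::Size4K as usize == 1 << 12);
    assert!(PageSize::Size2M as usize == 1 << (12 + 9));
    assert!(PageSize::Size1G as usize == 1 << (12 + 2 * 9));
};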

/// Unhandled load access trap
#[derive(Clone, Copy, Display, Debug, PartialEq, Eq)]
pub struct LoadError(u64);

/// Unhandled store access trap
#[derive(Clone, Copy, Display, Debug, PartialEq, Eq)]
pub struct StoreError(u64);

/// There was no entry in the page table to unmap
#[derive(Clone, Copy, Display, Debug)]
pub struct NothingToUnmap;

/// Reason for accessing memory
#[derive(Clone, Copy, Display, Debug, PartialEq, Eq)]
pub enum MemoryAccessReason {
    Load,
    Store,
}

/// Error that occurred when copying a block of memory
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct BlkCopyError {
    /// Kind of access that failed
    access_reason: MemoryAccessReason,
    /// VM address of the failure
    addr: u64,
}

impl From<BlkCopyError> for VmRunError {
    fn from(value: BlkCopyError) -> Self {
        match value.access_reason {
            MemoryAccessReason::Load => Self::LoadAccessEx(value.addr),
            MemoryAccessReason::Store => Self::StoreAccessEx(value.addr),
        }
    }
}

impl From<LoadError> for VmRunError {
    fn from(value: LoadError) -> Self {
        Self::LoadAccessEx(value.0)
    }
}

impl From<StoreError> for VmRunError {
    fn from(value: StoreError) -> Self {
        Self::StoreAccessEx(value.0)
    }
}
|
2023-07-20 13:47:50 -05:00
|
|
|
|
|
|
|
#[derive(Clone, Copy, Display, Debug, PartialEq, Eq)]
|
|
|
|
pub enum MapError {
|
|
|
|
AlreadyMapped,
|
|
|
|
}
|