This commit is contained in:
Erin 2023-08-10 12:39:03 +02:00 committed by ondra05
parent 34a82b55dc
commit 4c38b1ffb5
3 changed files with 293 additions and 280 deletions

View file

@@ -0,0 +1,124 @@
//! Address lookup
use super::{
addr_extract_index,
paging::{PageTable, Permission},
PageSize,
};
/// Good result from address split
pub struct AddrPageLookupOk {
/// Virtual address
pub vaddr: u64,
/// Pointer to the start of the region to operate on
pub ptr: *mut u8,
/// Size up to the end of the page or the end of the requested region, whichever is smaller
pub size: usize,
/// Page permission
pub perm: Permission,
}
/// Erroneous address split result
pub struct AddrPageLookupError {
/// Address of failure
pub addr: u64,
/// Requested page size
pub size: PageSize,
}
/// Splits an address range into pages
pub struct AddrPageLookuper {
/// Current address
addr: u64,
/// Size left
size: usize,
/// Page table
pagetable: *const PageTable,
}
impl AddrPageLookuper {
/// Create a new page lookuper
#[inline]
pub const fn new(addr: u64, size: usize, pagetable: *const PageTable) -> Self {
Self {
addr,
size,
pagetable,
}
}
/// Advance the address by the given page size
pub fn bump(&mut self, page_size: PageSize) {
self.addr += page_size as u64;
self.size = self.size.saturating_sub(page_size as _);
}
}
impl Iterator for AddrPageLookuper {
type Item = Result<AddrPageLookupOk, AddrPageLookupError>;
fn next(&mut self) -> Option<Self::Item> {
// The end, everything is fine
if self.size == 0 {
return None;
}
let (base, perm, size, offset) = 'a: {
let mut current_pt = self.pagetable;
// Walk the page table
for lvl in (0..5).rev() {
// Get an entry
unsafe {
let entry = (*current_pt)
.table
.get_unchecked(addr_extract_index(self.addr, lvl));
let ptr = entry.ptr();
match entry.permission() {
// No page → page fault
Permission::Empty => {
return Some(Err(AddrPageLookupError {
addr: self.addr,
size: PageSize::from_lvl(lvl)?,
}))
}
// Node → proceed walking
Permission::Node => current_pt = ptr as _,
// Leaf → return relevant data
perm => {
break 'a (
// Pointer in host memory
ptr as *mut u8,
perm,
PageSize::from_lvl(lvl)?,
// In-page offset
addr_extract_index(self.addr, lvl),
);
}
}
}
}
return None; // Reached the end (should not happen)
};
// Bytes available from the offset to the page end, clamped to the remaining size
let avail = (size as usize - offset).clamp(0, self.size);
self.bump(size);
Some(Ok(AddrPageLookupOk {
vaddr: self.addr,
ptr: unsafe { base.add(offset) }, // Return pointer to the start of region
size: avail,
perm,
}))
}
}
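To show how the iterator above is meant to be driven, here is a minimal usage sketch, not part of this commit: a hypothetical `read_guest` helper that drains the lookuper to copy a guest address range out of the VM, one page-bounded chunk at a time. It assumes the module's `PageTable` is in scope and that the crate's `alloc` support is available; permission checking against `perm` is deliberately omitted.

/// Hypothetical helper, a sketch only: copy `len` bytes starting at guest
/// address `vaddr` into a buffer, stopping at the first unmapped page.
unsafe fn read_guest(
    vaddr: u64,
    len: usize,
    root_pt: *const PageTable,
) -> Result<alloc::vec::Vec<u8>, AddrPageLookupError> {
    let mut out = alloc::vec::Vec::with_capacity(len);
    for chunk in AddrPageLookuper::new(vaddr, len, root_pt) {
        // Each item covers at most one page; `size` is already clamped
        // to what is left of the request.
        let AddrPageLookupOk { ptr, size, .. } = chunk?;
        out.extend_from_slice(core::slice::from_raw_parts(ptr, size));
    }
    Ok(out)
}

Note that each `Ok` item never crosses a page boundary, which is what lets callers handle partially mapped ranges chunk by chunk.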

View file

@@ -0,0 +1,163 @@
//! Automatic memory mapping
use {
super::{
addr_extract_index,
paging::{PageTable, Permission, PtEntry, PtPointedData},
PageSize, SoftPagedMem,
},
alloc::boxed::Box,
derive_more::Display,
};
#[cfg(feature = "alloc")]
impl<'p, A> SoftPagedMem<'p, A> {
/// Maps host memory into the VM's memory
///
/// # Safety
/// - Your faith in the gods of UB
/// - Addr-san claims it's fine but who knows if she isn't lying :ferrisSus:
/// - Alright, Miri-sama is also fine with this, who knows why
pub unsafe fn map(
&mut self,
host: *mut u8,
target: u64,
perm: Permission,
pagesize: PageSize,
) -> Result<(), MapError> {
let mut current_pt = self.root_pt;
// Decide how deep down the page table we are going
let lookup_depth = match pagesize {
PageSize::Size4K => 0,
PageSize::Size2M => 1,
PageSize::Size1G => 2,
};
// Walk pagetable levels
for lvl in (lookup_depth + 1..5).rev() {
let entry = (*current_pt)
.table
.get_unchecked_mut(addr_extract_index(target, lvl));
let ptr = entry.ptr();
match entry.permission() {
// Still not on target and already seeing empty entry?
// No worries! Let's create one (allocates).
Permission::Empty => {
// Increase children count
(*current_pt).childen += 1;
let table = Box::into_raw(Box::new(PtPointedData {
pt: PageTable::default(),
}));
core::ptr::write(entry, PtEntry::new(table, Permission::Node));
current_pt = table as _;
}
// Continue walking
Permission::Node => current_pt = ptr as _,
// There is a page entry in place of a node
_ => return Err(MapError::PageOnNode),
}
}
let node = (*current_pt)
.table
.get_unchecked_mut(addr_extract_index(target, lookup_depth));
// Check that the node is not already mapped
if node.permission() != Permission::Empty {
return Err(MapError::AlreadyMapped);
}
// Write entry
(*current_pt).childen += 1;
core::ptr::write(node, PtEntry::new(host.cast(), perm));
Ok(())
}
/// Unmaps pages from VM's memory
///
/// If this errors, it only means there is no entry to unmap; in most cases
/// it should simply be ignored.
pub fn unmap(&mut self, addr: u64) -> Result<(), NothingToUnmap> {
let mut current_pt = self.root_pt;
let mut page_tables = [core::ptr::null_mut(); 5];
// Walk the page table levels from the root down
for lvl in (0..5).rev() {
let entry = unsafe {
(*current_pt)
.table
.get_unchecked_mut(addr_extract_index(addr, lvl))
};
let ptr = entry.ptr();
match entry.permission() {
// Nothing is there, throw an error, not critical!
Permission::Empty => return Err(NothingToUnmap),
// Node → save to the visited page tables and continue walking
Permission::Node => {
page_tables[lvl as usize] = entry;
current_pt = ptr as _
}
// Page entry → zero it out!
// A zeroed page entry is a completely valid entry with
// empty permission - no UB here!
_ => unsafe {
core::ptr::write_bytes(entry, 0, 1);
break;
},
}
}
// Now walk the visited page tables from the deepest level up
for entry in page_tables.into_iter() {
// Level not visited, skip.
if entry.is_null() {
continue;
}
unsafe {
let children = &mut (*(*entry).ptr()).pt.childen;
*children -= 1; // Decrease children count
// If there are no children, deallocate.
if *children == 0 {
let _ = Box::from_raw((*entry).ptr() as *mut PageTable);
// Zero visited entry
core::ptr::write_bytes(entry, 0, 1);
} else {
break;
}
}
}
Ok(())
}
}
/// Mapping error
#[derive(Clone, Copy, Display, Debug, PartialEq, Eq)]
pub enum MapError {
/// Entry was already mapped
#[display(fmt = "There is already a page mapped on specified address")]
AlreadyMapped,
/// While walking, a page entry was
/// encountered where a node was expected.
#[display(fmt = "There was a page mapped on the way instead of a node")]
PageOnNode,
}
/// There was no entry in page table to unmap
///
/// No worries, don't panic, nothing bad has happened,
/// but if you are 120% sure there should be something,
/// double-check your addresses.
#[derive(Clone, Copy, Display, Debug)]
#[display(fmt = "There was no entry to unmap")]
pub struct NothingToUnmap;
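A minimal usage sketch for the pair above, not part of this commit: backing one guest page with a freshly allocated host page. It assumes it lives in this same module with the `alloc` feature enabled (so `SoftPagedMem`, `Permission`, `PageSize`, `Box` and `MapError` are in scope), that `Permission::Write` is one of the leaf permission variants, and that `target` is 4 KiB aligned; `map_scratch_page` is a hypothetical name.

/// Hypothetical helper, a sketch only: map a zeroed, freshly allocated
/// 4 KiB host page at guest address `target`, handing the host pointer
/// (and ownership of the allocation) back to the caller.
fn map_scratch_page<H>(
    mem: &mut SoftPagedMem<'_, H>,
    target: u64,
) -> Result<*mut u8, MapError> {
    let host = Box::into_raw(Box::new([0u8; 4096])) as *mut u8;
    // SAFETY: `host` points to a live, page-sized allocation; the caller
    // must keep it alive for as long as the mapping exists.
    match unsafe { mem.map(host, target, Permission::Write, PageSize::Size4K) } {
        Ok(()) => Ok(host),
        Err(e) => {
            // Reclaim the allocation instead of leaking it on failure.
            drop(unsafe { Box::from_raw(host as *mut [u8; 4096]) });
            Err(e)
        }
    }
}

Tearing the mapping down later is just `let _ = mem.unmap(target);` followed by freeing the host page; as the docs above say, a `NothingToUnmap` error usually only means the address was never mapped.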

View file

@@ -1,17 +1,19 @@
//! Platform independent, software paged memory implementation
use self::lookup::{AddrPageLookupError, AddrPageLookupOk, AddrPageLookuper};
pub mod lookup;
pub mod paging;
#[cfg(feature = "alloc")]
pub mod mapping;
use {
super::{LoadError, Memory, MemoryAccessReason, StoreError},
core::slice::SliceIndex,
derive_more::Display,
paging::{PageTable, Permission},
};
#[cfg(feature = "alloc")]
use {alloc::boxed::Box, paging::PtEntry};
/// HoleyBytes software paged memory
#[derive(Clone, Debug)]
pub struct SoftPagedMem<'p, PfH> {
@@ -173,261 +175,6 @@ impl<'p, PfH: HandlePageFault> SoftPagedMem<'p, PfH> {
}
}
/// Good result from address split
struct AddrPageLookupOk {
/// Virtual address
vaddr: u64,
/// Pointer to the start of the region to operate on
ptr: *mut u8,
/// Size up to the end of the page or the end of the requested region, whichever is smaller
size: usize,
/// Page permission
perm: Permission,
}
/// Erroneous address split result
struct AddrPageLookupError {
/// Address of failure
addr: u64,
/// Requested page size
size: PageSize,
}
/// Splits an address range into pages
struct AddrPageLookuper {
/// Current address
addr: u64,
/// Size left
size: usize,
/// Page table
pagetable: *const PageTable,
}
impl AddrPageLookuper {
/// Create a new page lookuper
#[inline]
pub const fn new(addr: u64, size: usize, pagetable: *const PageTable) -> Self {
Self {
addr,
size,
pagetable,
}
}
/// Advance the address by the given page size
fn bump(&mut self, page_size: PageSize) {
self.addr += page_size as u64;
self.size = self.size.saturating_sub(page_size as _);
}
}
impl Iterator for AddrPageLookuper {
type Item = Result<AddrPageLookupOk, AddrPageLookupError>;
fn next(&mut self) -> Option<Self::Item> {
// The end, everything is fine
if self.size == 0 {
return None;
}
let (base, perm, size, offset) = 'a: {
let mut current_pt = self.pagetable;
// Walk the page table
for lvl in (0..5).rev() {
// Get an entry
unsafe {
let entry = (*current_pt)
.table
.get_unchecked(addr_extract_index(self.addr, lvl));
let ptr = entry.ptr();
match entry.permission() {
// No page → page fault
Permission::Empty => {
return Some(Err(AddrPageLookupError {
addr: self.addr,
size: PageSize::from_lvl(lvl)?,
}))
}
// Node → proceed walking
Permission::Node => current_pt = ptr as _,
// Leaf → return relevant data
perm => {
break 'a (
// Pointer in host memory
ptr as *mut u8,
perm,
PageSize::from_lvl(lvl)?,
// In-page offset
addr_extract_index(self.addr, lvl),
);
}
}
}
}
return None; // Reached the end (should not happen)
};
// Bytes available from the offset to the page end, clamped to the remaining size
let avail = (size as usize - offset).clamp(0, self.size);
self.bump(size);
Some(Ok(AddrPageLookupOk {
vaddr: self.addr,
ptr: unsafe { base.add(offset) }, // Return pointer to the start of region
size: avail,
perm,
}))
}
}
#[cfg(feature = "alloc")]
impl<'p, A> Drop for SoftPagedMem<'p, A> {
fn drop(&mut self) {
let _ = unsafe { Box::from_raw(self.root_pt) };
}
}
#[cfg(feature = "alloc")]
impl<'p, A> SoftPagedMem<'p, A> {
/// Maps host memory into the VM's memory
///
/// # Safety
/// - Your faith in the gods of UB
/// - Addr-san claims it's fine but who knows if she isn't lying :ferrisSus:
/// - Alright, Miri-sama is also fine with this, who knows why
pub unsafe fn map(
&mut self,
host: *mut u8,
target: u64,
perm: Permission,
pagesize: PageSize,
) -> Result<(), MapError> {
let mut current_pt = self.root_pt;
// Decide how deep down the page table we are going
let lookup_depth = match pagesize {
PageSize::Size4K => 0,
PageSize::Size2M => 1,
PageSize::Size1G => 2,
};
// Walk pagetable levels
for lvl in (lookup_depth + 1..5).rev() {
let entry = (*current_pt)
.table
.get_unchecked_mut(addr_extract_index(target, lvl));
let ptr = entry.ptr();
match entry.permission() {
// Still not on target and already seeing empty entry?
// No worries! Let's create one (allocates).
Permission::Empty => {
// Increase children count
(*current_pt).childen += 1;
let table = Box::into_raw(Box::new(paging::PtPointedData {
pt: PageTable::default(),
}));
core::ptr::write(entry, PtEntry::new(table, Permission::Node));
current_pt = table as _;
}
// Continue walking
Permission::Node => current_pt = ptr as _,
// There is a page entry in place of a node
_ => return Err(MapError::PageOnNode),
}
}
let node = (*current_pt)
.table
.get_unchecked_mut(addr_extract_index(target, lookup_depth));
// Check that the node is not already mapped
if node.permission() != Permission::Empty {
return Err(MapError::AlreadyMapped);
}
// Write entry
(*current_pt).childen += 1;
core::ptr::write(node, PtEntry::new(host.cast(), perm));
Ok(())
}
/// Unmaps pages from VM's memory
///
/// If this errors, it only means there is no entry to unmap; in most cases
/// it should simply be ignored.
pub fn unmap(&mut self, addr: u64) -> Result<(), NothingToUnmap> {
let mut current_pt = self.root_pt;
let mut page_tables = [core::ptr::null_mut(); 5];
// Walk the page table levels from the root down
for lvl in (0..5).rev() {
let entry = unsafe {
(*current_pt)
.table
.get_unchecked_mut(addr_extract_index(addr, lvl))
};
let ptr = entry.ptr();
match entry.permission() {
// Nothing is there, throw an error, not critical!
Permission::Empty => return Err(NothingToUnmap),
// Node → save to the visited page tables and continue walking
Permission::Node => {
page_tables[lvl as usize] = entry;
current_pt = ptr as _
}
// Page entry → zero it out!
// A zeroed page entry is a completely valid entry with
// empty permission - no UB here!
_ => unsafe {
core::ptr::write_bytes(entry, 0, 1);
break;
},
}
}
// Now walk the visited page tables from the deepest level up
for entry in page_tables.into_iter() {
// Level not visited, skip.
if entry.is_null() {
continue;
}
unsafe {
let children = &mut (*(*entry).ptr()).pt.childen;
*children -= 1; // Decrease children count
// If there are no children, deallocate.
if *children == 0 {
let _ = Box::from_raw((*entry).ptr() as *mut PageTable);
// Zero visited entry
core::ptr::write_bytes(entry, 0, 1);
} else {
break;
}
}
}
Ok(())
}
}
/// Extract index in page table on specified level
///
/// The level shall not be larger than 4, otherwise
@@ -462,27 +209,6 @@ impl PageSize {
}
}
/// Mapping error
#[derive(Clone, Copy, Display, Debug, PartialEq, Eq)]
pub enum MapError {
/// Entry was already mapped
#[display(fmt = "There is already a page mapped on specified address")]
AlreadyMapped,
/// While walking, a page entry was
/// encountered where a node was expected.
#[display(fmt = "There was a page mapped on the way instead of a node")]
PageOnNode,
}
/// There was no entry in page table to unmap
///
/// No worries, don't panic, nothing bad has happened,
/// but if you are 120% sure there should be something,
/// double-check your addresses.
#[derive(Clone, Copy, Display, Debug)]
#[display(fmt = "There was no entry to unmap")]
pub struct NothingToUnmap;
/// Permission checks
pub mod perm_check {
use super::paging::Permission;