//! The Memory Manager

use alloc::collections::VecDeque;
use derive_more::*;

pub use crate::arch::PAGE_SIZE;

/// Highest buddy order: free blocks range from one page (order 0) up to
/// `PAGE_SIZE << MAX_ORDER` bytes (order `MAX_ORDER`).
pub const MAX_ORDER: usize = 10;

#[repr(transparent)]
#[derive(
    Add,
    AddAssign,
    Binary,
    BitAnd,
    BitAndAssign,
    BitOr,
    BitOrAssign,
    BitXor,
    BitXorAssign,
    Clone,
    Constructor,
    Copy,
    Display,
    Div,
    DivAssign,
    Eq,
    From,
    LowerHex,
    Mul,
    MulAssign,
    Not,
    Octal,
    Ord,
    PartialEq,
    PartialOrd,
    Rem,
    RemAssign,
    Shl,
    ShlAssign,
    Shr,
    ShrAssign,
    Sub,
    SubAssign,
    Sum,
    UpperHex,
)]
#[display(fmt = "0x{:x}", _0)]
#[from(forward)]
pub struct VirtualAddress(usize);

impl VirtualAddress {
    #[cfg(target_arch = "riscv64")]
    /// Returns an array of Virtual Page Numbers
    // FIXME: SV48 and SV57 support
    pub fn vpns(&self) -> [usize; 3] {
        [
            // [20:12]
            (self.0 >> 12) & 0x1ff,
            // [29:21]
            (self.0 >> 21) & 0x1ff,
            // [38:30]
            (self.0 >> 30) & 0x1ff,
        ]
    }
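
    // Worked example of the Sv39 split above (illustrative value, not from
    // the original source): for the virtual address 0x8020_1000, `vpns()`
    // returns [0x1, 0x1, 0x2], i.e. VPN[0] = bits [20:12] = 0x1,
    // VPN[1] = bits [29:21] = 0x1 and VPN[2] = bits [38:30] = 0x2.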

    pub fn as_addr(&self) -> usize {
        self.0
    }

    pub fn as_ptr<T>(&self) -> *const T {
        self.0 as _
    }

    pub fn as_mut_ptr<T>(&mut self) -> *mut T {
        self.0 as _
    }
}

#[repr(transparent)]
#[derive(
    Add,
    AddAssign,
    Binary,
    BitAnd,
    BitAndAssign,
    BitOr,
    BitOrAssign,
    BitXor,
    BitXorAssign,
    Clone,
    Constructor,
    Copy,
    Display,
    Div,
    DivAssign,
    Eq,
    From,
    LowerHex,
    Mul,
    MulAssign,
    Not,
    Octal,
    Ord,
    PartialEq,
    PartialOrd,
    Rem,
    RemAssign,
    Shl,
    ShlAssign,
    Shr,
    ShrAssign,
    Sub,
    SubAssign,
    Sum,
    UpperHex,
)]
#[display(fmt = "0x{:x}", _0)]
#[from(forward)]
pub struct PhysicalAddress(usize);

impl PhysicalAddress {
    #[cfg(target_arch = "riscv64")]
    /// Returns an array of Physical Page Numbers
    // FIXME: SV48 and SV57 support
    pub fn ppns(&self) -> [usize; 3] {
        [
            // [20:12]
            (self.0 >> 12) & 0x1ff,
            // [29:21]
            (self.0 >> 21) & 0x1ff,
            // [55:30]
            (self.0 >> 30) & 0x3ffffff,
        ]
    }
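
    // Note (Sv39): unlike the 9-bit lower fields, PPN[2] covers bits [55:30]
    // of the physical address and is 26 bits wide, hence the 0x3ffffff mask
    // above.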

    pub fn as_addr(&self) -> usize {
        self.0
    }

    pub fn as_ptr<T>(&self) -> *const T {
        self.0 as _
    }

    pub fn as_mut_ptr<T>(&self) -> *mut T {
        self.0 as _
    }
}

/// A buddy allocator over physical page frames.
///
/// `free_lists[order]` holds the free blocks of `PAGE_SIZE << order` bytes.
pub struct MemoryManager {
    free_lists: [VecDeque<PhysicalAddress>; MAX_ORDER + 1],
}
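
// Block sizes by order, assuming the usual PAGE_SIZE of 4096 bytes (the real
// value is re-exported from `crate::arch`): order 0 is a single 4 KiB page,
// order 1 is 8 KiB, ..., order MAX_ORDER (10) is a 4 MiB block.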

impl MemoryManager {
    pub const fn new() -> Self {
        Self {
            free_lists: [const { VecDeque::new() }; MAX_ORDER + 1],
        }
    }

    // FIXME: this method should take a length and turn that into an order
    pub fn allocate_pages(&mut self, order: usize) -> Option<PhysicalAddress> {
        self.get_free_pages(order)
    }

    // FIXME: this method should take a length and turn that into an order
    pub fn zallocate_pages(&mut self, order: usize) -> Option<PhysicalAddress> {
        let alloc = self.allocate_pages(order)?;
        unsafe {
            alloc.as_mut_ptr::<u8>().write_bytes(0, PAGE_SIZE << order);
        }
        Some(alloc)
    }

    /// # Safety
    /// This method assumes that `address` is in range of this allocator
    // FIXME: this method should take a length and turn that into an order
    pub unsafe fn deallocate_pages(&mut self, address: PhysicalAddress, order: usize) {
        self.free_lists[order].push_front(address);
        self.merge_buddies(order, address)
    }

    /// # Safety
    /// This method assumes that the given address range:
    /// a) starts and ends at addresses aligned to page boundaries,
    /// b) consists of valid free pages not already added to the allocator,
    /// FIXME: c) starts and ends at an address aligned to `PAGE_SIZE << MAX_ORDER`
    pub unsafe fn add_range(&mut self, start_addr: PhysicalAddress, page_count: usize) {
        // A max-order block spans `1 << MAX_ORDER` pages (1024 with MAX_ORDER = 10).
        for i in 0..page_count / (1 << MAX_ORDER) {
            self.free_lists[MAX_ORDER]
                .push_back(start_addr + (i * (1 << MAX_ORDER) * PAGE_SIZE).into());
        }
    }
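
    // Worked example (hypothetical values, assuming PAGE_SIZE == 4096):
    // `add_range(PhysicalAddress::new(0x8100_0000), 2048)` queues two
    // MAX_ORDER (4 MiB) blocks, at 0x8100_0000 and 0x8140_0000; a trailing
    // remainder smaller than a max-order block would be silently dropped.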

    fn get_free_pages(&mut self, order: usize) -> Option<PhysicalAddress> {
        // We can't get such a page!
        if order > MAX_ORDER {
            return None;
        }

        if !self.free_lists[order].is_empty() {
            return self.free_lists[order].pop_front();
        }

        self.get_free_pages(order + 1).map(|addr| {
            self.free_lists[order].push_front(addr ^ (PAGE_SIZE << order).into());
            addr
        })
    }
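
    // Worked example of the split above (hypothetical, assuming PAGE_SIZE ==
    // 4096 and a single free order-2 block at 0x8100_0000): get_free_pages(0)
    // recurses up to order 2, pops 0x8100_0000, leaves its order-1 buddy
    // 0x8100_2000 on free_lists[1] and the order-0 buddy 0x8100_1000 on
    // free_lists[0], and returns 0x8100_0000 to the caller.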

    fn merge_buddies(&mut self, order: usize, address: PhysicalAddress) {
        // if we can't have any higher order blocks, we can't merge
        if order >= MAX_ORDER {
            return;
        }

        let buddy_address = address ^ (PAGE_SIZE << order).into();
        log::debug!("merge buddy: 0x{buddy_address:x}");
        if let Some(buddy_index) = self.free_lists[order]
            .iter()
            .position(|blk| *blk == buddy_address)
        {
            // `address` was just pushed to the front of this list, so remove
            // the buddy (at index >= 1) before popping the front; popping
            // first would shift the buddy's index and drop the wrong block.
            self.free_lists[order].remove(buddy_index);
            self.free_lists[order].pop_front();
            let new_address = address.min(buddy_address);
            log::debug!(
                "Merging 0x{address:x} @ {order} with 0x{buddy_address:x} at 0x{new_address:x}"
            );
            self.free_lists[order + 1].push_front(new_address);
            self.merge_buddies(order + 1, new_address)
        }
    }
}
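
// A minimal usage sketch (hypothetical addresses, not from the original
// source; assumes the managed range is identity-mapped so that writing
// through `PhysicalAddress::as_mut_ptr`, as `zallocate_pages` does, is sound):
//
//     let mut mm = MemoryManager::new();
//     // Hand the allocator 2048 free pages (two max-order blocks).
//     unsafe { mm.add_range(PhysicalAddress::new(0x8100_0000), 2048) };
//     let page = mm.zallocate_pages(0).expect("out of physical memory");
//     // ... use the page, e.g. map it into an address space ...
//     unsafe { mm.deallocate_pages(page, 0) };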