forked from AbleOS/ableos

Buddy page allocator

Soon this will turn into an actual memory manager.

This commit is contained in:
parent f8630f6471
commit 59c34f5849
@@ -123,7 +123,6 @@ impl Heap {
         let size = size + mem::size_of::<Header>();
         let chunks_needed = (size + CHUNK_SIZE - 1) / CHUNK_SIZE;
         let chunk_alignment = (alignment + CHUNK_SIZE - 1) / CHUNK_SIZE;
-        log::debug!("size: {size} chunks: {chunks_needed} align: {chunk_alignment}");

         if chunks_needed + chunk_alignment > self.free_chunks() {
             return None;
@@ -137,11 +136,6 @@ impl Heap {
         // Align the starting address and verify that we haven't gone outside the calculated free area
         let addr =
             addr_unaligned + alignment - (addr_unaligned + mem::size_of::<Header>()) % alignment;
-        log::debug!(
-            "Addr unaligned: 0x{addr_unaligned:x} (offset: 0x{:x})",
-            addr_unaligned - chunks_addr
-        );
-        log::trace!("Addr: 0x{addr:x} (offset: 0x{:x})", addr - chunks_addr);
         let aligned_first_chunk = (addr - chunks_addr) / CHUNK_SIZE;
         assert!(first_chunk <= aligned_first_chunk);
         assert!(
@@ -158,7 +152,6 @@ impl Heap {
         self.allocated_chunks += chunks_needed;

         let ptr: *mut u8 = unsafe { mem::transmute(header.add(1)) };
-        log::trace!("{ptr:p}");
        // FIXME: zero or scrub memory?
        assert!(ptr.is_aligned_to(alignment));
        NonNull::new(ptr)
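Both `chunks_needed` and `chunk_alignment` above use the `(x + CHUNK_SIZE - 1) / CHUNK_SIZE` idiom, which is integer ceiling division: any remainder bumps the result up by one whole chunk. A minimal standalone sketch of the same arithmetic (the `CHUNK_SIZE` value here is illustrative, not the heap's actual constant):

```rust
const CHUNK_SIZE: usize = 32; // illustrative; the real constant lives alongside `Heap`

/// Round a byte count up to a whole number of chunks.
fn chunks_for(size: usize) -> usize {
    (size + CHUNK_SIZE - 1) / CHUNK_SIZE
}

fn main() {
    assert_eq!(chunks_for(1), 1);  // any non-zero size needs at least one chunk
    assert_eq!(chunks_for(32), 1); // exact fit
    assert_eq!(chunks_for(33), 2); // one byte over spills into a second chunk
}
```

On toolchains from Rust 1.73 onward the same computation can be written as `size.div_ceil(CHUNK_SIZE)`.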
@@ -1,14 +1,14 @@
 use core::sync::atomic::AtomicU64;
 use limine::{LimineMemmapEntry, LimineMemoryMapEntryType, NonNullPtr};
 use spin::{Mutex, Once};
-use x86_64::{
-    structures::paging::{FrameAllocator, FrameDeallocator, OffsetPageTable, PhysFrame, Size4KiB},
-    PhysAddr, VirtAddr,
-};
+use x86_64::{structures::paging::OffsetPageTable, VirtAddr};
+use crate::memory::MemoryManager;

-pub static PAGE_TABLE: Once<Mutex<OffsetPageTable>> = Once::new();
-pub static FRAME_ALLOC: Once<Mutex<FrameAlloc>> = Once::new();
+pub const PAGE_SIZE: usize = 4096;
+pub static MEMORY_MANAGER: Once<Mutex<MemoryManager>> = Once::new();
 pub static HHDM_OFFSET: AtomicU64 = AtomicU64::new(0);
+static PAGE_TABLE: Once<Mutex<OffsetPageTable>> = Once::new();

 /// Initialise page table
 pub unsafe fn init_pt(phys_base: VirtAddr) {
@@ -27,45 +27,19 @@ pub unsafe fn init_pt(phys_base: VirtAddr) {
     });
 }

-/// Initialise page frame allocator
-pub unsafe fn init_falloc(mmap: &'static [NonNullPtr<LimineMemmapEntry>]) {
-    log::info!("Initialising frame allocator");
-    FRAME_ALLOC.call_once(|| Mutex::new(FrameAlloc::new(mmap)));
-}
-
-pub struct FrameAlloc {
-    mmap: &'static [NonNullPtr<LimineMemmapEntry>],
-    next: usize,
-}
-
-unsafe impl Send for FrameAlloc {}
-
-impl FrameAlloc {
-    pub unsafe fn new(mmap: &'static [NonNullPtr<LimineMemmapEntry>]) -> Self {
-        Self { mmap, next: 0 }
-    }
-
-    fn usable_frames(&self) -> impl Iterator<Item = PhysFrame> {
-        self.mmap
-            .iter()
-            .filter(|e| e.typ == LimineMemoryMapEntryType::Usable)
-            .map(|e| e.base..e.base + e.len)
-            .flat_map(|r| r.step_by(4096))
-            .map(PhysAddr::new)
-            .map(PhysFrame::containing_address)
-    }
-}
-
-unsafe impl FrameAllocator<Size4KiB> for FrameAlloc {
-    fn allocate_frame(&mut self) -> Option<PhysFrame<Size4KiB>> {
-        let f = self.usable_frames().nth(self.next);
-        self.next += 1;
-        f
-    }
-}
-
-impl FrameDeallocator<Size4KiB> for FrameAlloc {
-    unsafe fn deallocate_frame(&mut self, frame: PhysFrame<Size4KiB>) {
-        // TODO
-    }
-}
+/// Initialise memory manager
+pub fn initialize(mmap: &'static [NonNullPtr<LimineMemmapEntry>]) {
+    let mut memory_manager = MemoryManager::new();
+
+    for entry in mmap {
+        if entry.typ != LimineMemoryMapEntryType::Usable {
+            continue;
+        }
+
+        unsafe {
+            memory_manager.add_range(entry.base as usize, entry.len as usize / PAGE_SIZE);
+        }
+    }
+
+    MEMORY_MANAGER.call_once(|| Mutex::new(memory_manager));
+}
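The new `initialize` replaces the bump-style `FrameAlloc` with the buddy-based `MemoryManager`: it walks the Limine memory map, skips everything that is not `Usable`, and hands each usable region to `add_range` as a start address plus a page count. A dependency-free sketch of that filtering step, using a hypothetical `Region` type in place of `LimineMemmapEntry`:

```rust
const PAGE_SIZE: usize = 4096;

/// Hypothetical stand-in for limine's memmap entry (`base`, `len`, `typ`).
struct Region {
    base: u64,
    len: u64,
    usable: bool,
}

/// Yield (start address, page count) pairs for every usable region,
/// mirroring what `initialize` feeds into `MemoryManager::add_range`.
fn usable_ranges(map: &[Region]) -> impl Iterator<Item = (usize, usize)> + '_ {
    map.iter()
        .filter(|r| r.usable)
        .map(|r| (r.base as usize, r.len as usize / PAGE_SIZE))
}

fn main() {
    let map = [
        Region { base: 0x0, len: 0x5000, usable: false }, // e.g. firmware-reserved
        Region { base: 0x10_0000, len: 0x40_0000, usable: true },
    ];
    for (start, pages) in usable_ranges(&map) {
        println!("add_range(0x{start:x}, {pages} pages)");
    }
}
```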
@@ -1,11 +1,12 @@
+pub mod memory;
 mod gdt;
 mod interrupts;
 mod logging;
-mod memory;

 pub use logging::log;
+pub use memory::PAGE_SIZE;

-use crate::allocator;
+use crate::{allocator, arch::memory::MEMORY_MANAGER};
 use limine::{
     LimineHhdmRequest, LimineKernelFileRequest, LimineMemmapRequest, LimineModuleRequest,
 };
@@ -26,8 +27,10 @@ unsafe extern "C" fn _kernel_start() -> ! {
         .offset,
     ));

+    allocator::init();
+
     static MMAP_REQ: LimineMemmapRequest = LimineMemmapRequest::new(0);
-    memory::init_falloc(
+    memory::initialize(
         MMAP_REQ
             .get_response()
             .get()
@@ -35,10 +38,25 @@ unsafe extern "C" fn _kernel_start() -> ! {
             .memmap(),
     );

-    allocator::init();
     gdt::init();
     interrupts::init();

+    {
+        let mut mm = MEMORY_MANAGER.get().unwrap().lock();
+        let alloc_0 = mm.allocate_pages(0).unwrap();
+        log::debug!("Addr: {:p}", alloc_0);
+        let alloc_1 = mm.allocate_pages(0).unwrap();
+        log::debug!("Addr: {:p}", alloc_1);
+        mm.deallocate_pages(alloc_0, 0);
+        let alloc_2 = mm.allocate_pages(1).unwrap();
+        log::debug!("Addr: {:p}", alloc_2);
+        mm.deallocate_pages(alloc_1, 0);
+        mm.deallocate_pages(alloc_2, 1);
+        let alloc_3 = mm.allocate_pages(1).unwrap();
+        log::debug!("Addr: {:p}", alloc_3);
+        mm.deallocate_pages(alloc_3, 1);
+    }
+
     static KFILE_REQ: LimineKernelFileRequest = LimineKernelFileRequest::new(0);
     static MOD_REQ: LimineModuleRequest = LimineModuleRequest::new(0);
     crate::kmain::kmain(
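The block added to `_kernel_start` is a smoke test for the new allocator. `alloc_0` and `alloc_1` are order-0 (single-page) buddies split out of the same order-1 block, so freeing `alloc_0` alone cannot coalesce anything while `alloc_1` is still live; `alloc_2` then takes the other order-1 half of the enclosing order-2 block. Once `alloc_1` and `alloc_2` are freed as well, the merges should cascade back up through the orders, and if coalescing works, the debug output should show `alloc_3` landing at the same address `alloc_0` had.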
@@ -3,6 +3,7 @@
 #![feature(
     abi_x86_interrupt,
     alloc_error_handler,
+    inline_const,
     panic_info_message,
     pointer_is_aligned,
     prelude_import,
@@ -16,6 +17,7 @@ mod allocator;
 mod arch;
 mod kmain;
 mod logger;
+mod memory;
 mod task;

 use versioning::Version;
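The new `inline_const` feature gate is there for `MemoryManager::new` in the file below: `[Vec::new(); N]` does not compile because `Vec` is not `Copy`, while a `const { ... }` block produces a constant, and array repeat expressions accept a constant even for non-`Copy` element types. A minimal nightly sketch of the pattern:

```rust
#![feature(inline_const)] // nightly-only at the time of this commit

fn main() {
    // `[Vec::new(); 4]` is rejected: `Vec<usize>` does not implement `Copy`.
    // Repeating a const block works, because the array is filled from a constant:
    let lists: [Vec<usize>; 4] = [const { Vec::new() }; 4];
    assert!(lists.iter().all(|l| l.is_empty()));
}
```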
kernel/src/memory.rs — new file, 77 lines
@@ -0,0 +1,77 @@
+//! The Memory Manager
+
+use alloc::vec::Vec;
+
+pub use crate::arch::PAGE_SIZE;
+pub const MAX_ORDER: usize = 10;
+
+pub struct MemoryManager {
+    free_lists: [Vec<usize>; MAX_ORDER + 1],
+}
+
+impl MemoryManager {
+    pub const fn new() -> Self {
+        Self {
+            free_lists: [const { Vec::new() }; MAX_ORDER + 1],
+        }
+    }
+
+    pub fn allocate_pages(&mut self, order: usize) -> Option<*mut u8> {
+        self.get_free_pages(order).map(|addr| addr as *mut u8)
+    }
+
+    /// # Safety
+    /// This method assumes that `address` is in range of this allocator
+    pub unsafe fn deallocate_pages(&mut self, address: *mut u8, order: usize) {
+        self.free_lists[order].push(address as usize);
+        self.merge_buddies(order, address as usize)
+    }
+
+    /// # Safety
+    /// This method assumes that the given address range,
+    /// a) starts at an address aligned to page boundaries,
+    /// b) are valid free pages not already added,
+    /// FIXME: c) has a multiple of `1 << MAX_ORDER` number of pages
+    pub unsafe fn add_range(&mut self, start_addr: usize, page_count: usize) {
+        for i in 0..page_count / 1024 {
+            self.free_lists[MAX_ORDER].push(start_addr + i * 1024 * PAGE_SIZE);
+        }
+    }
+
+    fn get_free_pages(&mut self, order: usize) -> Option<usize> {
+        // We can't get such a page!
+        if order > MAX_ORDER {
+            return None;
+        }
+
+        if self.free_lists[order].len() > 0 {
+            return self.free_lists[order].pop();
+        }
+
+        self.get_free_pages(order + 1).map(|addr| {
+            self.free_lists[order].push(addr ^ (PAGE_SIZE << order));
+            addr
+        })
+    }
+
+    fn merge_buddies(&mut self, order: usize, address: usize) {
+        // if we can't have any higher order blocks, we can't merge
+        if order > MAX_ORDER - 1 {
+            return;
+        }
+
+        let buddy_address = address ^ (PAGE_SIZE << order);
+        log::debug!("merge buddy: 0x{buddy_address:x}");
+        if let Some(buddy_index) = self.free_lists[order]
+            .iter()
+            .position(|blk| *blk == buddy_address)
+        {
+            self.free_lists[order].pop();
+            self.free_lists[order].remove(buddy_index);
+            let new_address = address.min(buddy_address);
+            log::debug!("Merging 0x{address:x} @ {order} with 0x{buddy_address:x} at 0x{new_address:x}");
+            self.free_lists[order + 1].push(new_address);
+            self.merge_buddies(order + 1, new_address)
+        }
+    }
+}
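The core trick in `get_free_pages` and `merge_buddies` is that a block's buddy is found by XOR-ing its address with the block size, `PAGE_SIZE << order`: the XOR flips exactly one address bit, so it maps each half of a split block to the other and is its own inverse. A small demonstration of the arithmetic, independent of the kernel types:

```rust
const PAGE_SIZE: usize = 4096;

/// Address of the buddy of the block at `addr` for a given order.
fn buddy_of(addr: usize, order: usize) -> usize {
    addr ^ (PAGE_SIZE << order)
}

fn main() {
    // An order-1 block at 0x10000 splits into order-0 buddies at 0x10000 and 0x11000:
    assert_eq!(buddy_of(0x10000, 0), 0x11000);
    assert_eq!(buddy_of(0x11000, 0), 0x10000); // XOR is self-inverse

    // When two buddies merge, the combined block starts at the lower address,
    // matching `address.min(buddy_address)` in `merge_buddies` above:
    assert_eq!(buddy_of(0x11000, 0).min(0x11000), 0x10000);
}
```

Two details of the implementation are worth noting. `merge_buddies` relies on the block being merged having just been pushed, so the `pop()` removes that block while `remove(buddy_index)` extracts its buddy. And `add_range` pushes one maximum-order block per `1 << MAX_ORDER` (= 1024) pages, silently dropping any remainder smaller than that, which is what the FIXME in its safety comment refers to.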