Buddy page allocator

Soon this will turn into an actual memory manager
Branch: memory-manager
Asya · 2023-01-23 15:06:05 +03:00
parent 1013e86b61 · commit a5fc0a162c
Signed by: asya · GPG Key ID: 4679BF7DCC044783
6 changed files with 119 additions and 55 deletions


@@ -123,7 +123,6 @@ impl Heap {
         let size = size + mem::size_of::<Header>();
         let chunks_needed = (size + CHUNK_SIZE - 1) / CHUNK_SIZE;
         let chunk_alignment = (alignment + CHUNK_SIZE - 1) / CHUNK_SIZE;
-        log::debug!("size: {size} chunks: {chunks_needed} align: {chunk_alignment}");
         if chunks_needed + chunk_alignment > self.free_chunks() {
             return None;
@@ -137,11 +136,6 @@ impl Heap {
         // Align the starting address and verify that we haven't gone outside the calculated free area
         let addr =
             addr_unaligned + alignment - (addr_unaligned + mem::size_of::<Header>()) % alignment;
-        log::debug!(
-            "Addr unaligned: 0x{addr_unaligned:x} (offset: 0x{:x})",
-            addr_unaligned - chunks_addr
-        );
-        log::trace!("Addr: 0x{addr:x} (offset: 0x{:x})", addr - chunks_addr);
         let aligned_first_chunk = (addr - chunks_addr) / CHUNK_SIZE;
         assert!(first_chunk <= aligned_first_chunk);
         assert!(
@@ -158,7 +152,6 @@ impl Heap {
         self.allocated_chunks += chunks_needed;
         let ptr: *mut u8 = unsafe { mem::transmute(header.add(1)) };
-        log::trace!("{ptr:p}");
         // FIXME: zero or scrub memory?
         assert!(ptr.is_aligned_to(alignment));
         NonNull::new(ptr)
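
The chunk arithmetic in these hunks is the standard round-up trick: (n + d - 1) / d computes the ceiling of n / d for positive integers. A standalone sketch of the same calculation (the CHUNK_SIZE value here is an assumption for illustration; the kernel defines its own constant elsewhere in this file):

    const CHUNK_SIZE: usize = 32; // assumed value, for illustration only

    // Round a byte count up to whole chunks (ceiling division).
    fn chunks_needed(size: usize) -> usize {
        (size + CHUNK_SIZE - 1) / CHUNK_SIZE
    }

    fn main() {
        assert_eq!(chunks_needed(1), 1);  // any nonzero size occupies a chunk
        assert_eq!(chunks_needed(32), 1); // exact fit
        assert_eq!(chunks_needed(33), 2); // one byte over rounds up
    }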


@@ -1,14 +1,14 @@
 use core::sync::atomic::AtomicU64;
 use limine::{LimineMemmapEntry, LimineMemoryMapEntryType, NonNullPtr};
 use spin::{Mutex, Once};
-use x86_64::{
-    structures::paging::{FrameAllocator, FrameDeallocator, OffsetPageTable, PhysFrame, Size4KiB},
-    PhysAddr, VirtAddr,
-};
+use x86_64::{structures::paging::OffsetPageTable, VirtAddr};
+
+use crate::memory::MemoryManager;
 
-pub static PAGE_TABLE: Once<Mutex<OffsetPageTable>> = Once::new();
-pub static FRAME_ALLOC: Once<Mutex<FrameAlloc>> = Once::new();
 pub const PAGE_SIZE: usize = 4096;
+pub static MEMORY_MANAGER: Once<Mutex<MemoryManager>> = Once::new();
 pub static HHDM_OFFSET: AtomicU64 = AtomicU64::new(0);
+static PAGE_TABLE: Once<Mutex<OffsetPageTable>> = Once::new();
 
 /// Initialise page table
 pub unsafe fn init_pt(phys_base: VirtAddr) {
@@ -27,45 +27,19 @@ pub unsafe fn init_pt(phys_base: VirtAddr) {
     });
 }
 
-/// Initialise page frame allocator
-pub unsafe fn init_falloc(mmap: &'static [NonNullPtr<LimineMemmapEntry>]) {
-    log::info!("Initialising frame allocator");
-    FRAME_ALLOC.call_once(|| Mutex::new(FrameAlloc::new(mmap)));
-}
-
-pub struct FrameAlloc {
-    mmap: &'static [NonNullPtr<LimineMemmapEntry>],
-    next: usize,
-}
-
-unsafe impl Send for FrameAlloc {}
-
-impl FrameAlloc {
-    pub unsafe fn new(mmap: &'static [NonNullPtr<LimineMemmapEntry>]) -> Self {
-        Self { mmap, next: 0 }
-    }
-
-    fn usable_frames(&self) -> impl Iterator<Item = PhysFrame> {
-        self.mmap
-            .iter()
-            .filter(|e| e.typ == LimineMemoryMapEntryType::Usable)
-            .map(|e| e.base..e.base + e.len)
-            .flat_map(|r| r.step_by(4096))
-            .map(PhysAddr::new)
-            .map(PhysFrame::containing_address)
-    }
-}
-
-unsafe impl FrameAllocator<Size4KiB> for FrameAlloc {
-    fn allocate_frame(&mut self) -> Option<PhysFrame<Size4KiB>> {
-        let f = self.usable_frames().nth(self.next);
-        self.next += 1;
-        f
-    }
-}
-
-impl FrameDeallocator<Size4KiB> for FrameAlloc {
-    unsafe fn deallocate_frame(&mut self, frame: PhysFrame<Size4KiB>) {
-        // TODO
-    }
-}
+/// Initialise memory manager
+pub fn initialize(mmap: &'static [NonNullPtr<LimineMemmapEntry>]) {
+    let mut memory_manager = MemoryManager::new();
+
+    for entry in mmap {
+        if entry.typ != LimineMemoryMapEntryType::Usable {
+            continue;
+        }
+
+        unsafe {
+            memory_manager.add_range(entry.base as usize, entry.len as usize / PAGE_SIZE);
+        }
+    }
+
+    MEMORY_MANAGER.call_once(|| Mutex::new(memory_manager));
+}
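
One consequence of this loop, worth reading against the FIXME in memory.rs below: add_range keeps only whole max-order blocks (1 << 10 = 1024 pages, i.e. 4 MiB at 4 KiB pages), so the tail of a usable region that does not fill a complete block is silently dropped. A standalone sketch of that arithmetic:

    const PAGE_SIZE: usize = 4096;
    const MAX_ORDER: usize = 10;
    const BLOCK_PAGES: usize = 1 << MAX_ORDER; // 1024 pages = 4 MiB

    // Returns (max-order blocks kept, tail pages currently dropped).
    fn split_region(len_bytes: usize) -> (usize, usize) {
        let pages = len_bytes / PAGE_SIZE;
        (pages / BLOCK_PAGES, pages % BLOCK_PAGES)
    }

    fn main() {
        // A 10 MiB usable region: 2560 pages -> two 4 MiB blocks, 512 pages lost
        assert_eq!(split_region(10 * 1024 * 1024), (2, 512));
    }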


@@ -1,11 +1,12 @@
+pub mod memory;
 mod gdt;
 mod interrupts;
 mod logging;
-mod memory;
 
 pub use logging::log;
+pub use memory::PAGE_SIZE;
 
-use crate::allocator;
+use crate::{allocator, arch::memory::MEMORY_MANAGER};
 use limine::{
     LimineHhdmRequest, LimineKernelFileRequest, LimineMemmapRequest, LimineModuleRequest,
 };
@@ -26,8 +27,10 @@ unsafe extern "C" fn _kernel_start() -> ! {
         .offset,
     ));
 
+    allocator::init();
+
     static MMAP_REQ: LimineMemmapRequest = LimineMemmapRequest::new(0);
-    memory::init_falloc(
+    memory::initialize(
         MMAP_REQ
             .get_response()
             .get()
@@ -35,10 +38,25 @@ unsafe extern "C" fn _kernel_start() -> ! {
             .memmap(),
     );
 
-    allocator::init();
     gdt::init();
     interrupts::init();
 
+    {
+        let mut mm = MEMORY_MANAGER.get().unwrap().lock();
+        let alloc_0 = mm.allocate_pages(0).unwrap();
+        log::debug!("Addr: {:p}", alloc_0);
+        let alloc_1 = mm.allocate_pages(0).unwrap();
+        log::debug!("Addr: {:p}", alloc_1);
+        mm.deallocate_pages(alloc_0, 0);
+        let alloc_2 = mm.allocate_pages(1).unwrap();
+        log::debug!("Addr: {:p}", alloc_2);
+        mm.deallocate_pages(alloc_1, 0);
+        mm.deallocate_pages(alloc_2, 1);
+        let alloc_3 = mm.allocate_pages(1).unwrap();
+        log::debug!("Addr: {:p}", alloc_3);
+        mm.deallocate_pages(alloc_3, 1);
+    }
+
     static KFILE_REQ: LimineKernelFileRequest = LimineKernelFileRequest::new(0);
     static MOD_REQ: LimineModuleRequest = LimineModuleRequest::new(0);
     crate::kmain::kmain(
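
The added block doubles as a smoke test for the buddy logic. Tracing it by hand against the implementation in kernel/src/memory.rs (with B the max-order block that get_free_pages pops first; Vec::pop takes the most recently added block): alloc_0 splits B all the way down to order 0 and returns B, leaving the buddies B + 0x1000 (order 0), B + 0x2000 (order 1), up through B + 0x200000 (order 9) on the free lists. alloc_1 then pops B + 0x1000 from order 0. Freeing alloc_0 merges nothing, since its buddy is still allocated. alloc_2 pops B + 0x2000 from order 1. Freeing alloc_1 merges B with B + 0x1000 into an order-1 block, and freeing alloc_2 then cascades the merges all the way back up to a single order-10 block, so alloc_3 splits it afresh and returns B again. The expected debug output is therefore B, B + 0x1000, B + 0x2000, B.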


@@ -7,7 +7,7 @@ pub fn kmain(cmdline: &str, initrd: Option<&'static [u8]>) -> ! {
     if cmdline.contains("baka=9") {
         let _ = crate::arch::log(format_args!(include_str!("../data/⑨. バカ")));
    }
 
+    log::info!("Cmdline: \"{cmdline}\"");
     let initrd = initrd.expect("no initrd found");


@@ -3,6 +3,7 @@
 #![feature(
     abi_x86_interrupt,
     alloc_error_handler,
+    inline_const,
     panic_info_message,
     pointer_is_aligned,
     prelude_import,
@@ -16,6 +17,7 @@ mod allocator;
 mod arch;
 mod kmain;
 mod logger;
+mod memory;
 mod task;
 
 use versioning::Version;
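
The new inline_const feature gate exists for the free-list array in memory.rs: [Vec::new(); N] is rejected because Vec is not Copy, while an inline const { ... } block repeats a constant expression for each element. A userspace sketch of the same initializer (inline const blocks were nightly-only when this commit was made and have since been stabilized):

    const MAX_ORDER: usize = 10;

    fn main() {
        // `Vec<usize>` is not `Copy`, so `[Vec::new(); MAX_ORDER + 1]` fails;
        // `const { ... }` evaluates a const expression once per element instead.
        let free_lists: [Vec<usize>; MAX_ORDER + 1] = [const { Vec::new() }; MAX_ORDER + 1];
        assert!(free_lists.iter().all(|l| l.is_empty()));
    }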

kernel/src/memory.rs (new file, 77 additions)

@@ -0,0 +1,77 @@
+//! The Memory Manager
+use alloc::vec::Vec;
+
+pub use crate::arch::PAGE_SIZE;
+
+pub const MAX_ORDER: usize = 10;
+
+pub struct MemoryManager {
+    free_lists: [Vec<usize>; MAX_ORDER + 1],
+}
+
+impl MemoryManager {
+    pub const fn new() -> Self {
+        Self {
+            free_lists: [const { Vec::new() }; MAX_ORDER + 1],
+        }
+    }
+
+    pub fn allocate_pages(&mut self, order: usize) -> Option<*mut u8> {
+        self.get_free_pages(order).map(|addr| addr as *mut u8)
+    }
+
+    /// # Safety
+    /// This method assumes that `address` is in range of this allocator
+    pub unsafe fn deallocate_pages(&mut self, address: *mut u8, order: usize) {
+        self.free_lists[order].push(address as usize);
+        self.merge_buddies(order, address as usize)
+    }
+
+    /// # Safety
+    /// This method assumes that the given address range:
+    /// a) starts at an address aligned to page boundaries,
+    /// b) consists of valid free pages not already added,
+    /// FIXME: c) spans a multiple of `1 << MAX_ORDER` pages
+    pub unsafe fn add_range(&mut self, start_addr: usize, page_count: usize) {
+        for i in 0..page_count / 1024 {
+            self.free_lists[MAX_ORDER].push(start_addr + i * 1024 * PAGE_SIZE);
+        }
+    }
+
+    fn get_free_pages(&mut self, order: usize) -> Option<usize> {
+        // We can't get such a page!
+        if order > MAX_ORDER {
+            return None;
+        }
+
+        if self.free_lists[order].len() > 0 {
+            return self.free_lists[order].pop();
+        }
+
+        self.get_free_pages(order + 1).map(|addr| {
+            self.free_lists[order].push(addr ^ (PAGE_SIZE << order));
+            addr
+        })
+    }
+
+    fn merge_buddies(&mut self, order: usize, address: usize) {
+        // if we can't have any higher order blocks, we can't merge
+        if order > MAX_ORDER - 1 {
+            return;
+        }
+
+        let buddy_address = address ^ (PAGE_SIZE << order);
+        log::debug!("merge buddy: 0x{buddy_address:x}");
+        if let Some(buddy_index) = self.free_lists[order]
+            .iter()
+            .position(|blk| *blk == buddy_address)
+        {
+            self.free_lists[order].pop();
+            self.free_lists[order].remove(buddy_index);
+            let new_address = address.min(buddy_address);
+            log::debug!("Merging 0x{address:x} @ {order} with 0x{buddy_address:x} at 0x{new_address:x}");
+            self.free_lists[order + 1].push(new_address);
+            self.merge_buddies(order + 1, new_address)
+        }
+    }
+}
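
The load-bearing trick in both get_free_pages and merge_buddies is the XOR: a block of order k is aligned to its own size, so it and its buddy differ in exactly one address bit, PAGE_SIZE << k. Flipping that bit maps each block to its buddy and back, and the merged parent starts at the lower address of the pair, hence address.min(buddy_address). A standalone sketch (note this only holds when add_range is fed blocks aligned to the max-order block size, which the Limine memory map does not strictly guarantee):

    const PAGE_SIZE: usize = 4096;

    // Buddy of the order-`order` block at `addr`: flip the single bit
    // that distinguishes the two halves of the parent block.
    fn buddy_of(addr: usize, order: usize) -> usize {
        addr ^ (PAGE_SIZE << order)
    }

    fn main() {
        // Order 0: 4 KiB blocks pair up 0x0 <-> 0x1000
        assert_eq!(buddy_of(0x0, 0), 0x1000);
        assert_eq!(buddy_of(0x1000, 0), 0x0);
        // Order 3: 32 KiB blocks pair up 0x40000 <-> 0x48000
        assert_eq!(buddy_of(0x40000, 3), 0x48000);
        // The merged parent starts at the lower address of the pair
        assert_eq!(buddy_of(0x48000, 3).min(0x48000), 0x40000);
    }

One subtlety when reading merge_buddies: the unqualified pop() works only because deallocate_pages pushed the freed block immediately before the merge check, so that block is always the last element; the buddy is then removed by index.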