From 59c34f584910aa0921e0fbece32f4cb4a23cd113 Mon Sep 17 00:00:00 2001
From: Asya
Date: Mon, 23 Jan 2023 15:06:05 +0300
Subject: [PATCH] Buddy page allocator

Soon this will turn into an actual memory manager
---
 kernel/src/allocator.rs          |  7 ---
 kernel/src/arch/x86_64/memory.rs | 60 +++++++------------------
 kernel/src/arch/x86_64/mod.rs    | 26 +++++++++--
 kernel/src/kmain.rs              |  2 +-
 kernel/src/lib.rs                |  2 +
 kernel/src/memory.rs             | 77 ++++++++++++++++++++++++++++++
 6 files changed, 119 insertions(+), 55 deletions(-)
 create mode 100644 kernel/src/memory.rs

diff --git a/kernel/src/allocator.rs b/kernel/src/allocator.rs
index 50fb3292..7f06e746 100644
--- a/kernel/src/allocator.rs
+++ b/kernel/src/allocator.rs
@@ -123,7 +123,6 @@ impl Heap {
         let size = size + mem::size_of::<AllocationHeader>();
         let chunks_needed = (size + CHUNK_SIZE - 1) / CHUNK_SIZE;
         let chunk_alignment = (alignment + CHUNK_SIZE - 1) / CHUNK_SIZE;
-        log::debug!("size: {size} chunks: {chunks_needed} align: {chunk_alignment}");
         if chunks_needed + chunk_alignment > self.free_chunks() {
             return None;
         }
@@ -137,11 +136,6 @@ impl Heap {
 
         // Align the starting address and verify that we haven't gone outside the calculated free area
         let addr = addr_unaligned + alignment - (addr_unaligned + mem::size_of::<AllocationHeader>()) % alignment;
-        log::debug!(
-            "Addr unaligned: 0x{addr_unaligned:x} (offset: 0x{:x})",
-            addr_unaligned - chunks_addr
-        );
-        log::trace!("Addr: 0x{addr:x} (offset: 0x{:x})", addr - chunks_addr);
         let aligned_first_chunk = (addr - chunks_addr) / CHUNK_SIZE;
         assert!(first_chunk <= aligned_first_chunk);
         assert!(
@@ -158,7 +152,6 @@ impl Heap {
 
         self.allocated_chunks += chunks_needed;
         let ptr: *mut u8 = unsafe { mem::transmute(header.add(1)) };
-        log::trace!("{ptr:p}");
         // FIXME: zero or scrub memory?
         assert!(ptr.is_aligned_to(alignment));
         NonNull::new(ptr)
diff --git a/kernel/src/arch/x86_64/memory.rs b/kernel/src/arch/x86_64/memory.rs
index 05d02452..e355be35 100644
--- a/kernel/src/arch/x86_64/memory.rs
+++ b/kernel/src/arch/x86_64/memory.rs
@@ -1,14 +1,14 @@
 use core::sync::atomic::AtomicU64;
 use limine::{LimineMemmapEntry, LimineMemoryMapEntryType, NonNullPtr};
 use spin::{Mutex, Once};
-use x86_64::{
-    structures::paging::{FrameAllocator, FrameDeallocator, OffsetPageTable, PhysFrame, Size4KiB},
-    PhysAddr, VirtAddr,
-};
+use x86_64::{structures::paging::OffsetPageTable, VirtAddr};
+use crate::memory::MemoryManager;
 
-pub static PAGE_TABLE: Once<Mutex<OffsetPageTable<'static>>> = Once::new();
-pub static FRAME_ALLOC: Once<Mutex<FrameAlloc>> = Once::new();
+pub const PAGE_SIZE: usize = 4096;
+
+pub static MEMORY_MANAGER: Once<Mutex<MemoryManager>> = Once::new();
 pub static HHDM_OFFSET: AtomicU64 = AtomicU64::new(0);
+static PAGE_TABLE: Once<Mutex<OffsetPageTable<'static>>> = Once::new();
 
 /// Initialise page table
 pub unsafe fn init_pt(phys_base: VirtAddr) {
@@ -27,45 +27,19 @@ pub unsafe fn init_pt(phys_base: VirtAddr) {
     });
 }
 
-/// Initialise page frame allocator
-pub unsafe fn init_falloc(mmap: &'static [NonNullPtr<LimineMemmapEntry>]) {
-    log::info!("Initialising frame allocator");
-    FRAME_ALLOC.call_once(|| Mutex::new(FrameAlloc::new(mmap)));
-}
+/// Initialise memory manager
+pub fn initialize(mmap: &'static [NonNullPtr<LimineMemmapEntry>]) {
+    let mut memory_manager = MemoryManager::new();
 
-pub struct FrameAlloc {
-    mmap: &'static [NonNullPtr<LimineMemmapEntry>],
-    next: usize,
-}
+    for entry in mmap {
+        if entry.typ != LimineMemoryMapEntryType::Usable {
+            continue;
+        }
 
-unsafe impl Send for FrameAlloc {}
-
-impl FrameAlloc {
-    pub unsafe fn new(mmap: &'static [NonNullPtr<LimineMemmapEntry>]) -> Self {
-        Self { mmap, next: 0 }
+        unsafe {
+            memory_manager.add_range(entry.base as usize, entry.len as usize / PAGE_SIZE);
+        }
     }
-
-    fn usable_frames(&self) -> impl Iterator<Item = PhysFrame> {
-        self.mmap
-            .iter()
-            .filter(|e| e.typ == LimineMemoryMapEntryType::Usable)
-            .map(|e| e.base..e.base + e.len)
-            .flat_map(|r| r.step_by(4096))
-            .map(PhysAddr::new)
-            .map(PhysFrame::containing_address)
-    }
-}
-
-unsafe impl FrameAllocator<Size4KiB> for FrameAlloc {
-    fn allocate_frame(&mut self) -> Option<PhysFrame> {
-        let f = self.usable_frames().nth(self.next);
-        self.next += 1;
-        f
-    }
-}
-
-impl FrameDeallocator<Size4KiB> for FrameAlloc {
-    unsafe fn deallocate_frame(&mut self, frame: PhysFrame) {
-        // TODO
-    }
+
+    MEMORY_MANAGER.call_once(|| Mutex::new(memory_manager));
 }
diff --git a/kernel/src/arch/x86_64/mod.rs b/kernel/src/arch/x86_64/mod.rs
index 17ad0e5f..ac881f94 100644
--- a/kernel/src/arch/x86_64/mod.rs
+++ b/kernel/src/arch/x86_64/mod.rs
@@ -1,11 +1,12 @@
+pub mod memory;
 mod gdt;
 mod interrupts;
 mod logging;
-mod memory;
 
 pub use logging::log;
+pub use memory::PAGE_SIZE;
 
-use crate::allocator;
+use crate::{allocator, arch::memory::MEMORY_MANAGER};
 use limine::{
     LimineHhdmRequest, LimineKernelFileRequest, LimineMemmapRequest, LimineModuleRequest,
 };
@@ -26,8 +27,10 @@ unsafe extern "C" fn _kernel_start() -> ! {
             .offset,
     ));
 
+    allocator::init();
+
     static MMAP_REQ: LimineMemmapRequest = LimineMemmapRequest::new(0);
-    memory::init_falloc(
+    memory::initialize(
         MMAP_REQ
             .get_response()
             .get()
@@ -35,10 +38,25 @@ unsafe extern "C" fn _kernel_start() -> ! {
             .memmap(),
     );
 
-    allocator::init();
     gdt::init();
     interrupts::init();
 
+    {
+        let mut mm = MEMORY_MANAGER.get().unwrap().lock();
+        let alloc_0 = mm.allocate_pages(0).unwrap();
+        log::debug!("Addr: {:p}", alloc_0);
+        let alloc_1 = mm.allocate_pages(0).unwrap();
+        log::debug!("Addr: {:p}", alloc_1);
+        mm.deallocate_pages(alloc_0, 0);
+        let alloc_2 = mm.allocate_pages(1).unwrap();
+        log::debug!("Addr: {:p}", alloc_2);
+        mm.deallocate_pages(alloc_1, 0);
+        mm.deallocate_pages(alloc_2, 1);
+        let alloc_3 = mm.allocate_pages(1).unwrap();
+        log::debug!("Addr: {:p}", alloc_3);
+        mm.deallocate_pages(alloc_3, 1);
+    }
+
     static KFILE_REQ: LimineKernelFileRequest = LimineKernelFileRequest::new(0);
     static MOD_REQ: LimineModuleRequest = LimineModuleRequest::new(0);
     crate::kmain::kmain(
diff --git a/kernel/src/kmain.rs b/kernel/src/kmain.rs
index cb427af6..9d562f97 100644
--- a/kernel/src/kmain.rs
+++ b/kernel/src/kmain.rs
@@ -7,7 +7,7 @@ pub fn kmain(cmdline: &str, initrd: Option<&'static [u8]>) -> ! {
     if cmdline.contains("baka=9") {
         let _ = crate::arch::log(format_args!(include_str!("../data/⑨. バカ")));
     }
-    
+
     log::info!("Cmdline: \"{cmdline}\"");
 
     let initrd = initrd.expect("no initrd found");
diff --git a/kernel/src/lib.rs b/kernel/src/lib.rs
index 605faf78..f02f744b 100644
--- a/kernel/src/lib.rs
+++ b/kernel/src/lib.rs
@@ -3,6 +3,7 @@
 #![feature(
     abi_x86_interrupt,
     alloc_error_handler,
+    inline_const,
     panic_info_message,
     pointer_is_aligned,
     prelude_import,
@@ -16,6 +17,7 @@ mod allocator;
 mod arch;
 mod kmain;
 mod logger;
+mod memory;
 mod task;
 
 use versioning::Version;
diff --git a/kernel/src/memory.rs b/kernel/src/memory.rs
new file mode 100644
index 00000000..544a17b0
--- /dev/null
+++ b/kernel/src/memory.rs
@@ -0,0 +1,77 @@
+//! The Memory Manager
+
+use alloc::vec::Vec;
+
+pub use crate::arch::PAGE_SIZE;
+pub const MAX_ORDER: usize = 10;
+
+pub struct MemoryManager {
+    free_lists: [Vec<usize>; MAX_ORDER + 1],
+}
+
+impl MemoryManager {
+    pub const fn new() -> Self {
+        Self {
+            free_lists: [const { Vec::new() }; MAX_ORDER + 1],
+        }
+    }
+
+    pub fn allocate_pages(&mut self, order: usize) -> Option<*mut u8> {
+        self.get_free_pages(order).map(|addr| addr as *mut u8)
+    }
+
+    /// # Safety
+    /// This method assumes that `address` is in range of this allocator
+    pub unsafe fn deallocate_pages(&mut self, address: *mut u8, order: usize) {
+        self.free_lists[order].push(address as usize);
+        self.merge_buddies(order, address as usize)
+    }
+
+    /// # Safety
+    /// This method assumes that the given address range,
+    /// a) starts at an address aligned to page boundaries,
+    /// b) consists of valid free pages not already added,
+    /// FIXME: c) has a multiple of `1 << MAX_ORDER` pages
+    pub unsafe fn add_range(&mut self, start_addr: usize, page_count: usize) {
+        for i in 0..page_count / 1024 {
+            self.free_lists[MAX_ORDER].push(start_addr + i * 1024 * PAGE_SIZE);
+        }
+    }
+
+    fn get_free_pages(&mut self, order: usize) -> Option<usize> {
+        // We can't get such a page!
+        if order > MAX_ORDER {
+            return None;
+        }
+
+        if self.free_lists[order].len() > 0 {
+            return self.free_lists[order].pop();
+        }
+
+        self.get_free_pages(order + 1).map(|addr| {
+            self.free_lists[order].push(addr ^ (PAGE_SIZE << order));
+            addr
+        })
+    }
+
+    fn merge_buddies(&mut self, order: usize, address: usize) {
+        // if we can't have any higher order blocks, we can't merge
+        if order > MAX_ORDER - 1 {
+            return;
+        }
+
+        let buddy_address = address ^ (PAGE_SIZE << order);
+        log::debug!("merge buddy: 0x{buddy_address:x}");
+        if let Some(buddy_index) = self.free_lists[order]
+            .iter()
+            .position(|blk| *blk == buddy_address)
+        {
+            self.free_lists[order].pop();
+            self.free_lists[order].remove(buddy_index);
+            let new_address = address.min(buddy_address);
+            log::debug!("Merging 0x{address:x} @ {order} with 0x{buddy_address:x} at 0x{new_address:x}");
+            self.free_lists[order + 1].push(new_address);
+            self.merge_buddies(order + 1, new_address)
+        }
+    }
+}
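
A note on the buddy arithmetic above (a reviewer's sketch, not part of the patch): get_free_pages() and merge_buddies() both locate a block's buddy with `address ^ (PAGE_SIZE << order)`, i.e. by flipping a single address bit. The snippet below is a host-runnable check of that math; the buddy() helper and the example addresses are illustrative only:

    const PAGE_SIZE: usize = 4096;

    /// A block's buddy at `order` differs from it in exactly bit 12 + order,
    /// which is the bit `address ^ (PAGE_SIZE << order)` flips (4096 == 1 << 12).
    fn buddy(address: usize, order: usize) -> usize {
        address ^ (PAGE_SIZE << order)
    }

    fn main() {
        // Splitting an order-1 block at 0x100_0000 yields order-0 halves at
        // 0x100_0000 and 0x100_1000; each names the other as its buddy, which
        // is how get_free_pages() pushes the upper half and returns the lower.
        assert_eq!(buddy(0x100_0000, 0), 0x100_1000);
        assert_eq!(buddy(0x100_1000, 0), 0x100_0000);

        // Once merge_buddies() coalesces the pair into an order-1 block at the
        // lower address, that block's own buddy is found one bit higher up.
        assert_eq!(buddy(0x100_0000, 1), 0x100_2000);

        // The XOR pairing is only meaningful for blocks aligned to their own
        // size: an "order-1 block" at 0x100_1000 would pair with 0x100_3000,
        // and merging those would yield an order-2 block that is not order-2
        // aligned. Hence the alignment assumptions documented on add_range().
        assert_eq!(buddy(0x100_1000, 1), 0x100_3000);
    }

The same view explains why merge_buddies() bails out above MAX_ORDER - 1: two order-10 blocks would coalesce into an order-11 block, and there is no order-11 free list to hold it.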