akern-gkgoat-fork/kernel/src/arch/x86_64/memory.rs

use crate::memory::{MemoryManager, MAX_ORDER};
use core::sync::atomic::AtomicU64;
use limine::{MemmapEntry, MemoryMapEntryType, NonNullPtr};
use spin::{Mutex, Once};
use x86_64::{structures::paging::OffsetPageTable, VirtAddr};
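
/// Size of a single page frame in bytes (4 KiB).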
pub const PAGE_SIZE: usize = 4096;
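/// Global physical memory manager, populated from the bootloader's memory map in [`initialize`].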
pub static MEMORY_MANAGER: Once<Mutex<MemoryManager>> = Once::new();
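/// Virtual offset of the higher-half direct map (HHDM); `phys + HHDM_OFFSET` yields a usable virtual address.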
pub static HHDM_OFFSET: AtomicU64 = AtomicU64::new(0);
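/// The active level 4 page table, wrapped in an `OffsetPageTable` that resolves frame addresses through the HHDM.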
static PAGE_TABLE: Once<Mutex<OffsetPageTable>> = Once::new();

/// Initialise the kernel page-table wrapper and record the HHDM offset.
///
/// # Safety
///
/// `phys_base` must be the virtual address at which the bootloader maps all of
/// physical memory (the HHDM offset), so that the CR3 root can be dereferenced
/// through it.
pub unsafe fn init_pt(phys_base: VirtAddr) {
    log::info!("Retrieving page table");
    HHDM_OFFSET.store(phys_base.as_u64(), core::sync::atomic::Ordering::Relaxed);
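    // CR3 holds the physical address of the active level 4 page table; adding
    // the HHDM offset turns it into a virtual pointer that can be dereferenced.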
    PAGE_TABLE.call_once(|| {
        Mutex::new(OffsetPageTable::new(
            &mut *((phys_base
                + x86_64::registers::control::Cr3::read()
                    .0
                    .start_address()
                    .as_u64())
                .as_mut_ptr()),
            phys_base,
        ))
    });
}

/// Initialise the memory manager from the bootloader-provided memory map.
///
/// Only `Usable` entries are added; each region's start is advanced to the
/// next `PAGE_SIZE << MAX_ORDER` boundary before being handed to the manager.
pub fn initialize(mmap: &'static [NonNullPtr<MemmapEntry>]) {
    let mut memory_manager = MemoryManager::new();
    for entry in mmap {
        if entry.typ != MemoryMapEntryType::Usable {
            continue;
        }
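        // Advance the region's start to the next PAGE_SIZE << MAX_ORDER boundary;
        // regions too small to reach that boundary are skipped entirely.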
        let alignment = PAGE_SIZE << MAX_ORDER;
        let start_addr_unaligned = entry.base as usize;
        let diff = alignment - start_addr_unaligned % alignment;
        if diff > entry.len as usize {
            continue;
        }
        let start_addr = start_addr_unaligned + diff;
        let page_count = (entry.len as usize - diff) / PAGE_SIZE;
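        // Hand the aligned, page-granular range to the memory manager; the range
        // comes from a `Usable` Limine entry, i.e. memory free for kernel use.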
        unsafe {
            memory_manager.add_range(start_addr.into(), page_count);
        }
    }
    MEMORY_MANAGER.call_once(|| Mutex::new(memory_manager));
}