ableos/kernel/src/arch/x86_64/memory.rs


use {
    crate::memory::{MemoryManager, MAX_ORDER},
    core::sync::atomic::AtomicU64,
    limine::{MemmapEntry, MemoryMapEntryType, NonNullPtr},
    spin::{Mutex, Once},
    x86_64::{structures::paging::OffsetPageTable, VirtAddr},
};

/// Size of one page frame in bytes.
pub const PAGE_SIZE: usize = 4096;
/// Global physical memory manager, set up by [`initialize`].
pub static MEMORY_MANAGER: Once<Mutex<MemoryManager>> = Once::new();
/// Higher-half direct map (HHDM) offset, recorded by [`init_pt`].
pub static HHDM_OFFSET: AtomicU64 = AtomicU64::new(0);
/// Active kernel page table, behind a lock for shared access.
static PAGE_TABLE: Once<Mutex<OffsetPageTable>> = Once::new();
/// Initialise the page table from the one the bootloader loaded.
///
/// # Safety
/// `phys_base` must be the higher-half direct map (HHDM) offset, with all of
/// physical memory mapped at that offset.
pub unsafe fn init_pt(phys_base: VirtAddr) {
    log::debug!("Retrieving page table");

    HHDM_OFFSET.store(phys_base.as_u64(), core::sync::atomic::Ordering::Relaxed);
    PAGE_TABLE.call_once(|| {
        // CR3 holds the physical address of the level 4 page table; adding
        // the HHDM offset turns it into a dereferenceable virtual address.
        Mutex::new(OffsetPageTable::new(
            &mut *((phys_base
                + x86_64::registers::control::Cr3::read()
                    .0
                    .start_address()
                    .as_u64())
            .as_mut_ptr()),
            phys_base,
        ))
    });
}
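
// A minimal sketch, not part of the original file: translating a physical
// address through the HHDM offset stored above. The helper name
// `phys_to_virt` is hypothetical.
#[allow(dead_code)]
fn phys_to_virt(phys: u64) -> VirtAddr {
    VirtAddr::new(phys + HHDM_OFFSET.load(core::sync::atomic::Ordering::Relaxed))
}
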
/// Initialise the memory manager from the bootloader's memory map.
pub fn initialize(mmap: &'static [NonNullPtr<MemmapEntry>]) {
    let mut memory_manager = MemoryManager::new();

    for entry in mmap {
        // Only hand usable RAM to the allocator.
        if entry.typ != MemoryMapEntryType::Usable {
            continue;
        }

        // Align the region's start up to the allocator's largest block size
        // (PAGE_SIZE << MAX_ORDER); skip regions too small to reach it.
        let alignment = PAGE_SIZE << MAX_ORDER;
        let start_addr_unaligned = entry.base as usize;
        let diff = alignment - start_addr_unaligned % alignment;
        if diff > entry.len as usize {
            continue;
        }

        let start_addr = start_addr_unaligned + diff;
        let page_count = (entry.len as usize - diff) / PAGE_SIZE;
        unsafe {
            memory_manager.add_range(start_addr.into(), page_count);
        }
    }

    MEMORY_MANAGER.call_once(|| Mutex::new(memory_manager));
}
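
// A hedged usage sketch, not part of the original file: how a kernel entry
// point might feed these functions from the Limine boot protocol. The request
// statics and the `HhdmRequest`/`MemmapRequest` API are assumptions based on
// the 0.1-era `limine` crate that the imports above suggest.
static HHDM_REQUEST: limine::HhdmRequest = limine::HhdmRequest::new(0);
static MEMMAP_REQUEST: limine::MemmapRequest = limine::MemmapRequest::new(0);

#[allow(dead_code)]
fn kmain() -> ! {
    // The HHDM response carries the offset at which the bootloader mapped
    // all physical memory into the higher half.
    let hhdm = HHDM_REQUEST.get_response().get().expect("no HHDM response");
    unsafe { init_pt(VirtAddr::new(hhdm.offset)) };

    // Hand the bootloader's memory map to the frame allocator.
    let memmap = MEMMAP_REQUEST.get_response().get().expect("no memory map");
    initialize(memmap.memmap());

    loop {
        x86_64::instructions::hlt();
    }
}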