VirtualAddress/PhysicalAddress types

Asya 2023-02-13 23:22:42 +03:00
parent d9fb718b86
commit 04c6ee11c3
8 changed files with 195 additions and 77 deletions

Cargo.lock (generated)

@@ -179,6 +179,17 @@ dependencies = [
  "syn",
 ]
 
+[[package]]
+name = "derive_more"
+version = "0.99.17"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
 [[package]]
 name = "env_logger"
 version = "0.10.0"
@@ -326,6 +337,7 @@ name = "kernel"
 version = "0.2.0"
 dependencies = [
  "crossbeam-queue",
+ "derive_more",
  "limine",
  "log",
  "sbi",


@@ -15,6 +15,22 @@ version = "0.3"
 default-features = false
 features = ["alloc"]
 
+[dependencies.derive_more]
+version = "0.99"
+default-features = false
+features = [
+  "add",
+  "add_assign",
+  "constructor",
+  "display",
+  "from",
+  "into",
+  "mul",
+  "mul_assign",
+  "not",
+  "sum",
+]
+
 [target.'cfg(target_arch = "x86_64")'.dependencies]
 limine = { version = "0.1", git = "https://github.com/limine-bootloader/limine-rs" }
 x86_64 = "0.14"

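A note on the feature list above: derive_more 0.99 groups its derives, so the `add` feature also supplies the Sub and bitwise-operator derives and `mul` supplies Div, Rem, Shl and Shr, which is why this short list is enough for the operator-heavy address types introduced in the memory manager below. A minimal sketch of the same newtype pattern (the `Span` type is hypothetical, not part of this commit):

    use derive_more::{Add, Constructor, Display, From};

    // Hypothetical example of the newtype pattern used for the address types.
    #[derive(Add, Clone, Constructor, Copy, Display, From)]
    #[display(fmt = "0x{:x}", _0)]
    struct Span(usize);

    fn demo() {
        let a = Span::new(0x1000);       // Constructor
        let b = Span::from(0x2000usize); // From
        println!("{}", a + b);           // Add + Display: prints "0x3000"
    }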

@@ -58,10 +58,10 @@ unsafe impl GlobalAlloc for Allocator {
 #[global_allocator]
 static ALLOCATOR: Allocator = Allocator(Mutex::new(None));
 
+// FIXME: umm is `memory` VirtualAddress or PhysicalAddress? both?
 pub fn init(memory: *mut u8, memory_size: usize) {
     log::info!("Initialising kernel heap allocator");
-    *ALLOCATOR.0.lock() =
-        Some(unsafe { Heap::new(memory, memory_size) });
+    *ALLOCATOR.0.lock() = Some(unsafe { Heap::new(memory, memory_size) });
 }
 
 // FIXME: these are arch-specific


@@ -1,10 +1,11 @@
+mod memory;
+
 use core::{arch::{asm, global_asm}, fmt::Write};
-use alloc::vec;
 use sbi::system_reset::{ResetType, ResetReason, system_reset};
 use spin::{Mutex, Once};
 use uart_16550::MmioSerialPort;
-use crate::{allocator, memory::MemoryManager};
+use crate::{allocator, memory::PhysicalAddress};
 
 global_asm!(include_str!("entry.s"));
 global_asm!(include_str!("memory_regions.s"));
 
@@ -12,72 +13,41 @@ global_asm!(include_str!("memory_regions.s"));
 pub const PAGE_SIZE: usize = 4096;
 
 extern {
-    static TEXT_START: usize;
-    static TEXT_END: usize;
+    static TEXT_START: PhysicalAddress;
+    static TEXT_END: PhysicalAddress;
 
-    static RODATA_START: usize;
-    static RODATA_END: usize;
+    static RODATA_START: PhysicalAddress;
+    static RODATA_END: PhysicalAddress;
 
-    static DATA_START: usize;
-    static DATA_END: usize;
+    static DATA_START: PhysicalAddress;
+    static DATA_END: PhysicalAddress;
 
-    static SDATA_START: usize;
-    static SDATA_END: usize;
+    static SDATA_START: PhysicalAddress;
+    static SDATA_END: PhysicalAddress;
 
-    static BSS_START: usize;
-    static BSS_END: usize;
+    static BSS_START: PhysicalAddress;
+    static BSS_END: PhysicalAddress;
 
-    static INITIAL_KERNEL_HEAP_START: *mut u8;
+    static INITIAL_KERNEL_HEAP_START: PhysicalAddress;
     static INITIAL_KERNEL_HEAP_SIZE: usize;
 
-    static USABLE_MEMORY_START: usize;
+    static USABLE_MEMORY_START: PhysicalAddress;
     static USABLE_MEMORY_SIZE: usize;
 }
 
 static SERIAL_CONSOLE: Once<Mutex<MmioSerialPort>> = Once::new();
 
-mod memory {
-    use spin::{Mutex, Once};
-    use crate::memory::MemoryManager;
-
-    pub static MEMORY_MANAGER: Once<Mutex<MemoryManager>> = Once::new();
-}
-
 #[no_mangle]
-extern fn _kernel_start() -> ! {
+unsafe extern fn _kernel_start() -> ! {
     SERIAL_CONSOLE.call_once(|| Mutex::new(unsafe { MmioSerialPort::new(0x1000_0000) }));
     crate::logger::init().expect("failed to set logger");
     log::info!("Initialising AKern {}", crate::VERSION);
 
-    unsafe {
-        allocator::init(INITIAL_KERNEL_HEAP_START, INITIAL_KERNEL_HEAP_SIZE);
-    }
-
-    let mut memory_manager = MemoryManager::new();
-    unsafe {
-        log::debug!("USABLE_MEMORY_START = 0x{USABLE_MEMORY_START:x}");
-        memory_manager.add_range(USABLE_MEMORY_START, USABLE_MEMORY_SIZE / PAGE_SIZE);
-    }
-    memory::MEMORY_MANAGER.call_once(|| Mutex::new(memory_manager));
-
-    unsafe {
-        let mut mm = memory::MEMORY_MANAGER.get().unwrap().lock();
-        let alloc_0 = mm.allocate_pages(0).unwrap();
-        log::debug!("Addr: {:p}", alloc_0);
-        let alloc_1 = mm.allocate_pages(0).unwrap();
-        log::debug!("Addr: {:p}", alloc_1);
-        mm.deallocate_pages(alloc_0, 0);
-        let alloc_2 = mm.allocate_pages(1).unwrap();
-        log::debug!("Addr: {:p}", alloc_2);
-        mm.deallocate_pages(alloc_1, 0);
-        mm.deallocate_pages(alloc_2, 1);
-        let alloc_3 = mm.allocate_pages(1).unwrap();
-        log::debug!("Addr: {:p}", alloc_3);
-        mm.deallocate_pages(alloc_3, 1);
-    }
-
-    system_reset(ResetType::Shutdown, ResetReason::NoReason).unwrap();
-    loop {}
+    allocator::init(INITIAL_KERNEL_HEAP_START.as_mut_ptr::<u8>(), INITIAL_KERNEL_HEAP_SIZE);
+    memory::init(USABLE_MEMORY_START.into(), USABLE_MEMORY_SIZE / PAGE_SIZE);
+
+    #[allow(unreachable_code)]
+    match system_reset(ResetType::Shutdown, ResetReason::NoReason).unwrap() {}
 }
 
 /// Spin loop

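The new shutdown path uses the empty-match idiom: since a successful system_reset never returns, its Ok type is uninhabited, so `match ... {}` satisfies the function's `-> !` return without the old `loop {}`. The idiom in isolation (a sketch assuming an Infallible success type, which the diff compiling implies):

    use core::convert::Infallible;

    fn reset() -> Result<Infallible, ()> {
        Err(()) // stand-in for a failed SBI call
    }

    fn halt() -> ! {
        // Ok is uninhabited, so the empty match typechecks as `!`;
        // on Err, unwrap panics instead of returning.
        match reset().unwrap() {}
    }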

@@ -46,7 +46,7 @@ pub fn initialize(mmap: &'static [NonNullPtr<LimineMemmapEntry>]) {
         let page_count = (entry.len as usize - diff) / PAGE_SIZE;
 
         unsafe {
-            memory_manager.add_range(start_addr, page_count);
+            memory_manager.add_range(start_addr.into(), page_count);
         }
     }


@@ -53,16 +53,16 @@ unsafe extern "C" fn _kernel_start() -> ! {
     {
         let mut mm = MEMORY_MANAGER.get().unwrap().lock();
         let alloc_0 = mm.allocate_pages(0).unwrap();
-        log::debug!("Addr: {:p}", alloc_0);
+        log::debug!("Addr: {alloc_0}");
         let alloc_1 = mm.allocate_pages(0).unwrap();
-        log::debug!("Addr: {:p}", alloc_1);
+        log::debug!("Addr: {alloc_1}");
         mm.deallocate_pages(alloc_0, 0);
         let alloc_2 = mm.allocate_pages(1).unwrap();
-        log::debug!("Addr: {:p}", alloc_2);
+        log::debug!("Addr: {alloc_2}");
         mm.deallocate_pages(alloc_1, 0);
         mm.deallocate_pages(alloc_2, 1);
         let alloc_3 = mm.allocate_pages(1).unwrap();
-        log::debug!("Addr: {:p}", alloc_3);
+        log::debug!("Addr: {alloc_3}");
         mm.deallocate_pages(alloc_3, 1);
     }


@@ -1,12 +1,125 @@
 //! The Memory Manager
 
 use alloc::collections::VecDeque;
+use derive_more::*;
 
 pub use crate::arch::PAGE_SIZE;
 
 pub const MAX_ORDER: usize = 10;
 
+#[repr(transparent)]
+#[derive(
+    Add,
+    AddAssign,
+    Binary,
+    BitAnd,
+    BitAndAssign,
+    BitOr,
+    BitOrAssign,
+    BitXor,
+    BitXorAssign,
+    Clone,
+    Constructor,
+    Copy,
+    Display,
+    Div,
+    DivAssign,
+    Eq,
+    From,
+    LowerHex,
+    Mul,
+    MulAssign,
+    Not,
+    Octal,
+    Ord,
+    PartialEq,
+    PartialOrd,
+    Rem,
+    RemAssign,
+    Shl,
+    ShlAssign,
+    Shr,
+    ShrAssign,
+    Sub,
+    SubAssign,
+    Sum,
+    UpperHex,
+)]
+#[display(fmt = "0x{:x}", _0)]
+#[from(forward)]
+pub struct VirtualAddress(usize);
+
+impl VirtualAddress {
+    pub fn as_addr(&self) -> usize {
+        self.0
+    }
+
+    pub fn as_ptr<T>(&self) -> *const T {
+        self.0 as _
+    }
+
+    pub fn as_mut_ptr<T>(&mut self) -> *mut T {
+        self.0 as _
+    }
+}
+
+#[repr(transparent)]
+#[derive(
+    Add,
+    AddAssign,
+    Binary,
+    BitAnd,
+    BitAndAssign,
+    BitOr,
+    BitOrAssign,
+    BitXor,
+    BitXorAssign,
+    Clone,
+    Constructor,
+    Copy,
+    Display,
+    Div,
+    DivAssign,
+    Eq,
+    From,
+    LowerHex,
+    Mul,
+    MulAssign,
+    Not,
+    Octal,
+    Ord,
+    PartialEq,
+    PartialOrd,
+    Rem,
+    RemAssign,
+    Shl,
+    ShlAssign,
+    Shr,
+    ShrAssign,
+    Sub,
+    SubAssign,
+    Sum,
+    UpperHex,
+)]
+#[display(fmt = "0x{:x}", _0)]
+#[from(forward)]
+pub struct PhysicalAddress(usize);
+
+impl PhysicalAddress {
+    pub fn as_addr(&self) -> usize {
+        self.0
+    }
+
+    pub fn as_ptr<T>(&self) -> *const T {
+        self.0 as _
+    }
+
+    pub fn as_mut_ptr<T>(&self) -> *mut T {
+        self.0 as _
+    }
+}
+
 pub struct MemoryManager {
-    free_lists: [VecDeque<usize>; MAX_ORDER + 1],
+    free_lists: [VecDeque<PhysicalAddress>; MAX_ORDER + 1],
 }
@@ -16,23 +129,26 @@ impl MemoryManager {
         }
     }
 
-    pub fn allocate_pages(&mut self, order: usize) -> Option<*mut u8> {
-        self.get_free_pages(order).map(|addr| addr as *mut u8)
+    // FIXME: this method should take a length and turn that into an order
+    pub fn allocate_pages(&mut self, order: usize) -> Option<PhysicalAddress> {
+        self.get_free_pages(order)
     }
 
-    pub fn zallocate_pages(&mut self, order: usize) -> Option<*mut u8> {
-        let ptr = self.allocate_pages(order)?;
+    // FIXME: this method should take a length and turn that into an order
+    pub fn zallocate_pages(&mut self, order: usize) -> Option<PhysicalAddress> {
+        let alloc = self.allocate_pages(order)?;
         unsafe {
-            ptr.write_bytes(0, PAGE_SIZE << order);
+            alloc.as_mut_ptr::<u8>().write_bytes(0, PAGE_SIZE << order);
         }
-        Some(ptr)
+        Some(alloc)
     }
 
     /// # Safety
     /// This method assumes that `address` is in range of this allocator
-    pub unsafe fn deallocate_pages(&mut self, address: *mut u8, order: usize) {
-        self.free_lists[order].push_front(address as usize);
-        self.merge_buddies(order, address as usize)
+    // FIXME: this method should take a length and turn that into an order
+    pub unsafe fn deallocate_pages(&mut self, address: PhysicalAddress, order: usize) {
+        self.free_lists[order].push_front(address);
+        self.merge_buddies(order, address)
     }
 
     /// # Safety
@@ -40,13 +156,13 @@ impl MemoryManager {
     /// a) starts and ends at an address aligned to page boundaries,
     /// b) are valid free pages not already added,
     /// FIXME: c) starts and ends at an address aligned to `PAGE_SIZE << MAX_ORDER`
-    pub unsafe fn add_range(&mut self, start_addr: usize, page_count: usize) {
+    pub unsafe fn add_range(&mut self, start_addr: PhysicalAddress, page_count: usize) {
         for i in 0..page_count / 1024 {
-            self.free_lists[MAX_ORDER].push_back(start_addr + i * 1024 * PAGE_SIZE);
+            self.free_lists[MAX_ORDER].push_back(start_addr + (i * 1024 * PAGE_SIZE).into());
         }
     }
 
-    fn get_free_pages(&mut self, order: usize) -> Option<usize> {
+    fn get_free_pages(&mut self, order: usize) -> Option<PhysicalAddress> {
         // We can't get such a page!
         if order > MAX_ORDER {
             return None;
@@ -57,18 +173,18 @@ impl MemoryManager {
         }
 
         self.get_free_pages(order + 1).map(|addr| {
-            self.free_lists[order].push_front(addr ^ (PAGE_SIZE << order));
+            self.free_lists[order].push_front(addr ^ (PAGE_SIZE << order).into());
             addr
         })
     }
 
-    fn merge_buddies(&mut self, order: usize, address: usize) {
+    fn merge_buddies(&mut self, order: usize, address: PhysicalAddress) {
         // if we can't have any higher order blocks, we can't merge
         if order > MAX_ORDER - 1 {
             return;
         }
 
-        let buddy_address = address ^ (PAGE_SIZE << order);
+        let buddy_address = address ^ (PAGE_SIZE << order).into();
         log::debug!("merge buddy: 0x{buddy_address:x}");
 
         if let Some(buddy_index) = self.free_lists[order]
             .iter()
@@ -77,7 +193,9 @@ impl MemoryManager {
             self.free_lists[order].pop_front();
             self.free_lists[order].remove(buddy_index);
             let new_address = address.min(buddy_address);
-            log::debug!("Merging 0x{address:x} @ {order} with 0x{buddy_address:x} at 0x{new_address:x}");
+            log::debug!(
+                "Merging 0x{address:x} @ {order} with 0x{buddy_address:x} at 0x{new_address:x}"
+            );
             self.free_lists[order + 1].push_front(new_address);
             self.merge_buddies(order + 1, new_address)
         }
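Taken together, the derives give both address types value semantics, hex formatting, and the conversions the arch code relies on. A rough usage sketch (illustrative only; the base address is made up):

    let base = PhysicalAddress::from(0x8020_0000usize); // #[from(forward)]
    log::debug!("Addr: {base}");                        // Display: "0x80200000"
    let next = base + 0x1000usize.into();               // arithmetic stays in the newtype
    let ptr = base.as_mut_ptr::<u8>();                  // raw pointer at the boundary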

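The repeated FIXME about taking a length rather than an order could be resolved with a small helper; a sketch against the same PAGE_SIZE and MAX_ORDER constants (order_for_len is hypothetical, not in this commit). For the buddy arithmetic itself: an order-n block spans PAGE_SIZE << n bytes, so XOR-ing an address with that size yields its buddy, e.g. 0x8000_2000 ^ (4096 << 1) == 0x8000_0000.

    /// Hypothetical helper: smallest order whose block covers `len` bytes
    /// (an order-n block is 2^n pages, i.e. PAGE_SIZE << n bytes).
    fn order_for_len(len: usize) -> Option<usize> {
        let pages = (len + PAGE_SIZE - 1) / PAGE_SIZE; // round up to whole pages
        let order = pages.next_power_of_two().trailing_zeros() as usize;
        (order <= MAX_ORDER).then_some(order)
    }

    // order_for_len(1)       == Some(0)  -> one 4 KiB page
    // order_for_len(4097)    == Some(1)  -> one 8 KiB block
    // order_for_len(8 << 20) == None     -> larger than any MAX_ORDER block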

@@ -21,7 +21,7 @@ fn main() -> Result<(), Error> {
             }
             build(release, target).change_context(Error::Build)
-        },
+        }
         Some("run" | "r") => {
             let mut release = false;
             let mut target = Target::X86_64;
@@ -113,7 +113,9 @@ fn build(release: bool, target: Target) -> Result<(), Error> {
     }
 
     match target {
-        Target::Riscv64Virt => { com.args(["--target", "targets/riscv64-virt-ableos.json"]); },
+        Target::Riscv64Virt => {
+            com.args(["--target", "targets/riscv64-virt-ableos.json"]);
+        }
         _ => {}
     }
@@ -145,7 +147,7 @@ fn build(release: bool, target: Target) -> Result<(), Error> {
 fn run(release: bool, target: Target) -> Result<(), Error> {
     let mut com = match target {
         Target::X86_64 => Command::new("qemu-system-x86_64"),
-        Target::Riscv64Virt => Command::new("qemu-system-riscv64")
+        Target::Riscv64Virt => Command::new("qemu-system-riscv64"),
     };
 
     if target == Target::X86_64 {