WIP: riscv64-virt port + memory manager #2

Closed
asya wants to merge 5 commits from asya/ableos:memory-manager into usermode
16 changed files with 933 additions and 99 deletions


@@ -3,4 +3,5 @@
"stddef.h": "c"
},
"rust-analyzer.checkOnSave.allTargets": false,
"rust-analyzer.cargo.target": "kernel/targets/riscv64-virt-ableos.json",
}

Cargo.lock (generated)

@@ -179,6 +179,17 @@ dependencies = [
"syn",
]
[[package]]
name = "derive_more"
version = "0.99.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "env_logger"
version = "0.10.0"
@ -326,8 +337,10 @@ name = "kernel"
version = "0.2.0"
dependencies = [
"crossbeam-queue",
"derive_more",
"limine",
"log",
"sbi",
"slab",
"spin",
"uart_16550",
@ -510,6 +523,12 @@ version = "1.0.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5583e89e108996506031660fe09baa5011b9dd0341b89029313006d1fb508d70"
[[package]]
name = "sbi"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "29cb0870400aca7e4487e8ec1e93f9d4288da763cb1da2cedc5102e62b6522ad"
[[package]]
name = "scopeguard"
version = "1.1.0"


@@ -8,14 +8,33 @@ slab = { version = "0.4", default-features = false }
spin = "0.9"
versioning = { git = "https://git.ablecorp.us/able/aos_userland" }
log = "0.4"
uart_16550 = "0.2"
[dependencies.crossbeam-queue]
version = "0.3"
default-features = false
features = ["alloc"]
[dependencies.derive_more]
version = "0.99"
default-features = false
features = [
"add",
"add_assign",
"constructor",
"display",
"from",
"into",
"mul",
"mul_assign",
"not",
"sum",
]
[target.'cfg(target_arch = "x86_64")'.dependencies]
limine = { version = "0.1", git = "https://github.com/limine-bootloader/limine-rs" }
uart_16550 = "0.2"
x86_64 = "0.14"
x2apic = "0.4"
[target.'cfg(target_arch = "riscv64")'.dependencies]
sbi = "0.2.0"


@@ -0,0 +1,66 @@
OUTPUT_ARCH(riscv)
ENTRY(_start)
START_ADDRESS = 0x80200000;
SECTIONS {
. = START_ADDRESS;
.text : {
PROVIDE(_text_start = .);
*(.text.entry)
. = ALIGN(4K);
*(.text .text.*)
PROVIDE(_text_end = .);
}
. = ALIGN(4K);
.rodata : {
PROVIDE(_rodata_start = .);
*(.rodata .rodata.*)
PROVIDE(_rodata_end = .);
}
. = ALIGN(4K);
.data : {
PROVIDE(_data_start = .);
*(.data .data.*)
PROVIDE(_data_end = .);
}
. = ALIGN(4K);
.sdata : {
PROVIDE(_sdata_start = .);
*(.sdata)
*(.sdata.*)
*(.srodata.*)
*(.gnu.linkonce.s.*)
PROVIDE(_sdata_end = .);
}
. = ALIGN(4K);
.bss : {
PROVIDE(_bss_start = .);
*(.sbss*)
*(.bss.stack)
*(.bss .bss.*)
PROVIDE(_initial_kernel_heap_start = .);
PROVIDE(_initial_kernel_heap_size = 1024 * 1024);
. += _initial_kernel_heap_size;
PROVIDE(_bss_end = .);
}
/* FIXME: Currently this has to be aligned to PAGE_SIZE << MAX_ORDER */
PROVIDE(_usable_memory_start = ALIGN(4M));
PROVIDE(_usable_memory_size = 0x88000000 - _usable_memory_start);
/DISCARD/ : {
*(.comment)
*(.eh_frame)
}
}
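
As an aside: the PROVIDEd symbols above are consumed by taking their addresses, never by reading through them. A minimal sketch of doing that directly from Rust, assuming the symbol names from this script (the PR instead snapshots the values into .quad constants via memory_regions.s, shown further down):

    // Sketch only: a linker symbol's *address* is its value.
    extern "C" {
        static _usable_memory_start: u8;
        static _usable_memory_size: u8;
    }

    /// Returns (start, size) of the usable-memory window from the linker script.
    fn usable_memory() -> (usize, usize) {
        // Take the address of each symbol; dereferencing it would be UB.
        let start = unsafe { &_usable_memory_start as *const u8 as usize };
        let size = unsafe { &_usable_memory_size as *const u8 as usize };
        (start, size)
    }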


@@ -35,14 +35,6 @@ use core::{
use spin::Mutex;
extern "C" {
fn _initial_kernel_heap_start();
fn _initial_kernel_heap_size();
}
const INITIAL_KERNEL_HEAP_START: *mut u8 = _initial_kernel_heap_start as _;
const INITIAL_KERNEL_HEAP_SIZE: *const () = _initial_kernel_heap_size as _;
struct Allocator(Mutex<Option<Heap>>);
unsafe impl GlobalAlloc for Allocator {
@@ -66,10 +58,10 @@ unsafe impl GlobalAlloc for Allocator {
#[global_allocator]
static ALLOCATOR: Allocator = Allocator(Mutex::new(None));
-pub fn init() {
+// FIXME: umm is `memory` VirtualAddress or PhysicalAddress? both?
+pub fn init(memory: *mut u8, memory_size: usize) {
log::info!("Initialising kernel heap allocator");
-*ALLOCATOR.0.lock() =
-Some(unsafe { Heap::new(INITIAL_KERNEL_HEAP_START, INITIAL_KERNEL_HEAP_SIZE as _) });
+*ALLOCATOR.0.lock() = Some(unsafe { Heap::new(memory, memory_size) });
}
// FIXME: these are arch-specific
@@ -123,7 +115,6 @@ impl Heap {
let size = size + mem::size_of::<Header>();
let chunks_needed = (size + CHUNK_SIZE - 1) / CHUNK_SIZE;
let chunk_alignment = (alignment + CHUNK_SIZE - 1) / CHUNK_SIZE;
log::debug!("size: {size} chunks: {chunks_needed} align: {chunk_alignment}");
if chunks_needed + chunk_alignment > self.free_chunks() {
return None;
@@ -137,11 +128,6 @@
// Align the starting address and verify that we haven't gone outside the calculated free area
let addr =
addr_unaligned + alignment - (addr_unaligned + mem::size_of::<Header>()) % alignment;
-log::debug!(
-"Addr unaligned: 0x{addr_unaligned:x} (offset: 0x{:x})",
-addr_unaligned - chunks_addr
-);
-log::trace!("Addr: 0x{addr:x} (offset: 0x{:x})", addr - chunks_addr);
let aligned_first_chunk = (addr - chunks_addr) / CHUNK_SIZE;
assert!(first_chunk <= aligned_first_chunk);
assert!(
@@ -158,7 +144,6 @@
self.allocated_chunks += chunks_needed;
let ptr: *mut u8 = unsafe { mem::transmute(header.add(1)) };
log::trace!("{ptr:p}");
// FIXME: zero or scrub memory?
assert!(ptr.is_aligned_to(alignment));
NonNull::new(ptr)
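
For intuition, the rounding above works out like this (a standalone sketch; CHUNK_SIZE and the Header type are defined outside this hunk, so the values here are hypothetical stand-ins):

    const CHUNK_SIZE: usize = 16; // hypothetical stand-in
    const HEADER_SIZE: usize = 8; // hypothetical stand-in for mem::size_of::<Header>()

    // Mirrors Heap::alloc's sizing: a 40-byte request with 16-byte alignment needs
    // (40 + 8 + 15) / 16 = 3 chunks plus (16 + 15) / 16 = 1 chunk of alignment slack,
    // so at least 4 free chunks are required before the search even starts.
    fn chunks_for(size: usize, alignment: usize) -> (usize, usize) {
        let chunks_needed = (size + HEADER_SIZE + CHUNK_SIZE - 1) / CHUNK_SIZE;
        let chunk_alignment = (alignment + CHUNK_SIZE - 1) / CHUNK_SIZE;
        (chunks_needed, chunk_alignment)
    }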


@@ -0,0 +1,31 @@
.section .text.entry
.global _start
_start:
# load stack_top to sp register
la sp, stack_top
# The BSS section is expected to be zeroed
la a0, _bss_start
la a1, _bss_end
bgeu a0, a1, 2f
1:
sd zero, (a0)
addi a0, a0, 8
bltu a0, a1, 1b
2:
call _kernel_start
.section .bss.stack
.global stack
stack:
# alloc stack memory
.space 4096 * 16
.global stack_top
stack_top:
.section .bss.heap
.global _initial_kernel_heap_start
_initial_kernel_heap_start:
# alloc initial kmalloc memory
.space 4096 * 64
.global _initial_kernel_heap_end
_initial_kernel_heap_end:
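
For readers more comfortable in Rust, the zeroing loop above is equivalent to the following sketch (illustration only: the real clearing must stay in assembly, since it runs before any Rust code that may rely on zeroed statics):

    // a0 = _bss_start, a1 = _bss_end; sd stores 8 bytes per iteration.
    unsafe fn zero_bss(mut cur: *mut u64, end: *const u64) {
        while (cur as usize) < (end as usize) {
            cur.write_volatile(0); // sd zero, (a0)
            cur = cur.add(1);      // addi a0, a0, 8
        }
    }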


@@ -0,0 +1,268 @@
use core::num;
use alloc::boxed::Box;
use spin::{Mutex, Once};
use crate::memory::{MemoryManager, PhysicalAddress, VirtualAddress};
use super::PAGE_SIZE;
pub enum PageSize {
Size4KiB,
Size2MiB,
Size1GiB,
// FIXME: SV48 support
// Size512GiB,
// FIXME: SV57 support
// Size256TiB,
}
impl PageSize {
fn level(&self) -> usize {
match self {
PageSize::Size4KiB => 0,
PageSize::Size2MiB => 1,
PageSize::Size1GiB => 2,
// FIXME: SV48 and SV57 support
}
}
}
pub struct PageTable {
entries: [PageEntry; 512]
}
impl PageTable {
/// Walk the page table to convert a virtual address to a physical address.
/// If a page fault would occur, this returns None. Otherwise, it returns the physical address.
pub fn virt_to_phys(&self, vaddr: VirtualAddress) -> Option<PhysicalAddress> {
let vpn = vaddr.vpns();
let mut v = &self.entries[vpn[2]];
for i in (0..=2).rev() {
if v.is_invalid() {
// This is an invalid entry, page fault.
break;
} else if v.is_leaf() {
// In RISC-V, a leaf can be at any level.
// The offset mask masks off the PPN. Each PPN is 9 bits and they start at bit 12,
// so the shift amount is 12 + i * 9.
let off_mask = (1 << (12 + i * 9)) - 1;
let vaddr_pgoff = vaddr.as_addr() & off_mask;
let addr = ((v.entry() << 2) as usize) & !off_mask;
return Some((addr | vaddr_pgoff).into());
}
// Set v to the next entry which is pointed to by this entry.
// However, the address was shifted right by 2 places when stored in the page table
// entry, so we shift it left to get it back into place.
let entry = v.addr().as_ptr::<PageEntry>();
// We index with vpn[i - 1] here; the invalid and leaf checks above return
// before i reaches 0, so the subtraction never underflows.
v = unsafe { entry.add(vpn[i - 1]).as_ref().unwrap() };
}
// If we get here, we've exhausted all valid tables and haven't
// found a leaf.
None
}
/// Maps a virtual address to a physical address
/// flags should contain only the following:
/// Read, Write, Execute, User, and/or Global
/// flags MUST include one or more of the following:
/// Read, Write, Execute
/// The valid bit automatically gets added
pub fn map(&mut self, vaddr: VirtualAddress, paddr: PhysicalAddress, flags: PageEntryFlags, page_size: PageSize) {
assert!(flags as usize & 0xe != 0);
let vpn = vaddr.vpns();
let ppn = paddr.ppns();
let level = page_size.level();
let mut v = &mut self.entries[vpn[2]];
// Now traverse the page table and set the bits properly. We expect the root
// table to be valid, but we must allocate any missing tables below it.
for i in (level..2).rev() {
if v.is_invalid() {
let mut mm = MEMORY_MANAGER.get().unwrap().lock();
// A table is exactly one 4 KiB page, i.e. order 0 (destroy() frees with order 0 too)
let page = mm.zallocate_pages(0).unwrap().as_addr();
v.set_entry((page as usize >> 2) | PageEntryFlags::Valid as usize);
}
let entry = v.addr().as_mut_ptr::<PageEntry>();
v = unsafe { entry.add(vpn[i]).as_mut().unwrap() };
}
// When we get here, we should be at VPN[0] and v should be pointing to our entry.
// The entry structure is Figure 4.18 in the RISC-V Privileged Specification
let entry = (ppn[2] << 28) as usize // PPN[2] = [53:28]
| (ppn[1] << 19) as usize // PPN[1] = [27:19]
| (ppn[0] << 10) as usize // PPN[0] = [18:10]
| flags as usize // Specified bits, such as User, Read, Write, etc.
| PageEntryFlags::Valid as usize;
v.set_entry(entry);
}
/// Identity maps a page of memory
pub fn identity_map(&mut self, addr: PhysicalAddress, flags: PageEntryFlags, page_size: PageSize) {
// log::debug!("identity mapped {addr}");
self.map(addr.as_addr().into(), addr, flags, page_size);
}
/// Identity maps a range of contiguous memory
/// This assumes that start <= end
pub fn identity_map_range(&mut self, start: PhysicalAddress, end: PhysicalAddress, flags: PageEntryFlags) {
log::debug!("start: {start}, end: {end}");
let mut mem_addr = start.as_addr() & !(PAGE_SIZE - 1);
let num_pages = (align_val(end.as_addr(), 12) - mem_addr - 1) / PAGE_SIZE + 1;
for _ in 0..num_pages {
// FIXME: we can merge these page entries if possible into Size2MiB or larger entries
self.identity_map(mem_addr.into(), flags, PageSize::Size4KiB);
mem_addr += 1 << 12;
}
}
/// Unmaps a page of memory at vaddr
pub fn unmap(&mut self, vaddr: VirtualAddress) {
let vpn = vaddr.vpns();
// Now, we're going to traverse the page table and clear the bits
let mut v = &mut self.entries[vpn[2]];
for i in (0..2).rev() {
if v.is_invalid() {
// This is an invalid entry, page is already unmapped
return;
} else if v.is_leaf() {
// This is a leaf, which can be at any level
// In order to make this page unmapped, we need to clear the entry
v.set_entry(0);
return;
}
let entry = v.addr().as_mut_ptr::<PageEntry>();
v = unsafe { entry.add(vpn[i]).as_mut().unwrap() };
}
// The loop never examines the final (VPN[0]) entry it descended to,
// so check it here: clear it if it is a mapped leaf.
if v.is_leaf() {
v.set_entry(0);
}
}
/// Unmaps a range of contiguous memory
/// This assumes that start <= end
pub fn unmap_range(&mut self, start: VirtualAddress, end: VirtualAddress) {
let mut mem_addr = start.as_addr() & !(PAGE_SIZE - 1);
let num_pages = (align_val(end.as_addr(), 12) - mem_addr) / PAGE_SIZE;
for _ in 0..num_pages {
self.unmap(mem_addr.into());
mem_addr += 1 << 12;
}
}
/// Frees all memory associated with a table.
/// NOTE: This does NOT free the table directly. This must be freed manually.
fn destroy(&mut self) {
for entry in &mut self.entries {
entry.destroy()
}
}
}
#[repr(usize)]
#[derive(Clone, Copy, Debug)]
pub enum PageEntryFlags {
None = 0,
Valid = 1,
Read = 1 << 1,
Write = 1 << 2,
Execute = 1 << 3,
User = 1 << 4,
Global = 1 << 5,
Access = 1 << 6,
Dirty = 1 << 7,
// for convenience
ReadWrite = Self::Read as usize | Self::Write as usize,
ReadExecute = Self::Read as usize | Self::Execute as usize,
ReadWriteExecute = Self::Read as usize | Self::Write as usize | Self::Execute as usize,
UserReadWrite = Self::User as usize | Self::ReadWrite as usize,
UserReadExecute = Self::User as usize | Self::ReadExecute as usize,
UserReadWriteExecute = Self::User as usize | Self::ReadWriteExecute as usize,
}
struct PageEntry(usize);
impl PageEntry {
fn is_valid(&self) -> bool {
self.0 & PageEntryFlags::Valid as usize != 0
}
fn is_invalid(&self) -> bool {
!self.is_valid()
}
fn is_leaf(&self) -> bool {
self.0 & PageEntryFlags::ReadWriteExecute as usize != 0
}
fn is_branch(&self) -> bool {
!self.is_leaf()
}
fn entry(&self) -> usize {
self.0
}
fn set_entry(&mut self, entry: usize) {
self.0 = entry;
}
fn clear_flag(&mut self, flag: PageEntryFlags) {
self.0 &= !(flag as usize);
}
fn set_flag(&mut self, flag: PageEntryFlags) {
self.0 |= flag as usize;
}
fn addr(&self) -> PhysicalAddress {
((self.entry() as usize & !0x3ff) << 2).into()
}
fn destroy(&mut self) {
if self.is_valid() && self.is_branch() {
// This is a valid entry so drill down and free
let memaddr = self.addr();
let table = memaddr.as_mut_ptr::<PageTable>();
unsafe {
(*table).destroy();
let mut mm = MEMORY_MANAGER.get().unwrap().lock();
mm.deallocate_pages(memaddr.into(), 0);
}
}
}
}
// FIXME: PageTable should be integrated into MemoryManager *somehow*
pub static MEMORY_MANAGER: Once<Mutex<MemoryManager>> = Once::new();
pub static PAGE_TABLE: Once<Mutex<PhysicalAddress>> = Once::new();
pub fn init(start_addr: PhysicalAddress, page_count: usize) {
let mut memory_manager = MemoryManager::new();
unsafe {
memory_manager.add_range(start_addr, page_count);
PAGE_TABLE.call_once(|| Mutex::new(memory_manager.zallocate_pages(0).unwrap()));
}
MEMORY_MANAGER.call_once(|| Mutex::new(memory_manager));
}
/// Align (set to a multiple of some power of two)
/// This function always rounds up.
fn align_val(val: usize, order: usize) -> usize {
let o = (1 << order) - 1;
(val + o) & !o
}
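
To summarise the Sv39 bit slicing that the walk above and the vpns()/ppns() helpers in kernel/src/memory.rs rely on, a standalone sketch:

    /// Splits a virtual address into its 12-bit page offset and three
    /// 9-bit VPN indices (bits 20:12, 29:21 and 38:30 respectively).
    fn sv39_parts(vaddr: usize) -> (usize, [usize; 3]) {
        let offset = vaddr & 0xfff;
        let vpn = [
            (vaddr >> 12) & 0x1ff, // VPN[0]: index into the level-0 table
            (vaddr >> 21) & 0x1ff, // VPN[1]: index into the level-1 table
            (vaddr >> 30) & 0x1ff, // VPN[2]: index into the root table
        ];
        (offset, vpn)
    }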


@@ -0,0 +1,35 @@
.section .rodata
.global TEXT_START
TEXT_START: .quad _text_start
.global TEXT_END
TEXT_END: .quad _text_end
.global RODATA_START
RODATA_START: .quad _rodata_start
.global RODATA_END
RODATA_END: .quad _rodata_end
.global DATA_START
DATA_START: .quad _data_start
.global DATA_END
DATA_END: .quad _data_end
.global SDATA_START
SDATA_START: .quad _sdata_start
.global SDATA_END
SDATA_END: .quad _sdata_end
.global BSS_START
BSS_START: .quad _bss_start
.global BSS_END
BSS_END: .quad _bss_end
.global INITIAL_KERNEL_HEAP_START
INITIAL_KERNEL_HEAP_START: .quad _initial_kernel_heap_start
.global INITIAL_KERNEL_HEAP_SIZE
INITIAL_KERNEL_HEAP_SIZE: .quad _initial_kernel_heap_size
.global USABLE_MEMORY_START
USABLE_MEMORY_START: .quad _usable_memory_start
.global USABLE_MEMORY_SIZE
USABLE_MEMORY_SIZE: .quad _usable_memory_size


@@ -1 +1,93 @@
//!
mod memory;
use core::{arch::{asm, global_asm}, fmt::Write};
use alloc::boxed::Box;
use sbi::system_reset::{ResetType, ResetReason, system_reset};
use spin::{Mutex, Once};
use uart_16550::MmioSerialPort;
use crate::{allocator, memory::PhysicalAddress, arch::riscv64::memory::{PAGE_TABLE, PageEntryFlags, PageSize, PageTable}};
global_asm!(include_str!("entry.s"));
global_asm!(include_str!("memory_regions.s"));
pub const PAGE_SIZE: usize = 4096;
extern {
static TEXT_START: PhysicalAddress;
static TEXT_END: PhysicalAddress;
static RODATA_START: PhysicalAddress;
static RODATA_END: PhysicalAddress;
static DATA_START: PhysicalAddress;
static DATA_END: PhysicalAddress;
static SDATA_START: PhysicalAddress;
static SDATA_END: PhysicalAddress;
static BSS_START: PhysicalAddress;
static BSS_END: PhysicalAddress;
static INITIAL_KERNEL_HEAP_START: PhysicalAddress;
static INITIAL_KERNEL_HEAP_SIZE: usize;
static USABLE_MEMORY_START: PhysicalAddress;
static USABLE_MEMORY_SIZE: usize;
}
static SERIAL_CONSOLE: Once<Mutex<MmioSerialPort>> = Once::new();
#[no_mangle]
unsafe extern fn _kernel_start() -> ! {
SERIAL_CONSOLE.call_once(|| Mutex::new(unsafe { MmioSerialPort::new(0x1000_0000) }));
crate::logger::init().expect("failed to set logger");
log::info!("Initialising AKern {}", crate::VERSION);
allocator::init(INITIAL_KERNEL_HEAP_START.as_mut_ptr::<u8>(), INITIAL_KERNEL_HEAP_SIZE);
memory::init(USABLE_MEMORY_START.into(), USABLE_MEMORY_SIZE / PAGE_SIZE);
let mut page_table_addr = PAGE_TABLE.get().unwrap().lock();
let mut page_table = page_table_addr.as_mut_ptr::<PageTable>().as_mut().unwrap();
// Map text (executable) section
page_table.identity_map_range(TEXT_START, TEXT_END, PageEntryFlags::ReadExecute);
// Map rodata section
page_table.identity_map_range(RODATA_START, RODATA_END, PageEntryFlags::Read);
// Map data section
page_table.identity_map_range(DATA_START, DATA_END, PageEntryFlags::ReadWrite);
// Map sdata section
page_table.identity_map_range(SDATA_START, SDATA_END, PageEntryFlags::ReadWrite);
// Map bss section (includes stack and initial kernel heap)
page_table.identity_map_range(BSS_START, BSS_END, PageEntryFlags::ReadWrite);
// Map usable memory range (as rw so not executable)
page_table.identity_map_range(USABLE_MEMORY_START, USABLE_MEMORY_START + USABLE_MEMORY_SIZE.into(), PageEntryFlags::ReadWrite);
// Map Uart so we can continue using serial
page_table.identity_map(0x1000_0000_usize.into(), PageEntryFlags::ReadWrite, PageSize::Size4KiB);
let table_ppn = page_table_addr.as_addr() as usize >> 12;
let satp_value = 8 << 60 | table_ppn;
log::info!("Enabling the MMU...");
asm!(
"csrw satp, {}",
"sfence.vma",
in(reg) satp_value,
);
log::info!("We're in PAGING LAND!");
#[allow(unreachable_code)]
match system_reset(ResetType::Shutdown, ResetReason::NoReason).unwrap() {}
}
/// Spin loop
pub fn sloop() -> ! {
loop {
unsafe { asm!("wfi") }
}
}
pub fn log(args: core::fmt::Arguments<'_>) -> core::fmt::Result {
SERIAL_CONSOLE.get().unwrap().lock().write_fmt(args)
}
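
The satp value composed in _kernel_start packs three fields: MODE (8 = Sv39) in bits 63:60, ASID (left at zero here) in bits 59:44, and the root table's physical page number in bits 43:0. A sketch of the same composition:

    /// Builds an Sv39 satp value from the root page table's physical address.
    fn satp_sv39(root_table_phys: usize) -> usize {
        const MODE_SV39: usize = 8; // satp.MODE, bits 63:60
        (MODE_SV39 << 60) | (root_table_phys >> 12) // PPN = phys addr / 4096
    }

The sfence.vma following the csrw flushes stale translations so subsequent fetches go through the new table.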


@@ -1,14 +1,14 @@
use core::sync::atomic::AtomicU64;
use limine::{LimineMemmapEntry, LimineMemoryMapEntryType, NonNullPtr};
use spin::{Mutex, Once};
-use x86_64::{
-structures::paging::{FrameAllocator, FrameDeallocator, OffsetPageTable, PhysFrame, Size4KiB},
-PhysAddr, VirtAddr,
-};
+use x86_64::{structures::paging::OffsetPageTable, VirtAddr};
+use crate::memory::{MemoryManager, MAX_ORDER};
-pub static PAGE_TABLE: Once<Mutex<OffsetPageTable>> = Once::new();
-pub static FRAME_ALLOC: Once<Mutex<FrameAlloc>> = Once::new();
pub const PAGE_SIZE: usize = 4096;
+pub static MEMORY_MANAGER: Once<Mutex<MemoryManager>> = Once::new();
pub static HHDM_OFFSET: AtomicU64 = AtomicU64::new(0);
+static PAGE_TABLE: Once<Mutex<OffsetPageTable>> = Once::new();
/// Initialise page table
pub unsafe fn init_pt(phys_base: VirtAddr) {
@@ -27,45 +27,28 @@ pub unsafe fn init_pt(phys_base: VirtAddr) {
});
}
-/// Initialise page frame allocator
-pub unsafe fn init_falloc(mmap: &'static [NonNullPtr<LimineMemmapEntry>]) {
-log::info!("Initialising frame allocator");
-FRAME_ALLOC.call_once(|| Mutex::new(FrameAlloc::new(mmap)));
-}
+/// Initialise memory manager
+pub fn initialize(mmap: &'static [NonNullPtr<LimineMemmapEntry>]) {
+let mut memory_manager = MemoryManager::new();
-pub struct FrameAlloc {
-mmap: &'static [NonNullPtr<LimineMemmapEntry>],
-next: usize,
-}
+for entry in mmap {
+if entry.typ != LimineMemoryMapEntryType::Usable {
+continue;
+}
-unsafe impl Send for FrameAlloc {}
+let alignment = PAGE_SIZE << MAX_ORDER;
+let start_addr_unaligned = entry.base as usize;
+let diff = alignment - start_addr_unaligned % alignment;
+if diff > entry.len as usize {
+continue;
+}
+let start_addr = start_addr_unaligned + diff;
+let page_count = (entry.len as usize - diff) / PAGE_SIZE;
-impl FrameAlloc {
-pub unsafe fn new(mmap: &'static [NonNullPtr<LimineMemmapEntry>]) -> Self {
-Self { mmap, next: 0 }
+unsafe {
+memory_manager.add_range(start_addr.into(), page_count);
+}
}
-fn usable_frames(&self) -> impl Iterator<Item = PhysFrame> {
-self.mmap
-.iter()
-.filter(|e| e.typ == LimineMemoryMapEntryType::Usable)
-.map(|e| e.base..e.base + e.len)
-.flat_map(|r| r.step_by(4096))
-.map(PhysAddr::new)
-.map(PhysFrame::containing_address)
-}
-}
-unsafe impl FrameAllocator<Size4KiB> for FrameAlloc {
-fn allocate_frame(&mut self) -> Option<PhysFrame<Size4KiB>> {
-let f = self.usable_frames().nth(self.next);
-self.next += 1;
-f
-}
-}
-impl FrameDeallocator<Size4KiB> for FrameAlloc {
-unsafe fn deallocate_frame(&mut self, frame: PhysFrame<Size4KiB>) {
-// TODO
-}
+MEMORY_MANAGER.call_once(|| Mutex::new(memory_manager));
}


@@ -1,16 +1,26 @@
+pub mod memory;
mod gdt;
mod interrupts;
mod logging;
-mod memory;
pub use logging::log;
pub use memory::PAGE_SIZE;
use crate::allocator;
use memory::MEMORY_MANAGER;
use limine::{
LimineHhdmRequest, LimineKernelFileRequest, LimineMemmapRequest, LimineModuleRequest,
};
use x86_64::VirtAddr;
extern "C" {
fn _initial_kernel_heap_start();
fn _initial_kernel_heap_size();
}
const INITIAL_KERNEL_HEAP_START: *mut u8 = _initial_kernel_heap_start as _;
const INITIAL_KERNEL_HEAP_SIZE: *const () = _initial_kernel_heap_size as _;
#[no_mangle]
unsafe extern "C" fn _kernel_start() -> ! {
logging::init();
@@ -26,8 +36,10 @@ unsafe extern "C" fn _kernel_start() -> ! {
.offset,
));
+allocator::init(INITIAL_KERNEL_HEAP_START, INITIAL_KERNEL_HEAP_SIZE as _);
static MMAP_REQ: LimineMemmapRequest = LimineMemmapRequest::new(0);
-memory::init_falloc(
+memory::initialize(
MMAP_REQ
.get_response()
.get()
@@ -35,10 +47,25 @@ unsafe extern "C" fn _kernel_start() -> ! {
.memmap(),
);
-allocator::init();
gdt::init();
interrupts::init();
+{
+let mut mm = MEMORY_MANAGER.get().unwrap().lock();
+let alloc_0 = mm.allocate_pages(0).unwrap();
+log::debug!("Addr: {alloc_0}");
+let alloc_1 = mm.allocate_pages(0).unwrap();
+log::debug!("Addr: {alloc_1}");
+mm.deallocate_pages(alloc_0, 0);
+let alloc_2 = mm.allocate_pages(1).unwrap();
+log::debug!("Addr: {alloc_2}");
+mm.deallocate_pages(alloc_1, 0);
+mm.deallocate_pages(alloc_2, 1);
+let alloc_3 = mm.allocate_pages(1).unwrap();
+log::debug!("Addr: {alloc_3}");
+mm.deallocate_pages(alloc_3, 1);
+}
static KFILE_REQ: LimineKernelFileRequest = LimineKernelFileRequest::new(0);
static MOD_REQ: LimineModuleRequest = LimineModuleRequest::new(0);
crate::kmain::kmain(


@@ -7,7 +7,7 @@ pub fn kmain(cmdline: &str, initrd: Option<&'static [u8]>) -> ! {
if cmdline.contains("baka=9") {
let _ = crate::arch::log(format_args!(include_str!("../data/⑨. バカ")));
}
log::info!("Cmdline: \"{cmdline}\"");
let initrd = initrd.expect("no initrd found");


@@ -3,6 +3,7 @@
#![feature(
abi_x86_interrupt,
alloc_error_handler,
inline_const,
panic_info_message,
pointer_is_aligned,
prelude_import,
@@ -16,6 +17,7 @@ mod allocator;
mod arch;
mod kmain;
mod logger;
mod memory;
mod task;
use versioning::Version;
@@ -30,9 +32,9 @@ pub const VERSION: Version = Version {
#[panic_handler]
fn panic(info: &core::panic::PanicInfo) -> ! {
// TODO: Better panic handler
-let _ = crate::arch::log(format_args!(
-"\r\n\x1b[1m\x1b[4m\x1b[38;5;125mKernel Panic\x1b[0m\r\n",
-));
+// let _ = crate::arch::log(format_args!(
+//     "\r\n\x1b[1m\x1b[4m\x1b[38;5;125mKernel Panic\x1b[0m\r\n",
+// ));
if let Some(loc) = info.location() {
let _ = crate::arch::log(format_args!(

kernel/src/memory.rs (new file)

@@ -0,0 +1,231 @@
//! The Memory Manager
use alloc::collections::VecDeque;
use derive_more::*;
pub use crate::arch::PAGE_SIZE;
pub const MAX_ORDER: usize = 10;
#[repr(transparent)]
#[derive(
Add,
AddAssign,
Binary,
BitAnd,
BitAndAssign,
BitOr,
BitOrAssign,
BitXor,
BitXorAssign,
Clone,
Constructor,
Copy,
Display,
Div,
DivAssign,
Eq,
From,
LowerHex,
Mul,
MulAssign,
Not,
Octal,
Ord,
PartialEq,
PartialOrd,
Rem,
RemAssign,
Shl,
ShlAssign,
Shr,
ShrAssign,
Sub,
SubAssign,
Sum,
UpperHex,
)]
#[display(fmt = "0x{:x}", _0)]
#[from(forward)]
pub struct VirtualAddress(usize);
impl VirtualAddress {
#[cfg(target_arch = "riscv64")]
/// Returns an array of Virtual Page Numbers
// FIXME: SV48 and SV57 support
pub fn vpns(&self) -> [usize; 3] {
[
// [20:12]
(self.0 >> 12) & 0x1ff,
// [29:21]
(self.0 >> 21) & 0x1ff,
// [38:30]
(self.0 >> 30) & 0x1ff,
]
}
pub fn as_addr(&self) -> usize {
self.0
}
pub fn as_ptr<T>(&self) -> *const T {
self.0 as _
}
pub fn as_mut_ptr<T>(&mut self) -> *mut T {
self.0 as _
}
}
#[repr(transparent)]
#[derive(
Add,
AddAssign,
Binary,
BitAnd,
BitAndAssign,
BitOr,
BitOrAssign,
BitXor,
BitXorAssign,
Clone,
Constructor,
Copy,
Display,
Div,
DivAssign,
Eq,
From,
LowerHex,
Mul,
MulAssign,
Not,
Octal,
Ord,
PartialEq,
PartialOrd,
Rem,
RemAssign,
Shl,
ShlAssign,
Shr,
ShrAssign,
Sub,
SubAssign,
Sum,
UpperHex,
)]
#[display(fmt = "0x{:x}", _0)]
#[from(forward)]
pub struct PhysicalAddress(usize);
impl PhysicalAddress {
#[cfg(target_arch = "riscv64")]
/// Returns an array of Physical Page Numbers
// FIXME: SV48 and SV57 support
pub fn ppns(&self) -> [usize; 3] {
[
// [20:12]
(self.0 >> 12) & 0x1ff,
// [29:21]
(self.0 >> 21) & 0x1ff,
// [55:30]
(self.0 >> 30) & 0x3ffffff,
]
}
pub fn as_addr(&self) -> usize {
self.0
}
pub fn as_ptr<T>(&self) -> *const T {
self.0 as _
}
pub fn as_mut_ptr<T>(&self) -> *mut T {
self.0 as _
}
}
pub struct MemoryManager {
free_lists: [VecDeque<PhysicalAddress>; MAX_ORDER + 1],
}
impl MemoryManager {
pub const fn new() -> Self {
Self {
free_lists: [const { VecDeque::new() }; MAX_ORDER + 1],
}
}
// FIXME: this method should take a length and turn that into an order
pub fn allocate_pages(&mut self, order: usize) -> Option<PhysicalAddress> {
self.get_free_pages(order)
}
// FIXME: this method should take a length and turn that into an order
pub fn zallocate_pages(&mut self, order: usize) -> Option<PhysicalAddress> {
let alloc = self.allocate_pages(order)?;
unsafe {
alloc.as_mut_ptr::<u8>().write_bytes(0, PAGE_SIZE << order);
}
Some(alloc)
}
/// # Safety
/// This method assumes that `address` is in range of this allocator
// FIXME: this method should take a length and turn that into an order
pub unsafe fn deallocate_pages(&mut self, address: PhysicalAddress, order: usize) {
self.free_lists[order].push_front(address);
self.merge_buddies(order, address)
}
/// # Safety
/// This method assumes that the given address range,
/// a) starts and ends at an address aligned to page boundaries,
/// b) are valid free pages not already added,
/// FIXME: c) starts and ends at an address aligned to `PAGE_SIZE << MAX_ORDER`
pub unsafe fn add_range(&mut self, start_addr: PhysicalAddress, page_count: usize) {
for i in 0..page_count / 1024 {
self.free_lists[MAX_ORDER].push_back(start_addr + (i * 1024 * PAGE_SIZE).into());
}
}
fn get_free_pages(&mut self, order: usize) -> Option<PhysicalAddress> {
// We can't get such a page!
if order > MAX_ORDER {
return None;
}
if !self.free_lists[order].is_empty() {
return self.free_lists[order].pop_front();
}
self.get_free_pages(order + 1).map(|addr| {
self.free_lists[order].push_front(addr ^ (PAGE_SIZE << order).into());
addr
})
}
fn merge_buddies(&mut self, order: usize, address: PhysicalAddress) {
// if we can't have any higher order blocks, we can't merge
if order > MAX_ORDER - 1 {
return;
}
let buddy_address = address ^ (PAGE_SIZE << order).into();
log::debug!("merge buddy: 0x{buddy_address:x}");
if let Some(buddy_index) = self.free_lists[order]
.iter()
.position(|blk| *blk == buddy_address)
{
// Remove the buddy before popping the front: `buddy_index` was found
// with `address` still at the front of this list, so popping first
// would shift the buddy down by one and remove the wrong block.
self.free_lists[order].remove(buddy_index);
self.free_lists[order].pop_front();
let new_address = address.min(buddy_address);
log::debug!(
"Merging 0x{address:x} @ {order} with 0x{buddy_address:x} at 0x{new_address:x}"
);
self.free_lists[order + 1].push_front(new_address);
self.merge_buddies(order + 1, new_address)
}
}
}
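
The buddy arithmetic above is a single XOR. A worked example at order 0 with PAGE_SIZE = 4096: a block at 0x8000_1000 has its buddy at 0x8000_1000 ^ 0x1000 = 0x8000_0000, and the merged order-1 block starts at the lower of the two addresses:

    const PAGE_SIZE: usize = 4096;

    /// Address of the buddy of the order-`order` block at `addr`.
    fn buddy_of(addr: usize, order: usize) -> usize {
        addr ^ (PAGE_SIZE << order)
    }

    /// Start of the order+1 block formed by merging `addr` with its buddy.
    fn merged_start(addr: usize, order: usize) -> usize {
        addr.min(buddy_of(addr, order))
    }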


@@ -0,0 +1,22 @@
{
"arch": "riscv64",
"code-model": "medium",
"cpu": "generic-rv64",
"data-layout": "e-m:e-p:64:64-i64:64-i128:128-n32:64-S128",
"eh-frame-header": false,
"emit-debug-gdb-scripts": false,
"features": "+m,+a,+f,+d,+c",
"linker": "rust-lld",
"linker-flavor": "ld.lld",
"llvm-abiname": "lp64d",
"llvm-target": "riscv64",
"max-atomic-width": 64,
"panic-strategy": "abort",
"relocation-model": "static",
"target-pointer-width": "64",
"pre-link-args": {
"ld.lld": [
"--script=kernel/lds/riscv64-virt.ld"
]
}
}


@@ -8,19 +8,34 @@ fn main() -> Result<(), Error> {
args.next();
match args.next().as_deref() {
Some("build" | "b") => build(
args.next()
.map(|x| x == "-r" || x == "--release")
.unwrap_or_default(),
)
.change_context(Error::Build),
Some("build" | "b") => {
let mut release = false;
let mut target = Target::X86_64;
for arg in args {
if arg == "-r" || arg == "--release" {
release = true;
}
if arg == "rv64" || arg == "riscv64" || arg == "riscv64-virt" {
target = Target::Riscv64Virt;
}
}
build(release, target).change_context(Error::Build)
}
Some("run" | "r") => {
build(
args.next()
.map(|x| x == "-r" || x == "--release")
.unwrap_or_default(),
)?;
run()
let mut release = false;
let mut target = Target::X86_64;
for arg in args {
if arg == "-r" || arg == "--release" {
release = true;
}
if arg == "rv64" || arg == "riscv64" || arg == "riscv64-virt" {
target = Target::Riscv64Virt;
}
}
build(release, target)?;
run(release, target)
}
Some("help" | "h") => {
println!(concat!(
@@ -30,7 +45,8 @@ fn main() -> Result<(), Error> {
" help (h): Print this message\n",
" run (r): Build and run AbleOS in QEMU\n\n",
"Options for build and run:\n",
" -r: build in release mode",
" -r: build in release mode",
" [target]: sets target"
),);
Ok(())
}
@@ -87,7 +103,7 @@ fn get_fs() -> Result<FileSystem<impl ReadWriteSeek>, io::Error> {
Ok(fs)
}
-fn build(release: bool) -> Result<(), Error> {
+fn build(release: bool, target: Target) -> Result<(), Error> {
let fs = get_fs().change_context(Error::Io)?;
let mut com = Command::new("cargo");
com.current_dir("kernel");
@@ -96,12 +112,23 @@
com.arg("-r");
}
match target {
Target::Riscv64Virt => {
com.args(["--target", "targets/riscv64-virt-ableos.json"]);
}
_ => {}
}
match com.status() {
Ok(s) if s.code() != Some(0) => bail!(Error::Build),
Err(e) => bail!(report!(e).change_context(Error::Build)),
_ => (),
}
if target != Target::X86_64 {
return Ok(());
}
(|| -> std::io::Result<_> {
io::copy(
&mut File::open(
@@ -117,24 +144,44 @@
.change_context(Error::Io)
}
-fn run() -> Result<(), Error> {
-let mut com = Command::new("qemu-system-x86_64");
+fn run(release: bool, target: Target) -> Result<(), Error> {
+let mut com = match target {
+Target::X86_64 => Command::new("qemu-system-x86_64"),
+Target::Riscv64Virt => Command::new("qemu-system-riscv64"),
+};
-#[rustfmt::skip]
-com.args([
-"-bios",
-std::env::var("REPBUILD_QEMU_FIRMWARE_PATH")
-.as_deref()
-.unwrap_or("/usr/share/OVMF/OVMF_CODE.fd"),
-"-drive", "file=target/disk.img,format=raw",
-"-m", "4G",
-"-serial", "stdio",
-"-smp", "cores=2",
-]);
+if target == Target::X86_64 {
+#[rustfmt::skip]
+com.args([
+"-bios",
+std::env::var("REPBUILD_QEMU_FIRMWARE_PATH")
+.as_deref()
+.unwrap_or("/usr/share/OVMF/OVMF_CODE.fd"),
+"-drive", "file=target/disk.img,format=raw",
+"-m", "4G",
+"-serial", "stdio",
+"-smp", "cores=2",
+]);
-#[cfg(target_os = "linux")]
-{
-com.args(["-enable-kvm", "-cpu", "host"]);
+#[cfg(target_os = "linux")]
+{
+com.args(["-enable-kvm", "-cpu", "host"]);
+}
+}
+if target == Target::Riscv64Virt {
+#[rustfmt::skip]
+com.args([
+"-M", "virt",
+"-m", "128M",
+"-serial", "stdio",
+"-kernel",
+if release {
+"target/riscv64-virt-ableos/release/kernel"
+} else {
+"target/riscv64-virt-ableos/debug/kernel"
+}
+]);
+}
match com
@@ -147,6 +194,12 @@ fn run() -> Result<(), Error> {
}
}
#[derive(Clone, Copy, PartialEq, Eq)]
enum Target {
X86_64,
Riscv64Virt,
}
#[derive(Debug)]
enum Error {
Build,