
Formatting, LF and RISC-V kmain

Erin 2023-03-19 13:40:08 +01:00 committed by ondra05
parent e3ce700295
commit cef19a8fe4
3 changed files with 388 additions and 396 deletions

arch/riscv64/memory.rs

@@ -1,268 +1,268 @@
use core::num;
use alloc::boxed::Box;
use spin::{Mutex, Once};
use crate::memory::{MemoryManager, PhysicalAddress, VirtualAddress};
use super::PAGE_SIZE;

pub enum PageSize {
    Size4KiB,
    Size2MiB,
    Size1GiB,
    // FIXME: SV48 support
    // Size512GiB,
    // FIXME: SV57 support
    // Size256TiB,
}

impl PageSize {
    fn level(&self) -> usize {
        match self {
            PageSize::Size4KiB => 0,
            PageSize::Size2MiB => 1,
            PageSize::Size1GiB => 2,
            // FIXME: SV48 and SV57 support
        }
    }
}

pub struct PageTable {
    entries: [PageEntry; 512],
}

impl PageTable {
    /// Walk the page table to convert a virtual address to a physical address.
    /// If a page fault would occur, this returns None. Otherwise, it returns the physical address.
    pub fn virt_to_phys(&self, vaddr: VirtualAddress) -> Option<PhysicalAddress> {
        let vpn = vaddr.vpns();
        let mut v = &self.entries[vpn[2]];

        for i in (0..=2).rev() {
            if v.is_invalid() {
                // This is an invalid entry, page fault.
                break;
            } else if v.is_leaf() {
                // In RISC-V, a leaf can be at any level.
                // The offset mask masks off the PPNs. Each PPN is 9 bits and they start at bit #12,
                // hence the formula 12 + i * 9.
                let off_mask = (1 << (12 + i * 9)) - 1;
                let vaddr_pgoff = vaddr.as_addr() & off_mask;
                let addr = ((v.entry() << 2) as usize) & !off_mask;
                return Some((addr | vaddr_pgoff).into());
            }

            // Set v to the next entry, which is pointed to by this entry.
            // However, the address was shifted right by 2 places when stored in the page table
            // entry, so we shift it left to get it back into place.
            let entry = v.addr().as_ptr::<PageEntry>();
            // We index with i - 1 here; we will hit the return or break above
            // before we ever compute 0 - 1 = -1.
            v = unsafe { entry.add(vpn[i - 1]).as_ref().unwrap() };
        }

        // If we get here, we've exhausted all valid tables and haven't
        // found a leaf.
        None
    }

    /// Maps a virtual address to a physical address.
    /// flags should contain only the following:
    ///   Read, Write, Execute, User, and/or Global
    /// flags MUST include one or more of the following:
    ///   Read, Write, Execute
    /// The Valid bit is added automatically.
    pub fn map(&mut self, vaddr: VirtualAddress, paddr: PhysicalAddress, flags: PageEntryFlags, page_size: PageSize) {
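        // 0xe == 0b1110, i.e. the Read/Write/Execute bits: at least one must be set.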
        assert!(flags as usize & 0xe != 0);

        let vpn = vaddr.vpns();
        let ppn = paddr.ppns();
        let level = page_size.level();
        let mut v = &mut self.entries[vpn[2]];

        // Now we're going to traverse the page table and set the bits properly. We expect the root
        // to be valid, but we're required to create anything beyond the root.
        for i in (level..2).rev() {
            if v.is_invalid() {
                let mut mm = MEMORY_MANAGER.get().unwrap().lock();
                let page = mm.zallocate_pages(1).unwrap().as_addr();
                v.set_entry((page as usize >> 2) | PageEntryFlags::Valid as usize);
            }

            let entry = v.addr().as_mut_ptr::<PageEntry>();
            v = unsafe { entry.add(vpn[i]).as_mut().unwrap() };
        }

        // When we get here, we should be at VPN[0] and v should be pointing to our entry.
        // The entry structure is Figure 4.18 in the RISC-V Privileged Specification.
        let entry = (ppn[2] << 28) as usize // PPN[2] = [53:28]
            | (ppn[1] << 19) as usize // PPN[1] = [27:19]
            | (ppn[0] << 10) as usize // PPN[0] = [18:10]
            | flags as usize // Specified bits, such as User, Read, Write, etc.
            | PageEntryFlags::Valid as usize;
        v.set_entry(entry);
    }

    /// Identity maps a page of memory.
    pub fn identity_map(&mut self, addr: PhysicalAddress, flags: PageEntryFlags, page_size: PageSize) {
        // log::debug!("identity mapped {addr}");
        self.map(addr.as_addr().into(), addr, flags, page_size);
    }

    /// Identity maps a range of contiguous memory.
    /// This assumes that start <= end.
    pub fn identity_map_range(&mut self, start: PhysicalAddress, end: PhysicalAddress, flags: PageEntryFlags) {
        log::debug!("start: {start}, end: {end}");
        let mut mem_addr = start.as_addr() & !(PAGE_SIZE - 1);
        let num_pages = (align_val(end.as_addr(), 12) - mem_addr - 1) / PAGE_SIZE + 1;

        for _ in 0..num_pages {
            // FIXME: where possible, we could merge these entries into Size2MiB or larger ones
            self.identity_map(mem_addr.into(), flags, PageSize::Size4KiB);
            mem_addr += 1 << 12;
        }
    }

    /// Unmaps a page of memory at vaddr.
    pub fn unmap(&mut self, vaddr: VirtualAddress) {
        let vpn = vaddr.vpns();

        // Now we're going to traverse the page table and clear the bits.
        let mut v = &mut self.entries[vpn[2]];

        for i in (0..2).rev() {
            if v.is_invalid() {
                // This is an invalid entry; the page is already unmapped.
                return;
            } else if v.is_leaf() {
                // This is a leaf, which can be at any level.
                // To unmap the page, we only need to clear the entry.
                v.set_entry(0);
                return;
            }

            let entry = v.addr().as_mut_ptr::<PageEntry>();
            v = unsafe { entry.add(vpn[i]).as_mut().unwrap() };
        }

        // If we get here, this is an unmapped page.
    }

    /// Unmaps a range of contiguous memory.
    /// This assumes that start <= end.
    pub fn unmap_range(&mut self, start: VirtualAddress, end: VirtualAddress) {
        let mut mem_addr = start.as_addr() & !(PAGE_SIZE - 1);
        let num_pages = (align_val(end.as_addr(), 12) - mem_addr) / PAGE_SIZE;

        for _ in 0..num_pages {
            self.unmap(mem_addr.into());
            mem_addr += 1 << 12;
        }
    }

    /// Frees all memory associated with a table.
    /// NOTE: This does NOT free the table directly. That must be freed manually.
    fn destroy(&mut self) {
        for entry in &mut self.entries {
            entry.destroy()
        }
    }
}

#[repr(usize)]
#[derive(Clone, Copy, Debug)]
pub enum PageEntryFlags {
    None = 0,
    Valid = 1,
    Read = 1 << 1,
    Write = 1 << 2,
    Execute = 1 << 3,
    User = 1 << 4,
    Global = 1 << 5,
    Access = 1 << 6,
    Dirty = 1 << 7,
    // for convenience
    ReadWrite = Self::Read as usize | Self::Write as usize,
    ReadExecute = Self::Read as usize | Self::Execute as usize,
    ReadWriteExecute = Self::Read as usize | Self::Write as usize | Self::Execute as usize,
    UserReadWrite = Self::User as usize | Self::ReadWrite as usize,
    UserReadExecute = Self::User as usize | Self::ReadExecute as usize,
    UserReadWriteExecute = Self::User as usize | Self::ReadWriteExecute as usize,
}

struct PageEntry(usize);

impl PageEntry {
    fn is_valid(&self) -> bool {
        self.0 & PageEntryFlags::Valid as usize != 0
    }

    fn is_invalid(&self) -> bool {
        !self.is_valid()
    }
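
    // Per the RISC-V privileged spec, a valid entry with R = W = X = 0 is a
    // branch (a pointer to the next level of the table); if any of R/W/X is
    // set, the entry is a leaf mapping.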
    fn is_leaf(&self) -> bool {
        self.0 & PageEntryFlags::ReadWriteExecute as usize != 0
    }

    fn is_branch(&self) -> bool {
        !self.is_leaf()
    }

    fn entry(&self) -> usize {
        self.0
    }

    fn set_entry(&mut self, entry: usize) {
        self.0 = entry;
    }

    fn clear_flag(&mut self, flag: PageEntryFlags) {
        self.0 &= !(flag as usize);
    }

    fn set_flag(&mut self, flag: PageEntryFlags) {
        self.0 |= flag as usize;
    }

    fn addr(&self) -> PhysicalAddress {
        ((self.entry() as usize & !0x3ff) << 2).into()
    }

    fn destroy(&mut self) {
        if self.is_valid() && self.is_branch() {
            // This is a valid entry, so drill down and free.
            let memaddr = self.addr();
            let table = memaddr.as_mut_ptr::<PageTable>();
            unsafe {
                (*table).destroy();
                let mut mm = MEMORY_MANAGER.get().unwrap().lock();
                mm.deallocate_pages(memaddr.into(), 0);
            }
        }
    }
}

// FIXME: PageTable should be integrated into MemoryManager *somehow*
pub static MEMORY_MANAGER: Once<Mutex<MemoryManager>> = Once::new();
pub static PAGE_TABLE: Once<Mutex<PhysicalAddress>> = Once::new();

pub fn init(start_addr: PhysicalAddress, page_count: usize) {
    let mut memory_manager = MemoryManager::new();

    unsafe {
        memory_manager.add_range(start_addr, page_count);
        PAGE_TABLE.call_once(|| Mutex::new(memory_manager.zallocate_pages(0).unwrap()));
    }

    MEMORY_MANAGER.call_once(|| Mutex::new(memory_manager));
}

/// Align (set to a multiple of some power of two)
/// This function always rounds up.
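/// e.g. align_val(0x1234, 12) == 0x2000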
fn align_val(val: usize, order: usize) -> usize {
    let o = (1 << order) - 1;
    (val + o) & !o
}
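
For reference, Sv39 splits a 39-bit virtual address into a 12-bit page offset and three 9-bit VPN fields, which is where the 12 + i * 9 shift in virt_to_phys comes from. A minimal sketch of that decomposition, assuming the vpns() helper on VirtualAddress does the equivalent of:

// Sketch (assumption): mirrors what VirtualAddress::vpns() is expected to do.
fn vpns(vaddr: usize) -> [usize; 3] {
    [
        (vaddr >> 12) & 0x1ff, // VPN[0]: bits 20:12
        (vaddr >> 21) & 0x1ff, // VPN[1]: bits 29:21
        (vaddr >> 30) & 0x1ff, // VPN[2]: bits 38:30
    ]
}

// e.g. for vaddr = 0x8020_1234: page offset = 0x234, VPN = [0x001, 0x001, 0x002].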

arch/riscv64/memory_regions.s

@@ -1,35 +1,30 @@
.section .rodata
.global TEXT_START
.global TEXT_END
.global RODATA_START
.global RODATA_END
.global DATA_START
.global DATA_END
.global SDATA_START
.global SDATA_END
.global BSS_START
.global BSS_END
.global INITIAL_KERNEL_HEAP_START
.global INITIAL_KERNEL_HEAP_SIZE
.global USABLE_MEMORY_START
.global USABLE_MEMORY_SIZE

TEXT_START: .quad _text_start
TEXT_END: .quad _text_end
RODATA_START: .quad _rodata_start
RODATA_END: .quad _rodata_end
DATA_START: .quad _data_start
DATA_END: .quad _data_end
SDATA_START: .quad _sdata_start
SDATA_END: .quad _sdata_end
BSS_START: .quad _bss_start
BSS_END: .quad _bss_end
INITIAL_KERNEL_HEAP_START: .quad _initial_kernel_heap_start
INITIAL_KERNEL_HEAP_SIZE: .quad _initial_kernel_heap_size
USABLE_MEMORY_START: .quad _usable_memory_start
USABLE_MEMORY_SIZE: .quad _usable_memory_size

arch/riscv64/mod.rs

@@ -1,93 +1,90 @@
mod memory;

use core::{arch::{asm, global_asm}, fmt::Write};
use alloc::boxed::Box;
use sbi::system_reset::{ResetType, ResetReason, system_reset};
use spin::{Mutex, Once};
use uart_16550::MmioSerialPort;
use crate::{allocator, memory::PhysicalAddress, arch::riscv64::memory::{PAGE_TABLE, PageEntryFlags, PageSize, PageTable}};

global_asm!(include_str!("entry.s"));
global_asm!(include_str!("memory_regions.s"));

pub const PAGE_SIZE: usize = 4096;

extern {
    static TEXT_START: PhysicalAddress;
    static TEXT_END: PhysicalAddress;
    static RODATA_START: PhysicalAddress;
    static RODATA_END: PhysicalAddress;
    static DATA_START: PhysicalAddress;
    static DATA_END: PhysicalAddress;
    static SDATA_START: PhysicalAddress;
    static SDATA_END: PhysicalAddress;
    static BSS_START: PhysicalAddress;
    static BSS_END: PhysicalAddress;
    static INITIAL_KERNEL_HEAP_START: PhysicalAddress;
    static INITIAL_KERNEL_HEAP_SIZE: usize;
    static USABLE_MEMORY_START: PhysicalAddress;
    static USABLE_MEMORY_SIZE: usize;
}

static SERIAL_CONSOLE: Once<Mutex<MmioSerialPort>> = Once::new();

#[no_mangle]
unsafe extern fn _kernel_start() -> ! {
    SERIAL_CONSOLE.call_once(|| Mutex::new(unsafe { MmioSerialPort::new(0x1000_0000) }));
    crate::logger::init().expect("failed to set logger");
    log::info!("Initialising AKern {}", crate::VERSION);

    allocator::init(INITIAL_KERNEL_HEAP_START.as_mut_ptr::<u8>(), INITIAL_KERNEL_HEAP_SIZE);
    memory::init(USABLE_MEMORY_START.into(), USABLE_MEMORY_SIZE / PAGE_SIZE);

    let mut page_table_addr = PAGE_TABLE.get().unwrap().lock();
    let mut page_table = page_table_addr.as_mut_ptr::<PageTable>().as_mut().unwrap();

    // Map text (executable) section
    page_table.identity_map_range(TEXT_START, TEXT_END, PageEntryFlags::ReadExecute);
    // Map rodata section
    page_table.identity_map_range(RODATA_START, RODATA_END, PageEntryFlags::Read);
    // Map data section
    page_table.identity_map_range(DATA_START, DATA_END, PageEntryFlags::ReadWrite);
    // Map sdata section
    page_table.identity_map_range(SDATA_START, SDATA_END, PageEntryFlags::ReadWrite);
    // Map bss section (includes stack and initial kernel heap)
    page_table.identity_map_range(BSS_START, BSS_END, PageEntryFlags::ReadWrite);
    // Map usable memory range (rw, so not executable)
    page_table.identity_map_range(USABLE_MEMORY_START, USABLE_MEMORY_START + USABLE_MEMORY_SIZE.into(), PageEntryFlags::ReadWrite);
    // Map the UART so we can continue using serial
    page_table.identity_map(0x1000_0000_usize.into(), PageEntryFlags::ReadWrite, PageSize::Size4KiB);
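
    // satp layout (RV64): MODE[63:60] | ASID[59:44] | PPN[43:0].
    // MODE = 8 selects Sv39; PPN holds the root table's physical page number.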
    let table_ppn = page_table_addr.as_addr() as usize >> 12;
    let satp_value = 8 << 60 | table_ppn;

-   log::info!("Enabling the MMU...");
+   log::info!("Enabling MMU");
    asm!(
        "csrw satp, {}",
        "sfence.vma",
        in(reg) satp_value,
    );

-   log::info!("We're in PAGING LAND!");
-   #[allow(unreachable_code)]
-   match system_reset(ResetType::Shutdown, ResetReason::NoReason).unwrap() {}
+   crate::kmain::kmain("baka=9", None);
}

/// Spin loop
pub fn sloop() -> ! {
    loop {
        unsafe { asm!("wfi") }
    }
}

pub fn log(args: core::fmt::Arguments<'_>) -> core::fmt::Result {
    SERIAL_CONSOLE.get().unwrap().lock().write_fmt(args)
}
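
For reference, the satp value written above packs the Sv39 mode and the root table's physical page number into one register. A worked example with a made-up root-table address:

fn main() {
    // Hypothetical: root page table allocated at physical 0x8020_5000.
    let root: usize = 0x8020_5000;
    let satp = (8 << 60) | (root >> 12); // MODE = 8 (Sv39), PPN = 0x80205
    assert_eq!(satp, 0x8000_0000_0008_0205usize);
}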