From cef19a8fe4790efe1770898a55c1f3d2cc24c14a Mon Sep 17 00:00:00 2001
From: Erin <erin@erindesu.cz>
Date: Sun, 19 Mar 2023 13:40:08 +0100
Subject: [PATCH] Formatting, LF and RISC-V kmain

---
 kernel/src/arch/riscv64/memory.rs        | 536 +++++++++++------------
 kernel/src/arch/riscv64/memory_regions.s |  65 ++-
 kernel/src/arch/riscv64/mod.rs           | 183 ++++----
 3 files changed, 388 insertions(+), 396 deletions(-)

diff --git a/kernel/src/arch/riscv64/memory.rs b/kernel/src/arch/riscv64/memory.rs
index 60d9970..b6411d7 100644
--- a/kernel/src/arch/riscv64/memory.rs
+++ b/kernel/src/arch/riscv64/memory.rs
@@ -1,268 +1,268 @@
-use core::num;
-
-use alloc::boxed::Box;
-use spin::{Mutex, Once};
-use crate::memory::{MemoryManager, PhysicalAddress, VirtualAddress};
-
-use super::PAGE_SIZE;
-
-pub enum PageSize {
-    Size4KiB,
-    Size2MiB,
-    Size1GiB,
-    // FIXME: SV48 support
-    // Size512GiB,
-    // FIXME: SV57 support
-    // Size256TiB,
-}
-
-impl PageSize {
-    fn level(&self) -> usize {
-        match self {
-            PageSize::Size4KiB => 0,
-            PageSize::Size2MiB => 1,
-            PageSize::Size1GiB => 2,
-            // FIXME: SV48 and SV57 support
-        }
-    }
-}
-
-pub struct PageTable {
-    entries: [PageEntry; 512]
-}
-
-impl PageTable {
-    /// Walk the page table to convert a virtual address to a physical address.
-    /// If a page fault would occur, this returns None. Otherwise, it returns the physical address.
-    pub fn virt_to_phys(&self, vaddr: VirtualAddress) -> Option<PhysicalAddress> {
-        let vpn = vaddr.vpns();
-
-        let mut v = &self.entries[vpn[2]];
-        for i in (0..=2).rev() {
-            if v.is_invalid() {
-                // This is an invalid entry, page fault.
-                break;
-            } else if v.is_leaf() {
-                // In RISC-V, a leaf can be at any level.
-
-                // The offset mask masks off the PPN. Each PPN is 9 bits and they start at bit #12.
-                // So, our formula 12 + i * 9
-                let off_mask = (1 << (12 + i * 9)) - 1;
-                let vaddr_pgoff = vaddr.as_addr() & off_mask;
-                let addr = ((v.entry() << 2) as usize) & !off_mask;
-                return Some((addr | vaddr_pgoff).into());
-            }
-            // Set v to the next entry which is pointed to by this entry.
-            // However, the address was shifted right by 2 places when stored in the page table
-            // entry, so we shift it left to get it back into place.
-            let entry = v.addr().as_ptr::<PageEntry>();
-            // We do i - 1 here, however we should get None or Some() above
-            // before we do 0 - 1 = -1.
-            v = unsafe { entry.add(vpn[i - 1]).as_ref().unwrap() };
-        }
-
-        // If we get here, we've exhausted all valid tables and haven't
-        // found a leaf.
-        None
-    }
-
-    /// Maps a virtual address to a physical address
-    /// flags should contain only the following:
-    ///   Read, Write, Execute, User, and/or Global
-    /// flags MUST include one or more of the following:
-    ///   Read, Write, Execute
-    /// The valid bit automatically gets added
-    pub fn map(&mut self, vaddr: VirtualAddress, paddr: PhysicalAddress, flags: PageEntryFlags, page_size: PageSize) {
-        assert!(flags as usize & 0xe != 0);
-
-        let vpn = vaddr.vpns();
-        let ppn = paddr.ppns();
-        let level = page_size.level();
-
-        let mut v = &mut self.entries[vpn[2]];
-
-        // Now, we're going to traverse the page table and set the bits properly. We expect the root
-        // to be valid, however we're required to create anything beyond the root
-        for i in (level..2).rev() {
-            if v.is_invalid() {
-                let mut mm = MEMORY_MANAGER.get().unwrap().lock();
-                let page = mm.zallocate_pages(1).unwrap().as_addr();
-                v.set_entry((page as usize >> 2) | PageEntryFlags::Valid as usize);
-            }
-
-            let entry = v.addr().as_mut_ptr::<PageEntry>();
-	        v = unsafe { entry.add(vpn[i]).as_mut().unwrap() };
-        }
-
-        // When we get here, we should be at VPN[0] and v should be pointing to our entry.
-        // The entry structure is Figure 4.18 in the RISC-V Privileged Specification
-        let entry = (ppn[2] << 28) as usize // PPN[2] = [53:28]
-            | (ppn[1] << 19) as usize // PPN[1] = [27:19]
-            | (ppn[0] << 10) as usize // PPN[0] = [18:10]
-            | flags as usize // Specified bits, such as User, Read, Write, etc.
-            | PageEntryFlags::Valid as usize;
-        v.set_entry(entry);
-    }
-
-    /// Identity maps a page of memory
-    pub fn identity_map(&mut self, addr: PhysicalAddress, flags: PageEntryFlags, page_size: PageSize) {
-        // log::debug!("identity mapped {addr}");
-        self.map(addr.as_addr().into(), addr, flags, page_size);
-    }
-
-    /// Identity maps a range of contiguous memory
-    /// This assumes that start <= end
-    pub fn identity_map_range(&mut self, start: PhysicalAddress, end: PhysicalAddress, flags: PageEntryFlags) {
-        log::debug!("start: {start}, end: {end}");
-        let mut mem_addr = start.as_addr() & !(PAGE_SIZE - 1);
-        let num_pages = (align_val(end.as_addr(), 12) - mem_addr - 1) / PAGE_SIZE + 1;
-
-        for _ in 0..num_pages {
-            // FIXME: we can merge these page entries if possible into Size2MiB or larger entries
-            self.identity_map(mem_addr.into(), flags, PageSize::Size4KiB);
-            mem_addr += 1 << 12;
-        }
-    }
-
-    /// Unmaps a page of memory at vaddr
-    pub fn unmap(&mut self, vaddr: VirtualAddress) {
-        let vpn = vaddr.vpns();
-
-        // Now, we're going to traverse the page table and clear the bits
-        let mut v = &mut self.entries[vpn[2]];
-        for i in (0..2).rev() {
-            if v.is_invalid() {
-                // This is an invalid entry, page is already unmapped
-                return;
-            } else if v.is_leaf() {
-                // This is a leaf, which can be at any level
-                // In order to make this page unmapped, we need to clear the entry
-                v.set_entry(0);
-                return;
-            }
-
-            let entry = v.addr().as_mut_ptr::<PageEntry>();
-	        v = unsafe { entry.add(vpn[i]).as_mut().unwrap() };
-        }
-
-        // If we're here this is an unmapped page
-        return;
-    }
-
-    /// Unmaps a range of contiguous memory
-    /// This assumes that start <= end
-    pub fn unmap_range(&mut self, start: VirtualAddress, end: VirtualAddress) {
-        let mut mem_addr = start.as_addr() & !(PAGE_SIZE - 1);
-        let num_pages = (align_val(end.as_addr(), 12) - mem_addr) / PAGE_SIZE;
-
-        for _ in 0..num_pages {
-            self.unmap(mem_addr.into());
-            mem_addr += 1 << 12;
-        }
-    }
-
-    /// Frees all memory associated with a table.
-    /// NOTE: This does NOT free the table directly. This must be freed manually.
-    fn destroy(&mut self) {
-        for entry in &mut self.entries {
-            entry.destroy()
-        }
-    }
-}
-
-#[repr(usize)]
-#[derive(Clone, Copy, Debug)]
-pub enum PageEntryFlags {
-    None = 0,
-    Valid = 1,
-    Read = 1 << 1,
-    Write = 1 << 2,
-    Execute = 1 << 3,
-    User = 1 << 4,
-    Global = 1 << 5,
-    Access = 1 << 6,
-    Dirty = 1 << 7,
-  
-    // for convenience
-    ReadWrite = Self::Read as usize | Self::Write as usize,
-    ReadExecute = Self::Read as usize | Self::Execute as usize,
-    ReadWriteExecute = Self::Read as usize | Self::Write as usize | Self::Execute as usize,
-    UserReadWrite = Self::User as usize | Self::ReadWrite as usize,
-    UserReadExecute = Self::User as usize | Self::ReadExecute as usize,
-    UserReadWriteExecute = Self::User as usize | Self::ReadWriteExecute as usize,
-}
-
-struct PageEntry(usize);
-
-impl PageEntry {
-    fn is_valid(&self) -> bool {
-        self.0 & PageEntryFlags::Valid as usize != 0
-    }
-
-    fn is_invalid(&self) -> bool {
-        !self.is_valid()
-    }
-
-    fn is_leaf(&self) -> bool {
-        self.0 & PageEntryFlags::ReadWriteExecute as usize != 0
-    }
-
-    fn is_branch(&self) -> bool {
-        !self.is_leaf()
-    }
-
-    fn entry(&self) -> usize {
-        self.0
-    }
-
-    fn set_entry(&mut self, entry: usize) {
-        self.0 = entry;
-    }
-
-    fn clear_flag(&mut self, flag: PageEntryFlags) {
-        self.0 &= !(flag as usize);
-    }
-
-    fn set_flag(&mut self, flag: PageEntryFlags) {
-        self.0 |= flag as usize;
-    }
-
-    fn addr(&self) -> PhysicalAddress {
-        ((self.entry() as usize & !0x3ff) << 2).into()
-    }
-
-    fn destroy(&mut self) {
-        if self.is_valid() && self.is_branch() {
-            // This is a valid entry so drill down and free
-            let memaddr = self.addr();
-            let table = memaddr.as_mut_ptr::<PageTable>();
-            unsafe {
-                (*table).destroy();
-                let mut mm = MEMORY_MANAGER.get().unwrap().lock();
-                mm.deallocate_pages(memaddr.into(), 0);
-            }
-        }
-    }
-}
-
-// FIXME: PageTable should be integrated into MemoryManager *somehow*
-pub static MEMORY_MANAGER: Once<Mutex<MemoryManager>> = Once::new();
-pub static PAGE_TABLE: Once<Mutex<PhysicalAddress>> = Once::new();
-
-pub fn init(start_addr: PhysicalAddress, page_count: usize) {
-    let mut memory_manager = MemoryManager::new();
-
-    unsafe {
-        memory_manager.add_range(start_addr, page_count);
-        PAGE_TABLE.call_once(|| Mutex::new(memory_manager.zallocate_pages(0).unwrap()));
-    }
-
-    MEMORY_MANAGER.call_once(|| Mutex::new(memory_manager));
-}
-
-/// Align (set to a multiple of some power of two)
-/// This function always rounds up.
-fn align_val(val: usize, order: usize) -> usize {
-    let o = (1 << order) - 1;
-    (val + o) & !o
-}
+use core::num;
+
+use alloc::boxed::Box;
+use spin::{Mutex, Once};
+use crate::memory::{MemoryManager, PhysicalAddress, VirtualAddress};
+
+use super::PAGE_SIZE;
+
+pub enum PageSize {
+    Size4KiB,
+    Size2MiB,
+    Size1GiB,
+    // FIXME: SV48 support
+    // Size512GiB,
+    // FIXME: SV57 support
+    // Size256TiB,
+}
+
+impl PageSize {
+    fn level(&self) -> usize {
+        match self {
+            PageSize::Size4KiB => 0,
+            PageSize::Size2MiB => 1,
+            PageSize::Size1GiB => 2,
+            // FIXME: SV48 and SV57 support
+        }
+    }
+}
+
+pub struct PageTable {
+    entries: [PageEntry; 512]
+}
+
+impl PageTable {
+    /// Walk the page table to convert a virtual address to a physical address.
+    /// If a page fault would occur, this returns None. Otherwise, it returns the physical address.
+    pub fn virt_to_phys(&self, vaddr: VirtualAddress) -> Option<PhysicalAddress> {
+        let vpn = vaddr.vpns();
+
+        let mut v = &self.entries[vpn[2]];
+        for i in (0..=2).rev() {
+            if v.is_invalid() {
+                // This is an invalid entry, page fault.
+                break;
+            } else if v.is_leaf() {
+                // In RISC-V, a leaf can be at any level.
+
+                // The offset mask masks off the PPN bits. Each PPN field is 9 bits wide
+                // and they start at bit 12, so the offset width at level i is 12 + i * 9 bits.
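+                // e.g. a 2 MiB leaf at i = 1: off_mask = (1 << 21) - 1, so the low 21 bits
+                // come from vaddr and the upper bits from the entry's PPN.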
+                let off_mask = (1 << (12 + i * 9)) - 1;
+                let vaddr_pgoff = vaddr.as_addr() & off_mask;
+                let addr = ((v.entry() << 2) as usize) & !off_mask;
+                return Some((addr | vaddr_pgoff).into());
+            }
+            // Set v to the next entry which is pointed to by this entry.
+            // However, the address was shifted right by 2 places when stored in the page table
+            // entry, so we shift it left to get it back into place.
+            let entry = v.addr().as_ptr::<PageEntry>();
+            // We index with vpn[i - 1] here; a level-0 entry is either invalid or a leaf,
+            // so we break or return above before this could underflow at i = 0.
+            v = unsafe { entry.add(vpn[i - 1]).as_ref().unwrap() };
+        }
+
+        // If we get here, we've exhausted all valid tables and haven't
+        // found a leaf.
+        None
+    }
+
+    /// Maps a virtual address to a physical address.
+    /// `flags` should contain only the following:
+    ///   Read, Write, Execute, User, and/or Global
+    /// `flags` MUST include one or more of the following:
+    ///   Read, Write, Execute
+    /// The Valid bit is added automatically.
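+    /// A usage sketch (the UART base matches the identity mapping done at boot):
+    ///   table.map(0x1000_0000_usize.into(), 0x1000_0000_usize.into(),
+    ///             PageEntryFlags::ReadWrite, PageSize::Size4KiB);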
+    pub fn map(&mut self, vaddr: VirtualAddress, paddr: PhysicalAddress, flags: PageEntryFlags, page_size: PageSize) {
+        assert!(flags as usize & 0xe != 0);
+
+        let vpn = vaddr.vpns();
+        let ppn = paddr.ppns();
+        let level = page_size.level();
+
+        let mut v = &mut self.entries[vpn[2]];
+
+        // Now we traverse the page table and set the bits properly. The root table is
+        // expected to exist already; any intermediate tables below it are created as needed.
+        for i in (level..2).rev() {
+            if v.is_invalid() {
+                let mut mm = MEMORY_MANAGER.get().unwrap().lock();
+                let page = mm.zallocate_pages(1).unwrap().as_addr();
+                v.set_entry((page as usize >> 2) | PageEntryFlags::Valid as usize);
+            }
+
+            let entry = v.addr().as_mut_ptr::<PageEntry>();
+            v = unsafe { entry.add(vpn[i]).as_mut().unwrap() };
+        }
+
+        // When we get here, v points to the entry at the target level (VPN[level]).
+        // The entry structure is Figure 4.18 in the RISC-V Privileged Specification.
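+        // Sv39 PTE layout: [53:28] PPN[2], [27:19] PPN[1], [18:10] PPN[0],
+        // [9:8] RSW, [7:0] D A G U X W R V.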
+        let entry = (ppn[2] << 28) as usize // PPN[2] = [53:28]
+            | (ppn[1] << 19) as usize // PPN[1] = [27:19]
+            | (ppn[0] << 10) as usize // PPN[0] = [18:10]
+            | flags as usize // Specified bits, such as User, Read, Write, etc.
+            | PageEntryFlags::Valid as usize;
+        v.set_entry(entry);
+    }
+
+    /// Identity maps a page of memory
+    pub fn identity_map(&mut self, addr: PhysicalAddress, flags: PageEntryFlags, page_size: PageSize) {
+        // log::debug!("identity mapped {addr}");
+        self.map(addr.as_addr().into(), addr, flags, page_size);
+    }
+
+    /// Identity maps a range of contiguous memory
+    /// This assumes that start <= end
+    pub fn identity_map_range(&mut self, start: PhysicalAddress, end: PhysicalAddress, flags: PageEntryFlags) {
+        log::debug!("start: {start}, end: {end}");
+        let mut mem_addr = start.as_addr() & !(PAGE_SIZE - 1);
+        let num_pages = (align_val(end.as_addr(), 12) - mem_addr - 1) / PAGE_SIZE + 1;
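+        // e.g. start = 0x8000_0000, end = 0x8000_2001 -> num_pages = 3, since the end
+        // is rounded up to the next 4 KiB boundary (0x8000_3000).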
+
+        for _ in 0..num_pages {
+            // FIXME: we can merge these page entries if possible into Size2MiB or larger entries
+            self.identity_map(mem_addr.into(), flags, PageSize::Size4KiB);
+            mem_addr += 1 << 12;
+        }
+    }
+
+    /// Unmaps a page of memory at vaddr
+    pub fn unmap(&mut self, vaddr: VirtualAddress) {
+        let vpn = vaddr.vpns();
+
+        // Now, we're going to traverse the page table and clear the bits
+        let mut v = &mut self.entries[vpn[2]];
+        for i in (0..2).rev() {
+            if v.is_invalid() {
+                // This is an invalid entry, page is already unmapped
+                return;
+            } else if v.is_leaf() {
+                // A leaf can be at any level; clearing the entry unmaps the page.
+                v.set_entry(0);
+                return;
+            }
+
+            let entry = v.addr().as_mut_ptr::<PageEntry>();
+            v = unsafe { entry.add(vpn[i]).as_mut().unwrap() };
+        }
+
+        // If we get here, the page was already unmapped; nothing to do.
+    }
+
+    /// Unmaps a range of contiguous memory
+    /// This assumes that start <= end
+    pub fn unmap_range(&mut self, start: VirtualAddress, end: VirtualAddress) {
+        let mut mem_addr = start.as_addr() & !(PAGE_SIZE - 1);
+        let num_pages = (align_val(end.as_addr(), 12) - mem_addr) / PAGE_SIZE;
+
+        for _ in 0..num_pages {
+            self.unmap(mem_addr.into());
+            mem_addr += 1 << 12;
+        }
+    }
+
+    /// Frees all memory associated with a table.
+    /// NOTE: This does NOT free the table itself; the table must be freed manually.
+    fn destroy(&mut self) {
+        for entry in &mut self.entries {
+            entry.destroy()
+        }
+    }
+}
+
+#[repr(usize)]
+#[derive(Clone, Copy, Debug)]
+pub enum PageEntryFlags {
+    None = 0,
+    Valid = 1,
+    Read = 1 << 1,
+    Write = 1 << 2,
+    Execute = 1 << 3,
+    User = 1 << 4,
+    Global = 1 << 5,
+    Access = 1 << 6,
+    Dirty = 1 << 7,
+
+    // for convenience
+    ReadWrite = Self::Read as usize | Self::Write as usize,
+    ReadExecute = Self::Read as usize | Self::Execute as usize,
+    ReadWriteExecute = Self::Read as usize | Self::Write as usize | Self::Execute as usize,
+    UserReadWrite = Self::User as usize | Self::ReadWrite as usize,
+    UserReadExecute = Self::User as usize | Self::ReadExecute as usize,
+    UserReadWriteExecute = Self::User as usize | Self::ReadWriteExecute as usize,
+}
+
+struct PageEntry(usize);
+
+impl PageEntry {
+    fn is_valid(&self) -> bool {
+        self.0 & PageEntryFlags::Valid as usize != 0
+    }
+
+    fn is_invalid(&self) -> bool {
+        !self.is_valid()
+    }
+
+    fn is_leaf(&self) -> bool {
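+        // Per the privileged spec, an entry with any of R, W or X set is a leaf;
+        // R = W = X = 0 marks a pointer to the next table level.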
+        self.0 & PageEntryFlags::ReadWriteExecute as usize != 0
+    }
+
+    fn is_branch(&self) -> bool {
+        !self.is_leaf()
+    }
+
+    fn entry(&self) -> usize {
+        self.0
+    }
+
+    fn set_entry(&mut self, entry: usize) {
+        self.0 = entry;
+    }
+
+    fn clear_flag(&mut self, flag: PageEntryFlags) {
+        self.0 &= !(flag as usize);
+    }
+
+    fn set_flag(&mut self, flag: PageEntryFlags) {
+        self.0 |= flag as usize;
+    }
+
+    fn addr(&self) -> PhysicalAddress {
+        ((self.entry() as usize & !0x3ff) << 2).into()
+    }
+
+    fn destroy(&mut self) {
+        if self.is_valid() && self.is_branch() {
+            // This is a valid entry so drill down and free
+            let memaddr = self.addr();
+            let table = memaddr.as_mut_ptr::<PageTable>();
+            unsafe {
+                (*table).destroy();
+                let mut mm = MEMORY_MANAGER.get().unwrap().lock();
+                mm.deallocate_pages(memaddr.into(), 0);
+            }
+        }
+    }
+}
+
+// FIXME: PageTable should be integrated into MemoryManager *somehow*
+pub static MEMORY_MANAGER: Once<Mutex<MemoryManager>> = Once::new();
+pub static PAGE_TABLE: Once<Mutex<PhysicalAddress>> = Once::new();
+
+pub fn init(start_addr: PhysicalAddress, page_count: usize) {
+    let mut memory_manager = MemoryManager::new();
+
+    unsafe {
+        memory_manager.add_range(start_addr, page_count);
+        PAGE_TABLE.call_once(|| Mutex::new(memory_manager.zallocate_pages(0).unwrap()));
+    }
+
+    MEMORY_MANAGER.call_once(|| Mutex::new(memory_manager));
+}
+
+/// Align (set to a multiple of some power of two)
+/// This function always rounds up.
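+/// e.g. align_val(0x1234, 12) == 0x2000 (rounded up to the next 4 KiB boundary).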
+fn align_val(val: usize, order: usize) -> usize {
+    let o = (1 << order) - 1;
+    (val + o) & !o
+}
diff --git a/kernel/src/arch/riscv64/memory_regions.s b/kernel/src/arch/riscv64/memory_regions.s
index c4cb76f..efb55d7 100644
--- a/kernel/src/arch/riscv64/memory_regions.s
+++ b/kernel/src/arch/riscv64/memory_regions.s
@@ -1,35 +1,30 @@
-    .section .rodata
-    .global TEXT_START
-TEXT_START: .quad _text_start
-    .global TEXT_END
-TEXT_END: .quad _text_end
-
-    .global RODATA_START
-RODATA_START: .quad _rodata_start
-    .global RODATA_END
-RODATA_END: .quad _rodata_end
-
-    .global DATA_START
-DATA_START: .quad _data_start
-    .global DATA_END
-DATA_END: .quad _data_end
-
-    .global SDATA_START
-SDATA_START: .quad _sdata_start
-    .global SDATA_END
-SDATA_END: .quad _sdata_end
-
-    .global BSS_START
-BSS_START: .quad _bss_start
-    .global BSS_END
-BSS_END: .quad _bss_end
-
-    .global INITIAL_KERNEL_HEAP_START
-INITIAL_KERNEL_HEAP_START: .quad _initial_kernel_heap_start
-    .global INITIAL_KERNEL_HEAP_SIZE
-INITIAL_KERNEL_HEAP_SIZE: .quad _initial_kernel_heap_size
-
-    .global USABLE_MEMORY_START
-USABLE_MEMORY_START: .quad _usable_memory_start
-    .global USABLE_MEMORY_SIZE
-USABLE_MEMORY_SIZE: .quad _usable_memory_size
+.section .rodata
+.global TEXT_START
+.global TEXT_END
+.global RODATA_START
+.global RODATA_END
+.global DATA_START
+.global DATA_END
+.global SDATA_START
+.global SDATA_END
+.global BSS_START
+.global BSS_END
+.global INITIAL_KERNEL_HEAP_START
+.global INITIAL_KERNEL_HEAP_SIZE
+.global USABLE_MEMORY_START
+.global USABLE_MEMORY_SIZE
+
+TEXT_START:					.quad _text_start
+TEXT_END:					.quad _text_end
+RODATA_START:				.quad _rodata_start
+RODATA_END:					.quad _rodata_end
+DATA_START:					.quad _data_start
+DATA_END:					.quad _data_end
+SDATA_START:				.quad _sdata_start
+SDATA_END:					.quad _sdata_end
+BSS_START:					.quad _bss_start
+BSS_END:					.quad _bss_end
+INITIAL_KERNEL_HEAP_START:	.quad _initial_kernel_heap_start
+INITIAL_KERNEL_HEAP_SIZE:	.quad _initial_kernel_heap_size
+USABLE_MEMORY_START:		.quad _usable_memory_start
+USABLE_MEMORY_SIZE:			.quad _usable_memory_size
diff --git a/kernel/src/arch/riscv64/mod.rs b/kernel/src/arch/riscv64/mod.rs
index a557046..8ef40da 100644
--- a/kernel/src/arch/riscv64/mod.rs
+++ b/kernel/src/arch/riscv64/mod.rs
@@ -1,93 +1,90 @@
-mod memory;
-
-use core::{arch::{asm, global_asm}, fmt::Write};
-use alloc::boxed::Box;
-use sbi::system_reset::{ResetType, ResetReason, system_reset};
-use spin::{Mutex, Once};
-use uart_16550::MmioSerialPort;
-
-use crate::{allocator, memory::PhysicalAddress, arch::riscv64::memory::{PAGE_TABLE, PageEntryFlags, PageSize, PageTable}};
-
-global_asm!(include_str!("entry.s"));
-global_asm!(include_str!("memory_regions.s"));
-
-pub const PAGE_SIZE: usize = 4096;
-
-extern {
-    static TEXT_START: PhysicalAddress;
-    static TEXT_END: PhysicalAddress;
-
-    static RODATA_START: PhysicalAddress;
-    static RODATA_END: PhysicalAddress;
-    
-    static DATA_START: PhysicalAddress;
-    static DATA_END: PhysicalAddress;
-
-    static SDATA_START: PhysicalAddress;
-    static SDATA_END: PhysicalAddress;
-    
-    static BSS_START: PhysicalAddress;
-    static BSS_END: PhysicalAddress;
-    
-    static INITIAL_KERNEL_HEAP_START: PhysicalAddress;
-    static INITIAL_KERNEL_HEAP_SIZE: usize;
-    
-    static USABLE_MEMORY_START: PhysicalAddress;
-    static USABLE_MEMORY_SIZE: usize;
-}
-
-static SERIAL_CONSOLE: Once<Mutex<MmioSerialPort>> = Once::new();
-
-#[no_mangle]
-unsafe extern fn _kernel_start() -> ! {
-    SERIAL_CONSOLE.call_once(|| Mutex::new(unsafe { MmioSerialPort::new(0x1000_0000) }));
-    crate::logger::init().expect("failed to set logger");
-    log::info!("Initialising AKern {}", crate::VERSION);
-
-    allocator::init(INITIAL_KERNEL_HEAP_START.as_mut_ptr::<u8>(), INITIAL_KERNEL_HEAP_SIZE);
-    memory::init(USABLE_MEMORY_START.into(), USABLE_MEMORY_SIZE / PAGE_SIZE);
-
-    let mut page_table_addr = PAGE_TABLE.get().unwrap().lock();
-    let mut page_table = page_table_addr.as_mut_ptr::<PageTable>().as_mut().unwrap();
-
-    // Map text (executable) section
-    page_table.identity_map_range(TEXT_START, TEXT_END, PageEntryFlags::ReadExecute);
-    // Map rodata section
-    page_table.identity_map_range(RODATA_START, RODATA_END, PageEntryFlags::Read);
-    // Map data section
-    page_table.identity_map_range(DATA_START, DATA_END, PageEntryFlags::ReadWrite);
-    // Map sdata section
-    page_table.identity_map_range(SDATA_START, SDATA_END, PageEntryFlags::ReadWrite);
-    // Map bss section (includes stack and initial kernel heap)
-    page_table.identity_map_range(BSS_START, BSS_END, PageEntryFlags::ReadWrite);
-    // Map usable memory range (as rw so not executable)
-    page_table.identity_map_range(USABLE_MEMORY_START, USABLE_MEMORY_START + USABLE_MEMORY_SIZE.into(), PageEntryFlags::ReadWrite);
-    // Map Uart so we can continue using serial
-    page_table.identity_map(0x1000_0000_usize.into(), PageEntryFlags::ReadWrite, PageSize::Size4KiB);
-
-    let table_ppn = page_table_addr.as_addr() as usize >> 12;
-    let satp_value = 8 << 60 | table_ppn;
-    log::info!("Enabling the MMU...");
-
-    asm!(
-        "csrw satp, {}",
-        "sfence.vma",
-        in(reg) satp_value,
-    );
-
-    log::info!("We're in PAGING LAND!");
-
-    #[allow(unreachable_code)]
-    match system_reset(ResetType::Shutdown, ResetReason::NoReason).unwrap() {}
-}
-
-/// Spin loop
-pub fn sloop() -> ! {
-    loop {
-        unsafe { asm!("wfi") }
-    }
-}
-
-pub fn log(args: core::fmt::Arguments<'_>) -> core::fmt::Result {
-    SERIAL_CONSOLE.get().unwrap().lock().write_fmt(args)
-}
+mod memory;
+
+use core::{arch::{asm, global_asm}, fmt::Write};
+use alloc::boxed::Box;
+use sbi::system_reset::{ResetType, ResetReason, system_reset};
+use spin::{Mutex, Once};
+use uart_16550::MmioSerialPort;
+
+use crate::{allocator, memory::PhysicalAddress, arch::riscv64::memory::{PAGE_TABLE, PageEntryFlags, PageSize, PageTable}};
+
+global_asm!(include_str!("entry.s"));
+global_asm!(include_str!("memory_regions.s"));
+
+pub const PAGE_SIZE: usize = 4096;
+
+extern {
+    static TEXT_START: PhysicalAddress;
+    static TEXT_END: PhysicalAddress;
+
+    static RODATA_START: PhysicalAddress;
+    static RODATA_END: PhysicalAddress;
+    
+    static DATA_START: PhysicalAddress;
+    static DATA_END: PhysicalAddress;
+
+    static SDATA_START: PhysicalAddress;
+    static SDATA_END: PhysicalAddress;
+    
+    static BSS_START: PhysicalAddress;
+    static BSS_END: PhysicalAddress;
+    
+    static INITIAL_KERNEL_HEAP_START: PhysicalAddress;
+    static INITIAL_KERNEL_HEAP_SIZE: usize;
+    
+    static USABLE_MEMORY_START: PhysicalAddress;
+    static USABLE_MEMORY_SIZE: usize;
+}
+
+static SERIAL_CONSOLE: Once<Mutex<MmioSerialPort>> = Once::new();
+
+#[no_mangle]
+unsafe extern fn _kernel_start() -> ! {
+    SERIAL_CONSOLE.call_once(|| Mutex::new(unsafe { MmioSerialPort::new(0x1000_0000) }));
+    crate::logger::init().expect("failed to set logger");
+    log::info!("Initialising AKern {}", crate::VERSION);
+
+    allocator::init(INITIAL_KERNEL_HEAP_START.as_mut_ptr::<u8>(), INITIAL_KERNEL_HEAP_SIZE);
+    memory::init(USABLE_MEMORY_START.into(), USABLE_MEMORY_SIZE / PAGE_SIZE);
+
+    let mut page_table_addr = PAGE_TABLE.get().unwrap().lock();
+    let mut page_table = page_table_addr.as_mut_ptr::<PageTable>().as_mut().unwrap();
+
+    // Map text (executable) section
+    page_table.identity_map_range(TEXT_START, TEXT_END, PageEntryFlags::ReadExecute);
+    // Map rodata section
+    page_table.identity_map_range(RODATA_START, RODATA_END, PageEntryFlags::Read);
+    // Map data section
+    page_table.identity_map_range(DATA_START, DATA_END, PageEntryFlags::ReadWrite);
+    // Map sdata section
+    page_table.identity_map_range(SDATA_START, SDATA_END, PageEntryFlags::ReadWrite);
+    // Map bss section (includes stack and initial kernel heap)
+    page_table.identity_map_range(BSS_START, BSS_END, PageEntryFlags::ReadWrite);
+    // Map usable memory range (as rw so not executable)
+    page_table.identity_map_range(USABLE_MEMORY_START, USABLE_MEMORY_START + USABLE_MEMORY_SIZE.into(), PageEntryFlags::ReadWrite);
+    // Map the UART so we can continue using serial
+    page_table.identity_map(0x1000_0000_usize.into(), PageEntryFlags::ReadWrite, PageSize::Size4KiB);
+
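+    // satp layout: MODE in bits [63:60] (8 = Sv39), ASID in [59:44] (0 here),
+    // and the root table's PPN (its physical address >> 12) in [43:0].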
+    let table_ppn = page_table_addr.as_addr() as usize >> 12;
+    let satp_value = 8 << 60 | table_ppn;
+    log::info!("Enabling MMU");
+
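+    // Writing satp switches on Sv39 translation; sfence.vma then flushes any stale
+    // translations so the new mappings take effect.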
+    asm!(
+        "csrw satp, {}",
+        "sfence.vma",
+        in(reg) satp_value,
+    );
+
+    crate::kmain::kmain("baka=9", None);
+}
+
+/// Spin loop
+pub fn sloop() -> ! {
+    loop {
+        unsafe { asm!("wfi") }
+    }
+}
+
+pub fn log(args: core::fmt::Arguments<'_>) -> core::fmt::Result {
+    SERIAL_CONSOLE.get().unwrap().lock().write_fmt(args)
+}