Compare commits

...

3 Commits

Author  SHA1        Message                               Date
Asya    ed5b28d498  wip                                   2023-02-21 17:51:13 +03:00
Asya    76f3dddbdd  VirtualAddress/PhysicalAddress types  2023-02-13 23:22:42 +03:00
Asya    7d392b408a  Zeroed page allocation                2023-02-11 11:22:46 +03:00
10 changed files with 526 additions and 74 deletions

Cargo.lock (generated), 12 changes

@@ -179,6 +179,17 @@ dependencies = [
"syn",
]
[[package]]
name = "derive_more"
version = "0.99.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "env_logger"
version = "0.10.0"
@@ -326,6 +337,7 @@ name = "kernel"
version = "0.2.0"
dependencies = [
"crossbeam-queue",
"derive_more",
"limine",
"log",
"sbi",


@@ -15,6 +15,22 @@ version = "0.3"
default-features = false
features = ["alloc"]
[dependencies.derive_more]
version = "0.99"
default-features = false
features = [
"add",
"add_assign",
"constructor",
"display",
"from",
"into",
"mul",
"mul_assign",
"not",
"sum",
]
[target.'cfg(target_arch = "x86_64")'.dependencies]
limine = { version = "0.1", git = "https://github.com/limine-bootloader/limine-rs" }
x86_64 = "0.14"


@@ -56,9 +56,8 @@ SECTIONS {
}
/* FIXME: Currently this has to be aligned to PAGE_SIZE << MAX_ORDER */
. = ALIGN(4M);
PROVIDE(_usable_memory_start = .);
PROVIDE(_usable_memory_size = 0x88000000 - .);
PROVIDE(_usable_memory_start = ALIGN(4M));
PROVIDE(_usable_memory_size = 0x88000000 - _usable_memory_start);
/DISCARD/ : {
*(.comment)
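
The FIXME above notes that the usable-memory region has to be aligned to PAGE_SIZE << MAX_ORDER, which is why the script uses ALIGN(4M). A minimal sketch of that arithmetic, assuming PAGE_SIZE = 4096 and MAX_ORDER = 10 as defined elsewhere in this diff; the start address is only a hypothetical example:

// Illustrative only: the alignment constraint behind the ALIGN(4M) in the linker script.
const PAGE_SIZE: usize = 4096;
const MAX_ORDER: usize = 10;

fn main() {
    // PAGE_SIZE << MAX_ORDER = 4096 * 1024 bytes = 4 MiB.
    let block = PAGE_SIZE << MAX_ORDER;
    assert_eq!(block, 4 * 1024 * 1024);

    // Hypothetical start address; anything produced by ALIGN(4M) satisfies this check.
    let usable_memory_start: usize = 0x8040_0000;
    assert_eq!(usable_memory_start % block, 0);
    println!("4 MiB-aligned usable memory start: {usable_memory_start:#x}");
}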


@@ -58,10 +58,10 @@ unsafe impl GlobalAlloc for Allocator {
#[global_allocator]
static ALLOCATOR: Allocator = Allocator(Mutex::new(None));
// FIXME: umm is `memory` VirtualAddress or PhysicalAddress? both?
pub fn init(memory: *mut u8, memory_size: usize) {
log::info!("Initialising kernel heap allocator");
*ALLOCATOR.0.lock() =
Some(unsafe { Heap::new(memory, memory_size) });
*ALLOCATOR.0.lock() = Some(unsafe { Heap::new(memory, memory_size) });
}
// FIXME: these are arch-specific


@@ -0,0 +1,268 @@
use alloc::boxed::Box;
use spin::{Mutex, Once};
use crate::memory::{MemoryManager, PhysicalAddress, VirtualAddress};
use super::PAGE_SIZE;
pub enum PageSize {
Size4KiB,
Size2MiB,
Size1GiB,
// FIXME: SV48 support
// Size512GiB,
// FIXME: SV57 support
// Size256TiB,
}
impl PageSize {
fn level(&self) -> usize {
match self {
PageSize::Size4KiB => 0,
PageSize::Size2MiB => 1,
PageSize::Size1GiB => 2,
// FIXME: SV48 and SV57 support
}
}
}
pub struct PageTable {
entries: [PageEntry; 512]
}
impl PageTable {
/// Walk the page table to convert a virtual address to a physical address.
/// If a page fault would occur, this returns None. Otherwise, it returns the physical address.
pub fn virt_to_phys(&self, vaddr: VirtualAddress) -> Option<PhysicalAddress> {
let vpn = vaddr.vpns();
let mut v = &self.entries[vpn[2]];
for i in (0..=2).rev() {
if v.is_invalid() {
// This is an invalid entry, page fault.
break;
} else if v.is_leaf() {
// In RISC-V, a leaf can be at any level.
// The offset mask masks off the PPN bits, leaving only the page offset. Each PPN is 9 bits
// and they start at bit #12, so our formula is 12 + i * 9.
let off_mask = (1 << (12 + i * 9)) - 1;
let vaddr_pgoff = vaddr.as_addr() & off_mask;
let addr = ((v.entry() << 2) as usize) & !off_mask;
return Some((addr | vaddr_pgoff).into());
}
// Set v to the next entry which is pointed to by this entry.
// However, the address was shifted right by 2 places when stored in the page table
// entry, so we shift it left to get it back into place.
let entry = v.addr().as_ptr::<PageEntry>();
// We use i - 1 here; the invalid or leaf cases above should break or return
// before i reaches 0, where 0 - 1 would underflow.
v = unsafe { entry.add(vpn[i - 1]).as_ref().unwrap() };
}
// If we get here, we've exhausted all valid tables and haven't
// found a leaf.
None
}
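// Worked example (illustrative, not part of the walk above): if a leaf is found at
// level i = 1 (a 2 MiB page), off_mask = (1 << (12 + 1 * 9)) - 1 = (1 << 21) - 1, so the
// low 21 bits of vaddr are kept as the page offset and the upper bits come from the PPN
// stored in the entry.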
/// Maps a virtual address to a physical address
/// flags should contain only the following:
/// Read, Write, Execute, User, and/or Global
/// flags MUST include one or more of the following:
/// Read, Write, Execute
/// The valid bit automatically gets added
pub fn map(&mut self, vaddr: VirtualAddress, paddr: PhysicalAddress, flags: PageEntryFlags, page_size: PageSize) {
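// 0xe == Read | Write | Execute (bits 1..=3); the caller must request at least one of them.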
assert!(flags as usize & 0xe != 0);
let vpn = vaddr.vpns();
let ppn = paddr.ppns();
let level = page_size.level();
let mut v = &mut self.entries[vpn[2]];
// Now, we're going to traverse the page table and set the bits properly. We expect the root
// to be valid, but we may have to create any levels beyond the root ourselves.
for i in (level..2).rev() {
if v.is_invalid() {
let mut mm = MEMORY_MANAGER.get().unwrap().lock();
let page = mm.zallocate_pages(1).unwrap().as_addr();
v.set_entry((page as usize >> 2) | PageEntryFlags::Valid as usize);
}
let entry = v.addr().as_mut_ptr::<PageEntry>();
v = unsafe { entry.add(vpn[i]).as_mut().unwrap() };
}
// When we get here, we should be at VPN[0] and v should be pointing to our entry.
// The entry structure is Figure 4.18 in the RISC-V Privileged Specification
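// For a page-aligned paddr this works out to (paddr >> 2) | flags | Valid; for example,
// paddr 0x8020_0000 gives PPN [0x0, 0x1, 0x2] and an entry of 0x2008_0000 | flags | 1.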
let entry = (ppn[2] << 28) as usize // PPN[2] = [53:28]
| (ppn[1] << 19) as usize // PPN[1] = [27:19]
| (ppn[0] << 10) as usize // PPN[0] = [18:10]
| flags as usize // Specified bits, such as User, Read, Write, etc.
| PageEntryFlags::Valid as usize;
v.set_entry(entry);
}
/// Identity maps a page of memory
pub fn identity_map(&mut self, addr: PhysicalAddress, flags: PageEntryFlags, page_size: PageSize) {
// log::debug!("identity mapped {addr}");
self.map(addr.as_addr().into(), addr, flags, page_size);
}
/// Identity maps a range of contiguous memory
/// This assumes that start <= end
pub fn identity_map_range(&mut self, start: PhysicalAddress, end: PhysicalAddress, flags: PageEntryFlags) {
log::debug!("start: {start}, end: {end}");
let mut mem_addr = start.as_addr() & !(PAGE_SIZE - 1);
let num_pages = (align_val(end.as_addr(), 12) - mem_addr - 1) / PAGE_SIZE + 1;
for _ in 0..num_pages {
// FIXME: we can merge these page entries if possible into Size2MiB or larger entries
self.identity_map(mem_addr.into(), flags, PageSize::Size4KiB);
mem_addr += 1 << 12;
}
}
/// Unmaps a page of memory at vaddr
pub fn unmap(&mut self, vaddr: VirtualAddress) {
let vpn = vaddr.vpns();
// Now, we're going to traverse the page table and clear the bits
let mut v = &mut self.entries[vpn[2]];
for i in (0..2).rev() {
if v.is_invalid() {
// This is an invalid entry, page is already unmapped
return;
} else if v.is_leaf() {
// This is a leaf, which can be at any level
// In order to make this page unmapped, we need to clear the entry
v.set_entry(0);
return;
}
let entry = v.addr().as_mut_ptr::<PageEntry>();
v = unsafe { entry.add(vpn[i]).as_mut().unwrap() };
}
// If we're here this is an unmapped page
return;
}
/// Unmaps a range of contiguous memory
/// This assumes that start <= end
pub fn unmap_range(&mut self, start: VirtualAddress, end: VirtualAddress) {
let mut mem_addr = start.as_addr() & !(PAGE_SIZE - 1);
let num_pages = (align_val(end.as_addr(), 12) - mem_addr) / PAGE_SIZE;
for _ in 0..num_pages {
self.unmap(mem_addr.into());
mem_addr += 1 << 12;
}
}
/// Frees all memory associated with a table.
/// NOTE: This does NOT free the table itself; the table must be freed manually.
fn destroy(&mut self) {
for entry in &mut self.entries {
entry.destroy()
}
}
}
#[repr(usize)]
#[derive(Clone, Copy, Debug)]
pub enum PageEntryFlags {
None = 0,
Valid = 1,
Read = 1 << 1,
Write = 1 << 2,
Execute = 1 << 3,
User = 1 << 4,
Global = 1 << 5,
Access = 1 << 6,
Dirty = 1 << 7,
// for convenience
ReadWrite = Self::Read as usize | Self::Write as usize,
ReadExecute = Self::Read as usize | Self::Execute as usize,
ReadWriteExecute = Self::Read as usize | Self::Write as usize | Self::Execute as usize,
UserReadWrite = Self::User as usize | Self::ReadWrite as usize,
UserReadExecute = Self::User as usize | Self::ReadExecute as usize,
UserReadWriteExecute = Self::User as usize | Self::ReadWriteExecute as usize,
}
struct PageEntry(usize);
impl PageEntry {
fn is_valid(&self) -> bool {
self.0 & PageEntryFlags::Valid as usize != 0
}
fn is_invalid(&self) -> bool {
!self.is_valid()
}
fn is_leaf(&self) -> bool {
self.0 & PageEntryFlags::ReadWriteExecute as usize != 0
}
fn is_branch(&self) -> bool {
!self.is_leaf()
}
fn entry(&self) -> usize {
self.0
}
fn set_entry(&mut self, entry: usize) {
self.0 = entry;
}
fn clear_flag(&mut self, flag: PageEntryFlags) {
self.0 &= !(flag as usize);
}
fn set_flag(&mut self, flag: PageEntryFlags) {
self.0 |= flag as usize;
}
fn addr(&self) -> PhysicalAddress {
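// The PTE stores the PPN starting at bit 10; masking off the low flag bits and shifting
// left by 2 moves the PPN back to bit 12, where it sits in a physical address.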
((self.entry() as usize & !0x3ff) << 2).into()
}
fn destroy(&mut self) {
if self.is_valid() && self.is_branch() {
// This is a valid entry so drill down and free
let memaddr = self.addr();
let table = memaddr.as_mut_ptr::<PageTable>();
unsafe {
(*table).destroy();
let mut mm = MEMORY_MANAGER.get().unwrap().lock();
mm.deallocate_pages(memaddr.into(), 0);
}
}
}
}
// FIXME: PageTable should be integrated into MemoryManager *somehow*
pub static MEMORY_MANAGER: Once<Mutex<MemoryManager>> = Once::new();
pub static PAGE_TABLE: Once<Mutex<PhysicalAddress>> = Once::new();
pub fn init(start_addr: PhysicalAddress, page_count: usize) {
let mut memory_manager = MemoryManager::new();
unsafe {
memory_manager.add_range(start_addr, page_count);
PAGE_TABLE.call_once(|| Mutex::new(memory_manager.zallocate_pages(0).unwrap()));
}
MEMORY_MANAGER.call_once(|| Mutex::new(memory_manager));
}
/// Align (set to a multiple of some power of two)
/// This function always rounds up.
fn align_val(val: usize, order: usize) -> usize {
let o = (1 << order) - 1;
(val + o) & !o
}
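
A small, self-contained sketch of the Sv39 index arithmetic used by virt_to_phys and map above, using plain usize instead of the VirtualAddress and PhysicalAddress newtypes introduced later in this diff; the address is only an example:

// align_val from the module above, reproduced verbatim: round up to a multiple of 1 << order.
fn align_val(val: usize, order: usize) -> usize {
    let o = (1 << order) - 1;
    (val + o) & !o
}

fn main() {
    // Example virtual address (hypothetical).
    let vaddr: usize = 0x8020_1abc;

    // Sv39 splits a virtual address into a 12-bit page offset and three 9-bit VPNs,
    // mirroring VirtualAddress::vpns().
    let offset = vaddr & 0xfff;
    let vpn = [
        (vaddr >> 12) & 0x1ff,
        (vaddr >> 21) & 0x1ff,
        (vaddr >> 30) & 0x1ff,
    ];
    println!("offset = {offset:#x}, vpn = {vpn:#x?}");

    // Rounding the address up to the next page boundary, as identity_map_range does with the range end.
    assert_eq!(align_val(vaddr, 12), 0x8020_2000);
}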


@@ -1,10 +1,12 @@
mod memory;
use core::{arch::{asm, global_asm}, fmt::Write};
use alloc::vec;
use alloc::boxed::Box;
use sbi::system_reset::{ResetType, ResetReason, system_reset};
use spin::{Mutex, Once};
use uart_16550::MmioSerialPort;
use crate::{allocator, memory::MemoryManager};
use crate::{allocator, memory::PhysicalAddress, arch::riscv64::memory::{PAGE_TABLE, PageEntryFlags, PageSize, PageTable}};
global_asm!(include_str!("entry.s"));
global_asm!(include_str!("memory_regions.s"));
@@ -12,72 +14,71 @@ global_asm!(include_str!("memory_regions.s"));
pub const PAGE_SIZE: usize = 4096;
extern {
static TEXT_START: usize;
static TEXT_END: usize;
static TEXT_START: PhysicalAddress;
static TEXT_END: PhysicalAddress;
static RODATA_START: usize;
static RODATA_END: usize;
static RODATA_START: PhysicalAddress;
static RODATA_END: PhysicalAddress;
static DATA_START: usize;
static DATA_END: usize;
static DATA_START: PhysicalAddress;
static DATA_END: PhysicalAddress;
static SDATA_START: usize;
static SDATA_END: usize;
static SDATA_START: PhysicalAddress;
static SDATA_END: PhysicalAddress;
static BSS_START: usize;
static BSS_END: usize;
static BSS_START: PhysicalAddress;
static BSS_END: PhysicalAddress;
static INITIAL_KERNEL_HEAP_START: *mut u8;
static INITIAL_KERNEL_HEAP_START: PhysicalAddress;
static INITIAL_KERNEL_HEAP_SIZE: usize;
static USABLE_MEMORY_START: usize;
static USABLE_MEMORY_START: PhysicalAddress;
static USABLE_MEMORY_SIZE: usize;
}
static SERIAL_CONSOLE: Once<Mutex<MmioSerialPort>> = Once::new();
mod memory {
use spin::{Mutex, Once};
use crate::memory::MemoryManager;
pub static MEMORY_MANAGER: Once<Mutex<MemoryManager>> = Once::new();
}
#[no_mangle]
extern fn _kernel_start() -> ! {
unsafe extern fn _kernel_start() -> ! {
SERIAL_CONSOLE.call_once(|| Mutex::new(unsafe { MmioSerialPort::new(0x1000_0000) }));
crate::logger::init().expect("failed to set logger");
log::info!("Initialising AKern {}", crate::VERSION);
unsafe {
allocator::init(INITIAL_KERNEL_HEAP_START, INITIAL_KERNEL_HEAP_SIZE);
}
allocator::init(INITIAL_KERNEL_HEAP_START.as_mut_ptr::<u8>(), INITIAL_KERNEL_HEAP_SIZE);
memory::init(USABLE_MEMORY_START.into(), USABLE_MEMORY_SIZE / PAGE_SIZE);
let mut memory_manager = MemoryManager::new();
unsafe {
log::debug!("USABLE_MEMORY_START = 0x{USABLE_MEMORY_START:x}");
memory_manager.add_range(USABLE_MEMORY_START, USABLE_MEMORY_SIZE / PAGE_SIZE);
}
memory::MEMORY_MANAGER.call_once(|| Mutex::new(memory_manager));
let mut page_table_addr = PAGE_TABLE.get().unwrap().lock();
let mut page_table = page_table_addr.as_mut_ptr::<PageTable>().as_mut().unwrap();
unsafe {
let mut mm = memory::MEMORY_MANAGER.get().unwrap().lock();
let alloc_0 = mm.allocate_pages(0).unwrap();
log::debug!("Addr: {:p}", alloc_0);
let alloc_1 = mm.allocate_pages(0).unwrap();
log::debug!("Addr: {:p}", alloc_1);
mm.deallocate_pages(alloc_0, 0);
let alloc_2 = mm.allocate_pages(1).unwrap();
log::debug!("Addr: {:p}", alloc_2);
mm.deallocate_pages(alloc_1, 0);
mm.deallocate_pages(alloc_2, 1);
let alloc_3 = mm.allocate_pages(1).unwrap();
log::debug!("Addr: {:p}", alloc_3);
mm.deallocate_pages(alloc_3, 1);
}
// Map text (executable) section
page_table.identity_map_range(TEXT_START, TEXT_END, PageEntryFlags::ReadExecute);
// Map rodata section
page_table.identity_map_range(RODATA_START, RODATA_END, PageEntryFlags::Read);
// Map data section
page_table.identity_map_range(DATA_START, DATA_END, PageEntryFlags::ReadWrite);
// Map sdata section
page_table.identity_map_range(SDATA_START, SDATA_END, PageEntryFlags::ReadWrite);
// Map bss section (includes stack and initial kernel heap)
page_table.identity_map_range(BSS_START, BSS_END, PageEntryFlags::ReadWrite);
// Map usable memory range (as rw so not executable)
page_table.identity_map_range(USABLE_MEMORY_START, USABLE_MEMORY_START + USABLE_MEMORY_SIZE.into(), PageEntryFlags::ReadWrite);
// Map Uart so we can continue using serial
page_table.identity_map(0x1000_0000_usize.into(), PageEntryFlags::ReadWrite, PageSize::Size4KiB);
system_reset(ResetType::Shutdown, ResetReason::NoReason).unwrap();
loop {}
let table_ppn = page_table_addr.as_addr() as usize >> 12;
let satp_value = 8 << 60 | table_ppn;
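// satp mode 8 (bits 63:60) selects Sv39 translation; the low bits hold the root table's PPN.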
log::info!("Enabling the MMU...");
asm!(
"csrw satp, {}",
"sfence.vma",
in(reg) satp_value,
);
log::info!("We're in PAGING LAND!");
#[allow(unreachable_code)]
match system_reset(ResetType::Shutdown, ResetReason::NoReason).unwrap() {}
}
/// Spin loop
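
A minimal sketch of the satp value composed in _kernel_start above, with a hypothetical, page-aligned root-table address:

fn main() {
    // Hypothetical physical address of the root page table (must be page-aligned).
    let page_table_addr: u64 = 0x8040_0000;

    // As in _kernel_start above: the root table's PPN, plus mode 8 (Sv39) in bits 63:60.
    let table_ppn = page_table_addr >> 12;
    let satp_value = (8u64 << 60) | table_ppn;

    assert_eq!(satp_value, 0x8000_0000_0008_0400);
    println!("satp = {satp_value:#x}");
}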


@@ -46,7 +46,7 @@ pub fn initialize(mmap: &'static [NonNullPtr<LimineMemmapEntry>]) {
let page_count = (entry.len as usize - diff) / PAGE_SIZE;
unsafe {
memory_manager.add_range(start_addr, page_count);
memory_manager.add_range(start_addr.into(), page_count);
}
}


@@ -53,16 +53,16 @@ unsafe extern "C" fn _kernel_start() -> ! {
{
let mut mm = MEMORY_MANAGER.get().unwrap().lock();
let alloc_0 = mm.allocate_pages(0).unwrap();
log::debug!("Addr: {:p}", alloc_0);
log::debug!("Addr: {alloc_0}");
let alloc_1 = mm.allocate_pages(0).unwrap();
log::debug!("Addr: {:p}", alloc_1);
log::debug!("Addr: {alloc_1}");
mm.deallocate_pages(alloc_0, 0);
let alloc_2 = mm.allocate_pages(1).unwrap();
log::debug!("Addr: {:p}", alloc_2);
log::debug!("Addr: {alloc_2}");
mm.deallocate_pages(alloc_1, 0);
mm.deallocate_pages(alloc_2, 1);
let alloc_3 = mm.allocate_pages(1).unwrap();
log::debug!("Addr: {:p}", alloc_3);
log::debug!("Addr: {alloc_3}");
mm.deallocate_pages(alloc_3, 1);
}


@@ -1,12 +1,153 @@
//! The Memory Manager
use alloc::collections::VecDeque;
use derive_more::*;
pub use crate::arch::PAGE_SIZE;
pub const MAX_ORDER: usize = 10;
#[repr(transparent)]
#[derive(
Add,
AddAssign,
Binary,
BitAnd,
BitAndAssign,
BitOr,
BitOrAssign,
BitXor,
BitXorAssign,
Clone,
Constructor,
Copy,
Display,
Div,
DivAssign,
Eq,
From,
LowerHex,
Mul,
MulAssign,
Not,
Octal,
Ord,
PartialEq,
PartialOrd,
Rem,
RemAssign,
Shl,
ShlAssign,
Shr,
ShrAssign,
Sub,
SubAssign,
Sum,
UpperHex,
)]
#[display(fmt = "0x{:x}", _0)]
#[from(forward)]
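// `forward` forwards the conversion through the inner usize, so anything that converts
// into usize (usize itself included) can be `.into()`ed straight into this address type.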
pub struct VirtualAddress(usize);
impl VirtualAddress {
#[cfg(target_arch = "riscv64")]
/// Returns an array of Virtual Page Numbers
// FIXME: SV48 and SV57 support
pub fn vpns(&self) -> [usize; 3] {
[
// [20:12]
(self.0 >> 12) & 0x1ff,
// [29:21]
(self.0 >> 21) & 0x1ff,
// [38:30]
(self.0 >> 30) & 0x1ff,
]
}
pub fn as_addr(&self) -> usize {
self.0
}
pub fn as_ptr<T>(&self) -> *const T {
self.0 as _
}
pub fn as_mut_ptr<T>(&mut self) -> *mut T {
self.0 as _
}
}
#[repr(transparent)]
#[derive(
Add,
AddAssign,
Binary,
BitAnd,
BitAndAssign,
BitOr,
BitOrAssign,
BitXor,
BitXorAssign,
Clone,
Constructor,
Copy,
Display,
Div,
DivAssign,
Eq,
From,
LowerHex,
Mul,
MulAssign,
Not,
Octal,
Ord,
PartialEq,
PartialOrd,
Rem,
RemAssign,
Shl,
ShlAssign,
Shr,
ShrAssign,
Sub,
SubAssign,
Sum,
UpperHex,
)]
#[display(fmt = "0x{:x}", _0)]
#[from(forward)]
pub struct PhysicalAddress(usize);
impl PhysicalAddress {
#[cfg(target_arch = "riscv64")]
/// Returns an array of Physical Page Numbers
// FIXME: SV48 and SV57 support
pub fn ppns(&self) -> [usize; 3] {
[
// [20:12]
(self.0 >> 12) & 0x1ff,
// [29:21]
(self.0 >> 21) & 0x1ff,
// [55:30]
(self.0 >> 30) & 0x3ffffff,
]
}
pub fn as_addr(&self) -> usize {
self.0
}
pub fn as_ptr<T>(&self) -> *const T {
self.0 as _
}
pub fn as_mut_ptr<T>(&self) -> *mut T {
self.0 as _
}
}
pub struct MemoryManager {
free_lists: [VecDeque<usize>; MAX_ORDER + 1],
free_lists: [VecDeque<PhysicalAddress>; MAX_ORDER + 1],
}
impl MemoryManager {
@@ -16,15 +157,26 @@ impl MemoryManager {
}
}
pub fn allocate_pages(&mut self, order: usize) -> Option<*mut u8> {
self.get_free_pages(order).map(|addr| addr as *mut u8)
// FIXME: this method should take a length and turn that into an order
pub fn allocate_pages(&mut self, order: usize) -> Option<PhysicalAddress> {
self.get_free_pages(order)
}
// FIXME: this method should take a length and turn that into an order
pub fn zallocate_pages(&mut self, order: usize) -> Option<PhysicalAddress> {
let alloc = self.allocate_pages(order)?;
unsafe {
alloc.as_mut_ptr::<u8>().write_bytes(0, PAGE_SIZE << order);
}
Some(alloc)
}
/// # Safety
/// This method assumes that `address` is in range of this allocator
pub unsafe fn deallocate_pages(&mut self, address: *mut u8, order: usize) {
self.free_lists[order].push_front(address as usize);
self.merge_buddies(order, address as usize)
// FIXME: this method should take a length and turn that into an order
pub unsafe fn deallocate_pages(&mut self, address: PhysicalAddress, order: usize) {
self.free_lists[order].push_front(address);
self.merge_buddies(order, address)
}
/// # Safety
@@ -32,13 +184,13 @@ impl MemoryManager {
/// a) starts and ends at an address aligned to page boundaries,
/// b) are valid free pages not already added,
/// FIXME: c) starts and ends at an address aligned to `PAGE_SIZE << MAX_ORDER`
pub unsafe fn add_range(&mut self, start_addr: usize, page_count: usize) {
pub unsafe fn add_range(&mut self, start_addr: PhysicalAddress, page_count: usize) {
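// Memory is added in blocks of 1 << MAX_ORDER = 1024 pages, hence FIXME (c) above about
// the range being aligned to PAGE_SIZE << MAX_ORDER.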
for i in 0..page_count / 1024 {
self.free_lists[MAX_ORDER].push_back(start_addr + i * 1024 * PAGE_SIZE);
self.free_lists[MAX_ORDER].push_back(start_addr + (i * 1024 * PAGE_SIZE).into());
}
}
fn get_free_pages(&mut self, order: usize) -> Option<usize> {
fn get_free_pages(&mut self, order: usize) -> Option<PhysicalAddress> {
// We can't get such a page!
if order > MAX_ORDER {
return None;
@@ -49,18 +201,18 @@ impl MemoryManager {
}
self.get_free_pages(order + 1).map(|addr| {
self.free_lists[order].push_front(addr ^ (PAGE_SIZE << order));
self.free_lists[order].push_front(addr ^ (PAGE_SIZE << order).into());
addr
})
}
fn merge_buddies(&mut self, order: usize, address: usize) {
fn merge_buddies(&mut self, order: usize, address: PhysicalAddress) {
// if we can't have any higher order blocks, we can't merge
if order > MAX_ORDER - 1 {
return;
}
let buddy_address = address ^ (PAGE_SIZE << order);
let buddy_address = address ^ (PAGE_SIZE << order).into();
log::debug!("merge buddy: 0x{buddy_address:x}");
if let Some(buddy_index) = self.free_lists[order]
.iter()
@@ -69,7 +221,9 @@ impl MemoryManager {
self.free_lists[order].pop_front();
self.free_lists[order].remove(buddy_index);
let new_address = address.min(buddy_address);
log::debug!("Merging 0x{address:x} @ {order} with 0x{buddy_address:x} at 0x{new_address:x}");
log::debug!(
"Merging 0x{address:x} @ {order} with 0x{buddy_address:x} at 0x{new_address:x}"
);
self.free_lists[order + 1].push_front(new_address);
self.merge_buddies(order + 1, new_address)
}
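
The buddy computation in get_free_pages and merge_buddies above is plain XOR arithmetic; a self-contained sketch with plain usize addresses (the addresses are only examples):

const PAGE_SIZE: usize = 4096;

fn main() {
    let order = 0;

    // An order-0 block and its buddy differ only in the PAGE_SIZE << order bit.
    let addr: usize = 0x8040_1000;
    let buddy = addr ^ (PAGE_SIZE << order);
    assert_eq!(buddy, 0x8040_0000);

    // Merging keeps the lower of the two addresses and moves it up one order,
    // exactly as merge_buddies does with address.min(buddy_address).
    let merged = addr.min(buddy);
    assert_eq!(merged, 0x8040_0000);

    // Splitting goes the other way: get_free_pages takes a block from order + 1,
    // keeps one half, and pushes the other half back onto the lower order's list.
    let other_half = merged ^ (PAGE_SIZE << order);
    assert_eq!(other_half, 0x8040_1000);

    println!("buddy of {addr:#x} at order {order} is {buddy:#x}");
}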


@@ -21,7 +21,7 @@ fn main() -> Result<(), Error> {
}
build(release, target).change_context(Error::Build)
},
}
Some("run" | "r") => {
let mut release = false;
let mut target = Target::X86_64;
@@ -113,7 +113,9 @@ fn build(release: bool, target: Target) -> Result<(), Error> {
}
match target {
Target::Riscv64Virt => { com.args(["--target", "targets/riscv64-virt-ableos.json"]); },
Target::Riscv64Virt => {
com.args(["--target", "targets/riscv64-virt-ableos.json"]);
}
_ => {}
}
@@ -145,7 +147,7 @@ fn run(release: bool, target: Target) -> Result<(), Error> {
fn run(release: bool, target: Target) -> Result<(), Error> {
let mut com = match target {
Target::X86_64 => Command::new("qemu-system-x86_64"),
Target::Riscv64Virt => Command::new("qemu-system-riscv64")
Target::Riscv64Virt => Command::new("qemu-system-riscv64"),
};
if target == Target::X86_64 {