New heap allocator!

Asya 2022-12-22 19:22:11 +03:00
parent 78a5422924
commit 80fd1aaa41
9 changed files with 363 additions and 85 deletions

Cargo.lock (generated)

@@ -373,7 +373,6 @@ version = "0.2.0"
dependencies = [
 "crossbeam-queue",
 "limine",
- "linked_list_allocator",
 "log",
 "slab",
 "spin",
@@ -401,15 +400,6 @@ version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eac64618e505bab2387986e5b1014f5d344130d609685bbd8b71a10af3ee599d"

-[[package]]
-name = "linked_list_allocator"
-version = "0.9.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "549ce1740e46b291953c4340adcd74c59bcf4308f4cac050fd33ba91b7168f4a"
-dependencies = [
- "spinning_top",
-]

[[package]]
name = "lock_api"
version = "0.4.7"
@@ -755,15 +745,6 @@ dependencies = [
 "lock_api",
]

-[[package]]
-name = "spinning_top"
-version = "0.2.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "75adad84ee84b521fb2cca2d4fd0f1dab1d8d026bda3c5bea4ca63b5f9f9293c"
-dependencies = [
- "lock_api",
-]

[[package]]
name = "static_assertions"
version = "1.1.0"

Cargo.toml

@@ -4,7 +4,6 @@ name = "kernel"
version = "0.2.0"

[dependencies]
-linked_list_allocator = "0.9"
slab = { version = "0.4", default-features = false }
spin = "0.9"
versioning = { git = "https://git.ablecorp.us/able/aos_userland" }

Kernel linker script

@@ -26,15 +26,15 @@ SECTIONS
    *(.text .text.*)
  } :text

-  /* Move to the next memory page for .rodata */
-  . += CONSTANT(MAXPAGESIZE);
  /* Align .rodata to page boundary */
  . = ALIGN(4K);

  .rodata : {
    *(.rodata .rodata.*)
  } :rodata

-  /* Move to the next memory page for .data */
-  . += CONSTANT(MAXPAGESIZE);
  /* Align .data to page boundary */
  . = ALIGN(4K);

  .data : {
    *(.data .data.*)
@@ -43,5 +43,11 @@ SECTIONS
  .bss : {
    *(COMMON)
    *(.bss .bss.*)

    /* Align initial kernel heap to page boundary */
    . = ALIGN(4K);
    PROVIDE(_initial_kernel_heap_start = .);
    PROVIDE(_initial_kernel_heap_size = 1024 * 1024);
    . += _initial_kernel_heap_size;
  } :data
}
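
The initial heap is reserved with pure linker arithmetic: `.` is advanced by `_initial_kernel_heap_size` bytes inside .bss, and the two PROVIDE symbols tell the kernel where the region starts and how large it is. A minimal sketch of consuming such symbols from Rust (editor's illustration, assuming the same symbol names; the commit's actual code follows below):

    // Linker symbols carry no type; declaring them as zero-argument functions
    // is the usual trick for taking their addresses from Rust.
    extern "C" {
        fn _initial_kernel_heap_start();
        fn _initial_kernel_heap_size();
    }

    fn initial_heap_region() -> (*mut u8, usize) {
        // The *address* of each symbol is the value assigned by PROVIDE(...).
        (_initial_kernel_heap_start as *mut u8, _initial_kernel_heap_size as usize)
    }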

allocator.rs

@@ -1,16 +1,342 @@
//! Memory allocator

-///
-pub const HEAP_START: usize = 0x_4444_4444_0000;
-///
-pub const HEAP_MULTIPLIER: usize = 100000;
-///
-pub const HEAP_BASE: usize = 100;
-///
-pub const HEAP_SIZE: usize = HEAP_BASE * HEAP_MULTIPLIER;

/*
 * This file incorporates work covered by the following license notice:
 *
 * Copyright (c) 2020, the SerenityOS developers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

use core::{
    alloc::{GlobalAlloc, Layout},
    mem,
    ptr::{self, NonNull},
};

use spin::Mutex;

extern "C" {
    fn _initial_kernel_heap_start();
    fn _initial_kernel_heap_size();
}

const INITIAL_KERNEL_HEAP_START: *mut u8 = _initial_kernel_heap_start as _;
const INITIAL_KERNEL_HEAP_SIZE: *const () = _initial_kernel_heap_size as _;
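
// Editor's note (not in the original commit): the symbols are declared as
// functions only so their addresses can be taken. A `fn` may be cast to a raw
// pointer in a `const`, but pointer-to-integer casts are rejected during const
// evaluation, so the heap *size* is carried as a `*const ()` here and only
// converted to a `usize` at runtime in `init()`.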

struct Allocator(Mutex<Option<Heap>>);

unsafe impl GlobalAlloc for Allocator {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let mut lock = self.0.lock();
        let allocator = lock.as_mut().expect("heap allocator should be initialized");

        match allocator.allocate(layout.size(), layout.align()) {
            Some(ptr) => ptr.as_ptr(),
            None => ptr::null_mut(),
        }
    }

    unsafe fn dealloc(&self, ptr: *mut u8, _: Layout) {
        let mut lock = self.0.lock();
        let allocator = lock.as_mut().expect("heap allocator should be initialized");
        allocator.deallocate(ptr);
    }
}

#[global_allocator]
static ALLOCATOR: Allocator = Allocator(Mutex::new(None));
pub fn init() {
    log::info!("Initialising kernel heap allocator");
    // Recover the heap size from the linker symbol's address.
    let memory_size = INITIAL_KERNEL_HEAP_SIZE as usize;
    *ALLOCATOR.0.lock() = Some(unsafe { Heap::new(INITIAL_KERNEL_HEAP_START, memory_size) });
}
// FIXME: these are arch-specific
const CHUNK_SIZE: usize = 16;
const MINIMUM_ALIGNMENT: usize = 8;

struct Header {
    size_in_chunks: usize,
}

// Compile-time assertions that Header's size is a power of two
// and that a single chunk can hold a Header
const _: () = {
    assert!(mem::size_of::<Header>().is_power_of_two());
    assert!(CHUNK_SIZE >= mem::size_of::<Header>());
};
/// A first-fit heap allocator with CHUNK_SIZE chunks and a set size.
/// In the future these will become subheaps, and the actual heap allocator
/// will create more subheaps as needed.
struct Heap {
    total_chunks: usize,
    allocated_chunks: usize,
    chunks: *mut u8,
    bitmap: *mut u8,
}

impl Heap {
    /// # Safety
    /// This function assumes that the pointer given points at a valid memory address
    unsafe fn new(memory: *mut u8, memory_size: usize) -> Self {
        let total_chunks = Self::calculate_chunks(memory_size);
        // The chunk storage and the allocation bitmap must both fit
        assert!(total_chunks * CHUNK_SIZE + (total_chunks + 7) / 8 <= memory_size);
        Self {
            total_chunks,
            allocated_chunks: 0,
            chunks: memory,
            // The bitmap lives directly after the chunk storage
            bitmap: unsafe { memory.add(total_chunks * CHUNK_SIZE) },
        }
    }
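
    // Worked example (editor's sketch, not part of this commit): with the
    // 1 MiB heap the linker script reserves, calculate_chunks yields
    // 1_048_576 / 17 = 61_680 chunks, i.e. 986_880 bytes of chunk storage
    // plus a (61_680 + 7) / 8 = 7_710 byte bitmap = 994_590 bytes in total,
    // comfortably within the assert above.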

    fn allocate(&mut self, size: usize, alignment: usize) -> Option<NonNull<u8>> {
        assert!(alignment.is_power_of_two());
        let alignment = if alignment < MINIMUM_ALIGNMENT {
            MINIMUM_ALIGNMENT
        } else {
            alignment
        };

        // We need space for the header as well
        let size = size + mem::size_of::<Header>();
        let chunks_needed = (size + CHUNK_SIZE - 1) / CHUNK_SIZE;
        let chunk_alignment = (alignment + CHUNK_SIZE - 1) / CHUNK_SIZE;
        log::info!("size: {size} chunks: {chunks_needed} align: {chunk_alignment}");

        if chunks_needed + chunk_alignment > self.free_chunks() {
            return None;
        }

        // FIXME: should utilize the alignment directly instead of trying to allocate `size + alignment`
        let first_chunk = self.find_first_fit(chunks_needed + chunk_alignment)?;
        let chunks_addr = self.chunks as usize;
        let addr_unaligned = chunks_addr + first_chunk * CHUNK_SIZE;

        // Align the starting address and verify that we haven't gone outside the calculated free area
        let addr =
            addr_unaligned + alignment - (addr_unaligned + mem::size_of::<Header>()) % alignment;
        log::info!(
            "Addr unaligned: 0x{addr_unaligned:x} (offset: 0x{:x})",
            addr_unaligned - chunks_addr
        );
        log::info!("Addr: 0x{addr:x} (offset: 0x{:x})", addr - chunks_addr);
        let aligned_first_chunk = (addr - chunks_addr) / CHUNK_SIZE;
        assert!(first_chunk <= aligned_first_chunk);
        assert!(
            aligned_first_chunk + chunks_needed <= first_chunk + chunks_needed + chunk_alignment
        );

        let header = addr as *mut Header;
        unsafe {
            (*header).size_in_chunks = chunks_needed;
        }

        self.bitmap_set_range(aligned_first_chunk, chunks_needed, true);
        self.allocated_chunks += chunks_needed;

        let ptr: *mut u8 = unsafe { header.add(1).cast() };
        log::info!("{ptr:p}");
        // FIXME: zero or scrub memory?
        assert!(ptr.is_aligned_to(alignment));
        NonNull::new(ptr)
    }
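
    // Editor's example: Layout::from_size_align(24, 8) becomes 24 + 8 = 32
    // bytes once the Header is added, i.e. chunks_needed = 2 and
    // chunk_alignment = 1, so find_first_fit() is asked for a 3-chunk hole.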

    fn deallocate(&mut self, ptr: *mut u8) {
        let header = Self::allocation_header(ptr);
        let start = (header as usize - self.chunks as usize) / CHUNK_SIZE;
        assert!(self.bitmap_get(start));
        let size = unsafe { (*header).size_in_chunks };
        self.bitmap_set_range(start, size, false);
        self.allocated_chunks -= size;
        // FIXME: zero or scrub memory?
    }

    /// Finds the first hole that can fit an allocation of `size` chunks and
    /// returns the index of the first chunk in the hole
    fn find_first_fit(&self, size: usize) -> Option<usize> {
        let mut start_of_free_chunks = 0;
        let mut free_chunks = 0;

        // Scan the bitmap one usize-sized bucket at a time
        for i in 0..self.total_chunks / usize::BITS as usize {
            if free_chunks >= size {
                return Some(start_of_free_chunks);
            }

            let mut bucket = unsafe { *self.bitmap.cast::<usize>().add(i) };
            if bucket == usize::MAX {
                // Skip over completely full bucket
                free_chunks = 0;
                continue;
            }
            if bucket == 0 {
                // Skip over completely empty bucket
                if free_chunks == 0 {
                    start_of_free_chunks = i * usize::BITS as usize;
                }
                free_chunks += usize::BITS as usize;
                continue;
            }

            let mut viewed_bits = 0;
            while viewed_bits < usize::BITS as usize {
                if bucket == 0 {
                    if free_chunks == 0 {
                        start_of_free_chunks = i * usize::BITS as usize + viewed_bits;
                    }
                    free_chunks += usize::BITS as usize - viewed_bits;
                    viewed_bits = usize::BITS as usize;
                } else {
                    let trailing_zeros = bucket.trailing_zeros() as usize;
                    bucket >>= trailing_zeros;

                    if free_chunks == 0 {
                        start_of_free_chunks = i * usize::BITS as usize + viewed_bits;
                    }
                    free_chunks += trailing_zeros;
                    viewed_bits += trailing_zeros;

                    if free_chunks >= size {
                        return Some(start_of_free_chunks);
                    }

                    let trailing_ones = bucket.trailing_ones() as usize;
                    bucket >>= trailing_ones;
                    viewed_bits += trailing_ones;
                    free_chunks = 0;
                }
            }
        }

        if free_chunks >= size {
            return Some(start_of_free_chunks);
        }

        // Handle the trailing chunks that don't fill a whole bucket
        let first_trailing_bit = (self.total_chunks / usize::BITS as usize) * usize::BITS as usize;
        let trailing_bits = self.total_chunks % usize::BITS as usize;
        for i in 0..trailing_bits {
            if self.bitmap_get(first_trailing_bit + i) {
                free_chunks = 0;
                continue;
            }
            if free_chunks == 0 {
                start_of_free_chunks = first_trailing_bit + i;
            }
            free_chunks += 1;
            if free_chunks >= size {
                return Some(start_of_free_chunks);
            }
        }
        None
    }

    fn bitmap_set_range(&mut self, start: usize, length: usize, value: bool) {
        assert!(start + length <= self.total_chunks);
        if length == 0 {
            return;
        }

        const BITMASK_FIRST_BYTE: [u8; 8] = [0xFF, 0xFE, 0xFC, 0xF8, 0xF0, 0xE0, 0xC0, 0x80];
        const BITMASK_LAST_BYTE: [u8; 8] = [0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F];

        let first = unsafe { self.bitmap.add(start / 8) };
        let last = unsafe { self.bitmap.add((start + length) / 8) };
        let mut byte_mask = BITMASK_FIRST_BYTE[start % 8];

        if first == last {
            byte_mask &= BITMASK_LAST_BYTE[(start + length) % 8];
            if value {
                unsafe {
                    *first |= byte_mask;
                }
            } else {
                unsafe {
                    *first &= !byte_mask;
                }
            }
        } else {
            if value {
                unsafe {
                    *first |= byte_mask;
                }
            } else {
                unsafe {
                    *first &= !byte_mask;
                }
            }

            byte_mask = BITMASK_LAST_BYTE[(start + length) % 8];
            if value {
                unsafe {
                    *last |= byte_mask;
                }
            } else {
                unsafe {
                    *last &= !byte_mask;
                }
            }

            let first = unsafe { first.add(1) };
            if first >= last {
                return;
            }

            if value {
                unsafe {
                    first.write_bytes(0xFF, last.sub_ptr(first));
                }
            } else {
                unsafe {
                    first.write_bytes(0, last.sub_ptr(first));
                }
            }
        }
    }
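
    // Editor's note: BITMASK_FIRST_BYTE[i] == 0xFF << i (keeps bits i..8) and
    // BITMASK_LAST_BYTE[i] == (1 << i) - 1 (keeps bits 0..i), so the writes
    // above cover the partial first byte, the partial last byte, and the run
    // of whole bytes in between.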

    fn bitmap_get(&self, index: usize) -> bool {
        assert!(index < self.total_chunks);
        (unsafe { *self.bitmap.add(index / 8) } & (1 << (index % 8))) != 0
    }

    const fn free_chunks(&self) -> usize {
        self.total_chunks - self.allocated_chunks
    }

    fn allocation_header(ptr: *mut u8) -> *mut Header {
        // The Header sits immediately before the pointer handed out by allocate()
        unsafe { ptr.cast::<Header>().sub(1) }
    }

    const fn calculate_chunks(memory_size: usize) -> usize {
        // Conservative: budgets a whole bitmap byte per chunk rather than a
        // single bit, trading a little space for trivially safe arithmetic
        memory_size / (CHUNK_SIZE + 1)
    }
}

// SAFETY: a Heap is only ever reachable through the global Mutex above
unsafe impl Send for Heap {}

#[alloc_error_handler]
fn alloc_error_handler(layout: alloc::alloc::Layout) -> ! {
    panic!("allocation error: {layout:?}")
}

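Two side notes on the implementation above. find_first_fit leans on trailing_zeros/trailing_ones to leap over whole runs of free or allocated chunks instead of testing bits one at a time; a standalone illustration of the bucket scan (editor's sketch, not from this commit):

    fn main() {
        // Bits set to 1 mark allocated chunks: here chunks 0-1 are free,
        // chunk 2 is taken, and so on.
        let mut bucket: usize = 0b0110_0100;
        let free_run = bucket.trailing_zeros();
        assert_eq!(free_run, 2); // a whole run of free chunks, found in one step
        bucket >>= free_run;
        let used_run = bucket.trailing_ones();
        assert_eq!(used_run, 1); // allocated runs are skipped just as cheaply
        println!("bucket scan ok");
    }

And once allocator::init() has run, the #[global_allocator] serves the ordinary alloc types; hypothetical kernel code (names invented for illustration):

    extern crate alloc;
    use alloc::{boxed::Box, vec::Vec};

    fn after_heap_init() {
        // Both allocations are carved out of the bitmap heap above.
        let answer = Box::new(42u32);
        let mut lines: Vec<&str> = Vec::with_capacity(16);
        lines.push("heap online");
        assert_eq!(*answer, 42);
    }
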
Old x86_64 allocator (deleted)

@@ -1,46 +0,0 @@
use linked_list_allocator::LockedHeap;
use x86_64::{
    structures::paging::{
        mapper::MapToError, FrameAllocator, Mapper, Page, PageTableFlags, Size4KiB,
    },
    VirtAddr,
};

use crate::allocator::{HEAP_SIZE, HEAP_START};

#[global_allocator]
static ALLOCATOR: LockedHeap = LockedHeap::empty();

pub unsafe fn init_alloc() -> Result<(), MapToError<Size4KiB>> {
    log::info!("Initialising kernel heap allocator");
    let page_range = Page::range_inclusive(
        Page::containing_address(VirtAddr::new(HEAP_START as u64)),
        Page::containing_address(VirtAddr::new(HEAP_START as u64) + HEAP_SIZE - 1u64),
    );

    let mut frame_allocator = super::memory::FRAME_ALLOC
        .get()
        .expect("frame allocator is not initialised")
        .lock();
    let mut mapper = super::memory::PAGE_TABLE
        .get()
        .expect("page table is not initialised")
        .lock();

    for page in page_range {
        let frame = frame_allocator
            .allocate_frame()
            .ok_or(MapToError::FrameAllocationFailed)?;
        let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE;
        mapper
            .map_to(page, frame, flags, &mut *frame_allocator)?
            .flush();
    }

    ALLOCATOR
        .lock()
        .init(crate::allocator::HEAP_START, crate::allocator::HEAP_SIZE);
    Ok(())
}

x86_64 arch module

@@ -3,7 +3,8 @@ use spin::Mutex;
use uart_16550::SerialPort;
use x86_64::VirtAddr;

-mod allocator;
use crate::allocator;

mod gdt;
mod interrupts;
mod memory;
@@ -37,7 +38,7 @@ unsafe extern "C" fn _kernel_start() -> ! {
            .expect("tried to get memory map from Limine"),
    );

-    allocator::init_alloc().expect("tried to initialise allocator");
    allocator::init();

    gdt::init();
    interrupts::init();

kmain.rs

@@ -4,6 +4,6 @@ pub fn kmain(cmdline: &str, initrd: Option<&'static [u8]>) -> ! {
    log::debug!("Entered kmain");
    log::info!("Cmdline: \"{cmdline}\"");
    let initrd = initrd.expect("no initrd found");
    crate::arch::sloop()
}

Kernel crate root

@@ -4,7 +4,9 @@
    abi_x86_interrupt,
    alloc_error_handler,
    panic_info_message,
-    prelude_import
    pointer_is_aligned,
    prelude_import,
    ptr_sub_ptr
)]
#![no_std]
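
pointer_is_aligned and ptr_sub_ptr are the feature gates behind the is_aligned_to and sub_ptr calls in the new allocator. A minimal nightly demonstration of the two APIs (editor's sketch):

    #![feature(pointer_is_aligned, ptr_sub_ptr)]

    fn main() {
        let buf = [0u8; 32];
        let p = buf.as_ptr();
        assert!(p.is_aligned_to(1)); // the check allocate() makes on its result
        let q = unsafe { p.add(16) };
        // sub_ptr is a checked `q - p` in elements; bitmap_set_range uses it
        // to size its write_bytes fill
        assert_eq!(unsafe { q.sub_ptr(p) }, 16);
    }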

Build helper

@@ -108,7 +108,10 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
    let dbus_conn = zbus::blocking::Connection::system()?;

    // Setup loop device
-    let disk_img = File::options().read(true).write(true).open("./target/disk.img")?;
    let disk_img = File::options()
        .read(true)
        .write(true)
        .open("./target/disk.img")?;
    let loopdev = udisks::manager::UDisks2ManagerProxyBlocking::new(&dbus_conn)?
        .loop_setup(
            disk_img.as_raw_fd().into(),
@@ -261,7 +264,10 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Setup loopback device for disk.img, with partitions
    // FIXME: don't do this if running without changes
    // Setup loop device
-    let disk_img = File::options().read(true).write(true).open("./target/disk.img")?;
    let disk_img = File::options()
        .read(true)
        .write(true)
        .open("./target/disk.img")?;
    let dbus_conn = zbus::blocking::Connection::system()?;
    let loopdev = udisks::manager::UDisks2ManagerProxyBlocking::new(&dbus_conn)?
        .loop_setup(
@@ -339,7 +345,10 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Setup loopback device for disk.img, with partitions
    // FIXME: don't do this if running without changes
-    let disk_img = File::options().read(true).write(true).open("./target/disk.img")?;
    let disk_img = File::options()
        .read(true)
        .write(true)
        .open("./target/disk.img")?;
    let dbus_conn = zbus::blocking::Connection::system()?;
    let loopdev = udisks::manager::UDisks2ManagerProxyBlocking::new(&dbus_conn)?
        .loop_setup(