//! Memory allocator

/*
 * This file incorporates work covered by the following license notice:
 *
 * Copyright (c) 2020, the SerenityOS developers.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

use {
    core::{
        alloc::{GlobalAlloc, Layout},
        mem,
        ptr::{self, NonNull},
    },
    log::trace,
    spin::Mutex,
};
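
/// Wrapper around the kernel heap: a spinlock-protected `Heap` that stays `None` until
/// `init` has been called.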
struct Allocator(Mutex<Option<Heap>>);

unsafe impl GlobalAlloc for Allocator {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        let mut lock = self.0.lock();
        let allocator = lock.as_mut().expect("heap allocator should be initialized");

        match allocator.allocate(layout.size(), layout.align()) {
            Some(ptr) => ptr.as_ptr(),
            None => ptr::null_mut(),
        }
    }
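
    // The `Layout` is ignored here: the allocation's size is recovered from the `Header`
    // that `Heap::allocate` stores in front of the returned pointer.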
    unsafe fn dealloc(&self, ptr: *mut u8, _: Layout) {
        let mut lock = self.0.lock();
        let allocator = lock.as_mut().expect("heap allocator should be initialized");
        allocator.deallocate(ptr);
    }
}

#[global_allocator]
static ALLOCATOR: Allocator = Allocator(Mutex::new(None));
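
// With the `#[global_allocator]` attribute above, the `alloc` crate's types (`Box`, `Vec`,
// ...) allocate through `ALLOCATOR`; any allocation made before `init` has run will hit the
// `expect` in `alloc` above.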

// FIXME: is `memory` a VirtualAddress or a PhysicalAddress? Both?
pub fn init(memory: *mut u8, memory_size: usize) {
    trace!("Initialising kernel heap allocator");
    *ALLOCATOR.0.lock() = Some(unsafe { Heap::new(memory, memory_size) });
}
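
// A minimal usage sketch (an assumption, not taken from this file): early boot code hands
// the allocator one free, writable region exactly once, e.g.
// `init(heap_start, 1024 * 1024)` for a 1 MiB heap, before the first `Box`/`Vec` is created.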

// FIXME: these are arch-specific
const CHUNK_SIZE: usize = 16;
const MINIMUM_ALIGNMENT: usize = 8;
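
/// Bookkeeping data written directly in front of every allocation, recording its length in
/// chunks so that `deallocate` can free it without being told the size.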
struct Header {
    size_in_chunks: usize,
}

// Compile-time assertions to make sure that `Header`'s size is a power of two
// and that CHUNK_SIZE is at least as large as `Header`'s size.
const _: () = {
    assert!(mem::size_of::<Header>().is_power_of_two());
    assert!(CHUNK_SIZE >= mem::size_of::<Header>());
};

/// A first-fit heap allocator with `CHUNK_SIZE`-byte chunks and a fixed total size.
/// In the future these will become subheaps, and the actual heap allocator will create more
/// subheaps as needed.
struct Heap {
    total_chunks: usize,
    allocated_chunks: usize,
    chunks: *mut u8,
    bitmap: *mut u8,
}

impl Heap {
    /// # Safety
    /// This function assumes that the given pointer points at a valid, writable memory
    /// region of at least `memory_size` bytes.
    unsafe fn new(memory: *mut u8, memory_size: usize) -> Self {
        let total_chunks = Self::calculate_chunks(memory_size);
        assert!(total_chunks * CHUNK_SIZE + (total_chunks + 7) / 8 <= memory_size);
        Self {
            total_chunks,
            allocated_chunks: 0,
            chunks: memory,
            bitmap: unsafe { memory.add(total_chunks * CHUNK_SIZE) },
        }
    }

    fn allocate(&mut self, size: usize, alignment: usize) -> Option<NonNull<u8>> {
        assert!(alignment.is_power_of_two());
        let alignment = if alignment < MINIMUM_ALIGNMENT {
            MINIMUM_ALIGNMENT
        } else {
            alignment
        };

        // We need space for the header as well
        let size = size + mem::size_of::<Header>();
        let chunks_needed = (size + CHUNK_SIZE - 1) / CHUNK_SIZE;
        let chunk_alignment = (alignment + CHUNK_SIZE - 1) / CHUNK_SIZE;
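        // A worked example (assuming a 64-bit target, where the header is 8 bytes): a
        // 40-byte, 8-aligned request grows to 48 bytes, so chunks_needed = (48 + 15) / 16 = 3
        // and chunk_alignment = (8 + 15) / 16 = 1.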

        if chunks_needed + chunk_alignment > self.free_chunks() {
            return None;
        }

        // FIXME: should utilize the alignment directly instead of trying to allocate `size + alignment`
        let first_chunk = self.find_first_fit(chunks_needed + chunk_alignment)?;
        let chunks_addr = self.chunks as usize;
        let addr_unaligned = chunks_addr + first_chunk * CHUNK_SIZE;

        // Align the starting address and verify that we haven't gone outside the calculated free area
        let addr =
            addr_unaligned + alignment - (addr_unaligned + mem::size_of::<Header>()) % alignment;
        let aligned_first_chunk = (addr - chunks_addr) / CHUNK_SIZE;
        assert!(first_chunk <= aligned_first_chunk);
        assert!(
            aligned_first_chunk + chunks_needed <= first_chunk + chunks_needed + chunk_alignment
        );

        let header = addr as *mut Header;
        unsafe {
            (*header).size_in_chunks = chunks_needed;
        }

        self.bitmap_set_range(aligned_first_chunk, chunks_needed, true);

        self.allocated_chunks += chunks_needed;

        let ptr: *mut u8 = unsafe { header.add(1).cast() };
        {
            #[cfg(debug_assertions)]
            trace!("Allocating {:?}", ptr);
        }
        // FIXME: zero out memory to prevent leaking data

        assert!(ptr.is_aligned_to(alignment));
        NonNull::new(ptr)
    }

    fn deallocate(&mut self, ptr: *mut u8) {
        {
            #[cfg(debug_assertions)]
            log::trace!("Deallocating {:?}", ptr);
        }
        let header = Self::allocation_header(ptr);
        let start = (header as usize - self.chunks as usize) / CHUNK_SIZE;
        assert!(self.bitmap_get(start));
        let size = unsafe { (*header).size_in_chunks };
        self.bitmap_set_range(start, size, false);
        self.allocated_chunks -= size;
        // FIXME: zero out memory to prevent leaking data
    }

    /// Finds the first hole that can fit an allocation of `size` chunks; returns the index
    /// of the first chunk of the found free run.
    fn find_first_fit(&self, size: usize) -> Option<usize> {
        let mut start_of_free_chunks = 0;
        let mut free_chunks = 0;
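        // Scan the bitmap one `usize`-sized "bucket" (usize::BITS chunks) at a time so that
        // completely full and completely empty buckets can be skipped in a single step.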
        for i in 0..self.total_chunks / usize::BITS as usize {
            if free_chunks >= size {
                return Some(start_of_free_chunks);
            }

            let mut bucket = unsafe { *self.bitmap.cast::<usize>().add(i) };
            if bucket == usize::MAX {
                // Skip over completely full bucket
                free_chunks = 0;
                continue;
            }
            if bucket == 0 {
                // Skip over completely empty bucket
                if free_chunks == 0 {
                    start_of_free_chunks = i * usize::BITS as usize;
                }

                free_chunks += usize::BITS as usize;
                continue;
            }

            let mut viewed_bits = 0;
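            // Walk a partially used bucket: runs of zero bits extend the current free run,
            // while runs of one bits end it.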
            while viewed_bits < usize::BITS as usize {
                if bucket == 0 {
                    if free_chunks == 0 {
                        start_of_free_chunks = i * usize::BITS as usize + viewed_bits;
                    }

                    free_chunks += usize::BITS as usize - viewed_bits;
                    viewed_bits = usize::BITS as usize;
                } else {
                    let trailing_zeros = bucket.trailing_zeros() as usize;
                    bucket >>= trailing_zeros;

                    if free_chunks == 0 {
                        start_of_free_chunks = i * usize::BITS as usize + viewed_bits;
                    }

                    free_chunks += trailing_zeros;
                    viewed_bits += trailing_zeros;

                    if free_chunks >= size {
                        return Some(start_of_free_chunks);
                    }

                    let trailing_ones = bucket.trailing_ones() as usize;
                    bucket >>= trailing_ones;
                    viewed_bits += trailing_ones;
                    free_chunks = 0;
                }
            }
        }

        if free_chunks >= size {
            return Some(start_of_free_chunks);
        }
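
        // Check the tail chunks that don't fill a whole `usize` bucket.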
        let first_trailing_bit = (self.total_chunks / usize::BITS as usize) * usize::BITS as usize;
        let trailing_bits = self.total_chunks % usize::BITS as usize;
        for i in 0..trailing_bits {
            if self.bitmap_get(first_trailing_bit + i) {
                free_chunks = 0;
                continue;
            }

            if free_chunks == 0 {
                start_of_free_chunks = first_trailing_bit + i;
            }

            free_chunks += 1;
            if free_chunks >= size {
                return Some(start_of_free_chunks);
            }
        }

        #[cfg(debug_assertions)]
        {
            trace!("No first fit found");
        }
        None
    }

    fn bitmap_set_range(&mut self, start: usize, length: usize, value: bool) {
        assert!(start + length <= self.total_chunks);
        if length == 0 {
            return;
        }
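
        // The first and last bytes of the range may be only partially covered, so they are
        // updated through the masks below; whole bytes in between are filled with `write_bytes`.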
        const BITMASK_FIRST_BYTE: [u8; 8] = [0xFF, 0xFE, 0xFC, 0xF8, 0xF0, 0xE0, 0xC0, 0x80];
        const BITMASK_LAST_BYTE: [u8; 8] = [0, 1, 3, 7, 0xF, 0x1F, 0x3F, 0x7F];
        let first = unsafe { self.bitmap.add(start / 8) };
        let last = unsafe { self.bitmap.add((start + length) / 8) };
        let mut byte_mask = BITMASK_FIRST_BYTE[start % 8];
        if first == last {
            byte_mask &= BITMASK_LAST_BYTE[(start + length) % 8];
            if value {
                unsafe {
                    *first |= byte_mask;
                }
            } else {
                unsafe {
                    *first &= !byte_mask;
                }
            }
        } else {
            if value {
                unsafe {
                    *first |= byte_mask;
                }
            } else {
                unsafe {
                    *first &= !byte_mask;
                }
            }

            byte_mask = BITMASK_LAST_BYTE[(start + length) % 8];
            if value {
                unsafe {
                    *last |= byte_mask;
                }
            } else {
                unsafe {
                    *last &= !byte_mask;
                }
            }

            let first = unsafe { first.add(1) };
            if first >= last {
                return;
            }

            if value {
                unsafe {
                    first.write_bytes(0xFF, last.sub_ptr(first));
                }
            } else {
                unsafe {
                    first.write_bytes(0, last.sub_ptr(first));
                }
            }
        }
    }

    fn bitmap_get(&self, index: usize) -> bool {
        assert!(index < self.total_chunks);
        (unsafe { *self.bitmap.add(index / 8) } & (1 << (index % 8))) != 0
    }

    const fn free_chunks(&self) -> usize {
        self.total_chunks - self.allocated_chunks
    }
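
    // Recovers the `Header` that `allocate` wrote immediately before the pointer it returned.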
    fn allocation_header(ptr: *mut u8) -> *mut Header {
        unsafe { ptr.cast::<Header>().sub(1) }
    }
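
    // Budgeting a full byte of bitmap per chunk (hence CHUNK_SIZE + 1) over-reserves, since
    // only one bit per chunk is needed, but it guarantees that the chunk area plus bitmap
    // fit in `memory_size`, as asserted in `new`.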
    const fn calculate_chunks(memory_size: usize) -> usize {
        memory_size / (CHUNK_SIZE + 1)
    }
}
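
// SAFETY: `Heap` only stores raw pointers into its backing region, which (assuming the
// region handed to `init` is used by nothing else) it owns exclusively, and it is only ever
// accessed behind the `Mutex` in `ALLOCATOR`, so moving it between threads is taken to be sound.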
unsafe impl Send for Heap {}

#[alloc_error_handler]
fn alloc_error_handler(layout: alloc::alloc::Layout) -> ! {
    log::error!("allocation error: {:?}", layout);
    // TODO: Maybe panic here instead
    crate::arch::spin_loop()
}