diff --git a/kernel/src/allocator.rs b/kernel/src/allocator.rs
index 5b357b7..67dd39b 100644
--- a/kernel/src/allocator.rs
+++ b/kernel/src/allocator.rs
@@ -68,8 +68,8 @@ static ALLOCATOR: Allocator = Allocator(Mutex::new(None));
 
 pub fn init() {
     log::info!("Initialising kernel heap allocator");
-    let memory_size = unsafe { mem::transmute(INITIAL_KERNEL_HEAP_SIZE) };
-    *ALLOCATOR.0.lock() = Some(unsafe { Heap::new(INITIAL_KERNEL_HEAP_START, memory_size) });
+    *ALLOCATOR.0.lock() =
+        Some(unsafe { Heap::new(INITIAL_KERNEL_HEAP_START, INITIAL_KERNEL_HEAP_SIZE as _) });
 }
 
 // FIXME: these are arch-specific
@@ -123,7 +123,7 @@ impl Heap {
         let size = size + mem::size_of::<AllocationHeader>();
         let chunks_needed = (size + CHUNK_SIZE - 1) / CHUNK_SIZE;
         let chunk_alignment = (alignment + CHUNK_SIZE - 1) / CHUNK_SIZE;
-        log::info!("size: {size} chunks: {chunks_needed} align: {chunk_alignment}");
+        log::debug!("size: {size} chunks: {chunks_needed} align: {chunk_alignment}");
 
         if chunks_needed + chunk_alignment > self.free_chunks() {
             return None;
@@ -137,11 +137,11 @@ impl Heap {
         // Align the starting address and verify that we haven't gone outside the calculated free area
         let addr =
            addr_unaligned + alignment - (addr_unaligned + mem::size_of::<AllocationHeader>()) % alignment;
-        log::info!(
+        log::debug!(
            "Addr unaligned: 0x{addr_unaligned:x} (offset: 0x{:x})",
            addr_unaligned - chunks_addr
        );
-        log::info!("Addr: 0x{addr:x} (offset: 0x{:x})", addr - chunks_addr);
+        log::trace!("Addr: 0x{addr:x} (offset: 0x{:x})", addr - chunks_addr);
         let aligned_first_chunk = (addr - chunks_addr) / CHUNK_SIZE;
         assert!(first_chunk <= aligned_first_chunk);
         assert!(
@@ -158,7 +158,7 @@ impl Heap {
         self.allocated_chunks += chunks_needed;
 
         let ptr: *mut u8 = unsafe { mem::transmute(header.add(1)) };
-        log::info!("{ptr:p}");
+        log::trace!("{ptr:p}");
         // FIXME: zero or scrub memory?
         assert!(ptr.is_aligned_to(alignment));
         NonNull::new(ptr)
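
A note on the sizing math in the second hunk: `chunks_needed` and `chunk_alignment` are ceiling divisions by `CHUNK_SIZE`, taken after padding the request with room for the allocation header. A minimal standalone sketch of that arithmetic follows; the `CHUNK_SIZE` value and the `AllocationHeader` layout here are illustrative assumptions, not the kernel's actual definitions:

// Standalone sketch of the sizing math above. CHUNK_SIZE and
// AllocationHeader are stand-ins, not the kernel's real definitions.
const CHUNK_SIZE: usize = 32; // assumed chunk granularity

#[allow(dead_code)]
#[repr(C)]
struct AllocationHeader {
    size_in_chunks: usize, // hypothetical field
}

/// Ceiling division: the number of whole chunks needed to cover `size` bytes.
fn chunks_for(size: usize) -> usize {
    (size + CHUNK_SIZE - 1) / CHUNK_SIZE
}

fn main() {
    // The allocator pads every request with the header before chunking,
    // mirroring `size + mem::size_of::<AllocationHeader>()` in the hunk.
    let requested = 100usize;
    let padded = requested + core::mem::size_of::<AllocationHeader>();
    assert_eq!(chunks_for(padded), 4); // (100 + 8 + 31) / 32 == 4
}

The first hunk's change is the idiomatic fix for the integer conversion: an `as` cast is restricted to primitive type conversions the compiler understands, whereas `mem::transmute` reinterprets the bits of any two same-sized types and is needlessly `unsafe` here.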