forked from AbleOS/ableos

changed log stuff and removed unnecessary transmute

parent eae5979921
commit ec5b21292e
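Note on the log changes in the hunks below: the allocator's per-allocation output moves from log::info! down to log::debug! and log::trace!, so it is only emitted when a logger opts into those levels. A minimal sketch of how that filtering behaves with the log facade; the PrintLogger type and its Info-level policy are illustrative assumptions, not the kernel's logger:

    use log::{Level, LevelFilter, Metadata, Record};

    // Illustrative logger: accepts Error/Warn/Info, drops Debug/Trace.
    struct PrintLogger;

    impl log::Log for PrintLogger {
        fn enabled(&self, metadata: &Metadata) -> bool {
            metadata.level() <= Level::Info
        }
        fn log(&self, record: &Record) {
            if self.enabled(record.metadata()) {
                println!("[{}] {}", record.level(), record.args());
            }
        }
        fn flush(&self) {}
    }

    static LOGGER: PrintLogger = PrintLogger;

    fn main() {
        log::set_logger(&LOGGER).unwrap();
        log::set_max_level(LevelFilter::Trace);

        log::info!("Initialising kernel heap allocator"); // printed
        log::debug!("size: 64 chunks: 3 align: 1");        // dropped by this logger
        log::trace!("0xdeadbeef");                         // dropped by this logger
    }

With a policy like that, the downgraded messages disappear from normal output but remain available when a more verbose logger is installed for debugging.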
@@ -68,8 +68,8 @@ static ALLOCATOR: Allocator = Allocator(Mutex::new(None));
 pub fn init() {
     log::info!("Initialising kernel heap allocator");
-    let memory_size = unsafe { mem::transmute(INITIAL_KERNEL_HEAP_SIZE) };
-    *ALLOCATOR.0.lock() = Some(unsafe { Heap::new(INITIAL_KERNEL_HEAP_START, memory_size) });
+    *ALLOCATOR.0.lock() =
+        Some(unsafe { Heap::new(INITIAL_KERNEL_HEAP_START, INITIAL_KERNEL_HEAP_SIZE as _) });
 }
 
 // FIXME: these are arch-specific
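The hunk above is the "removed unnecessary transmute" half of the commit: the heap size constant is now passed to Heap::new with a plain numeric cast (as _) instead of mem::transmute. A self-contained sketch of why a cast (or a checked conversion) is the idiomatic way to change an integer's width; the constant name, value, and target type below are made up for illustration:

    // Hypothetical stand-ins; the real constant and Heap::new signature are not shown here.
    const INITIAL_HEAP_SIZE: u64 = 4 * 1024 * 1024;

    fn takes_usize(size: usize) -> usize {
        size
    }

    fn main() {
        // A plain numeric cast converts the width with no unsafe block...
        let a = takes_usize(INITIAL_HEAP_SIZE as usize);
        // ...and a checked conversion is available where silent truncation would be a bug.
        let b = takes_usize(usize::try_from(INITIAL_HEAP_SIZE).expect("heap size fits in usize"));
        assert_eq!(a, b);
    }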
@@ -123,7 +123,7 @@ impl Heap {
         let size = size + mem::size_of::<Header>();
         let chunks_needed = (size + CHUNK_SIZE - 1) / CHUNK_SIZE;
         let chunk_alignment = (alignment + CHUNK_SIZE - 1) / CHUNK_SIZE;
-        log::info!("size: {size} chunks: {chunks_needed} align: {chunk_alignment}");
+        log::debug!("size: {size} chunks: {chunks_needed} align: {chunk_alignment}");
 
         if chunks_needed + chunk_alignment > self.free_chunks() {
             return None;
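For context on the unchanged lines in this hunk: (size + CHUNK_SIZE - 1) / CHUNK_SIZE is ceiling division, so a request that only partially fills its last chunk still reserves that chunk before the free_chunks() check. A small sketch with an assumed chunk size:

    // Assumed chunk size for illustration; the kernel's CHUNK_SIZE may differ.
    const CHUNK_SIZE: usize = 32;

    fn chunks_needed(size: usize) -> usize {
        // Round up: any remainder still occupies a whole chunk.
        (size + CHUNK_SIZE - 1) / CHUNK_SIZE
    }

    fn main() {
        assert_eq!(chunks_needed(1), 1);  // one byte still takes a full chunk
        assert_eq!(chunks_needed(32), 1); // exact fit
        assert_eq!(chunks_needed(33), 2); // spills into a second chunk
    }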
@@ -137,11 +137,11 @@ impl Heap {
         // Align the starting address and verify that we haven't gone outside the calculated free area
         let addr =
             addr_unaligned + alignment - (addr_unaligned + mem::size_of::<Header>()) % alignment;
-        log::info!(
+        log::debug!(
             "Addr unaligned: 0x{addr_unaligned:x} (offset: 0x{:x})",
             addr_unaligned - chunks_addr
         );
-        log::info!("Addr: 0x{addr:x} (offset: 0x{:x})", addr - chunks_addr);
+        log::trace!("Addr: 0x{addr:x} (offset: 0x{:x})", addr - chunks_addr);
         let aligned_first_chunk = (addr - chunks_addr) / CHUNK_SIZE;
         assert!(first_chunk <= aligned_first_chunk);
         assert!(
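The alignment expression in this hunk moves the header forward so that the payload immediately after it lands on an alignment boundary, which is what the later assert!(ptr.is_aligned_to(alignment)) verifies. A standalone sketch with plain usizes and an assumed header size:

    // Assumed header size for illustration; the real Header layout is not shown in this diff.
    const HEADER_SIZE: usize = 16;

    fn align_header_addr(addr_unaligned: usize, alignment: usize) -> usize {
        // Shift the header so that the payload that follows it (addr + HEADER_SIZE)
        // falls on an `alignment` boundary.
        addr_unaligned + alignment - (addr_unaligned + HEADER_SIZE) % alignment
    }

    fn main() {
        let addr = align_header_addr(0x1003, 64);
        assert_eq!((addr + HEADER_SIZE) % 64, 0); // the payload is aligned
        assert!(addr >= 0x1003);                  // and the header only moved forward
    }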
@@ -158,7 +158,7 @@ impl Heap {
         self.allocated_chunks += chunks_needed;
 
         let ptr: *mut u8 = unsafe { mem::transmute(header.add(1)) };
-        log::info!("{ptr:p}");
+        log::trace!("{ptr:p}");
         // FIXME: zero or scrub memory?
         assert!(ptr.is_aligned_to(alignment));
         NonNull::new(ptr)
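One transmute is left untouched by this commit: header.add(1) is still transmuted to *mut u8 before the is_aligned_to check. As a sketch only, a raw-pointer cast can express the same conversion without transmute; the Header type below is a stand-in, since its real layout is not part of this diff:

    use core::ptr::NonNull;

    // Stand-in header type for illustration only.
    #[repr(C)]
    struct Header {
        _size: usize,
    }

    fn payload_ptr(header: *mut Header) -> Option<NonNull<u8>> {
        // `.add(1)` steps one Header past the header itself; `.cast()` changes only the
        // pointee type, so no transmute is needed for pointer-to-pointer conversion.
        let ptr = unsafe { header.add(1) }.cast::<u8>();
        NonNull::new(ptr)
    }

    fn main() {
        let mut h = Header { _size: 0 };
        assert!(payload_ptr(&mut h as *mut Header).is_some());
    }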