From ab26de61f64ccb547bd8edc416a782999bdd7bd0 Mon Sep 17 00:00:00 2001
From: Erin
Date: Wed, 26 Jul 2023 12:22:28 +0200
Subject: [PATCH] Fixed memory (un)mapping

---
 hbvm/fuzz/fuzz_targets/vm.rs | 44 ++++++++++++++++++++++++------------
 hbvm/src/mem/mod.rs          | 17 +++++++-------
 2 files changed, 39 insertions(+), 22 deletions(-)

diff --git a/hbvm/fuzz/fuzz_targets/vm.rs b/hbvm/fuzz/fuzz_targets/vm.rs
index 3a30dac..e1d4266 100644
--- a/hbvm/fuzz/fuzz_targets/vm.rs
+++ b/hbvm/fuzz/fuzz_targets/vm.rs
@@ -10,26 +10,42 @@ use {
 fuzz_target!(|data: &[u8]| {
     if let Ok(mut vm) = Vm::<_, 16384>::new_validated(data, TestTrapHandler, Default::default())
     {
-        let page = Box::into_raw(Box::<Page>::default());
-
-        unsafe {
-            vm.memory
-                .map(
-                    page.cast(),
-                    0,
-                    hbvm::mem::paging::Permission::Write,
-                    PageSize::Size4K,
-                )
-                .unwrap()
-        };
+        // Alloc and map some memory
+        let pages = [
+            alloc_and_map(&mut vm.memory, 0),
+            alloc_and_map(&mut vm.memory, 4096),
+        ];
 
+        // Run VM
         let _ = vm.run();
 
-        vm.memory.unmap(0).unwrap();
-        let _ = unsafe { Box::from_raw(page) };
+        // Unmap and dealloc the memory
+        for (i, page) in pages.into_iter().enumerate() {
+            unmap_and_dealloc(&mut vm.memory, page, i as u64 * 4096);
+        }
     }
 });
 
+fn alloc_and_map(memory: &mut Memory, at: u64) -> *mut u8 {
+    let ptr = Box::into_raw(Box::<Page>::default()).cast();
+    unsafe {
+        memory
+            .map(
+                ptr,
+                at,
+                hbvm::mem::paging::Permission::Write,
+                PageSize::Size4K,
+            )
+            .unwrap()
+    };
+    ptr
+}
+
+fn unmap_and_dealloc(memory: &mut Memory, ptr: *mut u8, from: u64) {
+    memory.unmap(from).unwrap();
+    let _ = unsafe { Box::from_raw(ptr.cast::<Page>()) };
+}
+
 #[repr(align(4096))]
 struct Page([u8; 4096]);
 impl Default for Page {
diff --git a/hbvm/src/mem/mod.rs b/hbvm/src/mem/mod.rs
index 76809bf..d93ef58 100644
--- a/hbvm/src/mem/mod.rs
+++ b/hbvm/src/mem/mod.rs
@@ -1,7 +1,7 @@
 //! Program memory implementation
 
-pub mod paging;
 pub mod bmc;
+pub mod paging;
 
 mod pfhandler;
 
@@ -58,13 +58,13 @@ impl Memory {
         // Decide on what level depth are we going
         let lookup_depth = match pagesize {
-            PageSize::Size4K => 4,
-            PageSize::Size2M => 3,
+            PageSize::Size4K => 0,
+            PageSize::Size2M => 1,
             PageSize::Size1G => 2,
         };
 
         // Walk pagetable levels
-        for lvl in (0..lookup_depth).rev() {
+        for lvl in (lookup_depth..5).rev() {
             let entry = (*current_pt)
                 .table
                 .get_unchecked_mut(addr_extract_index(target, lvl));
 
@@ -94,7 +94,7 @@
 
         let node = (*current_pt)
             .table
-            .get_unchecked_mut(addr_extract_index(target, 4 - lookup_depth));
+            .get_unchecked_mut(addr_extract_index(target, lookup_depth));
 
         // Check if node is not mapped
         if node.permission() != Permission::Empty {
@@ -114,6 +114,7 @@
     /// just should be ignored.
     #[cfg(feature = "alloc")]
     pub fn unmap(&mut self, addr: u64) -> Result<(), NothingToUnmap> {
+        extern crate std;
         let mut current_pt = self.root_pt;
         let mut page_tables = [core::ptr::null_mut(); 5];
 
@@ -152,9 +153,7 @@
 
             unsafe {
                 let children = &mut (*(*entry).ptr()).pt.childen;
-
-                // Decrease children count
-                *children -= 1;
+                *children -= 1; // Decrease children count
 
                 // If there are no children, deallocate.
                 if *children == 0 {
@@ -162,6 +161,8 @@
 
                     // Zero visited entry
                     core::ptr::write_bytes(entry, 0, 1);
+                } else {
+                    break;
                 }
             }
         }
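
Note: after this change, `lookup_depth` names the page-table level at which the entry is installed (0 for 4K, 1 for 2M, 2 for 1G pages) rather than a count of levels to walk, which is why both the level walk `(lookup_depth..5).rev()` and the final `addr_extract_index(target, lookup_depth)` use it directly. A minimal standalone sketch of that indexing scheme, assuming five levels with 9 index bits each above a 12-bit page offset (the bit widths and the `main` driver are illustrative assumptions, not hbvm's actual constants):

    // Illustrative only: split a virtual address into per-level
    // page-table indices, 9 bits per level above a 12-bit page offset.
    fn addr_extract_index(addr: u64, lvl: u8) -> usize {
        debug_assert!(lvl <= 4);
        ((addr >> (12 + 9 * u32::from(lvl))) & 0x1ff) as usize
    }

    fn main() {
        let target = 0x0000_0040_2030_1000_u64;
        // Mapping terminates at level `lookup_depth`:
        // 0 => 4K leaf entry, 1 => 2M entry, 2 => 1G entry.
        for (size, lookup_depth) in [("4K", 0u8), ("2M", 1), ("1G", 2)] {
            // Levels visited on the way down, root (4) first,
            // mirroring `(lookup_depth..5).rev()` from the patch.
            let walk: Vec<u8> = (lookup_depth..5).rev().collect();
            let slot = addr_extract_index(target, lookup_depth);
            println!("{size}: walk {walk:?}, entry at level {lookup_depth}, slot {slot}");
        }
    }

The `} else { break }` added in `unmap` follows from the same structure: the cleanup walk frees empty tables from the leaf level upward, and once a table still has children after the decrement, the tables above it cannot be empty either, so the loop can stop early instead of scanning all recorded levels.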