diff --git a/hbvm/src/bmc.rs b/hbvm/src/bmc.rs
index 21d30b6..4a7919b 100644
--- a/hbvm/src/bmc.rs
+++ b/hbvm/src/bmc.rs
@@ -43,17 +43,19 @@ impl BlockCopier {
     /// - Same as for [`Memory::load`] and [`Memory::store`]
     pub unsafe fn poll(&mut self, memory: &mut impl Memory) -> Poll<Result<(), BlkCopyError>> {
         // Safety: Assuming uninit of array of MaybeUninit is sound
-        let mut buf = AlignedBuf(MaybeUninit::uninit().assume_init());
+        let mut buf = AlignedBuf(unsafe { MaybeUninit::uninit().assume_init() });
 
         // We have at least one buffer size to copy
         if self.n_buffers != 0 {
-            if let Err(e) = act(
-                memory,
-                self.src,
-                self.dst,
-                buf.0.as_mut_ptr().cast(),
-                BUF_SIZE,
-            ) {
+            if let Err(e) = unsafe {
+                act(
+                    memory,
+                    self.src,
+                    self.dst,
+                    buf.0.as_mut_ptr().cast(),
+                    BUF_SIZE,
+                )
+            } {
                 return Poll::Ready(Err(e));
             }
 
@@ -73,13 +75,15 @@ impl BlockCopier {
         }
 
         if self.rem != 0 {
-            if let Err(e) = act(
-                memory,
-                self.src,
-                self.dst,
-                buf.0.as_mut_ptr().cast(),
-                self.rem,
-            ) {
+            if let Err(e) = unsafe {
+                act(
+                    memory,
+                    self.src,
+                    self.dst,
+                    buf.0.as_mut_ptr().cast(),
+                    self.rem,
+                )
+            } {
                 return Poll::Ready(Err(e));
             }
         }
@@ -97,21 +101,23 @@ unsafe fn act(
     buf: *mut u8,
     count: usize,
 ) -> Result<(), BlkCopyError> {
-    // Load to buffer
-    memory
-        .load(src, buf, count)
-        .map_err(|super::mem::LoadError(addr)| BlkCopyError {
-            access_reason: MemoryAccessReason::Load,
-            addr,
-        })?;
+    unsafe {
+        // Load to buffer
+        memory
+            .load(src, buf, count)
+            .map_err(|super::mem::LoadError(addr)| BlkCopyError {
+                access_reason: MemoryAccessReason::Load,
+                addr,
+            })?;
 
-    // Store from buffer
-    memory
-        .store(dst, buf, count)
-        .map_err(|super::mem::StoreError(addr)| BlkCopyError {
-            access_reason: MemoryAccessReason::Store,
-            addr,
-        })?;
+        // Store from buffer
+        memory
+            .store(dst, buf, count)
+            .map_err(|super::mem::StoreError(addr)| BlkCopyError {
+                access_reason: MemoryAccessReason::Store,
+                addr,
+            })?;
+    }
 
     Ok(())
 }
diff --git a/hbvm/src/float/aarch64.rs b/hbvm/src/float/aarch64.rs
index 034105d..1d14d7f 100644
--- a/hbvm/src/float/aarch64.rs
+++ b/hbvm/src/float/aarch64.rs
@@ -46,7 +46,7 @@ unsafe fn set_rounding_mode(mode: RoundingMode) {
     }
 
     let fpcr: u64;
-    asm!("mrs {}, fpcr", out(reg) fpcr);
+    unsafe { asm!("mrs {}, fpcr", out(reg) fpcr) };
 
     let fpcr = fpcr & !(0b11 << 22)
         | (match mode {
@@ -56,7 +56,7 @@ unsafe fn set_rounding_mode(mode: RoundingMode) {
             RoundingMode::Down => 0b10,
         }) << 22;
 
-    asm!("msr fpcr, {}", in(reg) fpcr);
+    unsafe { asm!("msr fpcr, {}", in(reg) fpcr) };
 }
 
 #[inline(always)]
diff --git a/hbvm/src/float/x86_64.rs b/hbvm/src/float/x86_64.rs
index fd83bf0..468aabe 100644
--- a/hbvm/src/float/x86_64.rs
+++ b/hbvm/src/float/x86_64.rs
@@ -56,12 +56,14 @@ fnsdef! {
     /// [`default_rounding_mode`], you have to rely on inline assembly
     #[inline(always)]
     unsafe fn set_rounding_mode(mode: RoundingMode) {
-        arin::_MM_SET_ROUNDING_MODE(match mode {
-            RoundingMode::NearestEven => return,
-            RoundingMode::Truncate => arin::_MM_ROUND_TOWARD_ZERO,
-            RoundingMode::Up => arin::_MM_ROUND_UP,
-            RoundingMode::Down => arin::_MM_ROUND_DOWN,
-        })
+        unsafe {
+            arin::_MM_SET_ROUNDING_MODE(match mode {
+                RoundingMode::NearestEven => return,
+                RoundingMode::Truncate => arin::_MM_ROUND_TOWARD_ZERO,
+                RoundingMode::Up => arin::_MM_ROUND_UP,
+                RoundingMode::Down => arin::_MM_ROUND_DOWN,
+            })
+        }
     }
 
     #[inline(always)]
diff --git a/hbvm/src/lib.rs b/hbvm/src/lib.rs
index 9a6c034..e16c318 100644
--- a/hbvm/src/lib.rs
+++ b/hbvm/src/lib.rs
@@ -12,6 +12,7 @@
 #![no_std]
 #![cfg_attr(feature = "nightly", feature(fn_align))]
+#![deny(unsafe_op_in_unsafe_fn)]
 
 #[cfg(feature = "alloc")]
 extern crate alloc;
diff --git a/hbvm/src/mem/softpaging/icache.rs b/hbvm/src/mem/softpaging/icache.rs
index 963970a..84f4c33 100644
--- a/hbvm/src/mem/softpaging/icache.rs
+++ b/hbvm/src/mem/softpaging/icache.rs
@@ -48,14 +48,14 @@ impl ICache {
 
         let pbase = self
             .data
-            .or_else(|| self.fetch_page(self.base + self.size, root_pt))?;
+            .or_else(|| unsafe { self.fetch_page(self.base + self.size, root_pt) })?;
 
         // Get address base
         let base = addr.map(|x| x & self.mask);
 
         // Base not matching, fetch anew
         if base != self.base {
-            self.fetch_page(base, root_pt)?;
+            unsafe { self.fetch_page(base, root_pt) }?;
         };
 
         let offset = addr.get() & !self.mask;
@@ -68,25 +68,27 @@ impl ICache {
         let first_copy = requ_size.saturating_sub(rem);
 
         // Copy non-overflowing part
-        copy_nonoverlapping(pbase.as_ptr(), ret.as_mut_ptr().cast::<u8>(), first_copy);
+        unsafe { copy_nonoverlapping(pbase.as_ptr(), ret.as_mut_ptr().cast::<u8>(), first_copy) };
 
         // Copy overflow
         if rem != 0 {
-            let pbase = self.fetch_page(self.base + self.size, root_pt)?;
+            let pbase = unsafe { self.fetch_page(self.base + self.size, root_pt) }?;
 
             // Unlikely, unsupported scenario
             if rem > self.size as _ {
                 return None;
             }
 
-            copy_nonoverlapping(
-                pbase.as_ptr(),
-                ret.as_mut_ptr().cast::<u8>().add(first_copy),
-                rem,
-            );
+            unsafe {
+                copy_nonoverlapping(
+                    pbase.as_ptr(),
+                    ret.as_mut_ptr().cast::<u8>().add(first_copy),
+                    rem,
+                )
+            };
         }
 
-        Some(ret.assume_init())
+        Some(unsafe { ret.assume_init() })
     }
 
     /// Fetch a page
diff --git a/hbvm/src/mem/softpaging/mapping.rs b/hbvm/src/mem/softpaging/mapping.rs
index 67f65a1..3f131f4 100644
--- a/hbvm/src/mem/softpaging/mapping.rs
+++ b/hbvm/src/mem/softpaging/mapping.rs
@@ -36,9 +36,11 @@ impl<'p, A, const OUT_PROG_EXEC: bool> SoftPagedMem<'p, A, OUT_PROG_EXEC> {
 
         // Walk pagetable levels
         for lvl in (lookup_depth + 1..5).rev() {
-            let entry = (*current_pt)
-                .table
-                .get_unchecked_mut(addr_extract_index(target, lvl));
+            let entry = unsafe {
+                (*current_pt)
+                    .table
+                    .get_unchecked_mut(addr_extract_index(target, lvl))
+            };
 
             let ptr = entry.ptr();
             match entry.permission() {
@@ -46,13 +48,13 @@ impl<'p, A, const OUT_PROG_EXEC: bool> SoftPagedMem<'p, A, OUT_PROG_EXEC> {
                 // No worries! Let's create one (allocates).
                 Permission::Empty => {
                     // Increase children count
-                    (*current_pt).childen += 1;
+                    unsafe { *current_pt }.childen += 1;
 
                     let table = Box::into_raw(Box::new(PtPointedData {
                         pt: PageTable::default(),
                     }));
 
-                    core::ptr::write(entry, PtEntry::new(table, Permission::Node));
+                    unsafe { core::ptr::write(entry, PtEntry::new(table, Permission::Node)) };
                     current_pt = table as _;
                 }
                 // Continue walking
@@ -63,9 +65,11 @@ impl<'p, A, const OUT_PROG_EXEC: bool> SoftPagedMem<'p, A, OUT_PROG_EXEC> {
             }
         }
 
-        let node = (*current_pt)
-            .table
-            .get_unchecked_mut(addr_extract_index(target, lookup_depth));
+        let node = unsafe {
+            (*current_pt)
+                .table
+                .get_unchecked_mut(addr_extract_index(target, lookup_depth))
+        };
 
         // Check if node is not mapped
         if node.permission() != Permission::Empty {
@@ -73,8 +77,10 @@ impl<'p, A, const OUT_PROG_EXEC: bool> SoftPagedMem<'p, A, OUT_PROG_EXEC> {
         }
 
         // Write entry
-        (*current_pt).childen += 1;
-        core::ptr::write(node, PtEntry::new(host.cast(), perm));
+        unsafe {
+            (*current_pt).childen += 1;
+            core::ptr::write(node, PtEntry::new(host.cast(), perm));
+        }
 
         Ok(())
     }
diff --git a/hbvm/src/mem/softpaging/mod.rs b/hbvm/src/mem/softpaging/mod.rs
index 3e1f6de..6696ead 100644
--- a/hbvm/src/mem/softpaging/mod.rs
+++ b/hbvm/src/mem/softpaging/mod.rs
@@ -51,7 +51,7 @@ impl<'p, PfH: HandlePageFault, const OUT_PROG_EXEC: bool> Memory
             target,
             count,
             perm_check::readable,
-            |src, dst, count| core::ptr::copy_nonoverlapping(src, dst, count),
+            |src, dst, count| unsafe { core::ptr::copy_nonoverlapping(src, dst, count) },
         )
         .map_err(LoadError)
     }
@@ -72,7 +72,7 @@ impl<'p, PfH: HandlePageFault, const OUT_PROG_EXEC: bool> Memory
             source.cast_mut(),
             count,
             perm_check::writable,
-            |dst, src, count| core::ptr::copy_nonoverlapping(src, dst, count),
+            |dst, src, count| unsafe { core::ptr::copy_nonoverlapping(src, dst, count) },
         )
         .map_err(StoreError)
     }
@@ -80,16 +80,14 @@ impl<'p, PfH: HandlePageFault, const OUT_PROG_EXEC: bool> Memory
     #[inline(always)]
     unsafe fn prog_read<T: Copy>(&mut self, addr: Address) -> T {
         if OUT_PROG_EXEC && addr.truncate_usize() > self.program.len() {
-            return self
-                .icache
-                .fetch::<T>(addr, self.root_pt)
+            return unsafe { self.icache.fetch::<T>(addr, self.root_pt) }
                 .unwrap_or_else(|| unsafe { core::mem::zeroed() });
         }
 
         let addr = addr.truncate_usize();
         self.program
             .get(addr..addr + size_of::<T>())
-            .map(|x| x.as_ptr().cast::<T>().read())
+            .map(|x| unsafe { x.as_ptr().cast::<T>().read() })
             .unwrap_or_else(|| unsafe { core::mem::zeroed() })
     }
 }
diff --git a/hbvm/src/vmrun.rs b/hbvm/src/vmrun.rs
index d5bb3bf..d2e7f77 100644
--- a/hbvm/src/vmrun.rs
+++ b/hbvm/src/vmrun.rs
@@ -17,7 +17,8 @@ use {
 
 macro_rules! handler {
     ($self:expr, |$ty:ident ($($ident:pat),* $(,)?)| $expr:expr) => {{
-        let $ty($($ident),*) = $self.decode::<$ty>();
+        #[allow(unused_unsafe)]
+        let $ty($($ident),*) = unsafe { $self.decode::<$ty>() };
         #[allow(clippy::no_effect)] let e = $expr;
         $self.bump_pc::<$ty>();
         e
@@ -383,7 +384,7 @@ where
     /// Decode instruction operands
     #[inline(always)]
     unsafe fn decode<T: Copy>(&mut self) -> T {
-        self.memory.prog_read::<T>(self.pc + 1_u64)
+        unsafe { self.memory.prog_read::<T>(self.pc + 1_u64) }
     }
 
     /// Load
@@ -400,14 +401,16 @@ where
             _ => 0,
         };
 
-        self.memory.load(
-            self.ldst_addr_uber(dst, base, offset, count, n)?,
-            self.registers
-                .as_mut_ptr()
-                .add(usize::from(dst) + usize::from(n))
-                .cast(),
-            usize::from(count).saturating_sub(n.into()),
-        )?;
+        unsafe {
+            self.memory.load(
+                self.ldst_addr_uber(dst, base, offset, count, n)?,
+                self.registers
+                    .as_mut_ptr()
+                    .add(usize::from(dst) + usize::from(n))
+                    .cast(),
+                usize::from(count).saturating_sub(n.into()),
+            )
+        }?;
 
         Ok(())
     }
@@ -421,11 +424,13 @@ where
         offset: u64,
         count: u16,
     ) -> Result<(), VmRunError> {
-        self.memory.store(
-            self.ldst_addr_uber(dst, base, offset, count, 0)?,
-            self.registers.as_ptr().add(usize::from(dst)).cast(),
-            count.into(),
-        )?;
+        unsafe {
+            self.memory.store(
+                self.ldst_addr_uber(dst, base, offset, count, 0)?,
+                self.registers.as_ptr().add(usize::from(dst)).cast(),
+                count.into(),
+            )
+        }?;
         Ok(())
     }
 
@@ -438,7 +443,7 @@ where
     /// Perform binary operating over two registers
     #[inline(always)]
     unsafe fn binary_op<T: ValueVariant>(&mut self, op: impl Fn(T, T) -> T) {
-        let OpsRRR(tg, a0, a1) = self.decode();
+        let OpsRRR(tg, a0, a1) = unsafe { self.decode() };
         self.write_reg(
             tg,
             op(self.read_reg(a0).cast::<T>(), self.read_reg(a1).cast::<T>()),
@@ -453,7 +458,7 @@ where
         #[repr(packed)]
         struct OpsRRImm<I>(OpsRR, I);
 
-        let OpsRRImm::<I>(OpsRR(tg, reg), imm) = self.decode();
+        let OpsRRImm::<I>(OpsRR(tg, reg), imm) = unsafe { self.decode() };
         self.write_reg(tg, op(self.read_reg(reg).cast::<T>(), imm));
         self.bump_pc::<OpsRRImm<I>>();
     }
@@ -461,7 +466,7 @@ where
     /// Perform binary operation over register and shift immediate
     #[inline(always)]
     unsafe fn binary_op_shift<T: ValueVariant>(&mut self, op: impl Fn(T, u32) -> T) {
-        let OpsRRR(tg, a0, a1) = self.decode();
+        let OpsRRR(tg, a0, a1) = unsafe { self.decode() };
         self.write_reg(
             tg,
             op(
@@ -475,7 +480,7 @@ where
     /// Perform binary operation over register and shift immediate
     #[inline(always)]
     unsafe fn binary_op_ims<T: ValueVariant>(&mut self, op: impl Fn(T, u32) -> T) {
-        let OpsRRB(tg, reg, imm) = self.decode();
+        let OpsRRB(tg, reg, imm) = unsafe { self.decode() };
        self.write_reg(tg, op(self.read_reg(reg).cast::<T>(), imm.into()));
         self.bump_pc::<OpsRRB>();
     }
@@ -534,7 +539,7 @@ where
     /// Jump at `PC + #3` if ordering on `#0 <=> #1` is equal to expected
     #[inline(always)]
     unsafe fn cond_jmp<T: ValueVariant + Ord>(&mut self, expected: Ordering) {
-        let OpsRRP(a0, a1, ja) = self.decode();
+        let OpsRRP(a0, a1, ja) = unsafe { self.decode() };
         if self
             .read_reg(a0)
             .cast::<T>()
diff --git a/hbxrt/src/main.rs b/hbxrt/src/main.rs
index 0afb807..ed875f5 100644
--- a/hbxrt/src/main.rs
+++ b/hbxrt/src/main.rs
@@ -1,4 +1,7 @@
 //! Holey Bytes Experimental Runtime
+
+#![deny(unsafe_op_in_unsafe_fn)]
+
 mod mem;
 
 use {
@@ -32,7 +35,6 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
 
     eprintln!("[I] Image loaded at {ptr:p}");
 
-    // Execute program
     let mut vm = unsafe { Vm::<_, 0>::new(mem::HostMemory, Address::new(ptr as u64)) };
 
     // Memory access fault handling
@@ -60,6 +62,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
         )?;
     }
 
+    // Execute program
     let stat = loop {
         match vm.run() {
             Ok(VmRunOk::Breakpoint) => eprintln!(
diff --git a/hbxrt/src/mem.rs b/hbxrt/src/mem.rs
index e1687d3..4fae0ce 100644
--- a/hbxrt/src/mem.rs
+++ b/hbxrt/src/mem.rs
@@ -26,6 +26,6 @@ impl Memory for HostMemory {
 
     #[inline]
     unsafe fn prog_read<T: Copy>(&mut self, addr: Address) -> T {
-        core::ptr::read(addr.get() as *const T)
+        unsafe { core::ptr::read(addr.get() as *const T) }
     }
 }
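
A minimal sketch (not part of the patch) of the rule that the newly denied `unsafe_op_in_unsafe_fn` lint enforces throughout this diff: the body of an `unsafe fn` no longer acts as an implicit `unsafe` block, so every unsafe operation inside it must be wrapped explicitly, where it can carry its own safety comment. The function names below are hypothetical.

    // Denied by the lint: relies on the implicit unsafe body.
    unsafe fn read_implicit(ptr: *const u8) -> u8 {
        *ptr // error: dereference of raw pointer requires an unsafe block
    }

    // Accepted: the unsafe operation is scoped and auditable.
    unsafe fn read_explicit(ptr: *const u8) -> u8 {
        // Safety: the caller guarantees `ptr` is valid for reads.
        unsafe { *ptr }
    }

This mirrors the mechanical rewrites above, e.g. in hbvm/src/bmc.rs and hbxrt/src/mem.rs.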