Compare commits


No commits in common. "trunk" and "master" have entirely different histories.

16 changed files with 176 additions and 176 deletions

Cargo.lock generated
View file

@@ -220,7 +220,7 @@ name = "hbxrt"
 version = "0.1.0"
 dependencies = [
  "hbvm",
- "memmap2",
+ "nix",
 ]

 [[package]]
@@ -256,15 +256,6 @@ version = "2.6.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167"

-[[package]]
-name = "memmap2"
-version = "0.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "deaba38d7abf1d4cca21cc89e932e542ba2b9258664d2a9ef0e61512039c9375"
-dependencies = [
- "libc",
-]
-
 [[package]]
 name = "miniz_oxide"
 version = "0.7.1"

c-abi.md Normal file
View file

@@ -0,0 +1,29 @@
# C ABI (proposal)
## C datatypes
| C Type | Description | Size (bits) |
|:------------|:-------------------------|-------------:|
| char | Character / byte | 8 |
| short | Short integer | 16 |
| int | Integer | 32 |
| long | Long integer | 64 |
| long long | Long long integer | 64 |
| T* | Pointer | 64 |
| float | Single-precision float | 32 |
| double | Double-precision float | 64 |
| long double | Extended-precision float | **Bikeshed** |
## Registers
| Register | ABI Name | Description | Saver |
|:---------|:---------|:---------------|:-------|
| `r0` | — | Zero register | N/A |
| `r1` | `ra` | Return address | Caller |
| `r2` | `sp` | Stack pointer | Callee |
| `r3` | `tp` | Thread pointer | N/A |
**TODO:** Parameters
**TODO:** Saved
**TODO:** Temp
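
As a quick cross-check of the table above: the sizes (in bits) match the usual LP64 mapping of C types. The Rust FFI aliases below are an assumption used purely for illustration and are not part of the proposal; `long double` is left out because its size is still bikeshed material.

```rust
// Hypothetical sanity check: the proposed sizes expressed against Rust's
// C FFI aliases under an assumed 64-bit LP64 target.
use core::ffi::{c_char, c_double, c_float, c_int, c_long, c_longlong, c_short};
use core::mem::size_of;

const _: () = {
    assert!(size_of::<c_char>() * 8 == 8); // char
    assert!(size_of::<c_short>() * 8 == 16); // short
    assert!(size_of::<c_int>() * 8 == 32); // int
    assert!(size_of::<c_long>() * 8 == 64); // long (LP64 assumption)
    assert!(size_of::<c_longlong>() * 8 == 64); // long long
    assert!(size_of::<*const u8>() * 8 == 64); // T*
    assert!(size_of::<c_float>() * 8 == 32); // float
    assert!(size_of::<c_double>() * 8 == 64); // double
};
```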

View file

@@ -47,7 +47,7 @@
 0x2E, ADDI16, RRH, "Addition with immediate (16b)" ;
 0x2F, ADDI32, RRW, "Addition with immediate (32b)" ;
 0x30, ADDI64, RRD, "Addition with immediate (64b)" ;
-0x31, MULI8, RRB, "Multiplication with immediate (8b)" ;
+0x31, MULI8, RRW, "Multiplication with immediate (8b)" ;
 0x32, MULI16, RRH, "Multiplication with immediate (16b)" ;
 0x33, MULI32, RRW, "Multiplication with immediate (32b)" ;
 0x34, MULI64, RRD, "Multiplication with immediate (64b)" ;
@@ -114,7 +114,7 @@
 0x71, FTI64, RRB, "Float 64 to int" ;
 0x72, FC32T64, RR, "Float 64 to Float 32" ;
 0x73, FC64T32, RRB, "Float 32 to Float 64" ;
-0x74, LRA16, RRO, "Load relative immediate (16 bit)" ;
+0x74, LRA16, RRP, "Load relative immediate (16 bit)" ;
 0x75, LDR16, RRPH, "Load from relative address (16 bit)" ;
 0x76, STR16, RRPH, "Store to relative address (16 bit)" ;
 0x77, JMP16, P, "Relative jump (16 bit)" ;

View file

@@ -2,4 +2,4 @@ target
 artifacts
 corpus
 coverage
-Cargo.lock
+Cargo.lock

View file

@@ -43,19 +43,17 @@ impl BlockCopier {
     /// - Same as for [`Memory::load`] and [`Memory::store`]
     pub unsafe fn poll(&mut self, memory: &mut impl Memory) -> Poll<Result<(), BlkCopyError>> {
         // Safety: Assuming uninit of array of MaybeUninit is sound
-        let mut buf = AlignedBuf(unsafe { MaybeUninit::uninit().assume_init() });
+        let mut buf = AlignedBuf(MaybeUninit::uninit().assume_init());

         // We have at least one buffer size to copy
         if self.n_buffers != 0 {
-            if let Err(e) = unsafe {
-                act(
-                    memory,
-                    self.src,
-                    self.dst,
-                    buf.0.as_mut_ptr().cast(),
-                    BUF_SIZE,
-                )
-            } {
+            if let Err(e) = act(
+                memory,
+                self.src,
+                self.dst,
+                buf.0.as_mut_ptr().cast(),
+                BUF_SIZE,
+            ) {
                 return Poll::Ready(Err(e));
             }
@@ -75,15 +73,13 @@ impl BlockCopier {
         }

         if self.rem != 0 {
-            if let Err(e) = unsafe {
-                act(
-                    memory,
-                    self.src,
-                    self.dst,
-                    buf.0.as_mut_ptr().cast(),
-                    self.rem,
-                )
-            } {
+            if let Err(e) = act(
+                memory,
+                self.src,
+                self.dst,
+                buf.0.as_mut_ptr().cast(),
+                self.rem,
+            ) {
                 return Poll::Ready(Err(e));
             }
         }
@@ -101,23 +97,21 @@ unsafe fn act(
     buf: *mut u8,
     count: usize,
 ) -> Result<(), BlkCopyError> {
-    unsafe {
-        // Load to buffer
-        memory
-            .load(src, buf, count)
-            .map_err(|super::mem::LoadError(addr)| BlkCopyError {
-                access_reason: MemoryAccessReason::Load,
-                addr,
-            })?;
+    // Load to buffer
+    memory
+        .load(src, buf, count)
+        .map_err(|super::mem::LoadError(addr)| BlkCopyError {
+            access_reason: MemoryAccessReason::Load,
+            addr,
+        })?;

-        // Store from buffer
-        memory
-            .store(dst, buf, count)
-            .map_err(|super::mem::StoreError(addr)| BlkCopyError {
-                access_reason: MemoryAccessReason::Store,
-                addr,
-            })?;
-    }
+    // Store from buffer
+    memory
+        .store(dst, buf, count)
+        .map_err(|super::mem::StoreError(addr)| BlkCopyError {
+            access_reason: MemoryAccessReason::Store,
+            addr,
+        })?;
+
     Ok(())
 }
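
The `act` function above is a bounce-buffer copy: load one chunk into a scratch buffer, then store it out, so each half can fail with its own address. A minimal safe sketch of the same pattern, with illustrative names rather than the hbvm `Memory` API:

```rust
// Sketch of the load-then-store pattern act() implements, using plain
// slices instead of the VM's Memory trait. Illustrative only.
fn bounce_copy(src: &[u8], dst: &mut [u8], scratch: &mut [u8]) {
    assert_eq!(src.len(), dst.len());
    assert!(!scratch.is_empty());
    for (s, d) in src.chunks(scratch.len()).zip(dst.chunks_mut(scratch.len())) {
        let buf = &mut scratch[..s.len()];
        buf.copy_from_slice(s); // "load to buffer"
        d.copy_from_slice(buf); // "store from buffer"
    }
}
```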

View file

@@ -46,7 +46,7 @@ unsafe fn set_rounding_mode(mode: RoundingMode) {
     }

     let fpcr: u64;
-    unsafe { asm!("mrs {}, fpcr", out(reg) fpcr) };
+    asm!("mrs {}, fpcr", out(reg) fpcr);

     let fpcr = fpcr & !(0b11 << 22)
         | (match mode {
@@ -56,7 +56,7 @@ unsafe fn set_rounding_mode(mode: RoundingMode) {
             RoundingMode::Down => 0b10,
         }) << 22;

-    unsafe { asm!("msr fpcr, {}", in(reg) fpcr) };
+    asm!("msr fpcr, {}", in(reg) fpcr);
 }

 #[inline(always)]

View file

@@ -56,14 +56,12 @@ fnsdef! {
     /// [`default_rounding_mode`], you have to rely on inline assembly
     #[inline(always)]
     unsafe fn set_rounding_mode(mode: RoundingMode) {
-        unsafe {
-            arin::_MM_SET_ROUNDING_MODE(match mode {
-                RoundingMode::NearestEven => return,
-                RoundingMode::Truncate => arin::_MM_ROUND_TOWARD_ZERO,
-                RoundingMode::Up => arin::_MM_ROUND_UP,
-                RoundingMode::Down => arin::_MM_ROUND_DOWN,
-            })
-        }
+        arin::_MM_SET_ROUNDING_MODE(match mode {
+            RoundingMode::NearestEven => return,
+            RoundingMode::Truncate => arin::_MM_ROUND_TOWARD_ZERO,
+            RoundingMode::Up => arin::_MM_ROUND_UP,
+            RoundingMode::Down => arin::_MM_ROUND_DOWN,
+        })
     }

     #[inline(always)]
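
`_MM_SET_ROUNDING_MODE` only touches the SSE control register (MXCSR), and a caller that changes it temporarily would normally save and restore the previous mode. A sketch of that usage pattern using only `core::arch` intrinsics; the wrapper name is made up and is not part of this repository:

```rust
// Sketch: run a closure with SSE rounding set to truncation, restoring the
// caller's mode afterwards. x86_64-only, illustrative helper.
#[cfg(target_arch = "x86_64")]
unsafe fn with_round_toward_zero<R>(f: impl FnOnce() -> R) -> R {
    use core::arch::x86_64 as arin;
    // Remember the current MXCSR rounding field.
    let saved = unsafe { arin::_MM_GET_ROUNDING_MODE() };
    unsafe { arin::_MM_SET_ROUNDING_MODE(arin::_MM_ROUND_TOWARD_ZERO) };
    let out = f(); // float ops here truncate instead of rounding to nearest
    unsafe { arin::_MM_SET_ROUNDING_MODE(saved) };
    out
}
```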

View file

@@ -12,7 +12,6 @@
 #![no_std]
 #![cfg_attr(feature = "nightly", feature(fn_align))]
-#![deny(unsafe_op_in_unsafe_fn)]

 #[cfg(feature = "alloc")]
 extern crate alloc;
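
Most hunks in this comparison are fallout from this one attribute: with `#![deny(unsafe_op_in_unsafe_fn)]` in force, the body of an `unsafe fn` is no longer an implicit unsafe block. A tiny, self-contained illustration (not code from this repository):

```rust
#![deny(unsafe_op_in_unsafe_fn)]

/// Dereferencing a raw pointer is an unsafe operation, so under the lint it
/// must be wrapped in an explicit `unsafe` block even inside an `unsafe fn`.
unsafe fn read_byte(p: *const u8) -> u8 {
    unsafe { *p } // removing this block only compiles once the lint is dropped
}

fn main() {
    let x = 42u8;
    // Calling an unsafe fn always needs an unsafe block at the call site.
    assert_eq!(unsafe { read_byte(&x) }, 42);
}
```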

View file

@@ -48,14 +48,14 @@ impl ICache {
         let pbase = self
             .data
-            .or_else(|| unsafe { self.fetch_page(self.base + self.size, root_pt) })?;
+            .or_else(|| self.fetch_page(self.base + self.size, root_pt))?;

         // Get address base
         let base = addr.map(|x| x & self.mask);

         // Base not matching, fetch anew
         if base != self.base {
-            unsafe { self.fetch_page(base, root_pt) }?;
+            self.fetch_page(base, root_pt)?;
         };

         let offset = addr.get() & !self.mask;
@@ -68,27 +68,25 @@ impl ICache {
         let first_copy = requ_size.saturating_sub(rem);

         // Copy non-overflowing part
-        unsafe { copy_nonoverlapping(pbase.as_ptr(), ret.as_mut_ptr().cast::<u8>(), first_copy) };
+        copy_nonoverlapping(pbase.as_ptr(), ret.as_mut_ptr().cast::<u8>(), first_copy);

         // Copy overflow
         if rem != 0 {
-            let pbase = unsafe { self.fetch_page(self.base + self.size, root_pt) }?;
+            let pbase = self.fetch_page(self.base + self.size, root_pt)?;

             // Unlikely, unsupported scenario
             if rem > self.size as _ {
                 return None;
             }

-            unsafe {
-                copy_nonoverlapping(
-                    pbase.as_ptr(),
-                    ret.as_mut_ptr().cast::<u8>().add(first_copy),
-                    rem,
-                )
-            };
+            copy_nonoverlapping(
+                pbase.as_ptr(),
+                ret.as_mut_ptr().cast::<u8>().add(first_copy),
+                rem,
+            );
         }

-        Some(unsafe { ret.assume_init() })
+        Some(ret.assume_init())
     }

     /// Fetch a page
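
The two `copy_nonoverlapping` calls above handle a fetch that straddles a page boundary. The arithmetic behind them, restated with illustrative names (only `first_copy = requ_size.saturating_sub(rem)` is taken from the hunk):

```rust
// Sketch: split a `requ_size`-byte read starting at `offset` inside a page
// of `page_size` bytes into the part that fits in the current page and the
// remainder that spills into the next one. Illustrative helper, not hbvm API.
fn split_fetch(offset: usize, requ_size: usize, page_size: usize) -> (usize, usize) {
    let rem = (offset + requ_size).saturating_sub(page_size); // bytes past the page end
    let first_copy = requ_size.saturating_sub(rem); // bytes still inside the page
    (first_copy, rem)
}
```

For example, `split_fetch(4090, 16, 4096)` gives `(6, 10)`: six bytes from the current page, ten from the next.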

View file

@@ -36,11 +36,9 @@ impl<'p, A, const OUT_PROG_EXEC: bool> SoftPagedMem<'p, A, OUT_PROG_EXEC> {
         // Walk pagetable levels
         for lvl in (lookup_depth + 1..5).rev() {
-            let entry = unsafe {
-                (*current_pt)
-                    .table
-                    .get_unchecked_mut(addr_extract_index(target, lvl))
-            };
+            let entry = (*current_pt)
+                .table
+                .get_unchecked_mut(addr_extract_index(target, lvl));

             let ptr = entry.ptr();
             match entry.permission() {
@@ -48,13 +46,13 @@ impl<'p, A, const OUT_PROG_EXEC: bool> SoftPagedMem<'p, A, OUT_PROG_EXEC> {
                 // No worries! Let's create one (allocates).
                 Permission::Empty => {
                     // Increase children count
-                    unsafe { *current_pt }.childen += 1;
+                    (*current_pt).childen += 1;

                     let table = Box::into_raw(Box::new(PtPointedData {
                         pt: PageTable::default(),
                     }));

-                    unsafe { core::ptr::write(entry, PtEntry::new(table, Permission::Node)) };
+                    core::ptr::write(entry, PtEntry::new(table, Permission::Node));
                     current_pt = table as _;
                 }
                 // Continue walking
@@ -65,11 +63,9 @@ impl<'p, A, const OUT_PROG_EXEC: bool> SoftPagedMem<'p, A, OUT_PROG_EXEC> {
             }
         }

-        let node = unsafe {
-            (*current_pt)
-                .table
-                .get_unchecked_mut(addr_extract_index(target, lookup_depth))
-        };
+        let node = (*current_pt)
+            .table
+            .get_unchecked_mut(addr_extract_index(target, lookup_depth));

         // Check if node is not mapped
         if node.permission() != Permission::Empty {
@@ -77,10 +73,8 @@ impl<'p, A, const OUT_PROG_EXEC: bool> SoftPagedMem<'p, A, OUT_PROG_EXEC> {
         }

         // Write entry
-        unsafe {
-            (*current_pt).childen += 1;
-            core::ptr::write(node, PtEntry::new(host.cast(), perm));
-        }
+        (*current_pt).childen += 1;
+        core::ptr::write(node, PtEntry::new(host.cast(), perm));

         Ok(())
     }
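
Each iteration of the walk above uses one fixed-width slice of the virtual address as an index into the current table. A hedged sketch of that extraction with assumed widths; the real `addr_extract_index` and its constants are defined elsewhere in the crate and may differ:

```rust
// Assumed layout for illustration only: a 12-bit page offset and 8-bit
// per-level indices. The real widths come from the hbvm soft-paging code.
fn addr_extract_index_sketch(target: u64, lvl: u8) -> usize {
    const PAGE_BITS: u32 = 12;
    const LVL_BITS: u32 = 8;
    ((target >> (PAGE_BITS + LVL_BITS * lvl as u32)) & ((1u64 << LVL_BITS) - 1)) as usize
}
```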

View file

@@ -51,7 +51,7 @@ impl<'p, PfH: HandlePageFault, const OUT_PROG_EXEC: bool> Memory
             target,
             count,
             perm_check::readable,
-            |src, dst, count| unsafe { core::ptr::copy_nonoverlapping(src, dst, count) },
+            |src, dst, count| core::ptr::copy_nonoverlapping(src, dst, count),
         )
         .map_err(LoadError)
     }
@@ -72,7 +72,7 @@ impl<'p, PfH: HandlePageFault, const OUT_PROG_EXEC: bool> Memory
             source.cast_mut(),
             count,
             perm_check::writable,
-            |dst, src, count| unsafe { core::ptr::copy_nonoverlapping(src, dst, count) },
+            |dst, src, count| core::ptr::copy_nonoverlapping(src, dst, count),
         )
         .map_err(StoreError)
     }
@@ -80,14 +80,16 @@ impl<'p, PfH: HandlePageFault, const OUT_PROG_EXEC: bool> Memory
     #[inline(always)]
     unsafe fn prog_read<T>(&mut self, addr: Address) -> T {
         if OUT_PROG_EXEC && addr.truncate_usize() > self.program.len() {
-            return unsafe { self.icache.fetch::<T>(addr, self.root_pt) }
+            return self
+                .icache
+                .fetch::<T>(addr, self.root_pt)
                 .unwrap_or_else(|| unsafe { core::mem::zeroed() });
         }

         let addr = addr.truncate_usize();
         self.program
             .get(addr..addr + size_of::<T>())
-            .map(|x| unsafe { x.as_ptr().cast::<T>().read() })
+            .map(|x| x.as_ptr().cast::<T>().read())
             .unwrap_or_else(|| unsafe { core::mem::zeroed() })
     }
 }

View file

@@ -17,8 +17,7 @@ use {
 macro_rules! handler {
     ($self:expr, |$ty:ident ($($ident:pat),* $(,)?)| $expr:expr) => {{
-        #[allow(unused_unsafe)]
-        let $ty($($ident),*) = unsafe { $self.decode::<$ty>() };
+        let $ty($($ident),*) = $self.decode::<$ty>();
         #[allow(clippy::no_effect)] let e = $expr;
         $self.bump_pc::<$ty>();
         e
@@ -41,14 +40,14 @@ where
         // Contribution guide:
         // - Zero register shall never be overwitten. It's value has to always be 0.
         // - Prefer `Self::read_reg` and `Self::write_reg` functions
-        // - Try to use `handler!` macro for decoding and then bumping program counter
+        // - Extract parameters using `param!` macro
        // - Prioritise speed over code size
        // - Memory is cheap, CPUs not that much
        // - Do not heap allocate at any cost
        // - Yes, user-provided trap handler may allocate,
        //   but that is not our »fault«.
        // - Unsafe is kinda must, but be sure you have validated everything
-        // - Your contributions have to pass sanitizers, fuzzer and Miri
+        // - Your contributions have to pass sanitizers and Miri
        // - Strictly follow the spec
        // - The spec does not specify how you perform actions, in what order,
        //   just that the observable effects have to be performed in order and
@@ -375,16 +374,13 @@ where
     /// Bump instruction pointer
     #[inline(always)]
     fn bump_pc<T: Copy>(&mut self) {
-        self.pc = self
-            .pc
-            .wrapping_add(core::mem::size_of::<T>())
-            .wrapping_add(1);
+        self.pc = self.pc.wrapping_add(core::mem::size_of::<T>());
     }

     /// Decode instruction operands
     #[inline(always)]
     unsafe fn decode<T: Copy>(&mut self) -> T {
-        unsafe { self.memory.prog_read::<T>(self.pc + 1_u64) }
+        self.memory.prog_read::<T>(self.pc + 1_u64)
     }

     /// Load
@@ -401,16 +397,14 @@ where
             _ => 0,
         };

-        unsafe {
-            self.memory.load(
-                self.ldst_addr_uber(dst, base, offset, count, n)?,
-                self.registers
-                    .as_mut_ptr()
-                    .add(usize::from(dst) + usize::from(n))
-                    .cast(),
-                usize::from(count).saturating_sub(n.into()),
-            )
-        }?;
+        self.memory.load(
+            self.ldst_addr_uber(dst, base, offset, count, n)?,
+            self.registers
+                .as_mut_ptr()
+                .add(usize::from(dst) + usize::from(n))
+                .cast(),
+            usize::from(count).saturating_sub(n.into()),
+        )?;

         Ok(())
     }
@@ -424,13 +418,11 @@ where
         offset: u64,
         count: u16,
     ) -> Result<(), VmRunError> {
-        unsafe {
-            self.memory.store(
-                self.ldst_addr_uber(dst, base, offset, count, 0)?,
-                self.registers.as_ptr().add(usize::from(dst)).cast(),
-                count.into(),
-            )
-        }?;
+        self.memory.store(
+            self.ldst_addr_uber(dst, base, offset, count, 0)?,
+            self.registers.as_ptr().add(usize::from(dst)).cast(),
+            count.into(),
+        )?;

         Ok(())
     }
@@ -443,7 +435,7 @@ where
     /// Perform binary operating over two registers
     #[inline(always)]
     unsafe fn binary_op<T: ValueVariant>(&mut self, op: impl Fn(T, T) -> T) {
-        let OpsRRR(tg, a0, a1) = unsafe { self.decode() };
+        let OpsRRR(tg, a0, a1) = self.decode();
         self.write_reg(
             tg,
             op(self.read_reg(a0).cast::<T>(), self.read_reg(a1).cast::<T>()),
@@ -458,7 +450,7 @@ where
         #[repr(packed)]
         struct OpsRRImm<I>(OpsRR, I);
-        let OpsRRImm::<T>(OpsRR(tg, reg), imm) = unsafe { self.decode() };
+        let OpsRRImm::<T>(OpsRR(tg, reg), imm) = self.decode();
         self.write_reg(tg, op(self.read_reg(reg).cast::<T>(), imm));
         self.bump_pc::<OpsRRImm<T>>();
     }
@@ -466,7 +458,7 @@ where
     /// Perform binary operation over register and shift immediate
     #[inline(always)]
     unsafe fn binary_op_shift<T: ValueVariant>(&mut self, op: impl Fn(T, u32) -> T) {
-        let OpsRRR(tg, a0, a1) = unsafe { self.decode() };
+        let OpsRRR(tg, a0, a1) = self.decode();
         self.write_reg(
             tg,
             op(
@@ -480,7 +472,7 @@ where
     /// Perform binary operation over register and shift immediate
     #[inline(always)]
     unsafe fn binary_op_ims<T: ValueVariant>(&mut self, op: impl Fn(T, u32) -> T) {
-        let OpsRRB(tg, reg, imm) = unsafe { self.decode() };
+        let OpsRRB(tg, reg, imm) = self.decode();
         self.write_reg(tg, op(self.read_reg(reg).cast::<T>(), imm.into()));
         self.bump_pc::<OpsRRW>();
     }
@@ -539,7 +531,7 @@ where
     /// Jump at `PC + #3` if ordering on `#0 <=> #1` is equal to expected
     #[inline(always)]
     unsafe fn cond_jmp<T: ValueVariant + Ord>(&mut self, expected: Ordering) {
-        let OpsRRP(a0, a1, ja) = unsafe { self.decode() };
+        let OpsRRP(a0, a1, ja) = self.decode();
         if self
             .read_reg(a0)
             .cast::<T>()

View file

@@ -6,4 +6,4 @@ default-run = "hbxrt"

 [dependencies]
 hbvm.path = "../hbvm"
-memmap2 = "0.9"
+nix = { version = "0.27", features = ["mman", "signal"] }

View file

@@ -1,50 +1,65 @@
 //! Holey Bytes Experimental Runtime
-#![deny(unsafe_op_in_unsafe_fn)]
 mod mem;

 use {
     hbvm::{mem::Address, Vm, VmRunOk},
-    memmap2::Mmap,
-    std::{env::args, fs::File, mem::MaybeUninit, process::exit},
+    nix::sys::mman::{mmap, MapFlags, ProtFlags},
+    std::{env::args, fs::File, num::NonZeroUsize, process::exit},
 };

 fn main() -> Result<(), Box<dyn std::error::Error>> {
-    eprintln!("== HB×RT (Holey Bytes Experimental Runtime) v0.1 ==");
+    eprintln!("== HB×RT (Holey Bytes Linux Runtime) v0.1 ==");
     eprintln!("[W] Currently supporting only flat images");

-    let mut args = args().skip(1);
-    let Some(image_path) = args.next() else {
+    let Some(image_path) = args().nth(1) else {
         eprintln!("[E] Missing image path");
         exit(1);
     };

-    let dsls = args.next().as_deref() == Some("-L");
-    if cfg!(not(target_os = "linux")) && dsls {
-        eprintln!("[E] Unsupported platform for Direct Linux syscall mode");
-        exit(1);
-    }
-
-    if dsls {
-        eprintln!("[I] Direct Linux syscall mode activated")
-    }
-
-    // Allocate stack
-    let mut stack = Box::new(MaybeUninit::<[u8; 1024 * 1024 * 2]>::uninit());
-    eprintln!("[I] Stack allocated at {:p}", stack.as_ptr());
-
     // Load program
     eprintln!("[I] Loading image from \"{image_path}\"");
-    let file_handle = File::open(image_path)?;
-    let mmap = unsafe { Mmap::map(&file_handle) }?;
+    let file = File::open(image_path)?;
+    let ptr = unsafe {
+        mmap(
+            None,
+            NonZeroUsize::new(file.metadata()?.len() as usize).ok_or("File is empty")?,
+            ProtFlags::PROT_READ,
+            MapFlags::MAP_PRIVATE,
+            Some(&file),
+            0,
+        )?
+    };

-    eprintln!("[I] Image loaded at {:p}", mmap.as_ptr());
+    eprintln!("[I] Image loaded at {ptr:p}");

-    let mut vm = unsafe { Vm::<_, 0>::new(mem::HostMemory, Address::new(mmap.as_ptr() as u64)) };
-    vm.write_reg(254, stack.as_mut_ptr() as u64);
-
     // Execute program
+    let mut vm = unsafe { Vm::<_, 0>::new(mem::HostMemory, Address::new(ptr as u64)) };
+
+    // Memory access fault handling
+    unsafe {
+        use nix::sys::signal;
+
+        extern "C" fn action(
+            _: std::ffi::c_int,
+            info: *mut nix::libc::siginfo_t,
+            _: *mut std::ffi::c_void,
+        ) {
+            unsafe {
+                eprintln!("[E] Memory access fault at {:p}", (*info).si_addr());
+                exit(2);
+            }
+        }
+
+        signal::sigaction(
+            signal::Signal::SIGSEGV,
+            &nix::sys::signal::SigAction::new(
+                signal::SigHandler::SigAction(action),
+                signal::SaFlags::SA_NODEFER,
+                nix::sys::signalfd::SigSet::empty(),
+            ),
+        )?;
+    }
+
     let stat = loop {
         match vm.run() {
             Ok(VmRunOk::Breakpoint) => eprintln!(
@@ -52,7 +67,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
                 vm.pc, vm.registers
             ),
             Ok(VmRunOk::Timer) => (),
-            Ok(VmRunOk::Ecall) if dsls => unsafe {
+            Ok(VmRunOk::Ecall) => unsafe {
                 std::arch::asm!(
                     "syscall",
                     inlateout("rax") vm.registers[1].0,
@@ -64,10 +79,6 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
                     in("r9") vm.registers[7].0,
                 )
             },
-            Ok(VmRunOk::Ecall) => {
-                eprintln!("[E] General environment calls not supported");
-                exit(1);
-            }
             Ok(VmRunOk::End) => break Ok(()),
             Err(e) => break Err(e),
         }

View file

@@ -26,6 +26,6 @@ impl Memory for HostMemory {
     #[inline]
     unsafe fn prog_read<T: Copy>(&mut self, addr: Address) -> T {
-        unsafe { core::ptr::read(addr.get() as *const T) }
+        core::ptr::read(addr.get() as *const T)
     }
 }

spec.md
View file

@@ -480,26 +480,18 @@ Program counter stays on the currently executed instruction
 | long long | Long long integer | 8 |
 | float | Single-precision float | 4 |
 | double | Double-precision float | 8 |
-| long double | Extended-precision float | 8 |
+| long double | Extended-precision float | TBD |

-- Bikeshedding note: `long double` is now 8 bytes as
-  the base ISA does not support `f128`. an extension
-  for that should be made.
-
 ## Call convention
-- Registers r1 – r31 are caller saved
-- Registers r32 – r255 are callee saved
+- Registers r1 – r30 are caller saved
+- Registers r31 – r255 are callee saved

 | Register | Description | Saver |
-|:-----------|:--------------------|:-------|
+|:---------|:--------------------|:-------|
 | r0 | Hard-wired zero | N/A |
 | r1 - r2 | Return values | Caller |
 | r2 - r11 | Function parameters | Caller |
-| r12 - r30 | General purpose | Caller |
-| r31 | Return address | Caller |
-| r32 - r253 | General purpose | Callee |
-| r254 | Stack pointer | Callee |
-| r255 | Thread pointer | N/A |
+| r30 | Return address | Caller |

-- If return value is too big to fit r1, r2 is also used.
-- Values larger than two double-words are passed by reference
+- If return value is too big to fit one register, r2 is also used.
+TODO: Stack pointer, Thread pointer, ...