diff --git a/Cargo.toml b/Cargo.toml index 50c8d8c..d4c33a3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,8 +23,8 @@ hbjit = { path = "jit" } [profile.release] lto = true -debug = true -#strip = true +#debug = true +strip = true codegen-units = 1 panic = "abort" @@ -32,7 +32,7 @@ panic = "abort" rustflags = ["-Zfmt-debug=none", "-Zlocation-detail=none"] inherits = "release" opt-level = "z" -strip = true +strip = "debuginfo" lto = true codegen-units = 1 panic = "abort" diff --git a/lang/Cargo.toml b/lang/Cargo.toml index 3c40423..6c7e5e0 100644 --- a/lang/Cargo.toml +++ b/lang/Cargo.toml @@ -20,9 +20,9 @@ log = "0.4.22" [dependencies.regalloc2] git = "https://github.com/jakubDoka/regalloc2" branch = "reuse-allocations" -features = ["trace-log"] +default-features = false [features] -default = ["std"] +default = ["std", "regalloc2/trace-log"] std = [] no_log = ["log/max_level_off"] diff --git a/lang/README.md b/lang/README.md index f75e8f0..5c7d021 100644 --- a/lang/README.md +++ b/lang/README.md @@ -135,9 +135,6 @@ fib := fn(n: uint): uint { #### pointers ```hb main := fn(): uint { - n := @as(^uint, null) - if n != null return 9001 - a := 1 b := &a @@ -161,6 +158,53 @@ drop := fn(a: uint): void { } ``` +#### nullable_types +```hb +main := fn(): uint { + a := &1 + + b := @as(?^uint, null) + if decide() b = a + + if b == null return 9001 + + c := @as(?uint, *b) + if decide() c = null + + if c != null return 42 + + d := @as(?u16, null) + if decide() d = 1 + + if d == null return 69 + + f := @as(?Foo, null) + + if decide() f = .(a, 1) + + if f == null return 34 + + bar := @as(?Bar, .(a, 1)) + + if decide() bar = null + + if bar != null return 420 + + g := @as(?^uint, null) + g = a + + _rd := *g + + return d - *f.a +} + +Foo := struct {a: ^uint, b: uint} + +Bar := struct {a: ?^uint, b: uint} + +decide := fn(): bool return true +``` + #### structs ```hb Ty := struct { @@ -419,6 +463,15 @@ main := fn(): uint { } ``` +#### die +```hb +main := fn(): never { + // simply emmits 'un' instruction that immediately terminates the execution + // the expresion has similar properties to 'return' but does not accept a value + die +} +``` + ### Incomplete Examples #### comptime_pointers @@ -439,7 +492,7 @@ modify := fn($num: ^uint): void { MALLOC_SYS_CALL := 69 FREE_SYS_CALL := 96 -malloc := fn(size: uint, align: uint): ^void return @eca(MALLOC_SYS_CALL, size, align) +malloc := fn(size: uint, align: uint): ?^void return @eca(MALLOC_SYS_CALL, size, align) free := fn(ptr: ^void, size: uint, align: uint): void return @eca(FREE_SYS_CALL, ptr, size, align) Vec := fn($Elem: type): type { @@ -458,7 +511,7 @@ deinit := fn($Elem: type, vec: ^Vec(Elem)): void { return } -push := fn($Elem: type, vec: ^Vec(Elem), value: Elem): ^Elem { +push := fn($Elem: type, vec: ^Vec(Elem), value: Elem): ?^Elem { if vec.len == vec.cap { if vec.cap == 0 { vec.cap = 1 @@ -466,11 +519,11 @@ push := fn($Elem: type, vec: ^Vec(Elem), value: Elem): ^Elem { vec.cap *= 2 } - new_alloc := @as(^Elem, @bitcast(malloc(vec.cap * @sizeof(Elem), @alignof(Elem)))) - if new_alloc == 0 return @bitcast(0) + new_alloc := @as(?^Elem, @bitcast(malloc(vec.cap * @sizeof(Elem), @alignof(Elem)))) + if new_alloc == null return null src_cursor := vec.data - dst_cursor := new_alloc + dst_cursor := @as(^Elem, new_alloc) end := vec.data + vec.len loop if src_cursor == end break else { @@ -540,6 +593,30 @@ main := fn(): uint { ### Purely Testing Examples +#### only_break_loop +```hb +memory := @use("memory.hb") + +bar := fn(): int { + loop if memory.inb(0x64) != 0 
return 1 +} + +foo := fn(): void { + loop if (memory.inb(0x64) & 2) == 0 break + memory.outb(0x60, 0x0) +} + +main := fn(): int { + @inline(foo) + return @inline(bar) +} + +// in module: memory.hb +inb := fn(f: int): int return f +outb := fn(f: int, g: int): void { +} +``` + #### reading_idk ```hb main := fn(): int { @@ -1210,3 +1287,4 @@ main := fn(): int { opaque := fn(): Foo { return .(3, 2) } +``` diff --git a/lang/src/fmt.rs b/lang/src/fmt.rs index fb371f7..57a2dcb 100644 --- a/lang/src/fmt.rs +++ b/lang/src/fmt.rs @@ -371,6 +371,7 @@ impl<'a> Formatter<'a> { } Expr::Bool { value, .. } => f.write_str(if value { "true" } else { "false" }), Expr::Idk { .. } => f.write_str("idk"), + Expr::Die { .. } => f.write_str("die"), Expr::Null { .. } => f.write_str("null"), Expr::BinOp { left, diff --git a/lang/src/lexer.rs b/lang/src/lexer.rs index 674ab2d..9996357 100644 --- a/lang/src/lexer.rs +++ b/lang/src/lexer.rs @@ -134,6 +134,7 @@ pub enum TokenKind { False, Null, Idk, + Die, Ctor, Tupl, @@ -306,6 +307,7 @@ gen_token_kind! { False = b"false", Null = b"null", Idk = b"idk", + Die = b"die", #[punkt] Ctor = ".{", Tupl = ".(", diff --git a/lang/src/lib.rs b/lang/src/lib.rs index a301b08..5d03b58 100644 --- a/lang/src/lib.rs +++ b/lang/src/lib.rs @@ -23,7 +23,8 @@ slice_from_ptr_range, is_sorted, iter_next_chunk, - pointer_is_aligned_to + pointer_is_aligned_to, + maybe_uninit_fill )] #![warn(clippy::dbg_macro)] #![expect(stable_features, internal_features)] @@ -67,9 +68,10 @@ pub mod fs; pub mod fuzz; pub mod lexer; pub mod parser; +pub mod regalloc; pub mod son; -mod vc; +mod utils; mod debug { pub fn panicking() -> bool { @@ -289,6 +291,7 @@ mod ty { pub type Builtin = u32; pub type Struct = u32; + pub type Opt = u32; pub type Ptr = u32; pub type Func = u32; pub type Global = u32; @@ -372,6 +375,7 @@ mod ty { crate::SymKey::Struct(st.file, st.pos, st.captures) } Kind::Ptr(p) => crate::SymKey::Pointer(&ctx.ptrs[p as usize]), + Kind::Opt(p) => crate::SymKey::Optional(&ctx.opts[p as usize]), Kind::Func(f) => { let fc = &ctx.funcs[f as usize]; if let Some(base) = fc.base { @@ -440,6 +444,10 @@ mod ty { matches!(self.expand(), Kind::Ptr(_)) || self.is_never() } + pub fn is_optional(self) -> bool { + matches!(self.expand(), Kind::Opt(_)) || self.is_never() + } + pub fn try_upcast(self, ob: Self) -> Option { let (oa, ob) = (Self(self.0.min(ob.0)), Self(self.0.max(ob.0))); let (a, b) = (oa.strip_pointer(), ob.strip_pointer()); @@ -447,6 +455,7 @@ mod ty { _ if oa == Self::from(NEVER) => ob, _ if ob == Self::from(NEVER) => oa, _ if oa == ob => oa, + _ if ob.is_optional() => ob, _ if oa.is_pointer() && ob.is_pointer() => return None, _ if a.is_signed() && b.is_signed() || a.is_unsigned() && b.is_unsigned() => ob, _ if a.is_unsigned() && b.is_signed() && a.repr() - U8 < b.repr() - I8 => ob, @@ -489,9 +498,16 @@ mod ty { pub(crate) fn loc(&self, tys: &Types) -> Loc { match self.expand() { + Kind::Opt(o) + if let ty = tys.ins.opts[o as usize].base + && ty.loc(tys) == Loc::Reg + && (ty.is_pointer() || tys.size_of(ty) < 8) => + { + Loc::Reg + } Kind::Ptr(_) | Kind::Builtin(_) => Loc::Reg, Kind::Struct(_) if tys.size_of(*self) == 0 => Loc::Reg, - Kind::Struct(_) | Kind::Slice(_) => Loc::Stack, + Kind::Struct(_) | Kind::Slice(_) | Kind::Opt(_) => Loc::Stack, Kind::Func(_) | Kind::Global(_) | Kind::Module(_) => unreachable!(), } } @@ -633,10 +649,11 @@ mod ty { Builtin, Struct, Ptr, + Slice, + Opt, Func, Global, Module, - Slice, } } @@ -674,6 +691,10 @@ mod ty { f.write_str("]") } TK::Builtin(ty) => 
f.write_str(to_str(ty)), + TK::Opt(ty) => { + f.write_str("?")?; + self.rety(self.tys.ins.opts[ty as usize].base).fmt(f) + } TK::Ptr(ty) => { f.write_str("^")?; self.rety(self.tys.ins.ptrs[ty as usize].base).fmt(f) @@ -729,6 +750,7 @@ type Size = u32; #[derive(PartialEq, Eq, Hash, Clone, Copy)] pub enum SymKey<'a> { Pointer(&'a Ptr), + Optional(&'a Opt), Struct(FileId, Pos, ty::Tuple), FuncInst(ty::Func, ty::Tuple), Decl(FileId, Ident), @@ -839,7 +861,12 @@ struct Struct { field_start: u32, } -#[derive(PartialEq, Eq, Hash)] +#[derive(PartialEq, Eq, Hash, Clone, Copy)] +pub struct Opt { + base: ty::Id, +} + +#[derive(PartialEq, Eq, Hash, Clone, Copy)] pub struct Ptr { base: ty::Id, } @@ -934,6 +961,7 @@ pub struct TypeIns { structs: Vec, fields: Vec, ptrs: Vec, + opts: Vec, slices: Vec, } @@ -995,16 +1023,14 @@ trait TypeParser { let Some((Expr::BinOp { left, right, .. }, name)) = f.find_decl(id) else { return match id { - Ok(name) => { - let name = files[from_file as usize].ident_str(name); - self.report(from_file, pos, format_args!("undefined indentifier: {name}")) - } + Ok(_) => ty::Id::NEVER, Err("main") => self.report( from_file, pos, format_args!( "missing main function in '{}', compiler can't \ - emmit libraries since such concept is not defined", + emmit libraries since such concept is not defined \ + (minimal main function: `main := fn(): void {{}}`)", f.path ), ), @@ -1064,6 +1090,10 @@ trait TypeParser { let base = self.parse_ty(file, val, None, files); self.tys().make_ptr(base) } + Expr::UnOp { op: TokenKind::Que, val, .. } => { + let base = self.parse_ty(file, val, None, files); + self.tys().make_opt(base) + } Expr::Ident { id, .. } if id.is_null() => id.len().into(), Expr::Ident { id, pos, .. } => self.find_type(pos, file, file, Ok(id), files), Expr::Field { target, pos, name } @@ -1205,64 +1235,44 @@ impl Types { (ret, iter) } + fn make_opt(&mut self, base: ty::Id) -> ty::Id { + self.make_generic_ty( + Opt { base }, + |ins| &mut ins.opts, + |e| SymKey::Optional(e), + ty::Kind::Opt, + ) + } + fn make_ptr(&mut self, base: ty::Id) -> ty::Id { - ty::Kind::Ptr(self.make_ptr_low(base)).compress() + self.make_generic_ty( + Ptr { base }, + |ins| &mut ins.ptrs, + |e| SymKey::Pointer(e), + ty::Kind::Ptr, + ) } - fn make_ptr_low(&mut self, base: ty::Id) -> ty::Ptr { - let ptr = Ptr { base }; - let (entry, hash) = self.syms.entry(SymKey::Pointer(&ptr), &self.ins); - match entry { - hash_map::RawEntryMut::Occupied(o) => o.get_key_value().0.value, - hash_map::RawEntryMut::Vacant(v) => { - self.ins.ptrs.push(ptr); - v.insert( - ctx_map::Key { - value: ty::Kind::Ptr(self.ins.ptrs.len() as u32 - 1).compress(), - hash, - }, - (), - ) - .0 - .value - } - } - .expand() - .inner() + fn make_array(&mut self, elem: ty::Id, len: ArrayLen) -> ty::Id { + self.make_generic_ty( + Array { elem, len }, + |ins| &mut ins.slices, + |e| SymKey::Array(e), + ty::Kind::Slice, + ) } - fn make_array(&mut self, ty: ty::Id, len: ArrayLen) -> ty::Id { - ty::Kind::Slice(self.make_array_low(ty, len)).compress() - } - - fn make_array_low(&mut self, ty: ty::Id, len: ArrayLen) -> ty::Slice { - self.syms - .get_or_insert(SymKey::Array(&Array { elem: ty, len }), &mut self.ins, |ins| { - ins.slices.push(Array { elem: ty, len }); - ty::Kind::Slice(ins.slices.len() as u32 - 1).compress() - }) - .expand() - .inner() - - //let array = Array { ty, len }; - //let (entry, hash) = self.syms.entry(SymKey::Array(&array), &self.ins); - //match entry { - // hash_map::RawEntryMut::Occupied(o) => o.get_key_value().0.value, - // 
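For context, this hunk replaces the hand-written `make_ptr_low`/`make_array_low` bodies with a single `make_generic_ty` interning helper that also serves the new optional type. Below is a minimal standalone sketch of that pattern (hash-cons a descriptor, hand back a stable index); `Interner` and `TypeDesc` are illustrative names, not the compiler's `ctx_map`/`SymKey` machinery, and a plain `std::collections::HashMap` stands in for the raw-entry map:

```rust
use std::collections::HashMap;

/// Stores each distinct descriptor once and returns a stable index for it.
struct Interner<T> {
    lookup: HashMap<T, u32>,
    storage: Vec<T>,
}

impl<T: std::hash::Hash + Eq + Clone> Interner<T> {
    fn new() -> Self {
        Self { lookup: HashMap::new(), storage: Vec::new() }
    }

    /// Returns the existing index for `desc`, or appends it and returns the new one.
    fn intern(&mut self, desc: T) -> u32 {
        if let Some(&id) = self.lookup.get(&desc) {
            return id;
        }
        let id = self.storage.len() as u32;
        self.storage.push(desc.clone());
        self.lookup.insert(desc, id);
        id
    }
}

/// Stand-ins for the `Ptr`, `Opt` and `Array` descriptors keyed by `SymKey` in the patch.
#[derive(Hash, PartialEq, Eq, Clone)]
enum TypeDesc {
    Ptr { base: u32 },
    Opt { base: u32 },
    Slice { elem: u32, len: u32 },
}

fn main() {
    let mut types = Interner::new();
    let a = types.intern(TypeDesc::Ptr { base: 1 });
    let b = types.intern(TypeDesc::Ptr { base: 1 });
    let c = types.intern(TypeDesc::Opt { base: 1 });
    let d = types.intern(TypeDesc::Slice { elem: 1, len: 4 });
    assert_eq!(a, b); // identical descriptors share one slot
    assert!(a != c && c != d);
    println!("ptr={a} opt={c} slice={d}");
}
```

The real helper differs mainly in that the backing `Vec` lives in `TypeIns` and is selected by the `get_col` closure, so one routine can serve pointers, optionals and slices.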
hash_map::RawEntryMut::Vacant(v) => { - // self.ins.arrays.push(array); - // v.insert( - // ctx_map::Key { - // value: ty::Kind::Slice(self.ins.ptrs.len() as u32 - 1).compress(), - // hash, - // }, - // (), - // ) - // .0 - // .value - // } - //} - //.expand() - //.inner() + fn make_generic_ty( + &mut self, + ty: T, + get_col: fn(&mut TypeIns) -> &mut Vec, + key: fn(&T) -> SymKey, + kind: fn(u32) -> ty::Kind, + ) -> ty::Id { + *self.syms.get_or_insert(key(&{ ty }), &mut self.ins, |ins| { + get_col(ins).push(ty); + kind(get_col(ins).len() as u32 - 1).compress() + }) } fn size_of(&self, ty: ty::Id) -> Size { @@ -1285,6 +1295,14 @@ impl Types { self.ins.structs[stru as usize].size.set(oiter.offset); oiter.offset } + ty::Kind::Opt(opt) => { + let base = self.ins.opts[opt as usize].base; + if self.nieche_of(base).is_some() { + self.size_of(base) + } else { + self.size_of(base) + self.align_of(base) + } + } _ if let Some(size) = ty.simple_size() => size, ty => unimplemented!("size_of: {:?}", ty), } @@ -1327,6 +1345,37 @@ impl Types { } } + fn inner_of(&self, ty: ty::Id) -> Option { + match ty.expand() { + ty::Kind::Opt(o) => Some(self.ins.opts[o as usize].base), + _ => None, + } + } + + fn opt_layout(&self, inner_ty: ty::Id) -> OptLayout { + match self.nieche_of(inner_ty) { + Some((_, flag_offset, flag_ty)) => { + OptLayout { flag_ty, flag_offset, payload_offset: 0 } + } + None => OptLayout { + flag_ty: ty::Id::BOOL, + flag_offset: 0, + payload_offset: self.align_of(inner_ty), + }, + } + } + + fn nieche_of(&self, ty: ty::Id) -> Option<(bool, Offset, ty::Id)> { + match ty.expand() { + ty::Kind::Ptr(_) => Some((false, 0, ty::Id::UINT)), + // TODO: cache this + ty::Kind::Struct(s) => OffsetIter::new(s, self).into_iter(self).find_map(|(f, off)| { + self.nieche_of(f.ty).map(|(uninit, o, ty)| (uninit, o + off, ty)) + }), + _ => None, + } + } + fn find_struct_field(&self, s: ty::Struct, name: &str) -> Option { let name = self.names.project(name)?; self.struct_fields(s).iter().position(|f| f.name == name) @@ -1355,6 +1404,12 @@ impl Types { } } +struct OptLayout { + flag_ty: ty::Id, + flag_offset: Offset, + payload_offset: Offset, +} + struct OffsetIter { strct: ty::Struct, offset: Offset, diff --git a/lang/src/parser.rs b/lang/src/parser.rs index 91de4b5..e8c9e57 100644 --- a/lang/src/parser.rs +++ b/lang/src/parser.rs @@ -337,6 +337,7 @@ impl<'a, 'b> Parser<'a, 'b> { T::False => E::Bool { pos, value: false }, T::Null => E::Null { pos }, T::Idk => E::Idk { pos }, + T::Die => E::Die { pos }, T::DQuote => E::String { pos, literal: self.tok_str(token) }, T::Packed => { self.packed = true; @@ -443,7 +444,7 @@ impl<'a, 'b> Parser<'a, 'b> { pos }, }, - T::Band | T::Mul | T::Xor | T::Sub => E::UnOp { + T::Band | T::Mul | T::Xor | T::Sub | T::Que => E::UnOp { pos, op: token.kind, val: { @@ -903,6 +904,10 @@ generate_expr! 
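To make the sizing rule for `?T` introduced above concrete, here is a simplified standalone model of the `size_of`/`opt_layout`/`nieche_of` trio; the `Ty` enum, its byte sizes and the helper names are illustrative, not the compiler's real type table. A type with a zero-means-null niche (a pointer, or a struct that contains one) costs nothing extra, while everything else gets a boolean flag followed by the payload at its natural alignment:

```rust
/// Illustrative inner types: a small integer, a pointer, and a struct with a
/// pointer field (so the struct inherits a niche through that field).
#[derive(Clone, Copy)]
enum Ty {
    U16,
    Ptr,
    Pair, // struct { a: ^uint, b: uint }
}

fn size_of(ty: Ty) -> u32 {
    match ty {
        Ty::U16 => 2,
        Ty::Ptr => 8,
        Ty::Pair => 16,
    }
}

fn align_of(ty: Ty) -> u32 {
    match ty {
        Ty::U16 => 2,
        _ => 8,
    }
}

/// Offset of a zero-means-null word inside `ty`, if one exists.
fn niche_of(ty: Ty) -> Option<u32> {
    match ty {
        Ty::Ptr => Some(0),
        Ty::Pair => Some(0), // found through the pointer field at offset 0
        Ty::U16 => None,
    }
}

struct OptLayout {
    flag_offset: u32,
    payload_offset: u32,
    size: u32,
}

fn opt_layout(inner: Ty) -> OptLayout {
    match niche_of(inner) {
        // the null pattern lives inside the payload, so `?T` is free
        Some(flag_offset) => OptLayout { flag_offset, payload_offset: 0, size: size_of(inner) },
        // otherwise prepend a flag and keep the payload aligned
        None => OptLayout {
            flag_offset: 0,
            payload_offset: align_of(inner),
            size: size_of(inner) + align_of(inner),
        },
    }
}

fn main() {
    assert_eq!(opt_layout(Ty::Ptr).size, 8); // ?^uint stays pointer-sized
    assert_eq!(opt_layout(Ty::Pair).size, 16); // niche found inside the struct
    let l = opt_layout(Ty::U16);
    assert_eq!((l.flag_offset, l.payload_offset, l.size), (0, 2, 4)); // flag + aligned payload
}
```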
{ Idk { pos: Pos, }, + /// `'die'` + Die { + pos: Pos, + }, /// `'@' Ident List('(', ',', ')', Expr)` Directive { pos: Pos, diff --git a/lang/src/regalloc.rs b/lang/src/regalloc.rs new file mode 100644 index 0000000..ecaccf7 --- /dev/null +++ b/lang/src/regalloc.rs @@ -0,0 +1,150 @@ +use {crate::reg::Reg, alloc::vec::Vec, core::ops::Range}; + +type Nid = u16; + +pub trait Ctx { + fn uses_of(&self, nid: Nid) -> impl Iterator; + fn params_of(&self, nid: Nid) -> impl Iterator; + fn args_of(&self, nid: Nid) -> impl Iterator; + fn dom_of(&self, nid: Nid) -> Nid; +} + +pub struct Env<'a, C: Ctx> { + ctx: &'a C, + func: &'a Func, + res: &'a mut Res, +} + +impl<'a, C: Ctx> Env<'a, C> { + pub fn new(ctx: &'a C, func: &'a Func, res: &'a mut Res) -> Self { + Self { ctx, func, res } + } + + pub fn run(&mut self) { + self.res.reg_to_node.clear(); + self.res.reg_to_node.resize(self.func.instrs.len(), 0); + + let mut bundle = Bundle::new(self.func.instrs.len()); + for &inst in &self.func.instrs { + for uinst in self.ctx.uses_of(inst) { + let mut cursor = self.ctx.dom_of(uinst); + while cursor != self.ctx.dom_of(inst) { + let mut range = self.func.blocks + [self.func.id_to_block[cursor as usize] as usize] + .range + .clone(); + range.start = range.start.max(inst as usize); + range.end = range.end.min(uinst as usize); + bundle.add(range); + cursor = self.ctx.dom_of(cursor); + } + } + + match self.res.bundles.iter_mut().enumerate().find(|(_, b)| !b.overlaps(&bundle)) { + Some((i, other)) => { + other.merge(&bundle); + bundle.clear(); + self.res.reg_to_node[inst as usize] = i as Reg; + } + None => { + self.res.reg_to_node[inst as usize] = self.res.bundles.len() as Reg; + self.res.bundles.push(bundle); + bundle = Bundle::new(self.func.instrs.len()); + } + } + } + } +} + +pub struct Res { + bundles: Vec, + pub reg_to_node: Vec, +} + +pub struct Bundle { + //unit_range: Range, + //set: BitSet, + taken: Vec, +} + +impl Bundle { + fn new(size: usize) -> Self { + Self { taken: vec![false; size] } + } + + fn add(&mut self, range: Range) { + self.taken[range].fill(true); + } + + fn overlaps(&self, other: &Self) -> bool { + self.taken.iter().zip(other.taken.iter()).any(|(a, b)| a & b) + } + + fn merge(&mut self, other: &Self) { + debug_assert!(!self.overlaps(other)); + self.taken.iter_mut().zip(other.taken.iter()).for_each(|(a, b)| *a = *b); + } + + fn clear(&mut self) { + self.taken.fill(false); + } + + //fn overlaps(&self, other: &Self) -> bool { + // if self.unit_range.start >= other.unit_range.end + // || self.unit_range.end <= other.unit_range.start + // { + // return false; + // } + + // let [mut a, mut b] = [self, other]; + // if a.unit_range.start > b.unit_range.start { + // mem::swap(&mut a, &mut b); + // } + // let [mut tmp_a, mut tmp_b] = [0; 2]; + // let [units_a, units_b] = [a.set.units(&mut tmp_a), b.set.units(&mut tmp_b)]; + // let len = a.unit_range.len().min(b.unit_range.len()); + // let [units_a, units_b] = + // [&units_a[b.unit_range.start - a.unit_range.start..][..len], &units_b[..len]]; + // units_a.iter().zip(units_b).any(|(&a, &b)| a & b != 0) + //} + + //fn merge(mut self, mut other: Self) -> Self { + // debug_assert!(!self.overlaps(&other)); + + // if self.unit_range.start > other.unit_range.start { + // mem::swap(&mut self, &mut other); + // } + + // let final_range = self.unit_range.start..self.unit_range.end.max(other.unit_range.end); + + // self.set.reserve(final_range.len()); + + // let mut tmp = 0; + // let other_units = other.set.units(&mut tmp); + + // match self.set.units_mut() { + 
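For context, `Env::run` in the new `regalloc.rs` packs each value's live slots into the first `Bundle` it does not collide with, and the bundle index doubles as the register number. The following is a minimal sketch of just that overlap/merge packing, with the dominator-walk liveness computation replaced by precomputed ranges (`assign` and the sample ranges are illustrative, not part of the patch):

```rust
use std::ops::Range;

/// Instruction slots already claimed by the values merged into this bundle,
/// mirroring the boolean-vector variant used above.
struct Bundle {
    taken: Vec<bool>,
}

impl Bundle {
    fn new(len: usize) -> Self {
        Self { taken: vec![false; len] }
    }

    fn add(&mut self, range: Range<usize>) {
        self.taken[range].fill(true);
    }

    fn overlaps(&self, other: &Self) -> bool {
        self.taken.iter().zip(&other.taken).any(|(a, b)| *a && *b)
    }

    fn merge(&mut self, other: &Self) {
        debug_assert!(!self.overlaps(other));
        for (a, b) in self.taken.iter_mut().zip(&other.taken) {
            *a |= *b;
        }
    }
}

/// Assigns each live range to the first non-overlapping bundle, so the
/// returned indices play the role of `reg_to_node`.
fn assign(len: usize, live_ranges: &[Range<usize>]) -> Vec<usize> {
    let mut bundles: Vec<Bundle> = Vec::new();
    let mut regs = Vec::with_capacity(live_ranges.len());
    for range in live_ranges {
        let mut cur = Bundle::new(len);
        cur.add(range.clone());
        match bundles.iter_mut().position(|b| !b.overlaps(&cur)) {
            Some(i) => {
                bundles[i].merge(&cur);
                regs.push(i);
            }
            None => {
                regs.push(bundles.len());
                bundles.push(cur);
            }
        }
    }
    regs
}

fn main() {
    // v0 lives over 0..4, v1 over 2..6 (clashes with v0), v2 over 4..8 (reuses v0's register).
    assert_eq!(assign(8, &[0..4, 2..6, 4..8]), vec![0, 1, 0]);
}
```

The boolean vector trades memory for simplicity; the commented-out code kept in the file sketches a packed `unit_range` plus bitset alternative for the same two operations.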
// Ok(units) => { + // units[other.unit_range.start - self.unit_range.start..] + // .iter_mut() + // .zip(other_units) + // .for_each(|(a, b)| *a |= b); + // } + // Err(view) => view.add_mask(tmp), + // } + + // self + //} +} + +pub struct Func { + pub blocks: Vec, + pub instrs: Vec, + pub id_to_instr: Vec, + pub id_to_block: Vec, +} + +pub struct Block { + pub range: Range, + pub start_id: Nid, + pub eld_id: Nid, +} diff --git a/lang/src/son.rs b/lang/src/son.rs index 779e819..900f704 100644 --- a/lang/src/son.rs +++ b/lang/src/son.rs @@ -11,9 +11,9 @@ use { }, task, ty::{self, Arg, ArrayLen, Loc, Tuple}, - vc::{BitSet, Vc}, - FTask, Func, Global, Ident, Offset, OffsetIter, Reloc, Sig, StringRef, SymKey, TypeParser, - TypedReloc, Types, + utils::{BitSet, Vc}, + FTask, Func, Global, Ident, Offset, OffsetIter, OptLayout, Reloc, Sig, StringRef, SymKey, + TypeParser, TypedReloc, Types, }, alloc::{string::String, vec::Vec}, core::{ @@ -39,6 +39,7 @@ const GLOBAL_ACLASS: usize = 1; pub mod hbvm; type Nid = u16; +type AClassId = u16; type Lookup = crate::ctx_map::CtxMap; @@ -137,7 +138,7 @@ impl Nodes { } depth } - Kind::Start | Kind::End => 1, + Kind::Start | Kind::End | Kind::Die => 1, u => unreachable!("{u:?}"), }; @@ -198,14 +199,18 @@ impl Nodes { } let mut deepest = VOID; - for i in 1..self[node].inputs.len() { + for i in 0..self[node].inputs.len() { let inp = self[node].inputs[i]; if self.idepth(inp) > self.idepth(deepest) { - deepest = self.idom(inp); + if matches!(self[inp].kind, Kind::Call { .. }) { + deepest = inp; + } else { + deepest = self.idom(inp); + } } } - if deepest == VOID { + if deepest == self[node].inputs[0] { return; } @@ -326,6 +331,14 @@ impl Nodes { } } + if self[node].kind == Kind::Load { + min = self.find_antideps(node, min); + } + + if self[node].kind == Kind::Stre { + self[node].antidep = self[node].inputs[0]; + } + if self[min].kind.ends_basic_block() { min = self.idom(min); } @@ -340,6 +353,67 @@ impl Nodes { self[min].outputs.push(node); } + fn find_antideps(&mut self, load: Nid, mut min: Nid) -> Nid { + debug_assert!(self[load].kind == Kind::Load); + + let (aclass, _) = self.aclass_index(self[load].inputs[1]); + + let mut cursor = min; + while cursor != self[load].inputs[0] { + self[cursor].antidep = load; + if self[cursor].clobbers.get(aclass as _) { + min = self[cursor].inputs[0]; + break; + } + cursor = self.idom(cursor); + } + + if self[load].inputs[2] == MEM { + return min; + } + + for out in self[self[load].inputs[2]].outputs.clone() { + match self[out].kind { + Kind::Stre => { + let mut cursor = self[out].inputs[0]; + while cursor != self[out].antidep { + if self[cursor].antidep == load { + min = self.common_dom(min, cursor); + if min == cursor { + self.bind(load, out); + } + break; + } + cursor = self.idom(cursor); + } + break; + } + Kind::Phi => { + let n = self[out].inputs[1..] 
+ .iter() + .position(|&n| n == self[load].inputs[2]) + .unwrap(); + let mut cursor = self[self[out].inputs[0]].inputs[n]; + while cursor != self[out].antidep { + if self[cursor].antidep == load { + min = self.common_dom(min, cursor); + break; + } + cursor = self.idom(cursor); + } + } + _ => {} + } + } + + min + } + + fn bind(&mut self, from: Nid, to: Nid) { + self[from].outputs.push(to); + self[to].inputs.push(from); + } + fn use_block(&mut self, target: Nid, from: Nid) -> Nid { if self[from].kind != Kind::Phi { return self.idom(from); @@ -405,7 +479,6 @@ impl Nodes { to_class .last_store .set_remove(self.new_node(ty::Id::VOID, Kind::Phi, inps), self); - to_class.loads.drain(..).for_each(|d| _ = d.remove(self)); } } } @@ -439,12 +512,18 @@ impl Nodes { if node.ty != ty::Id::VOID { writeln!( out, - " node{i}[label=\"{i} {} {}\" color={color}]", + " node{i}[label=\"{i} {} {} {} {}\" color={color}]", node.kind, - ty::Display::new(tys, files, node.ty) + ty::Display::new(tys, files, node.ty), + node.aclass, + node.mem, )?; } else { - writeln!(out, " node{i}[label=\"{i} {}\" color={color}]", node.kind,)?; + writeln!( + out, + " node{i}[label=\"{i} {} {} {}\" color={color}]", + node.kind, node.aclass, node.mem, + )?; } for (j, &o) in node.outputs.iter().enumerate() { @@ -575,6 +654,14 @@ impl Nodes { } } + fn new_const(&mut self, ty: ty::Id, value: impl Into) -> Nid { + self.new_node_nop(ty, Kind::CInt { value: value.into() }, [VOID]) + } + + fn new_const_lit(&mut self, ty: ty::Id, value: impl Into) -> Value { + self.new_node_lit(ty, Kind::CInt { value: value.into() }, [VOID]) + } + fn new_node_lit(&mut self, ty: ty::Id, kind: Kind, inps: impl Into) -> Value { Value::new(self.new_node(ty, kind, inps)).ty(ty) } @@ -675,14 +762,8 @@ impl Nodes { stack.iter().skip(prev_len).for_each(|&n| self.lock(n)); } - pub fn aclass_index(&self, mut region: Nid) -> (usize, Nid) { - loop { - region = match self[region].kind { - Kind::BinOp { op: TokenKind::Add | TokenKind::Sub } => self[region].inputs[1], - Kind::Phi if self[region].inputs[2] == 0 => self[region].inputs[1], - _ => break (self[region].aclass, region), - }; - } + pub fn aclass_index(&self, region: Nid) -> (usize, Nid) { + (self[region].aclass as _, self[region].mem) } fn peephole(&mut self, target: Nid) -> Option { @@ -699,18 +780,14 @@ impl Nodes { if let (&K::CInt { value: a }, &K::CInt { value: b }) = (&self[lhs].kind, &self[rhs].kind) { - return Some(self.new_node( - ty, - K::CInt { value: op.apply_binop(a, b, is_float) }, - [ctrl], - )); + return Some(self.new_const(ty, op.apply_binop(a, b, is_float))); } if lhs == rhs { match op { - T::Sub => return Some(self.new_node(ty, K::CInt { value: 0 }, [ctrl])), + T::Sub => return Some(self.new_const(ty, 0)), T::Add => { - let rhs = self.new_node_nop(ty, K::CInt { value: 2 }, [ctrl]); + let rhs = self.new_const(ty, 2); return Some( self.new_node(ty, K::BinOp { op: T::Mul }, [ctrl, lhs, rhs]), ); @@ -728,6 +805,12 @@ impl Nodes { if let K::CInt { value } = self[rhs].kind { match (op, value) { + (T::Eq, 0) if self[lhs].ty.is_pointer() || self[lhs].kind == Kind::Stck => { + return Some(self.new_const(ty::Id::BOOL, 0)); + } + (T::Ne, 0) if self[lhs].ty.is_pointer() || self[lhs].kind == Kind::Stck => { + return Some(self.new_const(ty::Id::BOOL, 1)); + } (T::Add | T::Sub | T::Shl, 0) | (T::Mul | T::Div, 1) => return Some(lhs), (T::Mul, 0) => return Some(rhs), _ => {} @@ -740,11 +823,7 @@ impl Nodes { && let K::CInt { value: bv } = self[rhs].kind { // (a op #b) op #c => a op (#b op #c) - let new_rhs = 
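The `new_const`/`new_const_lit` helpers added above shorten the constant-folding arms of `peephole`. As a toy, tree-shaped analogue of the algebraic identities involved (the real pass rewrites sea-of-nodes `Nid`s in place rather than an AST, and handles far more operators; `Expr` and `fold` here are purely illustrative):

```rust
/// A tiny folder over an expression tree showing the shape of the rewrites:
/// constant op constant, `x + 0` / `x * 1` elimination, `a + a => a * 2`,
/// and `(a + #b) + #c => a + (#b + #c)` so constants keep bubbling right.
#[derive(Clone, Debug, PartialEq)]
enum Expr {
    Const(i64),
    Var(&'static str),
    Add(Box<Expr>, Box<Expr>),
    Mul(Box<Expr>, Box<Expr>),
}

fn fold(e: Expr) -> Expr {
    use Expr::*;
    match e {
        Add(a, b) => match (fold(*a), fold(*b)) {
            (Const(x), Const(y)) => Const(x + y),
            (x, Const(0)) | (Const(0), x) => x,
            (x, y) if x == y => fold(Mul(Box::new(x), Box::new(Const(2)))),
            (Add(a, b), Const(c)) if matches!(*b, Const(_)) => {
                let Const(bv) = *b else { unreachable!() };
                fold(Add(a, Box::new(Const(bv + c))))
            }
            (x, y) => Add(Box::new(x), Box::new(y)),
        },
        Mul(a, b) => match (fold(*a), fold(*b)) {
            (Const(x), Const(y)) => Const(x * y),
            (x, Const(1)) | (Const(1), x) => x,
            (_, Const(0)) | (Const(0), _) => Const(0),
            (x, y) => Mul(Box::new(x), Box::new(y)),
        },
        other => other,
    }
}

fn main() {
    use Expr::*;
    // ((n + 1) + 2) * 1 folds down to n + 3
    let e = Mul(
        Box::new(Add(Box::new(Add(Box::new(Var("n")), Box::new(Const(1)))), Box::new(Const(2)))),
        Box::new(Const(1)),
    );
    assert_eq!(fold(e), Add(Box::new(Var("n")), Box::new(Const(3))));
}
```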
self.new_node_nop( - ty, - K::CInt { value: op.apply_binop(av, bv, is_float) }, - [ctrl], - ); + let new_rhs = self.new_const(ty, op.apply_binop(av, bv, is_float)); return Some(self.new_node(ty, K::BinOp { op }, [ctrl, a, new_rhs])); } @@ -761,7 +840,7 @@ impl Nodes { && let K::CInt { value } = self[self[lhs].inputs[2]].kind { // a * #n + a => a * (#n + 1) - let new_rhs = self.new_node_nop(ty, K::CInt { value: value + 1 }, [ctrl]); + let new_rhs = self.new_const(ty, value + 1); return Some(self.new_node(ty, K::BinOp { op: T::Mul }, [ctrl, rhs, new_rhs])); } @@ -770,7 +849,7 @@ impl Nodes { && let K::CInt { value: a } = self[rhs].kind && let K::CInt { value: b } = self[self[lhs].inputs[2]].kind { - let new_rhs = self.new_node_nop(ty, K::CInt { value: b - a }, [ctrl]); + let new_rhs = self.new_const(ty, b - a); return Some(self.new_node(ty, K::BinOp { op: T::Add }, [ ctrl, self[lhs].inputs[1], @@ -791,17 +870,13 @@ impl Nodes { } } K::UnOp { op } => { - let &[ctrl, oper] = self[target].inputs.as_slice() else { unreachable!() }; + let &[_, oper] = self[target].inputs.as_slice() else { unreachable!() }; let ty = self[target].ty; let is_float = self[oper].ty.is_float(); if let K::CInt { value } = self[oper].kind { - return Some(self.new_node( - ty, - K::CInt { value: op.apply_unop(value, is_float) }, - [ctrl], - )); + return Some(self.new_const(ty, op.apply_unop(value, is_float))); } } K::If => { @@ -943,17 +1018,58 @@ impl Nodes { return Some(self.new_node(self[lhs].ty, Kind::Stre, vc)); } } + K::Stck => { + if let &[mut a, mut b] = self[target].outputs.as_slice() { + if self[a].kind == Kind::Load { + mem::swap(&mut a, &mut b); + } + + if matches!(self[a].kind, Kind::Call { .. }) + && self[a].inputs.last() == Some(&target) + && self[b].kind == Kind::Load + && let &[store] = self[b].outputs.as_slice() + && self[store].kind == Kind::Stre + { + let len = self[a].inputs.len(); + let stre = self[store].inputs[3]; + if stre != MEM { + self[a].inputs.push(stre); + self[a].inputs.swap(len - 1, len); + self[stre].outputs.push(a); + } + return Some(self[store].inputs[2]); + } + } + } K::Stre => { let &[_, value, region, store, ..] = self[target].inputs.as_slice() else { unreachable!() }; + if self[value].kind == Kind::Load && self[value].inputs[1] == region { + return Some(store); + } + + let mut cursor = target; + while self[cursor].kind == Kind::Stre + && self[cursor].inputs[1] != VOID + && let &[next_store] = self[cursor].outputs.as_slice() + { + if self[next_store].inputs[2] == region + && self[next_store].ty == self[target].ty + { + return Some(store); + } + cursor = next_store; + } + 'eliminate: { if self[target].outputs.is_empty() { break 'eliminate; } - if self[value].kind != Kind::Load || self[value].outputs.as_slice() != [target] + if self[value].kind != Kind::Load + || self[value].outputs.iter().any(|&n| self[n].kind != Kind::Stre) { for &ele in self[value].outputs.clone().iter().filter(|&&n| n != target) { self[ele].peep_triggers.push(target); @@ -1066,6 +1182,10 @@ impl Nodes { if self[store].kind == Kind::Stre && self[store].inputs[2] == region && self[store].ty == self[target].ty + && self[store] + .outputs + .iter() + .all(|&n| !matches!(self[n].kind, Kind::Call { .. })) { return Some(self[store].inputs[1]); } @@ -1076,6 +1196,10 @@ impl Nodes { while cursor != MEM && self[cursor].kind == Kind::Stre && self[cursor].inputs[1] != VOID + && self[cursor] + .outputs + .iter() + .all(|&n| !matches!(self[n].kind, Kind::Call { .. 
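The additional `Kind::Call` checks above make store-to-load forwarding stop at calls that may have rewritten the region. A straight-line model of that rule follows; the real pass consults per-call clobber sets and alias classes, while this sketch (with illustrative `Op`/`forward_load` names) conservatively treats every call as a clobber:

```rust
/// Walk back through prior stores and reuse the stored value for a load from
/// `region`, unless a potentially clobbering call sits in between.
#[derive(Clone, Copy)]
enum Op {
    Store { region: u32, value: i64 },
    Call,
}

fn forward_load(ops: &[Op], region: u32) -> Option<i64> {
    for op in ops.iter().rev() {
        match *op {
            Op::Store { region: r, value } if r == region => return Some(value),
            Op::Call => return None, // may have rewritten memory, give up
            Op::Store { .. } => {}   // unrelated region, keep walking
        }
    }
    None
}

fn main() {
    use Op::*;
    let ops = [Store { region: 0, value: 7 }, Store { region: 1, value: 9 }];
    assert_eq!(forward_load(&ops, 0), Some(7)); // forwarded past the unrelated store
    let ops = [Store { region: 0, value: 7 }, Call];
    assert_eq!(forward_load(&ops, 0), None); // the call blocks forwarding
}
```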
})) { if self[cursor].inputs[2] == region && self[cursor].ty == self[target].ty { return Some(self[cursor].inputs[1]); @@ -1176,6 +1300,7 @@ impl Nodes { Kind::If => write!(out, " if: "), Kind::Region | Kind::Loop => writeln!(out, " goto: {node}"), Kind::Return => write!(out, " ret: "), + Kind::Die => write!(out, " die: "), Kind::CInt { value } => write!(out, "cint: #{value:<4}"), Kind::Phi => write!(out, " phi: "), Kind::Arg => write!( @@ -1262,7 +1387,7 @@ impl Nodes { } node = cfg_index; } - Kind::Return => { + Kind::Return | Kind::Die => { node = self[node].outputs[0]; } Kind::Then | Kind::Else | Kind::Entry => { @@ -1354,14 +1479,17 @@ impl Nodes { self.load_loop_var(index, lvar, loops); if !self[lvar.value()].is_lazy_phi(node) { - let inps = [node, lvar.value(), VOID]; + let lvalue = lvar.value(); + let inps = [node, lvalue, VOID]; lvar.set_value(self.new_node_nop(lvar.ty, Kind::Phi, inps), self); + self[lvar.value()].aclass = self[lvalue].aclass; + self[lvar.value()].mem = self[lvalue].mem; } var.set_value(lvar.value(), self); } - fn load_loop_aclass(&mut self, index: usize, var: &mut AClass, loops: &mut [Loop]) { - if var.last_store.get() != VOID { + fn load_loop_aclass(&mut self, index: usize, aclass: &mut AClass, loops: &mut [Loop]) { + if aclass.last_store.get() != VOID { return; } @@ -1375,7 +1503,7 @@ impl Nodes { let inps = [node, lvar.last_store.get(), VOID]; lvar.last_store.set(self.new_node_nop(ty::Id::VOID, Kind::Phi, inps), self); } - var.last_store.set(lvar.last_store.get(), self); + aclass.last_store.set(lvar.last_store.get(), self); } fn check_dominance(&mut self, nd: Nid, min: Nid, check_outputs: bool) { @@ -1457,6 +1585,8 @@ pub enum Kind { // [ctrl, ?value] Return, // [ctrl] + Die, + // [ctrl] CInt { value: i64, }, @@ -1499,6 +1629,7 @@ impl Kind { Self::Start | Self::End | Self::Return + | Self::Die | Self::Entry | Self::Then | Self::Else @@ -1510,7 +1641,7 @@ impl Kind { } fn ends_basic_block(&self) -> bool { - matches!(self, Self::Return | Self::If | Self::End) + matches!(self, Self::Return | Self::If | Self::End | Self::Die) } fn is_peeped(&self) -> bool { @@ -1538,13 +1669,16 @@ pub struct Node { inputs: Vc, outputs: Vc, peep_triggers: Vc, + clobbers: BitSet, ty: ty::Id, offset: Offset, ralloc_backref: RallocBRef, depth: IDomDepth, lock_rc: LockRc, loop_depth: LoopDepth, - aclass: usize, + aclass: AClassId, + mem: Nid, + antidep: Nid, } impl Node { @@ -1562,7 +1696,7 @@ impl Node { fn is_not_gvnd(&self) -> bool { (self.kind == Kind::Phi && self.inputs[2] == 0) - || matches!(self.kind, Kind::Arg | Kind::Stck) + || matches!(self.kind, Kind::Arg | Kind::Stck | Kind::Stre) || self.kind.is_cfg() } @@ -1719,24 +1853,21 @@ impl Variable { #[derive(Default, Clone)] pub struct AClass { last_store: StrongRef, - loads: Vec, + clobber: StrongRef, } impl AClass { fn dup(&self, nodes: &mut Nodes) -> Self { - Self { - last_store: self.last_store.dup(nodes), - loads: self.loads.iter().map(|v| v.dup(nodes)).collect(), - } + Self { last_store: self.last_store.dup(nodes), clobber: self.clobber.dup(nodes) } } - fn remove(mut self, nodes: &mut Nodes) { + fn remove(self, nodes: &mut Nodes) { self.last_store.remove(nodes); - self.loads.drain(..).for_each(|n| _ = n.remove(nodes)); + self.clobber.remove(nodes); } fn new(nodes: &mut Nodes) -> Self { - Self { last_store: StrongRef::new(MEM, nodes), loads: Default::default() } + Self { last_store: StrongRef::new(MEM, nodes), clobber: StrongRef::new(VOID, nodes) } } } @@ -1766,6 +1897,7 @@ struct ItemCtx { ret: Option, task_base: usize, 
inline_var_base: usize, + inline_aclass_base: usize, inline_depth: u16, inline_ret: Option<(Value, StrongRef, Scope)>, nodes: Nodes, @@ -2070,7 +2202,8 @@ impl<'a> Codegen<'a> { fn new_stack(&mut self, ty: ty::Id) -> Nid { let stck = self.ci.nodes.new_node_nop(ty, Kind::Stck, [VOID, MEM]); - self.ci.nodes[stck].aclass = self.ci.scope.aclasses.len(); + self.ci.nodes[stck].aclass = self.ci.scope.aclasses.len() as _; + self.ci.nodes[stck].mem = stck; self.ci.scope.aclasses.push(AClass::new(&mut self.ci.nodes)); stck } @@ -2089,7 +2222,6 @@ impl<'a> Codegen<'a> { if value_index != 0 { // simply switch the class to the default one let aclass = &mut self.ci.scope.aclasses[value_index]; - let loads = mem::take(&mut aclass.loads); self.ci.nodes.load_loop_aclass(value_index, aclass, &mut self.ci.loops); let last_store = aclass.last_store.get(); let mut cursor = last_store; @@ -2106,23 +2238,13 @@ impl<'a> Codegen<'a> { } self.ci.scope.aclasses[0].last_store.set(last_store, &mut self.ci.nodes); } - self.ci.scope.aclasses[0].loads.extend(loads); self.ci.nodes[value_region].aclass = 0; } let (index, _) = self.ci.nodes.aclass_index(region); let aclass = &mut self.ci.scope.aclasses[index]; self.ci.nodes.load_loop_aclass(index, aclass, &mut self.ci.loops); - let mut vc = Vc::from([VOID, value, region, aclass.last_store.get()]); - for load in aclass.loads.drain(..) { - if load.get() == value { - load.soft_remove(&mut self.ci.nodes); - continue; - } - if let Some(load) = load.remove(&mut self.ci.nodes) { - vc.push(load); - } - } + let vc = Vc::from([aclass.clobber.get(), value, region, aclass.last_store.get()]); mem::take(&mut aclass.last_store).soft_remove(&mut self.ci.nodes); let store = self.ci.nodes.new_node(ty, Kind::Stre, vc); aclass.last_store = StrongRef::new(store, &mut self.ci.nodes); @@ -2146,10 +2268,8 @@ impl<'a> Codegen<'a> { let (index, _) = self.ci.nodes.aclass_index(region); let aclass = &mut self.ci.scope.aclasses[index]; self.ci.nodes.load_loop_aclass(index, aclass, &mut self.ci.loops); - let vc = [VOID, region, aclass.last_store.get()]; - let load = self.ci.nodes.new_node(ty, Kind::Load, vc); - aclass.loads.push(StrongRef::new(load, &mut self.ci.nodes)); - load + let vc = [aclass.clobber.get(), region, aclass.last_store.get()]; + self.ci.nodes.new_node(ty, Kind::Load, vc) } pub fn generate(&mut self, entry: FileId) { @@ -2194,21 +2314,31 @@ impl<'a> Codegen<'a> { // ordered by complexity of the expression match *expr { Expr::Null { pos } => { - inference!(ty, ctx, self, pos, "null pointer", "@as(^, null)"); + inference!(oty, ctx, self, pos, "null pointer", "@as(^, null)"); - if !ty.is_pointer() { + let Some(ty) = self.tys.inner_of(oty) else { self.report( pos, fa!( "'null' expression was inferred to be '{}', - which is not a pointer (and that is not supported yet)", - self.ty_display(ty) + which is not optional", + self.ty_display(oty) ), ); return Value::NEVER; - } + }; - Some(self.ci.nodes.new_node_lit(ty, Kind::CInt { value: 0 }, [VOID])) + match oty.loc(self.tys) { + Loc::Reg => Some(self.ci.nodes.new_const_lit(oty, 0)), + Loc::Stack => { + let OptLayout { flag_ty, flag_offset, .. 
} = self.tys.opt_layout(ty); + let stack = self.new_stack(oty); + let offset = self.offset(stack, flag_offset); + let value = self.ci.nodes.new_const(flag_ty, 0); + self.store_mem(offset, flag_ty, value); + Some(Value::ptr(stack).ty(oty)) + } + } } Expr::Idk { pos } => { inference!(ty, ctx, self, pos, "value", "@as(, idk)"); @@ -2227,21 +2357,25 @@ impl<'a> Codegen<'a> { Value::NEVER } } - Expr::Bool { value, .. } => Some(self.ci.nodes.new_node_lit( - ty::Id::BOOL, - Kind::CInt { value: value as i64 }, - [VOID], - )), - Expr::Number { value, .. } => Some(self.ci.nodes.new_node_lit( - ctx.ty.filter(|ty| ty.is_integer()).unwrap_or(ty::Id::DEFAULT_INT), - Kind::CInt { value }, - [VOID], - )), - Expr::Float { value, .. } => Some(self.ci.nodes.new_node_lit( - ctx.ty.filter(|ty| ty.is_float()).unwrap_or(ty::Id::F32), - Kind::CInt { value: value as _ }, - [VOID], - )), + Expr::Bool { value, .. } => Some(self.ci.nodes.new_const_lit(ty::Id::BOOL, value)), + Expr::Number { value, .. } => Some( + self.ci.nodes.new_const_lit( + ctx.ty + .map(|ty| self.tys.inner_of(ty).unwrap_or(ty)) + .filter(|ty| ty.is_integer()) + .unwrap_or(ty::Id::DEFAULT_INT), + value, + ), + ), + Expr::Float { value, .. } => Some( + self.ci.nodes.new_const_lit( + ctx.ty + .map(|ty| self.tys.inner_of(ty).unwrap_or(ty)) + .filter(|ty| ty.is_float()) + .unwrap_or(ty::Id::F32), + value as i64, + ), + ), Expr::Ident { id, .. } if let Some(index) = self.ci.scope.vars.iter().rposition(|v| v.id == id) => { @@ -2257,7 +2391,7 @@ impl<'a> Codegen<'a> { ty::Kind::Global(global) => { let gl = &self.tys.ins.globals[global as usize]; let value = self.ci.nodes.new_node(gl.ty, Kind::Global { global }, [VOID]); - self.ci.nodes[value].aclass = GLOBAL_ACLASS; + self.ci.nodes[value].aclass = GLOBAL_ACLASS as _; Some(Value::ptr(value).ty(gl.ty)) } _ => Some(Value::new(Nid::MAX).ty(decl)), @@ -2290,7 +2424,7 @@ impl<'a> Codegen<'a> { } }; let global = self.ci.nodes.new_node(ty, Kind::Global { global }, [VOID]); - self.ci.nodes[global].aclass = GLOBAL_ACLASS; + self.ci.nodes[global].aclass = GLOBAL_ACLASS as _; Some(Value::new(global).ty(ty)) } Expr::Return { pos, val } => { @@ -2332,12 +2466,21 @@ impl<'a> Codegen<'a> { self.ci.nodes.lock(pv.id); self.ci.ctrl.set(NEVER, &mut self.ci.nodes); } else { + for (i, aclass) in self.ci.scope.aclasses[..2].iter_mut().enumerate() { + self.ci.nodes.load_loop_aclass(i, aclass, &mut self.ci.loops); + } + self.ci.nodes.lock(value.id); let mut scope = self.ci.scope.dup(&mut self.ci.nodes); scope .vars .drain(self.ci.inline_var_base..) .for_each(|v| v.remove(&mut self.ci.nodes)); + scope + .aclasses + .drain(self.ci.inline_aclass_base..) + .for_each(|v| v.remove(&mut self.ci.nodes)); + let repl = StrongRef::new(NEVER, &mut self.ci.nodes); self.ci.inline_ret = Some((value, mem::replace(&mut self.ci.ctrl, repl), scope)); @@ -2345,9 +2488,20 @@ impl<'a> Codegen<'a> { None } + Expr::Die { .. 
} => { + self.ci.ctrl.set( + self.ci.nodes.new_node_nop(ty::Id::VOID, Kind::Die, [self.ci.ctrl.get()]), + &mut self.ci.nodes, + ); + + self.ci.nodes[NEVER].inputs.push(self.ci.ctrl.get()); + self.ci.nodes[self.ci.ctrl.get()].outputs.push(NEVER); + None + } Expr::Field { target, name, pos } => { let mut vtarget = self.raw_expr(target)?; self.strip_var(&mut vtarget); + self.unwrap_opt(pos, &mut vtarget); let tty = vtarget.ty; if let ty::Kind::Module(m) = tty.expand() { @@ -2360,7 +2514,7 @@ impl<'a> Codegen<'a> { let gl = &self.tys.ins.globals[global as usize]; let value = self.ci.nodes.new_node(gl.ty, Kind::Global { global }, [VOID]); - self.ci.nodes[value].aclass = GLOBAL_ACLASS; + self.ci.nodes[value].aclass = GLOBAL_ACLASS as _; Some(Value::ptr(value).ty(gl.ty)) } v => Some(Value::new(Nid::MAX).ty(v.compress())), @@ -2419,18 +2573,20 @@ impl<'a> Codegen<'a> { } Expr::UnOp { op: TokenKind::Mul, val, pos } => { let ctx = Ctx { ty: ctx.ty.map(|ty| self.tys.make_ptr(ty)) }; - let mut val = self.expr_ctx(val, ctx)?; + let mut vl = self.expr_ctx(val, ctx)?; - let Some(base) = self.tys.base_of(val.ty) else { + self.unwrap_opt(val.pos(), &mut vl); + + let Some(base) = self.tys.base_of(vl.ty) else { self.report( pos, - fa!("the '{}' can not be dereferneced", self.ty_display(val.ty)), + fa!("the '{}' can not be dereferneced", self.ty_display(vl.ty)), ); return Value::NEVER; }; - val.ptr = true; - val.ty = base; - Some(val) + vl.ptr = true; + vl.ty = base; + Some(vl) } Expr::UnOp { pos, op: op @ TokenKind::Sub, val } => { let val = @@ -2438,11 +2594,7 @@ impl<'a> Codegen<'a> { if val.ty.is_integer() { Some(self.ci.nodes.new_node_lit(val.ty, Kind::UnOp { op }, [VOID, val.id])) } else if val.ty.is_float() { - let value = self.ci.nodes.new_node_nop( - val.ty, - Kind::CInt { value: (-1f64).to_bits() as _ }, - [VOID], - ); + let value = self.ci.nodes.new_const(val.ty, (-1f64).to_bits() as i64); Some(self.ci.nodes.new_node_lit(val.ty, Kind::BinOp { op: TokenKind::Mul }, [ VOID, val.id, value, ])) @@ -2487,11 +2639,31 @@ impl<'a> Codegen<'a> { Some(Value::VOID) } + Expr::BinOp { left: &Expr::Null { pos }, .. } => { + self.report(pos, "'null' must always be no the right side of an expression"); + Value::NEVER + } + Expr::BinOp { + left, + op: op @ (TokenKind::Eq | TokenKind::Ne), + right: Expr::Null { .. }, + .. 
+ } => { + let mut cmped = self.raw_expr(left)?; + self.strip_var(&mut cmped); + + let Some(ty) = self.tys.inner_of(cmped.ty) else { + return Some(self.ci.nodes.new_const_lit(ty::Id::BOOL, 1)); + }; + + Some(Value::new(self.gen_null_check(cmped, ty, op)).ty(ty::BOOL)) + } Expr::BinOp { left, pos, op, right } if !matches!(op, TokenKind::Assign | TokenKind::Decl) => { let mut lhs = self.raw_expr_ctx(left, ctx)?; self.strip_var(&mut lhs); + self.unwrap_opt(left.pos(), &mut lhs); match lhs.ty.expand() { _ if lhs.ty.is_pointer() @@ -2505,9 +2677,14 @@ impl<'a> Codegen<'a> { self.ci.nodes.unlock(lhs.id); let mut rhs = rhs?; self.strip_var(&mut rhs); - let ty = self.binop_ty(pos, &mut lhs, &mut rhs, op); + self.unwrap_opt(right.pos(), &mut rhs); + let (ty, aclass, mem) = self.binop_ty(pos, &mut lhs, &mut rhs, op); let inps = [VOID, lhs.id, rhs.id]; - Some(self.ci.nodes.new_node_lit(ty.bin_ret(op), Kind::BinOp { op }, inps)) + let bop = + self.ci.nodes.new_node_lit(ty.bin_ret(op), Kind::BinOp { op }, inps); + self.ci.nodes[bop.id].aclass = aclass as _; + self.ci.nodes[bop.id].mem = mem; + Some(bop) } ty::Kind::Struct(s) if op.is_homogenous() => { self.ci.nodes.lock(lhs.id); @@ -2552,14 +2729,17 @@ impl<'a> Codegen<'a> { let elem = self.tys.ins.slices[s as usize].elem; let mut idx = self.expr_ctx(index, Ctx::default().with_ty(ty::Id::DEFAULT_INT))?; self.assert_ty(index.pos(), &mut idx, ty::Id::DEFAULT_INT, "subscript"); - let value = self.tys.size_of(elem) as i64; - let size = self.ci.nodes.new_node_nop(ty::Id::INT, Kind::CInt { value }, [VOID]); + let size = self.ci.nodes.new_const(ty::Id::INT, self.tys.size_of(elem)); let inps = [VOID, idx.id, size]; let offset = self.ci.nodes.new_node(ty::Id::INT, Kind::BinOp { op: TokenKind::Mul }, inps); + let (aclass, mem) = self.ci.nodes.aclass_index(bs.id); let inps = [VOID, bs.id, offset]; let ptr = self.ci.nodes.new_node(ty::Id::INT, Kind::BinOp { op: TokenKind::Add }, inps); + self.ci.nodes[ptr].aclass = aclass as _; + self.ci.nodes[ptr].mem = mem; + Some(Value::ptr(ptr).ty(elem)) } Expr::Embed { id, .. } => { @@ -2569,19 +2749,27 @@ impl<'a> Codegen<'a> { } Expr::Directive { name: "sizeof", args: [ty], .. } => { let ty = self.ty(ty); - Some(self.ci.nodes.new_node_lit( - ctx.ty.filter(|ty| ty.is_integer()).unwrap_or(ty::Id::DEFAULT_INT), - Kind::CInt { value: self.tys.size_of(ty) as _ }, - [VOID], - )) + Some( + self.ci.nodes.new_const_lit( + ctx.ty + .map(|ty| self.tys.inner_of(ty).unwrap_or(ty)) + .filter(|ty| ty.is_integer()) + .unwrap_or(ty::Id::DEFAULT_INT), + self.tys.size_of(ty), + ), + ) } Expr::Directive { name: "alignof", args: [ty], .. } => { let ty = self.ty(ty); - Some(self.ci.nodes.new_node_lit( - ctx.ty.filter(|ty| ty.is_integer()).unwrap_or(ty::Id::DEFAULT_INT), - Kind::CInt { value: self.tys.align_of(ty) as _ }, - [VOID], - )) + Some( + self.ci.nodes.new_const_lit( + ctx.ty + .map(|ty| self.tys.inner_of(ty).unwrap_or(ty)) + .filter(|ty| ty.is_integer()) + .unwrap_or(ty::Id::DEFAULT_INT), + self.tys.align_of(ty), + ), + ) } Expr::Directive { name: "bitcast", args: [val], pos } => { let mut val = self.raw_expr(val)?; @@ -2616,6 +2804,25 @@ impl<'a> Codegen<'a> { val.ty = ty; Some(val) } + Expr::Directive { name: "unwrap", args: [expr], .. 
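The `gen_null_check` helper introduced in this hunk lowers `x == null` differently per representation: register-located optionals compare the whole word against zero (the niche), stack-located ones load the flag field and compare that. A small model of the two cases, assuming the layout sketched earlier; `OptValue` and `is_null` are illustrative names:

```rust
/// The two runtime shapes a `?T` value can take in this scheme.
enum OptValue<'a> {
    /// Fits in a register; a zero word means null.
    Reg(u64),
    /// Spilled to memory; `flag_offset` locates the boolean flag inside the blob.
    Stack { bytes: &'a [u8], flag_offset: usize },
}

fn is_null(v: &OptValue) -> bool {
    match *v {
        OptValue::Reg(word) => word == 0,
        OptValue::Stack { bytes, flag_offset } => bytes[flag_offset] == 0,
    }
}

fn main() {
    assert!(is_null(&OptValue::Reg(0))); // e.g. a ?^uint holding null
    assert!(!is_null(&OptValue::Reg(0x1000))); // a non-null pointer
    // e.g. a flag-carrying optional spilled as [flag, padding, payload...]
    let some = [1u8, 0, 42, 0];
    assert!(!is_null(&OptValue::Stack { bytes: &some, flag_offset: 0 }));
}
```

The `==`/`!=` choice is carried through as the `op` argument in the real helper, so `x != null` reuses the same path with the comparison inverted.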
} => { + let mut val = self.raw_expr(expr)?; + self.strip_var(&mut val); + + let Some(ty) = self.tys.inner_of(val.ty) else { + self.report( + expr.pos(), + fa!( + "only optional types can be unwrapped ('{}' is not optional)", + self.ty_display(val.ty) + ), + ); + return Value::NEVER; + }; + + self.unwrap_opt_unchecked(ty, val.ty, &mut val); + val.ty = ty; + Some(val) + } Expr::Directive { name: "intcast", args: [expr], pos } => { let mut val = self.expr(expr)?; @@ -2735,7 +2942,7 @@ impl<'a> Codegen<'a> { let mut inps = Vc::from([NEVER]); let arg_base = self.tys.tmp.args.len(); - let mut clobbered_aliases = vec![GLOBAL_ACLASS]; + let mut clobbered_aliases = BitSet::default(); for arg in args { let value = self.expr(arg)?; self.add_clobbers(value, &mut clobbered_aliases); @@ -2751,7 +2958,7 @@ impl<'a> Codegen<'a> { self.ci.nodes.unlock(n); } - self.append_clobbers(&mut inps, &clobbered_aliases); + self.append_clobbers(&mut inps, &mut clobbered_aliases); let alt_value = match ty.loc(self.tys) { Loc::Reg => None, @@ -2768,7 +2975,7 @@ impl<'a> Codegen<'a> { &mut self.ci.nodes, ); - self.add_clobber_stores(&clobbered_aliases); + self.add_clobber_stores(clobbered_aliases); alt_value.or(Some(Value::new(self.ci.ctrl.get()).ty(ty))) } @@ -2808,7 +3015,7 @@ impl<'a> Codegen<'a> { let mut tys = sig.args.args(); let mut cargs = cargs.iter(); let mut args = args.iter(); - let mut clobbered_aliases = vec![GLOBAL_ACLASS]; + let mut clobbered_aliases = BitSet::default(); while let Some(ty) = tys.next(self.tys) { let carg = cargs.next().unwrap(); let Some(arg) = args.next() else { break }; @@ -2827,7 +3034,7 @@ impl<'a> Codegen<'a> { self.ci.nodes.unlock(n); } - self.append_clobbers(&mut inps, &clobbered_aliases); + self.append_clobbers(&mut inps, &mut clobbered_aliases); let alt_value = match sig.ret.loc(self.tys) { Loc::Reg => None, @@ -2844,7 +3051,7 @@ impl<'a> Codegen<'a> { &mut self.ci.nodes, ); - self.add_clobber_stores(&clobbered_aliases); + self.add_clobber_stores(clobbered_aliases); alt_value.or(Some(Value::new(self.ci.ctrl.get()).ty(sig.ret))) } @@ -2889,6 +3096,7 @@ impl<'a> Codegen<'a> { let mut args = args.iter(); let mut cargs = cargs.iter(); let var_base = self.ci.scope.vars.len(); + let aclass_base = self.ci.scope.aclasses.len(); while let Some(aty) = tys.next(self.tys) { let carg = cargs.next().unwrap(); let Some(arg) = args.next() else { break }; @@ -2925,40 +3133,55 @@ impl<'a> Codegen<'a> { } } - let prev_var_base = - mem::replace(&mut self.ci.inline_var_base, self.ci.scope.vars.len()); + let prev_var_base = mem::replace(&mut self.ci.inline_var_base, var_base); + let prev_aclass_base = mem::replace(&mut self.ci.inline_aclass_base, aclass_base); let prev_ret = self.ci.ret.replace(sig.ret); let prev_inline_ret = self.ci.inline_ret.take(); let prev_file = mem::replace(&mut self.ci.file, file); self.ci.inline_depth += 1; - if self.expr(body).is_some() && sig.ret != ty::Id::VOID { - self.report( - body.pos(), - "expected all paths in the fucntion to return \ + if self.expr(body).is_some() { + if sig.ret == ty::Id::VOID { + self.expr(&Expr::Return { pos: body.pos(), val: None }); + } else { + self.report( + body.pos(), + "expected all paths in the fucntion to return \ or the return type to be 'void'", - ); + ); + } } self.ci.ret = prev_ret; self.ci.file = prev_file; self.ci.inline_depth -= 1; self.ci.inline_var_base = prev_var_base; + self.ci.inline_aclass_base = prev_aclass_base; for var in self.ci.scope.vars.drain(var_base..) 
{ var.remove(&mut self.ci.nodes); } + for var in self.ci.scope.aclasses.drain(aclass_base..) { + var.remove(&mut self.ci.nodes); + } mem::replace(&mut self.ci.inline_ret, prev_inline_ret).map(|(v, ctrl, scope)| { self.ci.nodes.unlock(v.id); self.ci.scope.clear(&mut self.ci.nodes); self.ci.scope = scope; self.ci.scope.vars.drain(var_base..).for_each(|v| v.remove(&mut self.ci.nodes)); + self.ci + .scope + .aclasses + .drain(aclass_base..) + .for_each(|v| v.remove(&mut self.ci.nodes)); mem::replace(&mut self.ci.ctrl, ctrl).remove(&mut self.ci.nodes); v }) } Expr::Tupl { pos, ty, fields, .. } => { - ctx.ty = ty.map(|ty| self.ty(ty)).or(ctx.ty); + ctx.ty = ty + .map(|ty| self.ty(ty)) + .or(ctx.ty.map(|ty| self.tys.inner_of(ty).unwrap_or(ty))); inference!(sty, ctx, self, pos, "struct or slice", ".(...)"); match sty.expand() { @@ -3045,11 +3268,13 @@ impl<'a> Codegen<'a> { } } Expr::Struct { .. } => { - let value = self.ty(expr).repr() as i64; - Some(self.ci.nodes.new_node_lit(ty::Id::TYPE, Kind::CInt { value }, [VOID])) + let value = self.ty(expr).repr(); + Some(self.ci.nodes.new_const_lit(ty::Id::TYPE, value)) } Expr::Ctor { pos, ty, fields, .. } => { - ctx.ty = ty.map(|ty| self.ty(ty)).or(ctx.ty); + ctx.ty = ty + .map(|ty| self.ty(ty)) + .or(ctx.ty.map(|ty| self.tys.inner_of(ty).unwrap_or(ty))); inference!(sty, ctx, self, pos, "struct", ".{...}"); let ty::Kind::Struct(s) = sty.expand() else { @@ -3151,11 +3376,14 @@ impl<'a> Codegen<'a> { scope: self.ci.scope.dup(&mut self.ci.nodes), }); - for var in self.ci.scope.vars.iter_mut() { + for var in self.ci.scope.vars.iter_mut().skip(self.ci.inline_var_base) { var.set_value(VOID, &mut self.ci.nodes); } - for aclass in self.ci.scope.aclasses.iter_mut() { + for aclass in self.ci.scope.aclasses[..2].iter_mut() { + aclass.last_store.set(VOID, &mut self.ci.nodes); + } + for aclass in self.ci.scope.aclasses.iter_mut().skip(self.ci.inline_aclass_base) { aclass.last_store.set(VOID, &mut self.ci.nodes); } @@ -3237,8 +3465,16 @@ impl<'a> Codegen<'a> { scope_class.last_store.set(prev, &mut self.ci.nodes); } } + + if loop_class.last_store.get() == 0 { + loop_class + .last_store + .set(scope_class.last_store.get(), &mut self.ci.nodes); + } } + debug_assert!(self.ci.scope.aclasses.iter().all(|a| a.last_store.get() != 0)); + scope.clear(&mut self.ci.nodes); self.ci.ctrl.set(NEVER, &mut self.ci.nodes); @@ -3414,47 +3650,31 @@ impl<'a> Codegen<'a> { } } - fn add_clobbers(&mut self, value: Value, clobbered_aliases: &mut Vec) { + fn add_clobbers(&mut self, value: Value, clobbered_aliases: &mut BitSet) { if let Some(base) = self.tys.base_of(value.ty) { - clobbered_aliases.push(self.ci.nodes.aclass_index(value.id).0); + clobbered_aliases.set(self.ci.nodes.aclass_index(value.id).0 as _); if base.has_pointers(self.tys) { - clobbered_aliases.push(0); + clobbered_aliases.set(DEFAULT_ACLASS as _); } } else if value.ty.has_pointers(self.tys) { - clobbered_aliases.push(0); + clobbered_aliases.set(DEFAULT_ACLASS as _); } } - fn append_clobbers(&mut self, inps: &mut Vc, clobbered_aliases: &[usize]) { - for &clobbered in clobbered_aliases.iter() { + fn append_clobbers(&mut self, inps: &mut Vc, clobbered_aliases: &mut BitSet) { + clobbered_aliases.set(GLOBAL_ACLASS as _); + for clobbered in clobbered_aliases.iter() { let aclass = &mut self.ci.scope.aclasses[clobbered]; self.ci.nodes.load_loop_aclass(clobbered, aclass, &mut self.ci.loops); inps.push(aclass.last_store.get()); - aclass.loads.retain_mut(|load| { - if inps.contains(&load.get()) { - return true; - } - - if let 
Some(load) = mem::take(load).remove(&mut self.ci.nodes) { - inps.push(load); - } - - false - }); } } - fn add_clobber_stores(&mut self, clobbered_aliases: &[usize]) { - for &clobbered in clobbered_aliases.iter() { - if clobbered == DEFAULT_ACLASS { - continue; - } - let aclass = self.ci.scope.aclasses[clobbered].last_store.get(); - if aclass == MEM { - continue; - } - self.store_mem(self.ci.nodes[aclass].inputs[2], ty::Id::VOID, VOID); + fn add_clobber_stores(&mut self, clobbered_aliases: BitSet) { + for clobbered in clobbered_aliases.iter() { + self.ci.scope.aclasses[clobbered].clobber.set(self.ci.ctrl.get(), &mut self.ci.nodes); } + self.ci.nodes[self.ci.ctrl.get()].clobbers = clobbered_aliases; } fn struct_op( @@ -3624,9 +3844,13 @@ impl<'a> Codegen<'a> { return val; } - let off = self.ci.nodes.new_node_nop(ty::Id::INT, Kind::CInt { value: off as i64 }, [VOID]); + let off = self.ci.nodes.new_const(ty::Id::INT, off); + let (aclass, mem) = self.ci.nodes.aclass_index(val); let inps = [VOID, val, off]; - self.ci.nodes.new_node(ty::Id::INT, Kind::BinOp { op: TokenKind::Add }, inps) + let seted = self.ci.nodes.new_node(ty::Id::INT, Kind::BinOp { op: TokenKind::Add }, inps); + self.ci.nodes[seted].aclass = aclass as _; + self.ci.nodes[seted].mem = mem; + seted } fn strip_var(&mut self, n: &mut Value) { @@ -3733,7 +3957,7 @@ impl<'a> Codegen<'a> { &mut self.ci.nodes, )); if ty.loc(self.tys) == Loc::Stack { - self.ci.nodes[value].aclass = self.ci.scope.aclasses.len(); + self.ci.nodes[value].aclass = self.ci.scope.aclasses.len() as _; self.ci.scope.aclasses.push(AClass::new(&mut self.ci.nodes)); } } @@ -3782,39 +4006,150 @@ impl<'a> Codegen<'a> { #[must_use] #[track_caller] - fn binop_ty(&mut self, pos: Pos, lhs: &mut Value, rhs: &mut Value, op: TokenKind) -> ty::Id { + fn binop_ty( + &mut self, + pos: Pos, + lhs: &mut Value, + rhs: &mut Value, + op: TokenKind, + ) -> (ty::Id, usize, Nid) { if let Some(upcasted) = lhs.ty.try_upcast(rhs.ty) { let to_correct = if lhs.ty != upcasted { - Some(lhs) + Some((lhs, rhs)) } else if rhs.ty != upcasted { - Some(rhs) + Some((rhs, lhs)) } else { None }; - if let Some(oper) = to_correct { + if let Some((oper, other)) = to_correct { if self.tys.size_of(upcasted) > self.tys.size_of(oper.ty) { self.extend(oper, upcasted); } if matches!(op, TokenKind::Add | TokenKind::Sub) && let Some(elem) = self.tys.base_of(upcasted) { - let value = self.tys.size_of(elem) as i64; - let cnst = - self.ci.nodes.new_node_nop(ty::Id::INT, Kind::CInt { value }, [VOID]); + let cnst = self.ci.nodes.new_const(ty::Id::INT, self.tys.size_of(elem)); oper.id = self.ci.nodes.new_node(upcasted, Kind::BinOp { op: TokenKind::Mul }, [ VOID, oper.id, cnst, ]); + return ( + upcasted, + self.ci.nodes[other.id].aclass as _, + self.ci.nodes[other.id].mem, + ); } } - upcasted + (upcasted, DEFAULT_ACLASS, VOID) } else { let ty = self.ty_display(lhs.ty); let expected = self.ty_display(rhs.ty); self.report(pos, fa!("'{ty} {op} {expected}' is not supported")); - ty::Id::NEVER + (ty::Id::NEVER, DEFAULT_ACLASS, VOID) + } + } + + fn wrap_in_opt(&mut self, val: &mut Value) { + debug_assert!(!val.var); + + let oty = self.tys.make_opt(val.ty); + + if let Some((uninit, ..)) = self.tys.nieche_of(val.ty) { + self.strip_ptr(val); + val.ty = oty; + assert!(!uninit, "TODO"); + return; + } + + let OptLayout { flag_ty, flag_offset, payload_offset } = self.tys.opt_layout(val.ty); + + match oty.loc(self.tys) { + Loc::Reg => { + self.strip_ptr(val); + // registers have inverted offsets so that accessing the inner type is a 
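With `aclass_index` now reading `(node.aclass, node.mem)` straight off the node, every place that derives a pointer (`offset`, subscript lowering, and the pointer-plus-integer case in `binop_ty`) copies the alias class of its base onto the new node. A minimal arena sketch of that propagation; `NodeInfo` and `new_offset` are illustrative, not the compiler's node representation:

```rust
/// Per-node aliasing metadata: which alias class the value points into and
/// which node introduced that memory (e.g. the stack slot).
#[derive(Clone, Copy)]
struct NodeInfo {
    aclass: u16,
    mem: u32,
}

struct Nodes {
    info: Vec<NodeInfo>,
}

impl Nodes {
    /// A fresh stack slot starts its own alias class and is its own `mem`.
    fn new_stack(&mut self, class: u16) -> u32 {
        let id = self.info.len() as u32;
        self.info.push(NodeInfo { aclass: class, mem: id });
        id
    }

    /// `base + offset` points into the same memory as `base`, so the class is inherited.
    fn new_offset(&mut self, base: u32) -> u32 {
        let inherited = self.info[base as usize];
        let id = self.info.len() as u32;
        self.info.push(inherited);
        id
    }
}

fn main() {
    let mut nodes = Nodes { info: Vec::new() };
    let stck = nodes.new_stack(2);
    let field_ptr = nodes.new_offset(stck);
    let elem_ptr = nodes.new_offset(field_ptr);
    // loads and stores through the derived pointers land in the stack slot's class
    assert_eq!(nodes.info[elem_ptr as usize].aclass, 2);
    assert_eq!(nodes.info[elem_ptr as usize].mem, stck);
}
```

Storing the class on the node avoids the old backwards walk through `Add` and `Phi` chains on every memory access.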
noop + let flag_offset = self.tys.size_of(oty) - flag_offset - 1; + let fill = self.ci.nodes.new_const(oty, 1i64 << (flag_offset * 8 - 1)); + val.id = self + .ci + .nodes + .new_node(oty, Kind::BinOp { op: TokenKind::Bor }, [VOID, val.id, fill]); + val.ty = oty; + } + Loc::Stack => { + self.strip_ptr(val); + let stack = self.new_stack(oty); + let fill = self.ci.nodes.new_const(flag_ty, 1); + self.store_mem(stack, flag_ty, fill); + let off = self.offset(stack, payload_offset); + self.store_mem(off, val.ty, val.id); + val.id = stack; + val.ptr = true; + val.ty = oty; + } + } + } + + fn unwrap_opt(&mut self, pos: Pos, opt: &mut Value) { + let Some(ty) = self.tys.inner_of(opt.ty) else { return }; + let null_check = self.gen_null_check(*opt, ty, TokenKind::Eq); + + // TODO: extract the if check int a fucntion + let ctrl = self.ci.nodes.new_node(ty::Id::VOID, Kind::If, [self.ci.ctrl.get(), null_check]); + let ctrl_ty = self.ci.nodes[ctrl].ty; + self.ci.nodes.remove(ctrl); + let oty = mem::replace(&mut opt.ty, ty); + match ctrl_ty { + ty::Id::LEFT_UNREACHABLE => { + self.unwrap_opt_unchecked(ty, oty, opt); + } + ty::Id::RIGHT_UNREACHABLE => { + self.report(pos, "the value is always null, some checks might need to be inverted"); + } + _ => { + self.report( + pos, + "can't prove the value is not 'null', \ + use '@unwrap()' if you believe compiler is stupid, \ + or explicitly check for null and handle it \ + ('if == null { /* handle */ } else { /* use opt */ }')", + ); + } + } + } + + fn unwrap_opt_unchecked(&mut self, ty: ty::Id, oty: ty::Id, opt: &mut Value) { + if self.tys.nieche_of(ty).is_some() { + return; + } + + let OptLayout { payload_offset, .. } = self.tys.opt_layout(ty); + + match oty.loc(self.tys) { + Loc::Reg => {} + Loc::Stack => { + opt.id = self.offset(opt.id, payload_offset); + } + } + } + + fn gen_null_check(&mut self, mut cmped: Value, ty: ty::Id, op: TokenKind) -> Nid { + let OptLayout { flag_ty, flag_offset, .. 
} = self.tys.opt_layout(ty); + + match cmped.ty.loc(self.tys) { + Loc::Reg => { + self.strip_ptr(&mut cmped); + let inps = [VOID, cmped.id, self.ci.nodes.new_const(cmped.ty, 0)]; + self.ci.nodes.new_node(ty::Id::BOOL, Kind::BinOp { op }, inps) + } + Loc::Stack => { + cmped.id = self.offset(cmped.id, flag_offset); + cmped.ty = flag_ty; + self.strip_ptr(&mut cmped); + let inps = [VOID, cmped.id, self.ci.nodes.new_const(flag_ty, 0)]; + self.ci.nodes.new_node(ty::Id::BOOL, Kind::BinOp { op }, inps) + } } } @@ -3829,24 +4164,43 @@ impl<'a> Codegen<'a> { if let Some(upcasted) = src.ty.try_upcast(expected) && upcasted == expected { + if src.ty.is_never() { + return true; + } + if src.ty != upcasted { - debug_assert!( - src.ty.is_integer() || src.ty == ty::Id::NEVER, - "{} {}", - self.ty_display(src.ty), - self.ty_display(upcasted) - ); - debug_assert!( - upcasted.is_integer() || src.ty == ty::Id::NEVER, - "{} {}", - self.ty_display(src.ty), - self.ty_display(upcasted) - ); - self.extend(src, upcasted); + if let Some(inner) = self.tys.inner_of(upcasted) { + if inner != src.ty { + self.assert_ty(pos, src, inner, hint); + } + self.wrap_in_opt(src); + } else { + debug_assert!( + src.ty.is_integer() || src.ty == ty::Id::NEVER, + "{} {}", + self.ty_display(src.ty), + self.ty_display(upcasted) + ); + debug_assert!( + upcasted.is_integer() || src.ty == ty::Id::NEVER, + "{} {}", + self.ty_display(src.ty), + self.ty_display(upcasted) + ); + self.extend(src, upcasted); + } } true } else { + if let Some(inner) = self.tys.inner_of(src.ty) + && inner.try_upcast(expected) == Some(expected) + { + self.unwrap_opt(pos, src); + return self.assert_ty(pos, src, expected, hint); + } + let ty = self.ty_display(src.ty); + let expected = self.ty_display(expected); self.report(pos, fa!("expected {hint} to be of type {expected}, got {ty}")); false @@ -3855,11 +4209,10 @@ impl<'a> Codegen<'a> { fn extend(&mut self, value: &mut Value, to: ty::Id) { self.strip_ptr(value); - let val = (1i64 << (self.tys.size_of(value.ty) * 8)) - 1; - value.ty = to; - let mask = self.ci.nodes.new_node_nop(to, Kind::CInt { value: val }, [VOID]); + let mask = self.ci.nodes.new_const(to, (1i64 << (self.tys.size_of(value.ty) * 8)) - 1); let inps = [VOID, value.id, mask]; *value = self.ci.nodes.new_node_lit(to, Kind::BinOp { op: TokenKind::Band }, inps); + value.ty = to; } #[track_caller] @@ -3964,8 +4317,6 @@ impl TypeParser for Codegen<'_> { } } -// FIXME: make this more efficient (allocated with arena) - #[cfg(test)] mod tests { use { @@ -4017,6 +4368,7 @@ mod tests { variables; loops; pointers; + nullable_types; structs; hex_octal_binary_literals; struct_operators; @@ -4028,6 +4380,7 @@ mod tests { inline; idk; generic_functions; + die; // Incomplete Examples; //comptime_pointers; @@ -4035,6 +4388,7 @@ mod tests { fb_driver; // Purely Testing Examples; + only_break_loop; reading_idk; nonexistent_ident_import; big_array_crash; diff --git a/lang/src/son/hbvm.rs b/lang/src/son/hbvm.rs index 7fef3f1..4411cd4 100644 --- a/lang/src/son/hbvm.rs +++ b/lang/src/son/hbvm.rs @@ -6,13 +6,12 @@ use { son::{write_reloc, Kind, MEM}, task, ty::{self, Arg, Loc}, - vc::{BitSet, Vc}, + utils::{BitSet, Vc}, HashMap, Offset, PLoc, Reloc, Sig, Size, TypedReloc, Types, }, - alloc::{borrow::ToOwned, string::String, vec::Vec}, + alloc::{borrow::ToOwned, boxed::Box, collections::BTreeMap, string::String, vec::Vec}, core::{assert_matches::debug_assert_matches, mem}, hbbytecode::{self as instrs, *}, - std::{boxed::Box, collections::BTreeMap}, }; impl Types { @@ -226,17 
+225,20 @@ impl ItemCtx { let node = &fuc.nodes[nid]; let mut extend = |base: ty::Id, dest: ty::Id, from: usize, to: usize| { - if base.simple_size() == dest.simple_size() { + let (bsize, dsize) = (tys.size_of(base), tys.size_of(dest)); + debug_assert!(bsize <= 8, "{}", ty::Display::new(tys, files, base)); + debug_assert!(dsize <= 8, "{}", ty::Display::new(tys, files, dest)); + if bsize == dsize { return Default::default(); } match (base.is_signed(), dest.is_signed()) { (true, true) => { let op = [instrs::sxt8, instrs::sxt16, instrs::sxt32] - [base.simple_size().unwrap().ilog2() as usize]; + [bsize.ilog2() as usize]; op(atr(allocs[to]), atr(allocs[from])) } _ => { - let mask = (1u64 << (base.simple_size().unwrap() * 8)) - 1; + let mask = (1u64 << (bsize * 8)) - 1; instrs::andi(atr(allocs[to]), atr(allocs[from]), mask) } } @@ -306,6 +308,9 @@ impl ItemCtx { self.emit(instrs::jmp(0)); } } + Kind::Die => { + self.emit(instrs::un()); + } Kind::CInt { value } if node.ty.is_float() => { self.emit(match node.ty { ty::Id::F32 => instrs::li32( @@ -408,12 +413,20 @@ impl ItemCtx { } if let Some(PLoc::WideReg(r, size)) = ret { + debug_assert_eq!( + fuc.nodes[*node.inputs.last().unwrap()].kind, + Kind::Stck + ); let stck = fuc.nodes[*node.inputs.last().unwrap()].offset; self.emit(instrs::st(r, reg::STACK_PTR, stck as _, size)); } if let Some(PLoc::Reg(r, size)) = ret && node.ty.loc(tys) == Loc::Stack { + debug_assert_eq!( + fuc.nodes[*node.inputs.last().unwrap()].kind, + Kind::Stck + ); let stck = fuc.nodes[*node.inputs.last().unwrap()].offset; self.emit(instrs::st(r, reg::STACK_PTR, stck as _, size)); } @@ -607,7 +620,9 @@ impl ItemCtx { self.emit(instrs::addi64(reg::STACK_PTR, reg::STACK_PTR, (pushed + stack) as _)); } self.relocs.iter_mut().for_each(|r| r.reloc.offset -= stripped_prelude_size as u32); - self.emit(instrs::jala(reg::ZERO, reg::RET_ADDR, 0)); + if sig.ret != ty::Id::NEVER { + self.emit(instrs::jala(reg::ZERO, reg::RET_ADDR, 0)); + } } } @@ -812,6 +827,10 @@ impl<'a> Function<'a> { self.add_instr(nid, ops); self.emit_node(node.outputs[0], nid); } + Kind::Die => { + self.add_instr(nid, vec![]); + self.emit_node(node.outputs[0], nid); + } Kind::CInt { .. 
} if node.outputs.iter().all(|&o| { let ond = &self.nodes[o]; @@ -1157,7 +1176,7 @@ impl regalloc2::Function for Function<'_> { } fn is_ret(&self, insn: regalloc2::Inst) -> bool { - self.nodes[self.instrs[insn.index()].nid].kind == Kind::Return + matches!(self.nodes[self.instrs[insn.index()].nid].kind, Kind::Return | Kind::Die) } fn is_branch(&self, insn: regalloc2::Inst) -> bool { diff --git a/lang/src/utils.rs b/lang/src/utils.rs new file mode 100644 index 0000000..ca781bf --- /dev/null +++ b/lang/src/utils.rs @@ -0,0 +1,528 @@ +#![expect(dead_code)] +use { + alloc::alloc, + core::{ + alloc::Layout, + fmt::Debug, + hint::unreachable_unchecked, + mem::MaybeUninit, + ops::{Deref, DerefMut, Not}, + ptr::Unique, + }, +}; + +type Nid = u16; + +pub union BitSet { + inline: usize, + alloced: Unique, +} + +impl Debug for BitSet { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_list().entries(self.iter()).finish() + } +} + +impl Clone for BitSet { + fn clone(&self) -> Self { + if self.is_inline() { + Self { inline: unsafe { self.inline } } + } else { + let (data, _) = self.data_and_len(); + let (layout, _) = Self::layout(data.len()); + unsafe { + let ptr = alloc::alloc(layout); + ptr.copy_from_nonoverlapping(self.alloced.as_ptr() as _, layout.size()); + Self { alloced: Unique::new_unchecked(ptr as _) } + } + } + } +} + +impl Drop for BitSet { + fn drop(&mut self) { + if !self.is_inline() { + unsafe { + let cap = self.alloced.as_ref().cap; + alloc::dealloc(self.alloced.as_ptr() as _, Self::layout(cap).0); + } + } + } +} + +impl Default for BitSet { + fn default() -> Self { + Self { inline: Self::FLAG } + } +} + +impl BitSet { + const FLAG: usize = 1 << (Self::UNIT - 1); + const INLINE_ELEMS: usize = Self::UNIT - 1; + const UNIT: usize = core::mem::size_of::() * 8; + + fn is_inline(&self) -> bool { + unsafe { self.inline & Self::FLAG != 0 } + } + + fn data_and_len(&self) -> (&[usize], usize) { + unsafe { + if self.is_inline() { + (core::slice::from_ref(&self.inline), Self::INLINE_ELEMS) + } else { + let small_vec = self.alloced.as_ref(); + ( + core::slice::from_raw_parts( + &small_vec.data as *const _ as *const usize, + small_vec.cap, + ), + small_vec.cap * core::mem::size_of::() * 8, + ) + } + } + } + + fn data_mut_and_len(&mut self) -> (&mut [usize], usize) { + unsafe { + if self.is_inline() { + (core::slice::from_mut(&mut self.inline), INLINE_ELEMS) + } else { + let small_vec = self.alloced.as_mut(); + ( + core::slice::from_raw_parts_mut( + &mut small_vec.data as *mut _ as *mut usize, + small_vec.cap, + ), + small_vec.cap * Self::UNIT, + ) + } + } + } + + fn indexes(index: usize) -> (usize, usize) { + (index / Self::UNIT, index % Self::UNIT) + } + + pub fn get(&self, index: Nid) -> bool { + let index = index as usize; + let (data, len) = self.data_and_len(); + if index >= len { + return false; + } + let (elem, bit) = Self::indexes(index); + (unsafe { *data.get_unchecked(elem) }) & (1 << bit) != 0 + } + + pub fn set(&mut self, index: Nid) -> bool { + let index = index as usize; + let (mut data, len) = self.data_mut_and_len(); + if core::intrinsics::unlikely(index >= len) { + self.grow(index.next_power_of_two().max(4 * Self::UNIT)); + (data, _) = self.data_mut_and_len(); + } + + let (elem, bit) = Self::indexes(index); + let elem = unsafe { data.get_unchecked_mut(elem) }; + let prev = *elem; + *elem |= 1 << bit; + *elem != prev + } + + fn grow(&mut self, size: usize) { + debug_assert!(size.is_power_of_two()); + let slot_count = size / Self::UNIT; + let (layout, 
off) = Self::layout(slot_count); + let (ptr, prev_len) = unsafe { + if self.is_inline() { + let ptr = alloc::alloc(layout); + *ptr.add(off).cast::() = self.inline & !Self::FLAG; + (ptr, 1) + } else { + let prev_len = self.alloced.as_ref().cap; + let (prev_layout, _) = Self::layout(prev_len); + (alloc::realloc(self.alloced.as_ptr() as _, prev_layout, layout.size()), prev_len) + } + }; + unsafe { + MaybeUninit::fill( + core::slice::from_raw_parts_mut( + ptr.add(off).cast::>().add(prev_len), + slot_count - prev_len, + ), + 0, + ); + *ptr.cast::() = slot_count; + core::ptr::write(self, Self { alloced: Unique::new_unchecked(ptr as _) }); + } + } + + fn layout(slot_count: usize) -> (core::alloc::Layout, usize) { + unsafe { + core::alloc::Layout::new::() + .extend(Layout::array::(slot_count).unwrap_unchecked()) + .unwrap_unchecked() + } + } + + pub fn iter(&self) -> BitSetIter { + if self.is_inline() { + BitSetIter { index: 0, current: unsafe { self.inline & !Self::FLAG }, remining: &[] } + } else { + let &[current, ref remining @ ..] = self.data_and_len().0 else { + unsafe { unreachable_unchecked() } + }; + BitSetIter { index: 0, current, remining } + } + } + + pub fn clear(&mut self, len: usize) { + self.reserve(len); + if self.is_inline() { + unsafe { self.inline &= Self::FLAG }; + } else { + self.data_mut_and_len().0.fill(0); + } + } + + pub fn units<'a>(&'a self, slot: &'a mut usize) -> &'a [usize] { + if self.is_inline() { + *slot = unsafe { self.inline } & !Self::FLAG; + core::slice::from_ref(slot) + } else { + self.data_and_len().0 + } + } + + pub fn reserve(&mut self, len: usize) { + if len > self.data_and_len().1 { + self.grow(len.next_power_of_two().max(4 * Self::UNIT)); + } + } + + pub fn units_mut(&mut self) -> Result<&mut [usize], &mut InlineBitSetView> { + if self.is_inline() { + Err(unsafe { + core::mem::transmute::<&mut usize, &mut InlineBitSetView>(&mut self.inline) + }) + } else { + Ok(self.data_mut_and_len().0) + } + } +} + +pub struct InlineBitSetView(usize); + +impl InlineBitSetView { + pub(crate) fn add_mask(&mut self, tmp: usize) { + debug_assert!(tmp & BitSet::FLAG == 0); + self.0 |= tmp; + } +} + +pub struct BitSetIter<'a> { + index: usize, + current: usize, + remining: &'a [usize], +} + +impl Iterator for BitSetIter<'_> { + type Item = usize; + + fn next(&mut self) -> Option { + while self.current == 0 { + self.current = *self.remining.take_first()?; + self.index += 1; + } + + let sub_idx = self.current.trailing_zeros() as usize; + self.current &= self.current - 1; + Some(self.index * BitSet::UNIT + sub_idx) + } +} + +struct AllocedBitSet { + cap: usize, + data: [usize; 0], +} + +#[cfg(test)] +#[test] +fn test_small_bit_set() { + use std::vec::Vec; + + let mut sv = BitSet::default(); + + sv.set(10); + debug_assert!(sv.get(10)); + sv.set(100); + debug_assert!(sv.get(100)); + sv.set(10000); + debug_assert!(sv.get(10000)); + debug_assert_eq!(sv.iter().collect::>(), &[10, 100, 10000]); + sv.clear(10000); + debug_assert_eq!(sv.iter().collect::>(), &[]); +} + +pub union Vc { + inline: InlineVc, + alloced: AllocedVc, +} + +impl Default for Vc { + fn default() -> Self { + Vc { inline: InlineVc { elems: MaybeUninit::uninit(), cap: Default::default() } } + } +} + +impl Debug for Vc { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + self.as_slice().fmt(f) + } +} + +impl FromIterator for Vc { + fn from_iter>(iter: T) -> Self { + let mut slf = Self::default(); + for i in iter { + slf.push(i); + } + slf + } +} + +const INLINE_ELEMS: usize = VC_SIZE / 2 - 1; 
+const VC_SIZE: usize = 16; + +impl Vc { + fn is_inline(&self) -> bool { + unsafe { self.inline.cap <= INLINE_ELEMS as Nid } + } + + fn layout(&self) -> Option { + unsafe { + self.is_inline().not().then(|| { + core::alloc::Layout::array::(self.alloced.cap as _).unwrap_unchecked() + }) + } + } + + pub fn len(&self) -> usize { + unsafe { + if self.is_inline() { + self.inline.cap as _ + } else { + self.alloced.len as _ + } + } + } + + fn len_mut(&mut self) -> &mut Nid { + unsafe { + if self.is_inline() { + &mut self.inline.cap + } else { + &mut self.alloced.len + } + } + } + + fn as_ptr(&self) -> *const Nid { + unsafe { + match self.is_inline() { + true => self.inline.elems.as_ptr().cast(), + false => self.alloced.base.as_ptr(), + } + } + } + + fn as_mut_ptr(&mut self) -> *mut Nid { + unsafe { + match self.is_inline() { + true => self.inline.elems.as_mut_ptr().cast(), + false => self.alloced.base.as_ptr(), + } + } + } + + pub fn as_slice(&self) -> &[Nid] { + unsafe { core::slice::from_raw_parts(self.as_ptr(), self.len()) } + } + + fn as_slice_mut(&mut self) -> &mut [Nid] { + unsafe { core::slice::from_raw_parts_mut(self.as_mut_ptr(), self.len()) } + } + + pub fn push(&mut self, value: Nid) { + if let Some(layout) = self.layout() + && unsafe { self.alloced.len == self.alloced.cap } + { + unsafe { + self.alloced.cap *= 2; + self.alloced.base = Unique::new_unchecked( + alloc::realloc( + self.alloced.base.as_ptr().cast(), + layout, + self.alloced.cap as usize * core::mem::size_of::(), + ) + .cast(), + ); + } + } else if self.len() == INLINE_ELEMS { + unsafe { + let mut allcd = + Self::alloc((self.inline.cap + 1).next_power_of_two() as _, self.len()); + core::ptr::copy_nonoverlapping(self.as_ptr(), allcd.as_mut_ptr(), self.len()); + *self = allcd; + } + } + + unsafe { + *self.len_mut() += 1; + self.as_mut_ptr().add(self.len() - 1).write(value); + } + } + + unsafe fn alloc(cap: usize, len: usize) -> Self { + debug_assert!(cap > INLINE_ELEMS); + let layout = unsafe { core::alloc::Layout::array::(cap).unwrap_unchecked() }; + let alloc = unsafe { alloc::alloc(layout) }; + unsafe { + Vc { + alloced: AllocedVc { + base: Unique::new_unchecked(alloc.cast()), + len: len as _, + cap: cap as _, + }, + } + } + } + + pub fn swap_remove(&mut self, index: usize) { + let len = self.len() - 1; + self.as_slice_mut().swap(index, len); + *self.len_mut() -= 1; + } + + pub fn remove(&mut self, index: usize) { + self.as_slice_mut().copy_within(index + 1.., index); + *self.len_mut() -= 1; + } +} + +impl Drop for Vc { + fn drop(&mut self) { + if let Some(layout) = self.layout() { + unsafe { + alloc::dealloc(self.alloced.base.as_ptr().cast(), layout); + } + } + } +} + +impl Clone for Vc { + fn clone(&self) -> Self { + self.as_slice().into() + } +} + +impl IntoIterator for Vc { + type IntoIter = VcIntoIter; + type Item = Nid; + + fn into_iter(self) -> Self::IntoIter { + VcIntoIter { start: 0, end: self.len(), vc: self } + } +} + +pub struct VcIntoIter { + start: usize, + end: usize, + vc: Vc, +} + +impl Iterator for VcIntoIter { + type Item = Nid; + + fn next(&mut self) -> Option { + if self.start == self.end { + return None; + } + + let ret = unsafe { core::ptr::read(self.vc.as_slice().get_unchecked(self.start)) }; + self.start += 1; + Some(ret) + } + + fn size_hint(&self) -> (usize, Option) { + let len = self.end - self.start; + (len, Some(len)) + } +} + +impl DoubleEndedIterator for VcIntoIter { + fn next_back(&mut self) -> Option { + if self.start == self.end { + return None; + } + + self.end -= 1; + Some(unsafe { 
core::ptr::read(self.vc.as_slice().get_unchecked(self.end)) }) + } +} + +impl ExactSizeIterator for VcIntoIter {} + +impl From<[Nid; SIZE]> for Vc { + fn from(value: [Nid; SIZE]) -> Self { + value.as_slice().into() + } +} + +impl<'a> From<&'a [Nid]> for Vc { + fn from(value: &'a [Nid]) -> Self { + if value.len() <= INLINE_ELEMS { + let mut dflt = Self::default(); + unsafe { + core::ptr::copy_nonoverlapping(value.as_ptr(), dflt.as_mut_ptr(), value.len()) + }; + dflt.inline.cap = value.len() as _; + dflt + } else { + let mut allcd = unsafe { Self::alloc(value.len(), value.len()) }; + unsafe { + core::ptr::copy_nonoverlapping(value.as_ptr(), allcd.as_mut_ptr(), value.len()) + }; + allcd + } + } +} + +impl Deref for Vc { + type Target = [Nid]; + + fn deref(&self) -> &Self::Target { + self.as_slice() + } +} + +impl DerefMut for Vc { + fn deref_mut(&mut self) -> &mut Self::Target { + self.as_slice_mut() + } +} + +#[derive(Clone, Copy)] +#[repr(C)] +struct InlineVc { + cap: Nid, + elems: MaybeUninit<[Nid; INLINE_ELEMS]>, +} + +#[derive(Clone, Copy)] +#[repr(C)] +struct AllocedVc { + cap: Nid, + len: Nid, + base: Unique, +} diff --git a/lang/src/vc.rs b/lang/src/vc.rs deleted file mode 100644 index cacf136..0000000 --- a/lang/src/vc.rs +++ /dev/null @@ -1,306 +0,0 @@ -use { - alloc::vec::Vec, - core::{ - fmt::Debug, - mem::MaybeUninit, - ops::{Deref, DerefMut, Not}, - ptr::Unique, - }, -}; - -type Nid = u16; - -const VC_SIZE: usize = 16; -const INLINE_ELEMS: usize = VC_SIZE / 2 - 1; - -pub union Vc { - inline: InlineVc, - alloced: AllocedVc, -} - -impl Default for Vc { - fn default() -> Self { - Vc { inline: InlineVc { elems: MaybeUninit::uninit(), cap: Default::default() } } - } -} - -impl Debug for Vc { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - self.as_slice().fmt(f) - } -} - -impl FromIterator for Vc { - fn from_iter>(iter: T) -> Self { - let mut slf = Self::default(); - for i in iter { - slf.push(i); - } - slf - } -} - -impl Vc { - fn is_inline(&self) -> bool { - unsafe { self.inline.cap <= INLINE_ELEMS as Nid } - } - - fn layout(&self) -> Option { - unsafe { - self.is_inline().not().then(|| { - core::alloc::Layout::array::(self.alloced.cap as _).unwrap_unchecked() - }) - } - } - - pub fn len(&self) -> usize { - unsafe { - if self.is_inline() { - self.inline.cap as _ - } else { - self.alloced.len as _ - } - } - } - - fn len_mut(&mut self) -> &mut Nid { - unsafe { - if self.is_inline() { - &mut self.inline.cap - } else { - &mut self.alloced.len - } - } - } - - fn as_ptr(&self) -> *const Nid { - unsafe { - match self.is_inline() { - true => self.inline.elems.as_ptr().cast(), - false => self.alloced.base.as_ptr(), - } - } - } - - fn as_mut_ptr(&mut self) -> *mut Nid { - unsafe { - match self.is_inline() { - true => self.inline.elems.as_mut_ptr().cast(), - false => self.alloced.base.as_ptr(), - } - } - } - - pub fn as_slice(&self) -> &[Nid] { - unsafe { core::slice::from_raw_parts(self.as_ptr(), self.len()) } - } - - fn as_slice_mut(&mut self) -> &mut [Nid] { - unsafe { core::slice::from_raw_parts_mut(self.as_mut_ptr(), self.len()) } - } - - pub fn push(&mut self, value: Nid) { - if let Some(layout) = self.layout() - && unsafe { self.alloced.len == self.alloced.cap } - { - unsafe { - self.alloced.cap *= 2; - self.alloced.base = Unique::new_unchecked( - alloc::alloc::realloc( - self.alloced.base.as_ptr().cast(), - layout, - self.alloced.cap as usize * core::mem::size_of::(), - ) - .cast(), - ); - } - } else if self.len() == INLINE_ELEMS { - unsafe { - let mut allcd 
= - Self::alloc((self.inline.cap + 1).next_power_of_two() as _, self.len()); - core::ptr::copy_nonoverlapping(self.as_ptr(), allcd.as_mut_ptr(), self.len()); - *self = allcd; - } - } - - unsafe { - *self.len_mut() += 1; - self.as_mut_ptr().add(self.len() - 1).write(value); - } - } - - unsafe fn alloc(cap: usize, len: usize) -> Self { - debug_assert!(cap > INLINE_ELEMS); - let layout = unsafe { core::alloc::Layout::array::(cap).unwrap_unchecked() }; - let alloc = unsafe { alloc::alloc::alloc(layout) }; - unsafe { - Vc { - alloced: AllocedVc { - base: Unique::new_unchecked(alloc.cast()), - len: len as _, - cap: cap as _, - }, - } - } - } - - pub fn swap_remove(&mut self, index: usize) { - let len = self.len() - 1; - self.as_slice_mut().swap(index, len); - *self.len_mut() -= 1; - } - - pub fn remove(&mut self, index: usize) { - self.as_slice_mut().copy_within(index + 1.., index); - *self.len_mut() -= 1; - } -} - -impl Drop for Vc { - fn drop(&mut self) { - if let Some(layout) = self.layout() { - unsafe { - alloc::alloc::dealloc(self.alloced.base.as_ptr().cast(), layout); - } - } - } -} - -impl Clone for Vc { - fn clone(&self) -> Self { - self.as_slice().into() - } -} - -impl IntoIterator for Vc { - type IntoIter = VcIntoIter; - type Item = Nid; - - fn into_iter(self) -> Self::IntoIter { - VcIntoIter { start: 0, end: self.len(), vc: self } - } -} - -pub struct VcIntoIter { - start: usize, - end: usize, - vc: Vc, -} - -impl Iterator for VcIntoIter { - type Item = Nid; - - fn next(&mut self) -> Option { - if self.start == self.end { - return None; - } - - let ret = unsafe { core::ptr::read(self.vc.as_slice().get_unchecked(self.start)) }; - self.start += 1; - Some(ret) - } - - fn size_hint(&self) -> (usize, Option) { - let len = self.end - self.start; - (len, Some(len)) - } -} - -impl DoubleEndedIterator for VcIntoIter { - fn next_back(&mut self) -> Option { - if self.start == self.end { - return None; - } - - self.end -= 1; - Some(unsafe { core::ptr::read(self.vc.as_slice().get_unchecked(self.end)) }) - } -} - -impl ExactSizeIterator for VcIntoIter {} - -impl From<[Nid; SIZE]> for Vc { - fn from(value: [Nid; SIZE]) -> Self { - value.as_slice().into() - } -} - -impl<'a> From<&'a [Nid]> for Vc { - fn from(value: &'a [Nid]) -> Self { - if value.len() <= INLINE_ELEMS { - let mut dflt = Self::default(); - unsafe { - core::ptr::copy_nonoverlapping(value.as_ptr(), dflt.as_mut_ptr(), value.len()) - }; - dflt.inline.cap = value.len() as _; - dflt - } else { - let mut allcd = unsafe { Self::alloc(value.len(), value.len()) }; - unsafe { - core::ptr::copy_nonoverlapping(value.as_ptr(), allcd.as_mut_ptr(), value.len()) - }; - allcd - } - } -} - -impl Deref for Vc { - type Target = [Nid]; - - fn deref(&self) -> &Self::Target { - self.as_slice() - } -} - -impl DerefMut for Vc { - fn deref_mut(&mut self) -> &mut Self::Target { - self.as_slice_mut() - } -} - -#[derive(Clone, Copy)] -#[repr(C)] -struct InlineVc { - cap: Nid, - elems: MaybeUninit<[Nid; INLINE_ELEMS]>, -} - -#[derive(Clone, Copy)] -#[repr(C)] -struct AllocedVc { - cap: Nid, - len: Nid, - base: Unique, -} - -#[derive(Default, Clone)] -pub struct BitSet { - data: Vec, -} - -impl BitSet { - const ELEM_SIZE: usize = core::mem::size_of::() * 8; - - pub fn clear(&mut self, bit_size: usize) { - let new_len = bit_size.div_ceil(Self::ELEM_SIZE); - self.data.clear(); - self.data.resize(new_len, 0); - } - - pub fn set(&mut self, idx: Nid) -> bool { - let idx = idx as usize; - let data_idx = idx / Self::ELEM_SIZE; - let sub_idx = idx % Self::ELEM_SIZE; - let 
prev = self.data[data_idx] & (1 << sub_idx); - self.data[data_idx] |= 1 << sub_idx; - prev == 0 - } - - pub fn get(&self, idx: Nid) -> bool { - let idx = idx as usize; - let data_idx = idx / Self::ELEM_SIZE; - let sub_idx = idx % Self::ELEM_SIZE; - let prev = self.data[data_idx] & (1 << sub_idx); - prev != 0 - } -} diff --git a/lang/tests/son_tests_die.txt b/lang/tests/son_tests_die.txt new file mode 100644 index 0000000..253b7cd --- /dev/null +++ b/lang/tests/son_tests_die.txt @@ -0,0 +1,5 @@ +main: + UN +code size: 9 +ret: 0 +status: Err(Unreachable) diff --git a/lang/tests/son_tests_generic_types.txt b/lang/tests/son_tests_generic_types.txt index 183d7b2..755dcb1 100644 --- a/lang/tests/son_tests_generic_types.txt +++ b/lang/tests/son_tests_generic_types.txt @@ -1,20 +1,17 @@ deinit: - ADDI64 r254, r254, -48d - ST r31, r254, 24a, 24h - LD r5, r2, 16a, 8h + ADDI64 r254, r254, -16d + ST r31, r254, 0a, 16h CP r32, r2 + LD r5, r2, 16a, 8h LI64 r4, 8d MUL64 r3, r5, r4 CP r5, r32 LD r2, r5, 0a, 8h JAL r31, r0, :free - ADDI64 r33, r254, 0d - CP r1, r33 + CP r1, r32 JAL r31, r0, :new - CP r2, r32 - BMC r33, r2, 24h - LD r31, r254, 24a, 24h - ADDI64 r254, r254, 48d + LD r31, r254, 0a, 16h + ADDI64 r254, r254, 16d JALA r0, r31, 0a free: CP r10, r2 @@ -26,23 +23,21 @@ free: ECA JALA r0, r31, 0a main: - ADDI64 r254, r254, -80d - ST r31, r254, 48a, 32h - ADDI64 r32, r254, 24d + ADDI64 r254, r254, -48d + ST r31, r254, 24a, 24h + ADDI64 r32, r254, 0d CP r1, r32 JAL r31, r0, :new - ADDI64 r33, r254, 0d - BMC r32, r33, 24h LI64 r3, 69d - CP r2, r33 + CP r2, r32 JAL r31, r0, :push - LD r12, r254, 0a, 8h - LD r34, r12, 0a, 8h - CP r2, r33 + LD r9, r254, 0a, 8h + LD r33, r9, 0a, 8h + CP r2, r32 JAL r31, r0, :deinit - CP r1, r34 - LD r31, r254, 48a, 32h - ADDI64 r254, r254, 80d + CP r1, r33 + LD r31, r254, 24a, 24h + ADDI64 r254, r254, 48d JALA r0, r31, 0a malloc: CP r9, r2 @@ -80,52 +75,53 @@ push: MUL64 r2, r36, r37 CP r3, r37 JAL r31, r0, :malloc - CP r38, r34 - ST r36, r38, 16a, 8h - JNE r1, r35, :3 - CP r1, r35 + CP r38, r1 + CP r39, r34 + ST r36, r39, 16a, 8h + LI64 r1, 0d + CP r7, r38 + JNE r7, r1, :3 JMP :4 - 3: CP r39, r1 - CP r1, r35 - LD r6, r38, 8a, 8h - MULI64 r8, r6, 8d - LD r12, r38, 0a, 8h - ADD64 r11, r12, r8 - CP r3, r39 - 9: JNE r11, r12, :5 - LD r8, r38, 8a, 8h - JEQ r8, r1, :6 + 3: CP r38, r7 + LD r8, r39, 8a, 8h + MULI64 r10, r8, 8d + LD r3, r39, 0a, 8h + ADD64 r7, r3, r10 + CP r5, r38 + 9: LD r2, r39, 0a, 8h + LD r10, r39, 8a, 8h + JNE r7, r3, :5 + JEQ r10, r35, :6 CP r4, r37 - MUL64 r3, r8, r4 - LD r2, r38, 0a, 8h + MUL64 r3, r10, r4 JAL r31, r0, :free - CP r5, r39 + CP r6, r38 JMP :7 - 6: CP r5, r39 - 7: ST r5, r38, 0a, 8h + 6: CP r6, r38 + 7: ST r6, r39, 0a, 8h JMP :8 5: CP r4, r37 - CP r5, r39 - ADDI64 r6, r3, 8d - ADDI64 r7, r12, 8d - LD r8, r12, 0a, 8h - ST r8, r3, 0a, 8h - CP r3, r6 - CP r12, r7 + CP r6, r38 + ADDI64 r8, r5, 8d + ADDI64 r9, r3, 8d + LD r10, r3, 0a, 8h + ST r10, r5, 0a, 8h + CP r3, r9 + CP r5, r8 JMP :9 - 0: CP r38, r34 - 8: LD r3, r38, 8a, 8h - MULI64 r5, r3, 8d - LD r4, r38, 0a, 8h - ADD64 r1, r4, r5 + 0: CP r39, r34 + 8: LD r5, r39, 8a, 8h + MULI64 r7, r5, 8d + LD r6, r39, 0a, 8h + ADD64 r1, r6, r7 CP r3, r32 ST r3, r1, 0a, 8h - LD r11, r38, 8a, 8h - ADD64 r2, r11, r33 - ST r2, r38, 8a, 8h + LD r2, r39, 8a, 8h + ADD64 r3, r2, r33 + ST r3, r39, 8a, 8h 4: LD r31, r254, 0a, 72h ADDI64 r254, r254, 72d JALA r0, r31, 0a -code size: 980 +code size: 955 ret: 69 status: Ok(()) diff --git a/lang/tests/son_tests_idk.txt b/lang/tests/son_tests_idk.txt index 
f4664fe..b48669b 100644 --- a/lang/tests/son_tests_idk.txt +++ b/lang/tests/son_tests_idk.txt @@ -4,9 +4,9 @@ main: LI64 r6, 128d LI64 r7, 0d ADDI64 r4, r254, 0d - 2: JLTU r7, r6, :0 - LD r1, r254, 42a, 1h - ANDI r1, r1, 255d + 2: LD r12, r254, 42a, 1h + JLTU r7, r6, :0 + ANDI r1, r12, 255d JMP :1 0: ADDI64 r3, r7, 1d ADD64 r7, r4, r7 diff --git a/lang/tests/son_tests_inline_test.txt b/lang/tests/son_tests_inline_test.txt index 3ed6ff4..a427d1b 100644 --- a/lang/tests/son_tests_inline_test.txt +++ b/lang/tests/son_tests_inline_test.txt @@ -23,16 +23,16 @@ scalar_values: JALA r0, r31, 0a structs: ADDI64 r254, r254, -32d - LI64 r2, 5d - ST r2, r254, 16a, 8h - ST r2, r254, 24a, 8h - LD r6, r254, 16a, 8h - ADDI64 r8, r6, 15d - ST r8, r254, 0a, 8h - LI64 r7, 20d - ST r7, r254, 8a, 8h - LD r1, r254, 0a, 8h - SUB64 r1, r1, r7 + LI64 r1, 5d + ST r1, r254, 0a, 8h + ST r1, r254, 8a, 8h + LD r5, r254, 0a, 8h + ADDI64 r7, r5, 15d + ST r7, r254, 16a, 8h + LI64 r10, 20d + ST r10, r254, 24a, 8h + LD r1, r254, 16a, 8h + SUB64 r1, r1, r10 ADDI64 r254, r254, 32d JALA r0, r31, 0a code size: 310 diff --git a/lang/tests/son_tests_nullable_types.txt b/lang/tests/son_tests_nullable_types.txt new file mode 100644 index 0000000..273bcc1 --- /dev/null +++ b/lang/tests/son_tests_nullable_types.txt @@ -0,0 +1,87 @@ +decide: + LI8 r1, 1b + JALA r0, r31, 0a +main: + ADDI64 r254, r254, -136d + ST r31, r254, 80a, 56h + JAL r31, r0, :decide + LI64 r4, 0d + ADDI64 r32, r254, 72d + ANDI r1, r1, 255d + JNE r1, r0, :0 + CP r33, r4 + JMP :1 + 0: CP r33, r32 + 1: JNE r33, r4, :2 + LI64 r1, 9001d + JMP :3 + 2: JAL r31, r0, :decide + LI8 r34, 0b + LI8 r35, 1b + ANDI r1, r1, 255d + JNE r1, r0, :4 + ST r35, r254, 56a, 1h + LD r9, r33, 0a, 8h + ST r9, r254, 64a, 8h + JMP :5 + 4: ST r34, r254, 56a, 1h + 5: LD r6, r254, 56a, 1h + ANDI r6, r6, 255d + ANDI r34, r34, 255d + JEQ r6, r34, :6 + LI64 r1, 42d + JMP :3 + 6: JAL r31, r0, :decide + LI32 r2, 0w + ANDI r1, r1, 255d + JNE r1, r0, :7 + CP r36, r2 + JMP :8 + 7: LI32 r36, 8388609w + 8: ANDI r36, r36, 4294967295d + ANDI r2, r2, 4294967295d + JNE r36, r2, :9 + LI64 r1, 69d + JMP :3 + 9: JAL r31, r0, :decide + LI64 r3, 0d + LI64 r37, 1d + ANDI r1, r1, 255d + JNE r1, r0, :10 + ST r3, r254, 16a, 8h + JMP :11 +10: ST r32, r254, 16a, 8h + ST r37, r254, 24a, 8h + ST r37, r254, 72a, 8h +11: LD r2, r254, 16a, 8h + JNE r2, r3, :12 + LI64 r1, 34d + JMP :3 +12: JAL r31, r0, :decide + ADDI64 r10, r254, 32d + ANDI r1, r1, 255d + JNE r1, r0, :13 + ADDI64 r11, r254, 0d + ST r32, r254, 0a, 8h + ST r37, r254, 8a, 8h + ST r35, r254, 32a, 1h + ADDI64 r12, r10, 8d + BMC r11, r12, 16h + JMP :14 +13: ST r34, r254, 32a, 1h +14: LD r11, r254, 32a, 1h + ANDI r11, r11, 255d + ANDI r34, r34, 255d + JEQ r11, r34, :15 + LI64 r1, 420d + JMP :3 +15: LD r5, r254, 16a, 8h + LD r7, r5, 0a, 8h + ANDI r9, r36, 65535d + SUB64 r1, r9, r7 + 3: LD r31, r254, 80a, 56h + ADDI64 r254, r254, 136d + JALA r0, r31, 0a +code size: 729 +ret: 0 +status: Ok(()) diff --git a/lang/tests/son_tests_only_break_loop.txt b/lang/tests/son_tests_only_break_loop.txt new file mode 100644 index 0000000..93b55da --- /dev/null +++ b/lang/tests/son_tests_only_break_loop.txt @@ -0,0 +1,30 @@ +inb: + CP r1, r2 + JALA r0, r31, 0a +main: + ADDI64 r254, r254, -24d + ST r31, r254, 0a, 24h + LI64 r32, 0d + LI64 r33, 100d + 4: CP r2, r33 + JAL r31, r0, :inb + ANDI r7, r1, 2d + JNE r7, r32, :0 + LI64 r2, 96d + CP r3, r32 + JAL r31, r0, :outb + 3: CP r2, r33 + JAL r31, r0, :inb + JEQ r1, r32, :1 + LI64 r1, 1d + JMP :2 + 1: JMP :3 + 0: JMP :4 + 2: LD r31, r254, 
0a, 24h + ADDI64 r254, r254, 24d + JALA r0, r31, 0a +outb: + JALA r0, r31, 0a +code size: 198 +ret: 1 +status: Ok(()) diff --git a/lang/tests/son_tests_overwrite_aliasing_overoptimization.txt b/lang/tests/son_tests_overwrite_aliasing_overoptimization.txt index 84be8f7..72aae6b 100644 --- a/lang/tests/son_tests_overwrite_aliasing_overoptimization.txt +++ b/lang/tests/son_tests_overwrite_aliasing_overoptimization.txt @@ -1,25 +1,23 @@ main: - ADDI64 r254, r254, -72d - ST r31, r254, 56a, 16h - ADDI64 r32, r254, 0d + ADDI64 r254, r254, -48d + ST r31, r254, 40a, 8h + LI64 r4, 4d + ADDI64 r3, r254, 24d + ADDI64 r6, r254, 0d + ST r4, r254, 24a, 8h + LI64 r5, 1d + ST r5, r254, 32a, 8h + ST r5, r254, 16a, 8h + BMC r3, r6, 16h JAL r31, r0, :opaque ST r1, r254, 0a, 16h - LI64 r6, 4d - ADDI64 r5, r254, 40d - ADDI64 r8, r254, 16d - ST r6, r254, 40a, 8h - LI64 r7, 1d - ST r7, r254, 48a, 8h - ST r7, r254, 32a, 8h - BMC r5, r8, 16h - BMC r32, r8, 16h - LD r7, r254, 24a, 8h - LD r9, r254, 32a, 8h - ADD64 r11, r9, r7 - LD r9, r254, 16a, 8h - SUB64 r1, r9, r11 - LD r31, r254, 56a, 16h - ADDI64 r254, r254, 72d + LD r4, r254, 8a, 8h + LD r6, r254, 16a, 8h + ADD64 r8, r6, r4 + LD r6, r254, 0a, 8h + SUB64 r1, r6, r8 + LD r31, r254, 40a, 8h + ADDI64 r254, r254, 48d JALA r0, r31, 0a opaque: ADDI64 r254, r254, -16d @@ -31,6 +29,6 @@ opaque: LD r1, r2, 0a, 16h ADDI64 r254, r254, 16d JALA r0, r31, 0a -code size: 339 +code size: 323 ret: 0 status: Ok(()) diff --git a/lang/tests/son_tests_returning_global_struct.txt b/lang/tests/son_tests_returning_global_struct.txt index 485cc96..6f3bca0 100644 --- a/lang/tests/son_tests_returning_global_struct.txt +++ b/lang/tests/son_tests_returning_global_struct.txt @@ -1,29 +1,27 @@ main: - ADDI64 r254, r254, -24d - ST r31, r254, 8a, 16h - ADDI64 r32, r254, 4d + ADDI64 r254, r254, -12d + ST r31, r254, 4a, 8h + ADDI64 r2, r254, 0d JAL r31, r0, :random_color - ST r1, r254, 4a, 4h - ADDI64 r5, r254, 0d - BMC r32, r5, 4h - LD r8, r254, 0a, 1h - LD r11, r254, 1a, 1h - LD r3, r254, 2a, 1h - ANDI r12, r8, 255d - ANDI r4, r11, 255d - LD r9, r254, 3a, 1h - ANDI r8, r3, 255d - ADD64 r7, r4, r12 - ANDI r1, r9, 255d - ADD64 r12, r7, r8 - ADD64 r1, r12, r1 - LD r31, r254, 8a, 16h - ADDI64 r254, r254, 24d + ST r1, r254, 0a, 4h + LD r5, r254, 0a, 1h + LD r8, r254, 1a, 1h + LD r12, r254, 2a, 1h + ANDI r9, r5, 255d + ANDI r1, r8, 255d + LD r6, r254, 3a, 1h + ANDI r5, r12, 255d + ADD64 r4, r1, r9 + ANDI r10, r6, 255d + ADD64 r9, r4, r5 + ADD64 r1, r9, r10 + LD r31, r254, 4a, 8h + ADDI64 r254, r254, 12d JALA r0, r31, 0a random_color: LRA r1, r0, :white LD r1, r1, 0a, 4h JALA r0, r31, 0a -code size: 257 +code size: 241 ret: 1020 status: Ok(()) diff --git a/lang/tests/son_tests_small_struct_assignment.txt b/lang/tests/son_tests_small_struct_assignment.txt index dc49aef..27ddda1 100644 --- a/lang/tests/son_tests_small_struct_assignment.txt +++ b/lang/tests/son_tests_small_struct_assignment.txt @@ -1,14 +1,12 @@ main: ADDI64 r254, r254, -4d - LRA r1, r0, :black + LRA r2, r0, :white ADDI64 r3, r254, 0d - LRA r5, r0, :white - BMC r1, r3, 4h - BMC r5, r3, 4h - LD r9, r254, 3a, 1h - ANDI r1, r9, 255d + BMC r2, r3, 4h + LD r6, r254, 3a, 1h + ANDI r1, r6, 255d ADDI64 r254, r254, 4d JALA r0, r31, 0a -code size: 108 +code size: 92 ret: 255 status: Ok(()) diff --git a/lang/tests/son_tests_string_flip.txt b/lang/tests/son_tests_string_flip.txt index 596dfc3..49787bd 100644 --- a/lang/tests/son_tests_string_flip.txt +++ b/lang/tests/son_tests_string_flip.txt @@ -8,8 +8,8 @@ main: 6: JNE r8, r7, :0 LI64 r7, 2d CP 
r8, r4 - 4: JNE r8, r6, :1 - LD r1, r254, 0a, 8h + 4: LD r1, r254, 0a, 8h + JNE r8, r6, :1 JMP :2 1: MUL64 r10, r8, r7 ADD64 r8, r8, r6 @@ -19,14 +19,14 @@ main: 5: JNE r2, r7, :3 JMP :4 3: ADD64 r1, r2, r6 - ADD64 r11, r10, r2 - ADD64 r12, r9, r2 - MULI64 r2, r11, 8d + ADD64 r11, r9, r2 + MULI64 r3, r11, 8d + ADD64 r12, r10, r2 + ADD64 r11, r5, r3 MULI64 r12, r12, 8d - ADD64 r11, r5, r2 ADD64 r12, r5, r12 - BMC r12, r11, 8h BMC r11, r12, 8h + BMC r12, r11, 8h CP r2, r1 JMP :5 0: ADD64 r11, r8, r6 diff --git a/lang/tests/son_tests_struct_operators.txt b/lang/tests/son_tests_struct_operators.txt index 37995cd..1ba0a4b 100644 --- a/lang/tests/son_tests_struct_operators.txt +++ b/lang/tests/son_tests_struct_operators.txt @@ -1,37 +1,6 @@ main: - ADDI64 r254, r254, -64d - LI64 r3, 1d - ADDI64 r2, r254, 32d - ST r3, r254, 32a, 8h - LI64 r6, 2d - ST r6, r254, 40a, 8h - LI64 r6, -3d - ADDI64 r5, r254, 0d - ADDI64 r11, r254, 48d - ST r6, r254, 0a, 8h - LI64 r6, -4d - BMC r2, r11, 16h - ST r6, r254, 8a, 8h - ADDI64 r3, r5, 16d - LD r9, r254, 56a, 8h - LI64 r8, 4d - LD r10, r254, 48a, 8h - LI64 r11, 3d - BMC r2, r3, 16h - SUB64 r4, r8, r9 - LD r12, r254, 24a, 8h - ADD64 r7, r10, r11 - LD r1, r254, 0a, 8h - SUB64 r8, r11, r10 - LD r2, r254, 16a, 8h - ADD64 r6, r12, r4 - ADD64 r3, r1, r7 - ADD64 r10, r2, r8 - ADD64 r12, r9, r6 - ADD64 r9, r10, r3 - ADD64 r1, r9, r12 - ADDI64 r254, r254, 64d + LI64 r1, 10d JALA r0, r31, 0a -code size: 308 +code size: 29 ret: 10 status: Ok(()) diff --git a/lang/tests/son_tests_structs.txt b/lang/tests/son_tests_structs.txt index 2c7edad..baa5202 100644 --- a/lang/tests/son_tests_structs.txt +++ b/lang/tests/son_tests_structs.txt @@ -1,24 +1,23 @@ main: - ADDI64 r254, r254, -72d - ST r31, r254, 48a, 24h + ADDI64 r254, r254, -56d + ST r31, r254, 32a, 24h LI64 r3, 4d - ADDI64 r2, r254, 32d - ST r3, r254, 32a, 8h + ADDI64 r2, r254, 16d + ST r3, r254, 16a, 8h LI64 r32, 3d - ST r32, r254, 40a, 8h - ADDI64 r33, r254, 16d + ST r32, r254, 24a, 8h + ADDI64 r33, r254, 0d LD r3, r2, 0a, 16h JAL r31, r0, :odher_pass - ST r1, r254, 16a, 16h - ADDI64 r2, r254, 0d - BMC r33, r2, 16h - LD r4, r254, 8a, 8h - JNE r4, r32, :0 + ST r1, r254, 0a, 16h + LD r2, r254, 8a, 8h + JNE r2, r32, :0 + CP r2, r33 JAL r31, r0, :pass JMP :1 0: LI64 r1, 0d - 1: LD r31, r254, 48a, 24h - ADDI64 r254, r254, 72d + 1: LD r31, r254, 32a, 24h + ADDI64 r254, r254, 56d JALA r0, r31, 0a odher_pass: ADDI64 r254, r254, -16d @@ -30,6 +29,6 @@ odher_pass: pass: LD r1, r2, 0a, 8h JALA r0, r31, 0a -code size: 318 +code size: 305 ret: 4 status: Ok(()) diff --git a/lang/tests/son_tests_tests_ptr_to_ptr_copy.txt b/lang/tests/son_tests_tests_ptr_to_ptr_copy.txt index 08f8362..7c29a58 100644 --- a/lang/tests/son_tests_tests_ptr_to_ptr_copy.txt +++ b/lang/tests/son_tests_tests_ptr_to_ptr_copy.txt @@ -8,9 +8,9 @@ main: 4: JLTU r9, r8, :0 LI64 r4, 10d CP r7, r6 - 3: JLTU r7, r4, :1 - LD r10, r254, 2048a, 1h - ANDI r1, r10, 255d + 3: LD r9, r254, 2048a, 1h + JLTU r7, r4, :1 + ANDI r1, r9, 255d JMP :2 1: ADD64 r12, r7, r6 MULI64 r1, r7, 1024d diff --git a/lang/tests/son_tests_wide_ret.txt b/lang/tests/son_tests_wide_ret.txt index 05632f1..baac64a 100644 --- a/lang/tests/son_tests_wide_ret.txt +++ b/lang/tests/son_tests_wide_ret.txt @@ -1,45 +1,47 @@ main: - ADDI64 r254, r254, -48d - ST r31, r254, 32a, 16h - ADDI64 r32, r254, 16d + ADDI64 r254, r254, -24d + ST r31, r254, 16a, 8h + ADDI64 r3, r254, 0d LI64 r4, 0d CP r3, r4 JAL r31, r0, :maina - ST r1, r254, 16a, 16h - ADDI64 r7, r254, 0d - BMC r32, r7, 16h - LD r11, r254, 12a, 1h - 
LD r12, r254, 3a, 1h - SUB8 r2, r12, r11 - ANDI r1, r2, 255d - LD r31, r254, 32a, 16h - ADDI64 r254, r254, 48d + ST r1, r254, 0a, 16h + LD r8, r254, 12a, 1h + LD r9, r254, 3a, 1h + SUB8 r11, r9, r8 + ANDI r1, r11, 255d + LD r31, r254, 16a, 8h + ADDI64 r254, r254, 24d JALA r0, r31, 0a maina: - ADDI64 r254, r254, -36d - ST r31, r254, 28a, 8h - ADDI64 r5, r254, 24d + ADDI64 r254, r254, -28d + ST r31, r254, 20a, 8h + ADDI64 r5, r254, 16d JAL r31, r0, :small_struct - ST r1, r254, 24a, 4h - LI8 r11, 0b - ADDI64 r10, r254, 0d - ST r11, r254, 0a, 1h - ST r11, r254, 1a, 1h - ST r11, r254, 2a, 1h - LI8 r4, 3b - ST r4, r254, 3a, 1h - LI8 r7, 1b - ST r7, r254, 4a, 1h - ST r11, r254, 5a, 1h - ST r11, r254, 6a, 1h - ST r11, r254, 7a, 1h - ADDI64 r1, r254, 8d - BMC r10, r1, 8h - ADDI64 r4, r1, 8d - BMC r10, r4, 8h + ST r1, r254, 16a, 4h + LI8 r9, 0b + ADDI64 r1, r254, 0d + ST r9, r254, 0a, 1h + ST r9, r254, 1a, 1h + ST r9, r254, 2a, 1h + LI8 r3, 3b + ST r3, r254, 3a, 1h + LI8 r6, 1b + ST r6, r254, 4a, 1h + ST r9, r254, 5a, 1h + ST r9, r254, 6a, 1h + ST r9, r254, 7a, 1h + ST r9, r254, 8a, 1h + ST r9, r254, 9a, 1h + ST r9, r254, 10a, 1h + ST r3, r254, 11a, 1h + ST r6, r254, 12a, 1h + ST r9, r254, 13a, 1h + ST r9, r254, 14a, 1h + ST r9, r254, 15a, 1h LD r1, r1, 0a, 16h - LD r31, r254, 28a, 8h - ADDI64 r254, r254, 36d + LD r31, r254, 20a, 8h + ADDI64 r254, r254, 28d JALA r0, r31, 0a small_struct: ADDI64 r254, r254, -4d @@ -50,6 +52,6 @@ small_struct: LD r1, r3, 0a, 4h ADDI64 r254, r254, 4d JALA r0, r31, 0a -code size: 514 +code size: 570 ret: 2 status: Ok(())
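
Reviewer note on the nullable-type codegen in `son.rs`: the patch distinguishes optionals that have a pointer-style `null` niche from ones that need an explicit flag byte next to the payload. The standalone Rust sketch below is only an approximation of that layout rule; the `OptLayout`-like struct, the `opt_layout` helper, and the exact offsets are illustrative assumptions, not the compiler's actual implementation.

```rust
// Hypothetical sketch: how a `?T` layout could be chosen.
// Pointer-like payloads reuse the all-zero bit pattern as `null`;
// everything else gets a one-byte flag plus the payload at its alignment.
#[derive(Debug, PartialEq)]
struct OptLayout {
    flag_size: usize,      // 0 when a niche is used
    payload_offset: usize, // where the wrapped value starts
    total_size: usize,
}

fn opt_layout(payload_size: usize, payload_align: usize, has_null_niche: bool) -> OptLayout {
    if has_null_niche {
        // e.g. `?^uint`: the all-zero pattern is `null`, no extra flag byte.
        return OptLayout { flag_size: 0, payload_offset: 0, total_size: payload_size };
    }
    // Otherwise prepend a one-byte flag and place the payload at its alignment.
    let flag_size = 1;
    let payload_offset = flag_size.next_multiple_of(payload_align.max(1));
    OptLayout { flag_size, payload_offset, total_size: payload_offset + payload_size }
}

fn main() {
    // `?^uint` on a 64-bit target: niche, same size as the bare pointer.
    assert_eq!(opt_layout(8, 8, true).total_size, 8);
    // `?u16`: one flag byte, payload aligned to 2, four bytes total.
    let l = opt_layout(2, 2, false);
    assert_eq!((l.payload_offset, l.total_size), (2, 4));
    println!("{l:?}");
}
```

This matches the two code paths in `gen_null_check`: register-located optionals compare the whole value against zero, while stack-located ones load the flag at `flag_offset`.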
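
The `assert_ty` changes add two coercion directions: a plain `T` flowing into a `?T` slot is wrapped with `wrap_in_opt`, and a `?T` flowing into a `T` slot goes through `unwrap_opt`, which only succeeds when the null check is provable. A toy type-checker fragment showing just that decision; the `Ty`/`Coercion` enums are made up for the example and do not mirror the compiler's real types.

```rust
// Toy version of the two optional coercion paths:
// T -> ?T wraps, ?T -> T requires a (proven or asserted) null check.
#[derive(Debug, PartialEq)]
enum Ty {
    Uint,
    Opt(Box<Ty>),
}

#[derive(Debug, PartialEq)]
enum Coercion {
    None,
    Wrap,          // T -> ?T
    UnwrapChecked, // ?T -> T, only after the value is known non-null
}

fn coerce(found: &Ty, expected: &Ty) -> Option<Coercion> {
    if found == expected {
        return Some(Coercion::None);
    }
    match (found, expected) {
        (f, Ty::Opt(inner)) if f == inner.as_ref() => Some(Coercion::Wrap),
        (Ty::Opt(inner), e) if inner.as_ref() == e => Some(Coercion::UnwrapChecked),
        _ => None,
    }
}

fn main() {
    let opt_uint = Ty::Opt(Box::new(Ty::Uint));
    assert_eq!(coerce(&Ty::Uint, &opt_uint), Some(Coercion::Wrap));
    assert_eq!(coerce(&opt_uint, &Ty::Uint), Some(Coercion::UnwrapChecked));
    assert_eq!(coerce(&Ty::Uint, &Ty::Uint), Some(Coercion::None));
}
```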
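
The new `BitSet` in `utils.rs` keeps small sets in a single tagged `usize` and only heap-allocates once an index no longer fits inline. A safe, simplified sketch of that idea, using an enum in place of the real union-plus-raw-allocation representation, so the constants and method shapes here are approximations rather than the actual API.

```rust
// Inline-or-heap bit set: small index sets live in one machine word,
// larger ones spill to a Vec of words. The real type keeps a tag bit in
// the inline word; the enum discriminant plays that role here.
const UNIT: usize = usize::BITS as usize;
const INLINE_BITS: usize = UNIT - 1; // the real type reserves one bit for its tag

enum BitSet {
    Inline(usize),
    Heap(Vec<usize>),
}

impl BitSet {
    fn new() -> Self {
        BitSet::Inline(0)
    }

    fn set(&mut self, index: usize) {
        // Spill to the heap first if the index does not fit inline.
        if let BitSet::Inline(word) = *self {
            if index >= INLINE_BITS {
                let mut data = vec![word];
                data.resize(index / UNIT + 1, 0);
                *self = BitSet::Heap(data);
            }
        }
        match self {
            BitSet::Inline(word) => *word |= 1 << index,
            BitSet::Heap(data) => {
                if index / UNIT >= data.len() {
                    data.resize(index / UNIT + 1, 0);
                }
                data[index / UNIT] |= 1 << (index % UNIT);
            }
        }
    }

    fn get(&self, index: usize) -> bool {
        match self {
            BitSet::Inline(word) => index < INLINE_BITS && *word & (1 << index) != 0,
            BitSet::Heap(data) => data
                .get(index / UNIT)
                .is_some_and(|&w| w & (1 << (index % UNIT)) != 0),
        }
    }
}

fn main() {
    let mut s = BitSet::new();
    s.set(10);
    s.set(10_000); // forces the heap representation
    assert!(s.get(10) && s.get(10_000) && !s.get(11));
}
```

The payoff is the same as in the patch: per-node liveness sets stay allocation-free until a function is large enough to actually need the heap.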
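
`Vc`, moved from the deleted `vc.rs` into `utils.rs`, is a small vector of node ids: up to `VC_SIZE / 2 - 1` elements live inline in the 16-byte value, and the list spills to the heap on overflow. Below is a safe enum-based approximation of the same behaviour; the real type is a packed union, so the field names and capacities here are illustrative.

```rust
// Small vector of node ids: short lists stay inline, long ones spill to a Vec.
type Nid = u16;
const INLINE_CAP: usize = 7; // mirrors VC_SIZE / 2 - 1 with VC_SIZE = 16

enum Vc {
    Inline { len: u8, elems: [Nid; INLINE_CAP] },
    Heap(Vec<Nid>),
}

impl Vc {
    fn new() -> Self {
        Vc::Inline { len: 0, elems: [0; INLINE_CAP] }
    }

    fn push(&mut self, value: Nid) {
        // Spill the inline elements into a Vec once the inline capacity is full.
        if let Vc::Inline { len, elems } = *self {
            if len as usize == INLINE_CAP {
                *self = Vc::Heap(elems.to_vec());
            }
        }
        match self {
            Vc::Inline { len, elems } => {
                elems[*len as usize] = value;
                *len += 1;
            }
            Vc::Heap(v) => v.push(value),
        }
    }

    fn as_slice(&self) -> &[Nid] {
        match self {
            Vc::Inline { len, elems } => &elems[..*len as usize],
            Vc::Heap(v) => v,
        }
    }
}

fn main() {
    let mut vc = Vc::new();
    for i in 0..10u16 {
        vc.push(i);
    }
    assert_eq!(vc.as_slice(), (0..10u16).collect::<Vec<_>>().as_slice());
}
```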
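
Finally, the `die` lowering in `hbvm.rs`: `Kind::Die` emits the `un` trap instruction, and a function whose signature returns `never` skips the trailing `jala` return jump (which is why the `son_tests_die.txt` expectation is a lone `UN` with `status: Err(Unreachable)`). A toy epilogue emitter illustrating only that control-flow decision; the instruction names follow the diff, everything else is invented for the example.

```rust
// Toy epilogue emitter: `die` ends a block with the `un` trap, and a
// `never`-returning function emits no return jump at all.
#[derive(Debug, PartialEq, Clone, Copy)]
enum Instr {
    Un,   // unreachable trap, what `die` lowers to
    Jala, // jump-and-link: the normal return to the caller
}

#[derive(PartialEq, Clone, Copy)]
enum RetTy {
    Never,
    Value,
}

fn emit_epilogue(ret: RetTy, ends_with_die: bool) -> Vec<Instr> {
    let mut code = Vec::new();
    if ends_with_die {
        code.push(Instr::Un); // execution never falls through this point
    }
    if ret != RetTy::Never {
        code.push(Instr::Jala); // only reachable epilogues return to the caller
    }
    code
}

fn main() {
    assert_eq!(emit_epilogue(RetTy::Never, true), [Instr::Un]);
    assert_eq!(emit_epilogue(RetTy::Value, false), [Instr::Jala]);
}
```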