Removed some code -- backtrack a bit to focus on basics (IR, frontend SSA construction)
This commit is contained in:
parent 4733efe3a3
commit 09dd367a12
@@ -1,18 +0,0 @@
//! Liveness analysis.

use crate::{backward_pass, dataflow_use_def};
use crate::{ir::*, pass::*};

backward_pass!(
    Liveness,
    UnionBitSet,
    dataflow_use_def!(
        UnionBitSet,
        use: |u, lattice| {
            lattice.add(u.index() as usize);
        },
        def: |d, lattice| {
            lattice.remove(d.index() as usize);
        }
    )
);
@@ -1,4 +0,0 @@
//! Analyses.

pub mod liveness;
use liveness::*;
@@ -1,50 +0,0 @@
//! Decide locations for each Value.

use crate::cfg::CFGInfo;
use crate::ir::*;
use fxhash::FxHashMap;
use wasmparser::Type;

/// Pass to compute reference counts for every value.
struct UseCounts {
    counts: FxHashMap<Value, usize>,
}

impl UseCounts {
    fn new(f: &FunctionBody) -> UseCounts {
        let mut counts = FxHashMap::default();
        for block in &f.blocks {
            block.visit_uses(|value| {
                *counts.entry(value).or_insert(0) += 1;
            });
        }
        UseCounts { counts }
    }

    fn count(&self, value: Value) -> usize {
        *self.counts.get(&value).unwrap_or(&0)
    }
}

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Location {
    // Store in a local.
    Local(usize),
    // Immediately generate at a single use-site.
    Stack,
    // No location.
    None,
}

#[derive(Clone, Debug)]
pub struct Locations {
    next_local: usize,
    extra_locals: Vec<Type>,
    locations: Vec</* Value, */ Location>,
}

impl Locations {
    fn compute(_f: &FunctionBody, _cfg: &CFGInfo) -> Self {
        todo!()
    }
}
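
Locations::compute is still a todo!() stub above. As a purely hypothetical sketch of the policy the UseCounts pass suggests (not part of this commit): a value used exactly once can simply be produced on the Wasm operand stack at its single use-site, a value with multiple uses needs a local, and an unused value needs no location at all.

// Hypothetical policy helper, for illustration only: map a value's use count
// to a Location, allocating a fresh local index for multiply-used values.
fn choose_location(use_count: usize, next_local: &mut usize) -> Location {
    match use_count {
        0 => Location::None,
        1 => Location::Stack,
        _ => {
            let loc = Location::Local(*next_local);
            *next_local += 1;
            loc
        }
    }
}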
@@ -2,6 +2,3 @@

mod stackify;
pub(crate) use stackify::*;

mod location;
pub(crate) use location::*;
@@ -2,23 +2,6 @@

#![allow(dead_code)]

/*

- TODO: better local-variable handling:
  - pre-pass to scan for locations of definitions of all locals. for
    each frame, record set of vars that are def'd.
  - during main pass:
    - for an if/else, add blockparams to join block for all vars def'd
      in either side.
    - for a block, add blockparams to out-block for all vars def'd in
      body of block.
    - for a loop, add blockparams to header block for all vars def'd
      in body.
    - when generating a branch to any block, just emit current values
      for every local in blockparams.

*/

use crate::ir::*;
use crate::op_traits::{op_inputs, op_outputs};
use anyhow::{bail, Result};
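
A rough, hypothetical sketch of the join-block rule from the TODO above (names and types are illustrative, not part of this crate): the join block of an if/else gets one blockparam per local defined on either side, and the same union idea applies to a block's out-block and a loop's header.

use std::collections::HashSet;

// Illustrative helper only: compute which locals need blockparams at the
// join point of an if/else, given the locals defined on each side.
fn join_block_locals(defd_in_then: &HashSet<u32>, defd_in_else: &HashSet<u32>) -> Vec<u32> {
    let mut locals: Vec<u32> = defd_in_then.union(defd_in_else).copied().collect();
    locals.sort_unstable(); // keep the blockparam order deterministic
    locals
}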
@@ -7,12 +7,10 @@
pub use wasm_encoder;
pub use wasmparser;

mod analysis;
mod backend;
mod cfg;
mod frontend;
mod ir;
mod op_traits;
mod pass;

pub use ir::*;
@@ -1,306 +0,0 @@
//! Iterative dataflow analysis (forward and backward) using lattice
//! analysis values.

use crate::cfg::CFGInfo;
use crate::ir::*;
use crate::pass::Lattice;
use fxhash::{FxHashMap, FxHashSet};
use std::collections::hash_map::Entry as HashEntry;
use std::marker::PhantomData;
use std::{collections::VecDeque, default::Default};
use wasmparser::Type;

impl FunctionBody {
    fn insts(&self) -> impl Iterator<Item = &Inst> {
        self.blocks.iter().map(|block| block.insts.iter()).flatten()
    }
}

pub trait DataflowFunctions {
    type L: Lattice;

    fn start_block(&self, _lattice: &mut Self::L, _block: BlockId, _param_types: &[Type]) {}
    fn end_block(
        &self,
        _lattice: &mut Self::L,
        _block: BlockId,
        _next: BlockId,
        _terminator: &Terminator,
    ) {
    }
    fn instruction(&self, _lattice: &mut Self::L, _block: BlockId, _instid: InstId, _inst: &Inst) {}
}

pub struct DataflowFunctionsImpl<L, F1, F2, F3> {
    f1: F1,
    f2: F2,
    f3: F3,
    _phantom: PhantomData<L>,
}

impl<L, F1, F2, F3> DataflowFunctionsImpl<L, F1, F2, F3> {
    pub fn new(f1: F1, f2: F2, f3: F3) -> Self {
        Self {
            f1,
            f2,
            f3,
            _phantom: PhantomData,
        }
    }
}

impl<L, F1, F2, F3> DataflowFunctions for DataflowFunctionsImpl<L, F1, F2, F3>
where
    L: Lattice,
    F1: Fn(&mut L, BlockId, InstId, &Inst),
    F2: Fn(&mut L, BlockId, &[Type]),
    F3: Fn(&mut L, BlockId, BlockId, &Terminator),
{
    type L = L;
    fn instruction(&self, lattice: &mut L, block: BlockId, instid: InstId, inst: &Inst) {
        (self.f1)(lattice, block, instid, inst);
    }
    fn start_block(&self, lattice: &mut L, block: BlockId, params: &[Type]) {
        (self.f2)(lattice, block, params);
    }
    fn end_block(&self, lattice: &mut L, block: BlockId, next: BlockId, terminator: &Terminator) {
        (self.f3)(lattice, block, next, terminator);
    }
}

#[macro_export]
macro_rules! dataflow {
    ($latticety:ty,
     |$lattice:ident, $block:ident, $instid:ident, $inst:ident| { $($body:tt)* }) => {
        DataflowFunctionsImpl::new(|$lattice:&mut $latticety, $block, $instid, $inst| {
            $($body)*
        }, |_, _, _| {}, |_, _, _, _| {})
    };

    ($latticety:ty,
     inst: |$lattice1:ident, $block1:ident, $instid1:ident, $inst1:ident| { $($body1:tt)* },
     start_block: |$lattice2:ident, $block2:ident, $params2:ident| { $($body2:tt)* }) => {
        DataflowFunctionsImpl::new(|$lattice1:&mut $latticety, $block1, $instid1, $inst1| {
            $($body1)*
        },
        |$lattice2, $block2, $params2| {
            $($body2)*
        }, |_, _, _, _| {})
    };

    ($latticety:ty,
     inst: |$lattice1:ident, $block1:ident, $instid1:ident, $inst1:ident| { $($body1:tt)* },
     start_block: |$lattice2:ident, $block2:ident, $params2:ident| { $($body2:tt)* },
     end_block: |$lattice3:ident, $block3:ident, $next3:ident, $term3:ident| { $($body3:tt)* }) => {
        DataflowFunctionsImpl::new(|$lattice1:&mut $latticety, $block1, $instid1, $inst1:&Inst| {
            $($body1)*
        },
        |$lattice2:&mut $latticety, $block2, $params2:&[wasmparser::Type]| {
            $($body2)*
        },
        |$lattice3:&mut $latticety, $block3, $next3, $term3:&Terminator| {
            $($body3)*
        })
    };
}

#[macro_export]
macro_rules! dataflow_use_def {
    ($lattice:ty,
     use: |$use:ident, $uselattice:ident| { $($usebody:tt)* },
     def: |$def:ident, $deflattice:ident| { $($defbody:tt)* }) => {
        {
            $crate::dataflow!(
                $lattice,
                inst: |lattice, block, instid, inst| {
                    let $deflattice = lattice;
                    for output in 0..inst.n_outputs {
                        let $def = $crate::ir::Value::inst(block, instid, output);
                        $($defbody)*
                    }
                    let $uselattice = $deflattice;
                    for &input in &inst.inputs {
                        let $use = input;
                        $($usebody)*
                    }
                },
                start_block: |lattice, block, param_tys| {
                    let $deflattice = lattice;
                    for i in 0..param_tys.len() {
                        let $def = $crate::ir::Value::blockparam(block, i);
                        $($defbody)*
                    }
                },
                end_block: |lattice, _block, _next, term| {
                    let $uselattice = lattice;
                    term.visit_uses(|u| {
                        let $use = u;
                        $($usebody)*
                    });
                }
            )
        }
    }
}

#[derive(Clone, Debug)]
pub struct ForwardDataflow<L: Lattice> {
    block_in: FxHashMap<BlockId, L>,
}

impl<L: Lattice> ForwardDataflow<L> {
    pub fn new<D: DataflowFunctions<L = L>>(f: &FunctionBody, d: &D) -> Self {
        let mut analysis = Self {
            block_in: FxHashMap::default(),
        };
        analysis.compute(f, d);
        analysis
    }

    fn compute<D: DataflowFunctions<L = L>>(&mut self, f: &FunctionBody, d: &D) {
        let mut workqueue = VecDeque::new();
        let mut workqueue_set = FxHashSet::default();

        workqueue.push_back(0);
        workqueue_set.insert(0);
        while let Some(block) = workqueue.pop_front() {
            workqueue_set.remove(&block);

            let mut value = self
                .block_in
                .entry(block)
                .or_insert_with(|| D::L::top())
                .clone();

            d.start_block(&mut value, block, &f.blocks[block].params[..]);

            for (instid, inst) in f.blocks[block].insts.iter().enumerate() {
                d.instruction(&mut value, block, instid, inst);
            }

            let succs = f.blocks[block].terminator.successors();
            for (i, &succ) in succs.iter().enumerate() {
                let mut value = if i + 1 < succs.len() {
                    value.clone()
                } else {
                    std::mem::replace(&mut value, D::L::top())
                };

                d.end_block(&mut value, block, succ, &f.blocks[block].terminator);

                let (succ_in, mut changed) = match self.block_in.entry(succ) {
                    HashEntry::Vacant(v) => (v.insert(D::L::top()), true),
                    HashEntry::Occupied(o) => (o.into_mut(), false),
                };
                changed |= succ_in.meet_with(&value);

                if changed && !workqueue_set.contains(&succ) {
                    workqueue.push_back(succ);
                    workqueue_set.insert(succ);
                }
            }
        }
    }
}

#[derive(Clone, Debug)]
pub struct BackwardDataflow<L: Lattice> {
    block_out: FxHashMap<BlockId, L>,
}

impl<L: Lattice> BackwardDataflow<L> {
    pub fn new<D: DataflowFunctions<L = L>>(f: &FunctionBody, cfginfo: &CFGInfo, d: &D) -> Self {
        let mut analysis = Self {
            block_out: FxHashMap::default(),
        };
        analysis.compute(f, cfginfo, d);
        analysis
    }

    fn compute<D: DataflowFunctions<L = L>>(&mut self, f: &FunctionBody, cfginfo: &CFGInfo, d: &D) {
        let mut workqueue = VecDeque::new();
        let mut workqueue_set = FxHashSet::default();

        let returns = f
            .blocks
            .iter()
            .enumerate()
            .filter(|(_, block)| matches!(&block.terminator, &Terminator::Return { .. }))
            .map(|(id, _)| id)
            .collect::<Vec<BlockId>>();

        for ret in returns {
            workqueue.push_back(ret);
            workqueue_set.insert(ret);
        }

        while let Some(block) = workqueue.pop_front() {
            workqueue_set.remove(&block);

            let mut value = self
                .block_out
                .entry(block)
                .or_insert_with(|| D::L::top())
                .clone();

            // Visit instructions in reverse order, but keep their original
            // indices so that `instid` still names the right instruction.
            for (instid, inst) in f.blocks[block].insts.iter().enumerate().rev() {
                d.instruction(&mut value, block, instid, inst);
            }

            d.start_block(&mut value, block, &f.blocks[block].params[..]);

            let preds = &cfginfo.block_preds[block];
            for (i, pred) in preds.iter().cloned().enumerate() {
                let mut value = if i + 1 < preds.len() {
                    value.clone()
                } else {
                    std::mem::replace(&mut value, D::L::top())
                };

                d.end_block(&mut value, pred, block, &f.blocks[pred].terminator);

                let (pred_out, mut changed) = match self.block_out.entry(pred) {
                    HashEntry::Vacant(v) => (v.insert(L::top()), true),
                    HashEntry::Occupied(o) => (o.into_mut(), false),
                };
                changed |= pred_out.meet_with(&value);

                if changed && !workqueue_set.contains(&pred) {
                    workqueue.push_back(pred);
                    workqueue_set.insert(pred);
                }
            }
        }
    }
}

#[macro_export]
macro_rules! forward_pass {
    ($name:ident, $lattice:ident, $($dataflow:tt)*) => {
        #[derive(Clone, Debug)]
        pub struct $name($crate::pass::ForwardDataflow<$lattice>);

        impl $name {
            pub fn compute(f: &$crate::ir::FunctionBody) -> $name {
                let dataflow = $($dataflow)*;
                let results = $crate::pass::ForwardDataflow::new(f, &dataflow);
                Self(results)
            }
        }
    };
}

#[macro_export]
macro_rules! backward_pass {
    ($name:ident, $lattice:ident, $($dataflow:tt)*) => {
        #[derive(Clone, Debug)]
        pub struct $name($crate::pass::BackwardDataflow<$lattice>);

        impl $name {
            pub fn compute(f: &$crate::ir::FunctionBody, c: &$crate::cfg::CFGInfo) -> $name {
                let dataflow = $($dataflow)*;
                let results = $crate::pass::BackwardDataflow::new(f, c, &dataflow);
                Self(results)
            }
        }
    };
}
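
For comparison with the removed liveness.rs at the top of this diff, a forward pass is declared the same way. The following is a hypothetical example (not present in this commit) sketching a forward "may be defined" analysis built from the same macros:

forward_pass!(
    MaybeDefined,
    UnionBitSet,
    dataflow_use_def!(
        UnionBitSet,
        use: |_u, _lattice| {},
        def: |d, lattice| {
            lattice.add(d.index() as usize);
        }
    )
);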
@@ -1,139 +0,0 @@
//! Lattice trait definition and some common implementations.

use crate::ir::*;
use regalloc2::indexset::IndexSet;
use std::fmt::Debug;

/// A lattice type used for an analysis.
///
/// The `meet` operator must compute the greatest lower bound for its
/// operands (that is, its result must be "less than or equal to" its
/// operands, according to the lattice's partial order, and must be
/// the greatest value that satisfies this condition). It must obey
/// the usual lattice laws:
///
/// * a `meet` a == a (idempotence)
/// * a `meet` b == b `meet` a (commutativity)
/// * a `meet` (b `meet` c) == (a `meet` b) `meet` c (associativity)
/// * a `meet` top == a
/// * a `meet` bottom == bottom
///
/// Note that while we require that the lattice is a consistent
/// partial order, we don't actually require the user to implement
/// `PartialOrd` on the type, because we never make direct ordering
/// comparisons when we perform a dataflow analysis. Instead the
/// ordering is only implicitly depended upon, in order to ensure that
/// the analysis terminates. For this to be true, we also require that
/// the lattice has only a finite chain length -- that is, there must
/// not be an infinite ordered sequence in the lattice (or, moving to
/// "lesser" values will always reach bottom in finite steps).
pub trait Lattice: Clone + Debug {
    /// Return the `top` lattice value.
    fn top() -> Self;
    /// Return the `bottom` lattice value.
    fn bottom() -> Self;
    /// Mutate self to `meet(self, other)`. Returns `true` if any
    /// changes occurred.
    fn meet_with(&mut self, other: &Self) -> bool;
}
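
To make the laws above concrete, here is a minimal hypothetical lattice (not part of this commit): a two-element chain in which `meet` only ever moves a value from `top` toward `bottom`, so every chain of meets terminates.

#[derive(Clone, Debug, PartialEq, Eq)]
enum TwoPoint {
    Top,    // "no information yet"
    Bottom, // "fully saturated"
}

impl Lattice for TwoPoint {
    fn top() -> Self {
        TwoPoint::Top
    }
    fn bottom() -> Self {
        TwoPoint::Bottom
    }
    fn meet_with(&mut self, other: &Self) -> bool {
        // a meet top == a; a meet bottom == bottom.
        if *self == TwoPoint::Top && *other == TwoPoint::Bottom {
            *self = TwoPoint::Bottom;
            true // the value changed
        } else {
            false
        }
    }
}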

/// An analysis-value lattice whose values are sets of `ValueId`
/// indices. `top` is empty and `bottom` is the universe set; the
/// `meet` function is a union. This is useful for may-analyses,
/// i.e. when an analysis computes whether a property *may* be true
/// about a value in some case.
#[derive(Clone, Debug)]
pub struct UnionBitSet {
    set: IndexSet,
    /// The set has degenerated to contain "the universe" (all
    /// possible values).
    universe: bool,
}

impl Lattice for UnionBitSet {
    fn top() -> Self {
        UnionBitSet {
            set: IndexSet::new(),
            universe: false,
        }
    }

    fn bottom() -> Self {
        UnionBitSet {
            set: IndexSet::new(),
            universe: true,
        }
    }

    fn meet_with(&mut self, other: &UnionBitSet) -> bool {
        if !self.universe && other.universe {
            self.universe = true;
            return true;
        }
        self.set.union_with(&other.set)
    }
}

impl UnionBitSet {
    pub fn contains(&self, index: usize) -> bool {
        self.universe || self.set.get(index)
    }

    pub fn add(&mut self, index: usize) {
        if !self.universe {
            self.set.set(index, true);
        }
    }

    pub fn remove(&mut self, index: usize) {
        if !self.universe {
            self.set.set(index, false);
        }
    }
}
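
A quick illustration of the may-analysis behaviour (hypothetical snippet, assuming the types above are in scope): facts flowing in from two paths are unioned, and `meet_with` reports whether anything new was learned.

// Illustrative only: merging "may be live" facts from two predecessors.
fn merge_example() {
    let mut pred_a = UnionBitSet::top(); // empty: nothing known yet
    pred_a.add(3);                       // value #3 may be live on this path
    let mut pred_b = UnionBitSet::top();
    pred_b.add(5);                       // value #5 may be live on this path
    let changed = pred_a.meet_with(&pred_b); // union: {3, 5}
    assert!(changed && pred_a.contains(3) && pred_a.contains(5));
}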

/// An analysis-value lattice whose values are sets of `ValueId`
/// indices. `top` is the universe set and `bottom` is the empty set;
/// the `meet` function is an intersection. This is useful for
/// must-analyses, i.e. when an analysis computes whether a property
/// *must* be true about a value in all cases.
#[derive(Clone, Debug)]
pub struct IntersectionBitSet {
    /// We store the dual to the actual set, i.e., elements that are
    /// *not* included.
    not_set: UnionBitSet,
}

impl Lattice for IntersectionBitSet {
    fn top() -> Self {
        // `top` here is the universe-set; the dual of this set is the
        // empty-set, which is UnionBitSet's `top()`.
        Self {
            not_set: UnionBitSet::top(),
        }
    }

    fn bottom() -> Self {
        Self {
            not_set: UnionBitSet::bottom(),
        }
    }

    fn meet_with(&mut self, other: &IntersectionBitSet) -> bool {
        self.not_set.meet_with(&other.not_set)
    }
}

impl IntersectionBitSet {
    pub fn contains(&self, index: usize) -> bool {
        !self.not_set.contains(index)
    }

    pub fn add(&mut self, index: usize) {
        self.not_set.remove(index);
    }

    pub fn remove(&mut self, index: usize) {
        self.not_set.add(index);
    }
}
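
And the dual-set trick in action (again a hypothetical snippet): the stored complement starts empty, so `top()` behaves as the universe set, and removing an element just records it in the complement.

// Illustrative only: a must-analysis set starts out containing everything.
fn dual_example() {
    let mut s = IntersectionBitSet::top(); // universe: everything assumed to hold
    assert!(s.contains(7));
    s.remove(7);                           // record that 7 does *not* must-hold
    assert!(!s.contains(7));
}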
@@ -1,11 +0,0 @@
//! Pass framework: skeletons for common kinds of passes over code.
//!
//! Terminology note: a "pass" is a readonly analysis of a function
//! body. It does not mutate code; it only traverses the code in a
//! certain order, possibly multiple times (to converge), in order to
//! compute some derived information.

pub mod dataflow;
pub use dataflow::*;
pub mod lattice;
pub use lattice::*;