diff --git a/compiler/noirc_evaluator/src/ssa/opt/constant_folding.rs b/compiler/noirc_evaluator/src/ssa/opt/constant_folding.rs
index e71e23d2032..a96a8d70e04 100644
--- a/compiler/noirc_evaluator/src/ssa/opt/constant_folding.rs
+++ b/compiler/noirc_evaluator/src/ssa/opt/constant_folding.rs
@@ -30,7 +30,7 @@ use crate::ssa::{
         dfg::{DataFlowGraph, InsertInstructionResult},
         function::Function,
         instruction::{Instruction, InstructionId},
-        value::{Value, ValueId},
+        value::ValueId,
     },
     ssa_gen::Ssa,
 };
@@ -77,7 +77,6 @@ impl Context {
 
         // Cache of instructions without any side-effects along with their outputs.
         let mut cached_instruction_results: HashMap<Instruction, Vec<ValueId>> = HashMap::default();
-        let mut constrained_values: HashMap<ValueId, ValueId> = HashMap::default();
 
         for instruction_id in instructions {
             Self::fold_constants_into_instruction(
@@ -85,7 +84,6 @@ impl Context {
                 block,
                 instruction_id,
                 &mut cached_instruction_results,
-                &mut constrained_values,
             );
         }
         self.block_queue.extend(function.dfg[block].successors());
@@ -96,9 +94,8 @@ impl Context {
         block: BasicBlockId,
         id: InstructionId,
        instruction_result_cache: &mut HashMap<Instruction, Vec<ValueId>>,
-        constrained_values: &mut HashMap<ValueId, ValueId>,
     ) {
-        let instruction = Self::resolve_instruction(id, dfg, constrained_values);
+        let instruction = Self::resolve_instruction(id, dfg);
         let old_results = dfg.instruction_results(id).to_vec();
 
         // If a copy of this instruction exists earlier in the block, then reuse the previous results.
@@ -112,42 +109,15 @@ impl Context {
 
         Self::replace_result_ids(dfg, &old_results, &new_results);
 
-        Self::cache_instruction(
-            instruction,
-            new_results,
-            dfg,
-            instruction_result_cache,
-            constrained_values,
-        );
+        Self::cache_instruction(instruction, new_results, dfg, instruction_result_cache);
     }
 
     /// Fetches an [`Instruction`] by its [`InstructionId`] and fully resolves its inputs.
-    fn resolve_instruction(
-        instruction_id: InstructionId,
-        dfg: &DataFlowGraph,
-        constrained_values: &HashMap<ValueId, ValueId>,
-    ) -> Instruction {
+    fn resolve_instruction(instruction_id: InstructionId, dfg: &DataFlowGraph) -> Instruction {
         let instruction = dfg[instruction_id].clone();
 
-        // Alternate between resolving `value_id` in the `dfg` and checking to see if the resolved value
-        // has been constrained to be equal to some simpler value in the current block.
-        //
-        // This allows us to reach a stable final `ValueId` for each instruction input as we add more
-        // constraints to the cache.
-        fn resolve_cache(
-            dfg: &DataFlowGraph,
-            cache: &HashMap<ValueId, ValueId>,
-            value_id: ValueId,
-        ) -> ValueId {
-            let resolved_id = dfg.resolve(value_id);
-            match cache.get(&resolved_id) {
-                Some(cached_value) => resolve_cache(dfg, cache, *cached_value),
-                None => resolved_id,
-            }
-        }
-
         // Resolve any inputs to ensure that we're comparing like-for-like instructions.
-        instruction.map_values(|value_id| resolve_cache(dfg, constrained_values, value_id))
+        instruction.map_values(|value_id| dfg.resolve(value_id))
     }
 
     /// Pushes a new [`Instruction`] into the [`DataFlowGraph`] which applies any optimizations
@@ -185,35 +155,7 @@ impl Context {
         instruction_results: Vec<ValueId>,
         dfg: &DataFlowGraph,
         instruction_result_cache: &mut HashMap<Instruction, Vec<ValueId>>,
-        constraint_cache: &mut HashMap<ValueId, ValueId>,
     ) {
-        // If the instruction was a constraint, then create a link between the two `ValueId`s
-        // to map from the more complex to the simpler value.
-        if let Instruction::Constrain(lhs, rhs, _) = instruction {
-            // These `ValueId`s should be fully resolved now.
-            match (&dfg[lhs], &dfg[rhs]) {
-                // Ignore trivial constraints
-                (Value::NumericConstant { .. }, Value::NumericConstant { .. }) => (),
-
-                // Prefer replacing with constants where possible.
-                (Value::NumericConstant { .. }, _) => {
-                    constraint_cache.insert(rhs, lhs);
-                }
-                (_, Value::NumericConstant { .. }) => {
-                    constraint_cache.insert(lhs, rhs);
-                }
-                // Otherwise prefer block parameters over instruction results.
-                // This is as block parameters are more likely to be a single witness rather than a full expression.
-                (Value::Param { .. }, Value::Instruction { .. }) => {
-                    constraint_cache.insert(rhs, lhs);
-                }
-                (Value::Instruction { .. }, Value::Param { .. }) => {
-                    constraint_cache.insert(lhs, rhs);
-                }
-                (_, _) => (),
-            }
-        }
-
         // If the instruction doesn't have side-effects, cache the results so we can reuse them if
         // the same instruction appears again later in the block.
         if instruction.is_pure(dfg) {
@@ -391,55 +333,4 @@ mod test {
 
         assert_eq!(instruction, &Instruction::Cast(ValueId::test_new(0), Type::unsigned(32)));
     }
-
-    #[test]
-    fn constrained_value_replacement() {
-        // fn main f0 {
-        //   b0(v0: Field):
-        //     constrain v0 == Field 10
-        //     v1 = add v0, Field 1
-        //     constrain v1 == Field 11
-        // }
-        //
-        // After constructing this IR, we run constant folding which should replace references to `v0`
-        // with the constant `10`. This then allows us to optimize away the rest of the circuit.
-
-        let main_id = Id::test_new(0);
-
-        // Compiling main
-        let mut builder = FunctionBuilder::new("main".into(), main_id, RuntimeType::Acir);
-        let v0 = builder.add_parameter(Type::field());
-
-        let field_10 = builder.field_constant(10u128);
-        builder.insert_constrain(v0, field_10, None);
-
-        let field_1 = builder.field_constant(1u128);
-        let v1 = builder.insert_binary(v0, BinaryOp::Add, field_1);
-
-        let field_11 = builder.field_constant(11u128);
-        builder.insert_constrain(v1, field_11, None);
-
-        let mut ssa = builder.finish();
-        let main = ssa.main_mut();
-        let instructions = main.dfg[main.entry_block()].instructions();
-        assert_eq!(instructions.len(), 3);
-
-        // Expected output:
-        //
-        // fn main f0 {
-        //   b0(v0: Field):
-        //     constrain v0 == Field 10
-        // }
-        let ssa = ssa.fold_constants();
-        let main = ssa.main();
-        let instructions = main.dfg[main.entry_block()].instructions();
-
-        assert_eq!(instructions.len(), 1);
-        let instruction = &main.dfg[instructions[0]];
-
-        assert_eq!(
-            instruction,
-            &Instruction::Constrain(ValueId::test_new(0), ValueId::test_new(1), None)
-        );
-    }
 }
diff --git a/tooling/nargo_cli/tests/execution_success/slices/src/main.nr b/tooling/nargo_cli/tests/execution_success/slices/src/main.nr
index 8fbe14bfea3..1e5a7a605aa 100644
--- a/tooling/nargo_cli/tests/execution_success/slices/src/main.nr
+++ b/tooling/nargo_cli/tests/execution_success/slices/src/main.nr
@@ -92,16 +92,16 @@ fn regression_merge_slices(x: Field, y: Field) {
 
 fn merge_slices_if(x: Field, y: Field) {
     let slice = merge_slices_return(x, y);
-    assert(slice[2] == 10);
     assert(slice.len() == 3);
+    assert(slice[2] == 10);
 
     let slice = merge_slices_mutate(x, y);
-    assert(slice[3] == 5);
     assert(slice.len() == 4);
+    assert(slice[3] == 5);
 
     let slice = merge_slices_mutate_in_loop(x, y);
-    assert(slice[6] == 4);
     assert(slice.len() == 7);
+    assert(slice[6] == 4);
 
     let slice = merge_slices_mutate_two_ifs(x, y);
     assert(slice.len() == 6);
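
Context for reviewers on the constant_folding.rs change: the deleted `constrained_values` map implemented a constraint-driven substitution. Each `constrain lhs == rhs` recorded a link from the more complex value to the simpler one (constants were preferred over block parameters, and block parameters over instruction results), and `resolve_cache` chased those links to a fixed point so that later instructions folded over the simplified ids. The sketch below is illustrative only, not the compiler's API: plain `u64` ids stand in for `ValueId`, a bare `HashMap` stands in for the constraint cache, and the `dfg.resolve` canonicalization interleaved between lookups in the real helper is omitted.

use std::collections::HashMap;

/// Follow the `complex -> simpler` links recorded by `constrain` instructions
/// until no further mapping applies, mirroring the removed `resolve_cache`.
fn resolve(cache: &HashMap<u64, u64>, id: u64) -> u64 {
    match cache.get(&id) {
        Some(&simpler) => resolve(cache, simpler),
        None => id,
    }
}

fn main() {
    // `constrain v2 == v1` and `constrain v1 == Field 10` would have recorded
    // v2 -> v1 and v1 -> 10, where id 10 stands in for the constant's ValueId.
    let cache: HashMap<u64, u64> = HashMap::from([(2, 1), (1, 10)]);
    assert_eq!(resolve(&cache, 2), 10); // v2 folds all the way to the constant
    assert_eq!(resolve(&cache, 7), 7); // unconstrained values are left alone
}

With the cache gone, `resolve_instruction` falls back to plain `dfg.resolve`, so values are only unified through the data flow graph itself, and the deleted `constrained_value_replacement` test documented folding behaviour that is intentionally no longer performed.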