
Commit

Merge branch 'master' into michaeljklein/stdlib-static-assertions
michaeljklein authored Jan 14, 2025
2 parents dea58ad + a0ffedf commit a79c443
Showing 37 changed files with 762 additions and 246 deletions.
3 changes: 3 additions & 0 deletions .github/critical_libraries_status/.gitignore
@@ -0,0 +1,3 @@
.actual.jsonl
.actual.jsonl.jq
.failures.jsonl.jq
Empty file.
46 changes: 15 additions & 31 deletions .github/workflows/reports.yml
@@ -300,14 +300,17 @@ jobs:
matrix:
include:
# - project: { repo: AztecProtocol/aztec-packages, path: noir-projects/noir-contracts, is_library: true }
- project: { repo: AztecProtocol/aztec-packages, path: noir-projects/noir-protocol-circuits/crates/private-kernel-inner, take_average: true }
- project: { repo: AztecProtocol/aztec-packages, path: noir-projects/noir-protocol-circuits/crates/private-kernel-tail, take_average: true }
- project: { repo: AztecProtocol/aztec-packages, path: noir-projects/noir-protocol-circuits/crates/private-kernel-reset, take_average: true }
- project: { repo: AztecProtocol/aztec-packages, path: noir-projects/noir-protocol-circuits/crates/rollup-root, take_average: true }
- project: { repo: AztecProtocol/aztec-packages, path: noir-projects/noir-protocol-circuits/crates/private-kernel-inner, num_runs: 5 }
- project: { repo: AztecProtocol/aztec-packages, path: noir-projects/noir-protocol-circuits/crates/private-kernel-tail, num_runs: 5 }
- project: { repo: AztecProtocol/aztec-packages, path: noir-projects/noir-protocol-circuits/crates/private-kernel-reset, num_runs: 5 }
- project: { repo: AztecProtocol/aztec-packages, path: noir-projects/noir-protocol-circuits/crates/rollup-base-private, num_runs: 5 }
- project: { repo: AztecProtocol/aztec-packages, path: noir-projects/noir-protocol-circuits/crates/rollup-base-public, num_runs: 5 }
- project: { repo: AztecProtocol/aztec-packages, path: noir-projects/noir-protocol-circuits/crates/rollup-block-merge, num_runs: 5 }
#- project: { repo: AztecProtocol/aztec-packages, path: noir-projects/noir-protocol-circuits/crates/rollup-block-root-empty }
#- project: { repo: AztecProtocol/aztec-packages, path: noir-projects/noir-protocol-circuits/crates/rollup-block-single-tx }
#- project: { repo: AztecProtocol/aztec-packages, path: noir-projects/noir-protocol-circuits/crates/rollup-block-root }
- project: { repo: AztecProtocol/aztec-packages, path: noir-projects/noir-protocol-circuits/crates/rollup-block-merge, take_average: true }
- project: { repo: AztecProtocol/aztec-packages, path: noir-projects/noir-protocol-circuits/crates/rollup-base-private, take_average: true }
- project: { repo: AztecProtocol/aztec-packages, path: noir-projects/noir-protocol-circuits/crates/rollup-base-public, take_average: true }
- project: { repo: AztecProtocol/aztec-packages, path: noir-projects/noir-protocol-circuits/crates/rollup-merge, num_runs: 5 }
- project: { repo: AztecProtocol/aztec-packages, path: noir-projects/noir-protocol-circuits/crates/rollup-root, num_runs: 5 }

name: External repo compilation and execution reports - ${{ matrix.project.repo }}/${{ matrix.project.path }}
steps:
@@ -341,41 +344,22 @@ jobs:
path: test-repo
ref: ${{ matrix.project.ref }}

- name: Generate compilation report without averages
- name: Generate compilation report
working-directory: ./test-repo/${{ matrix.project.path }}
if: ${{ !matrix.project.take_average }}
run: |
mv /home/runner/work/noir/noir/scripts/test_programs/compilation_report.sh ./compilation_report.sh
touch parse_time.sh
chmod +x parse_time.sh
cp /home/runner/work/noir/noir/scripts/test_programs/parse_time.sh ./parse_time.sh
./compilation_report.sh 1
- name: Generate compilation report with averages
working-directory: ./test-repo/${{ matrix.project.path }}
if: ${{ matrix.project.take_average }}
run: |
mv /home/runner/work/noir/noir/scripts/test_programs/compilation_report.sh ./compilation_report.sh
touch parse_time.sh
chmod +x parse_time.sh
cp /home/runner/work/noir/noir/scripts/test_programs/parse_time.sh ./parse_time.sh
./compilation_report.sh 1 1
./compilation_report.sh 1 ${{ matrix.project.num_runs }}
- name: Generate execution report without averages
- name: Generate execution report
working-directory: ./test-repo/${{ matrix.project.path }}
if: ${{ !matrix.project.is_library && !matrix.project.take_average }}
run: |
mv /home/runner/work/noir/noir/scripts/test_programs/execution_report.sh ./execution_report.sh
mv /home/runner/work/noir/noir/scripts/test_programs/parse_time.sh ./parse_time.sh
./execution_report.sh 1
- name: Generate execution report with averages
working-directory: ./test-repo/${{ matrix.project.path }}
if: ${{ !matrix.project.is_library && matrix.project.take_average }}
if: ${{ !matrix.project.is_library }}
run: |
mv /home/runner/work/noir/noir/scripts/test_programs/execution_report.sh ./execution_report.sh
mv /home/runner/work/noir/noir/scripts/test_programs/parse_time.sh ./parse_time.sh
./execution_report.sh 1 1
./execution_report.sh 1 ${{ matrix.project.num_runs }}
- name: Move compilation report
id: compilation_report
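The matrix entries above replace the boolean `take_average` flag with an explicit `num_runs` count, which the report steps now forward as the second argument to `compilation_report.sh` and `execution_report.sh`. As a rough illustration of what averaging over several runs does for noisy timings, here is a hypothetical Rust sketch; the actual logic lives in the shell scripts under `scripts/test_programs/`, not in Rust:

```rust
use std::time::{Duration, Instant};

// Time a workload `num_runs` times and report the mean; this mirrors the idea
// behind passing `num_runs: 5` in the matrix, but is not the real report script.
fn average_duration<F: FnMut()>(mut run: F, num_runs: u32) -> Duration {
    let total: Duration = (0..num_runs)
        .map(|_| {
            let start = Instant::now();
            run(); // e.g. compiling one protocol circuit
            start.elapsed()
        })
        .sum();
    total / num_runs
}

fn main() {
    let avg = average_duration(|| std::thread::sleep(Duration::from_millis(10)), 5);
    println!("average over 5 runs: {avg:?}");
}
```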
1 change: 1 addition & 0 deletions CRITICAL_NOIR_LIBRARIES
@@ -1,3 +1,4 @@
https://github.com/noir-lang/noir_check_shuffle
https://github.com/noir-lang/ec
https://github.com/noir-lang/eddsa
https://github.com/noir-lang/mimc
37 changes: 37 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default.

41 changes: 3 additions & 38 deletions acvm-repo/acvm/src/compiler/mod.rs
@@ -16,10 +16,6 @@ pub use simulator::CircuitSimulator;
use transformers::transform_internal;
pub use transformers::{transform, MIN_EXPRESSION_WIDTH};

/// We need multiple passes to stabilize the output.
/// The value was determined by running tests.
const MAX_OPTIMIZER_PASSES: usize = 3;

/// This module moves and decomposes acir opcodes. The transformation map allows consumers of this module to map
/// metadata they had about the opcodes to the new opcode structure generated after the transformation.
#[derive(Debug)]
@@ -84,41 +80,10 @@ pub fn compile<F: AcirField>(
) -> (Circuit<F>, AcirTransformationMap) {
let acir_opcode_positions = (0..acir.opcodes.len()).collect::<Vec<_>>();

if MAX_OPTIMIZER_PASSES == 0 {
return (acir, AcirTransformationMap::new(&acir_opcode_positions));
}

let mut pass = 0;
let mut prev_opcodes_hash = fxhash::hash64(&acir.opcodes);
let mut prev_acir = acir;
let mut prev_acir_opcode_positions = acir_opcode_positions;

// For most test programs it would be enough to only loop `transform_internal`,
// but some of them don't stabilize unless we also repeat the backend agnostic optimizations.
let (mut acir, acir_opcode_positions) = loop {
let (acir, acir_opcode_positions) =
optimize_internal(prev_acir, prev_acir_opcode_positions);

// Stop if we have already done at least one transform and an extra optimization changed nothing.
if pass > 0 && prev_opcodes_hash == fxhash::hash64(&acir.opcodes) {
break (acir, acir_opcode_positions);
}

let (acir, acir_opcode_positions) =
transform_internal(acir, expression_width, acir_opcode_positions);

let opcodes_hash = fxhash::hash64(&acir.opcodes);

// Stop if the output hasn't change in this loop or we went too long.
if pass == MAX_OPTIMIZER_PASSES - 1 || prev_opcodes_hash == opcodes_hash {
break (acir, acir_opcode_positions);
}
let (acir, acir_opcode_positions) = optimize_internal(acir, acir_opcode_positions);

pass += 1;
prev_acir = acir;
prev_opcodes_hash = opcodes_hash;
prev_acir_opcode_positions = acir_opcode_positions;
};
let (mut acir, acir_opcode_positions) =
transform_internal(acir, expression_width, acir_opcode_positions);

let transformation_map = AcirTransformationMap::new(&acir_opcode_positions);
acir.assert_messages = transform_assert_messages(acir.assert_messages, &transformation_map);
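This hunk removes the fixed-point loop from `compile`: the function now runs `optimize_internal` once and hands off to `transform_internal`, which takes over the repeat-until-stable behaviour (see the transformer changes below). For context, a minimal sketch of the hash-based stabilization pattern the deleted loop used, assuming the `fxhash` crate and a stand-in pass function rather than the real ACVM types:

```rust
// Repeat a pass until its output stops changing or a pass budget is exhausted.
// `run_pass` and `Vec<u64>` stand in for the real transformation and opcode list.
const MAX_PASSES: usize = 3;

fn stabilize(mut opcodes: Vec<u64>, run_pass: impl Fn(Vec<u64>) -> Vec<u64>) -> Vec<u64> {
    let mut prev_hash = fxhash::hash64(&opcodes);
    for _ in 0..MAX_PASSES {
        opcodes = run_pass(opcodes);
        let hash = fxhash::hash64(&opcodes);
        if hash == prev_hash {
            break; // output is stable, another pass would change nothing
        }
        prev_hash = hash;
    }
    opcodes
}
```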
23 changes: 20 additions & 3 deletions acvm-repo/acvm/src/compiler/transformers/mod.rs
@@ -14,12 +14,17 @@ mod csat;

pub(crate) use csat::CSatTransformer;
pub use csat::MIN_EXPRESSION_WIDTH;
use tracing::info;

use super::{
optimizers::MergeExpressionsOptimizer, transform_assert_messages, AcirTransformationMap,
MAX_OPTIMIZER_PASSES,
optimizers::{MergeExpressionsOptimizer, RangeOptimizer},
transform_assert_messages, AcirTransformationMap,
};

/// We need multiple passes to stabilize the output.
/// The value was determined by running tests.
const MAX_TRANSFORMER_PASSES: usize = 3;

/// Applies [`ProofSystemCompiler`][crate::ProofSystemCompiler] specific optimizations to a [`Circuit`].
pub fn transform<F: AcirField>(
acir: Circuit<F>,
@@ -50,12 +55,18 @@ pub(super) fn transform_internal<F: AcirField>(
expression_width: ExpressionWidth,
mut acir_opcode_positions: Vec<usize>,
) -> (Circuit<F>, Vec<usize>) {
if acir.opcodes.len() == 1 && matches!(acir.opcodes[0], Opcode::BrilligCall { .. }) {
info!("Program is fully unconstrained, skipping transformation pass");
return (acir, acir_opcode_positions);
}

// Allow multiple passes until we have stable output.
let mut prev_opcodes_hash = fxhash::hash64(&acir.opcodes);

// For most test programs it would be enough to loop here, but some of them
// don't stabilize unless we also repeat the backend agnostic optimizations.
for _ in 0..MAX_OPTIMIZER_PASSES {
for _ in 0..MAX_TRANSFORMER_PASSES {
info!("Number of opcodes {}", acir.opcodes.len());
let (new_acir, new_acir_opcode_positions) =
transform_internal_once(acir, expression_width, acir_opcode_positions);

@@ -217,6 +228,12 @@ fn transform_internal_once<F: AcirField>(
..acir
};

// The `MergeOptimizer` can merge two witnesses which have range opcodes applied to them
// so we run the `RangeOptimizer` afterwards to clear these up.
let range_optimizer = RangeOptimizer::new(acir);
let (acir, new_acir_opcode_positions) =
range_optimizer.replace_redundant_ranges(new_acir_opcode_positions);

(acir, new_acir_opcode_positions)
}

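Two behavioural changes land in this file: `transform_internal` now returns early when the whole program is a single `BrilligCall` (a fully unconstrained program gains nothing from ACIR transformation), and `transform_internal_once` runs the `RangeOptimizer` after the `MergeExpressionsOptimizer` so that range opcodes made redundant by merged witnesses are cleaned up. A toy sketch of the early-return check, using a stand-in opcode enum rather than the real ACVM `Opcode`:

```rust
// Stand-in opcode type for illustration only; the real enum has many more variants.
enum Opcode {
    BrilligCall { id: u32 },
    AssertZero,
}

/// A fully unconstrained program lowers to exactly one Brillig call,
/// so there is nothing for the ACIR transformer to rewrite.
fn is_fully_unconstrained(opcodes: &[Opcode]) -> bool {
    opcodes.len() == 1 && matches!(opcodes[0], Opcode::BrilligCall { .. })
}

fn main() {
    assert!(is_fully_unconstrained(&[Opcode::BrilligCall { id: 0 }]));
    assert!(!is_fully_unconstrained(&[Opcode::BrilligCall { id: 0 }, Opcode::AssertZero]));
}
```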
1 change: 1 addition & 0 deletions compiler/noirc_evaluator/Cargo.toml
@@ -29,6 +29,7 @@ chrono = "0.4.37"
rayon.workspace = true
cfg-if.workspace = true
smallvec = { version = "1.13.2", features = ["serde"] }
vec-collections = "0.4.3"

[dev-dependencies]
proptest.workspace = true
34 changes: 15 additions & 19 deletions compiler/noirc_evaluator/src/acir/generated_acir.rs
@@ -479,7 +479,7 @@ impl<F: AcirField> GeneratedAcir<F> {
pub(crate) fn is_equal(&mut self, lhs: &Expression<F>, rhs: &Expression<F>) -> Witness {
let t = lhs - rhs;

self.is_zero(&t)
self.is_zero(t)
}

/// Returns a `Witness` that is constrained to be:
@@ -534,36 +534,32 @@ impl<F: AcirField> GeneratedAcir<F> {
/// By setting `z` to be `0`, we can make `y` equal to `1`.
/// This is easily observed: `y = 1 - t * 0`
/// Now since `y` is one, this means that `t` needs to be zero, or else `y * t == 0` will fail.
fn is_zero(&mut self, t_expr: &Expression<F>) -> Witness {
// We're checking for equality with zero so we can negate the expression without changing the result.
// This is useful as it will sometimes allow us to simplify an expression down to a witness.
let t_witness = if let Some(witness) = t_expr.to_witness() {
witness
fn is_zero(&mut self, t_expr: Expression<F>) -> Witness {
// We're going to be multiplying this expression by two different witnesses in a second so we want to
// ensure that this expression only contains a single witness. We can tolerate coefficients and constant terms however.
let linear_t = if t_expr.is_degree_one_univariate() {
t_expr
} else {
let negated_expr = t_expr * -F::one();
self.get_or_create_witness(&negated_expr)
Expression::<F>::from(self.get_or_create_witness(&t_expr))
};

// Call the inversion directive, since we do not apply a constraint
// the prover can choose anything here.
let z = self.brillig_inverse(t_witness.into());
let z = self.brillig_inverse(linear_t.clone());
let z_expr = Expression::<F>::from(z);

let y = self.next_witness_index();
let y_expr = Expression::<F>::from(y);

// Add constraint y == 1 - tz => y + tz - 1 == 0
let y_is_boolean_constraint = Expression {
mul_terms: vec![(F::one(), t_witness, z)],
linear_combinations: vec![(F::one(), y)],
q_c: -F::one(),
};
let mut y_is_boolean_constraint =
(&z_expr * &linear_t).expect("multiplying two linear expressions");
y_is_boolean_constraint.push_addition_term(F::one(), y);
let y_is_boolean_constraint = y_is_boolean_constraint - F::one();
self.assert_is_zero(y_is_boolean_constraint);

// Add constraint that y * t == 0;
let ty_zero_constraint = Expression {
mul_terms: vec![(F::one(), t_witness, y)],
linear_combinations: vec![],
q_c: F::zero(),
};
let ty_zero_constraint = (&y_expr * &linear_t).expect("multiplying two linear expressions");
self.assert_is_zero(ty_zero_constraint);

y
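For reference, the gadget in `is_zero` works as described in the surrounding comments: given an expression `t`, the prover supplies a hint `z` (nominally `1/t`), and a fresh witness `y` is constrained by `y == 1 - t*z` and `y*t == 0`, which forces `y == 1` exactly when `t == 0` and `y == 0` otherwise. The change here keeps `t` as a degree-one expression where possible instead of always reducing it to a single witness. A standalone numeric check of the gadget over a toy prime field (illustrative arithmetic only, not the ACVM `Expression` API):

```rust
// Numeric check of the is_zero gadget over a toy prime field (p = 101).
const P: u64 = 101;

fn inv(t: u64) -> u64 {
    // Fermat inverse t^(p-2) mod p; returns 0 when t == 0, matching the
    // "prover may choose anything" behaviour of the inversion hint.
    (0..P - 2).fold(1, |acc, _| acc * t % P)
}

fn is_zero(t: u64) -> u64 {
    let z = inv(t); // unconstrained hint, here computed honestly
    let y = (1 + P - t * z % P) % P; // constraint 1: y == 1 - t*z
    assert_eq!(y * t % P, 0); // constraint 2: y * t == 0
    y
}

fn main() {
    assert_eq!(is_zero(0), 1); // t == 0  =>  y == 1
    assert_eq!(is_zero(7), 0); // t != 0  =>  y == 0
}
```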
4 changes: 1 addition & 3 deletions compiler/noirc_evaluator/src/ssa/ir/dfg.rs
@@ -20,7 +20,6 @@ use iter_extended::vecmap;
use serde::{Deserialize, Serialize};
use serde_with::serde_as;
use serde_with::DisplayFromStr;
use tracing::warn;

/// The DataFlowGraph contains most of the actual data in a function including
/// its blocks, instructions, and values. This struct is largely responsible for
@@ -238,8 +237,7 @@ impl DataFlowGraph {
call_stack: CallStackId,
) -> InsertInstructionResult {
if !self.is_handled_by_runtime(&instruction) {
warn!("Attempted to insert instruction not handled by runtime: {instruction:?}");
return InsertInstructionResult::InstructionRemoved;
panic!("Attempted to insert instruction not handled by runtime: {instruction:?}");
}

match instruction.simplify(self, block, ctrl_typevars.clone(), call_stack) {
3 changes: 2 additions & 1 deletion compiler/noirc_evaluator/src/ssa/opt/constant_folding.rs
@@ -307,14 +307,15 @@ impl<'brillig> Context<'brillig> {
let old_results = dfg.instruction_results(id).to_vec();

// If a copy of this instruction exists earlier in the block, then reuse the previous results.
let runtime_is_brillig = dfg.runtime().is_brillig();
if let Some(cache_result) =
self.get_cached(dfg, dom, &instruction, *side_effects_enabled_var, block)
{
match cache_result {
CacheResult::Cached(cached) => {
// We track whether we may mutate MakeArray instructions before we deduplicate
// them but we still need to issue an extra inc_rc in case they're mutated afterward.
if matches!(instruction, Instruction::MakeArray { .. }) {
if runtime_is_brillig && matches!(instruction, Instruction::MakeArray { .. }) {
let value = *cached.last().unwrap();
let inc_rc = Instruction::IncrementRc { value };
let call_stack = dfg.get_instruction_call_stack_id(id);
10 changes: 6 additions & 4 deletions compiler/noirc_evaluator/src/ssa/opt/loop_invariant.rs
@@ -112,10 +112,12 @@ impl<'f> LoopInvariantContext<'f> {

// If we are hoisting a MakeArray instruction,
// we need to issue an extra inc_rc in case they are mutated afterward.
if matches!(
self.inserter.function.dfg[instruction_id],
Instruction::MakeArray { .. }
) {
if self.inserter.function.runtime().is_brillig()
&& matches!(
self.inserter.function.dfg[instruction_id],
Instruction::MakeArray { .. }
)
{
let result =
self.inserter.function.dfg.instruction_results(instruction_id)[0];
let inc_rc = Instruction::IncrementRc { value: result };
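The `constant_folding` and `loop_invariant` hunks apply the same guard: the compensating `inc_rc` issued when a `MakeArray` result is deduplicated or hoisted only matters under the Brillig runtime, since arrays in constrained ACIR functions are not reference counted. Schematically, with stand-in types rather than the real SSA API:

```rust
// Stand-in types for illustration; the real logic lives in the SSA passes above.
enum Runtime {
    Acir,
    Brillig,
}

enum Instruction {
    MakeArray { len: usize },
    Other,
}

/// Only Brillig functions reference-count arrays, so the extra `inc_rc`
/// is emitted solely for MakeArray instructions in Brillig code.
fn needs_extra_inc_rc(runtime: &Runtime, instruction: &Instruction) -> bool {
    matches!(runtime, Runtime::Brillig) && matches!(instruction, Instruction::MakeArray { .. })
}

fn main() {
    assert!(needs_extra_inc_rc(&Runtime::Brillig, &Instruction::MakeArray { len: 3 }));
    assert!(!needs_extra_inc_rc(&Runtime::Acir, &Instruction::MakeArray { len: 3 }));
    assert!(!needs_extra_inc_rc(&Runtime::Brillig, &Instruction::Other));
}
```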