merkle intermediate layers (#548)
spapinistarkware authored Apr 2, 2024
1 parent df3b5d1 commit bb5e4f3
Showing 5 changed files with 376 additions and 204 deletions.
src/commitment_scheme/blake2_merkle.rs: 44 additions & 37 deletions
@@ -6,9 +6,11 @@ use super::ops::{MerkleHasher, MerkleOps};
 use crate::core::backend::CPUBackend;
 use crate::core::fields::m31::BaseField;
 
+#[derive(Copy, Clone, PartialEq, Eq, Default)]
+pub struct Blake2sHash(pub [u32; 8]);
 pub struct Blake2Hasher;
 impl MerkleHasher for Blake2Hasher {
-    type Hash = [u32; 8];
+    type Hash = Blake2sHash;
 
     fn hash_node(
         children_hashes: Option<(Self::Hash, Self::Hash)>,
@@ -33,16 +35,26 @@ impl MerkleHasher for Blake2Hasher {
         for chunk in padded_values.array_chunks::<16>() {
             state = compress(state, unsafe { std::mem::transmute(chunk) }, 0, 0, 0, 0);
         }
-        state
+        Blake2sHash(state)
     }
 }
 
+impl std::fmt::Debug for Blake2sHash {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        // Write as hex.
+        for &byte in self.0.iter() {
+            write!(f, "{:02x}", byte)?;
+        }
+        Ok(())
+    }
+}
+
 impl MerkleOps<Blake2Hasher> for CPUBackend {
     fn commit_on_layer(
         log_size: u32,
-        prev_layer: Option<&Vec<[u32; 8]>>,
+        prev_layer: Option<&Vec<Blake2sHash>>,
         columns: &[&Vec<BaseField>],
-    ) -> Vec<[u32; 8]> {
+    ) -> Vec<Blake2sHash> {
         (0..(1 << log_size))
             .map(|i| {
                 Blake2Hasher::hash_node(
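Note: the Debug impl added above writes each of the eight u32 state words with {:02x}, i.e. at least two hex digits per word. Below is a standalone sketch of the same formatting, using a hypothetical local Hash newtype in place of the crate's Blake2sHash, so it compiles outside the crate.

// Standalone sketch; the local `Hash` newtype is illustrative only.
struct Hash([u32; 8]);

impl std::fmt::Debug for Hash {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Each u32 word is written with {:02x}: at least two hex digits, up to eight.
        for &word in self.0.iter() {
            write!(f, "{:02x}", word)?;
        }
        Ok(())
    }
}

fn main() {
    let h = Hash([0x01, 0xff, 0, 0, 0, 0, 0, 0xab]);
    // Prints: 01ff0000000000ab
    println!("{:?}", h);
}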
@@ -56,34 +68,34 @@ impl MerkleOps<Blake2Hasher> for CPUBackend {
 
 #[cfg(test)]
 mod tests {
+    use std::collections::BTreeMap;
+
     use itertools::Itertools;
     use num_traits::Zero;
     use rand::rngs::StdRng;
     use rand::{Rng, SeedableRng};
 
-    use crate::commitment_scheme::blake2_merkle::Blake2Hasher;
+    use crate::commitment_scheme::blake2_merkle::{Blake2Hasher, Blake2sHash};
     use crate::commitment_scheme::prover::{MerkleDecommitment, MerkleProver};
-    use crate::commitment_scheme::verifier::{MerkleTreeVerifier, MerkleVerificationError};
+    use crate::commitment_scheme::verifier::{MerkleVerificationError, MerkleVerifier};
    use crate::core::backend::CPUBackend;
    use crate::core::fields::m31::BaseField;
 
     type TestData = (
-        Vec<usize>,
+        BTreeMap<u32, Vec<usize>>,
         MerkleDecommitment<Blake2Hasher>,
-        Vec<(u32, Vec<BaseField>)>,
-        MerkleTreeVerifier<Blake2Hasher>,
+        Vec<Vec<BaseField>>,
+        MerkleVerifier<Blake2Hasher>,
     );
     fn prepare_merkle() -> TestData {
         const N_COLS: usize = 400;
         const N_QUERIES: usize = 7;
+        let log_size_range = 6..9;
 
         let rng = &mut StdRng::seed_from_u64(0);
         let log_sizes = (0..N_COLS)
-            .map(|_| rng.gen_range(6..9))
-            .sorted()
-            .rev()
+            .map(|_| rng.gen_range(log_size_range.clone()))
             .collect_vec();
-        let max_log_size = *log_sizes.iter().max().unwrap();
         let cols = log_sizes
             .iter()
             .map(|&log_size| {
@@ -94,26 +106,21 @@ mod tests {
             .collect_vec();
         let merkle = MerkleProver::<CPUBackend, Blake2Hasher>::commit(cols.iter().collect_vec());
 
-        let queries = (0..N_QUERIES)
-            .map(|_| rng.gen_range(0..(1 << max_log_size)))
-            .sorted()
-            .dedup()
-            .collect_vec();
-        let decommitment = merkle.decommit(queries.clone());
-        let values = cols
-            .iter()
-            .map(|col| {
-                let layer_queries = queries
-                    .iter()
-                    .map(|&q| q >> (max_log_size - col.len().ilog2()))
-                    .dedup();
-                layer_queries.map(|q| col[q]).collect_vec()
-            })
-            .collect_vec();
-        let values = log_sizes.into_iter().zip(values).collect_vec();
+        let mut queries = BTreeMap::<u32, Vec<usize>>::new();
+        for log_size in log_size_range.rev() {
+            let layer_queries = (0..N_QUERIES)
+                .map(|_| rng.gen_range(0..(1 << log_size)))
+                .sorted()
+                .dedup()
+                .collect_vec();
+            queries.insert(log_size, layer_queries);
+        }
+
+        let (values, decommitment) = merkle.decommit(queries.clone(), cols.iter().collect_vec());
+
-        let verifier = MerkleTreeVerifier {
+        let verifier = MerkleVerifier {
             root: merkle.root(),
             column_log_sizes: log_sizes,
         };
         (queries, decommitment, values, verifier)
     }
@@ -128,7 +135,7 @@
     #[test]
     fn test_merkle_invalid_witness() {
         let (queries, mut decommitment, values, verifier) = prepare_merkle();
-        decommitment.witness[20] = [0; 8];
+        decommitment.hash_witness[20] = Blake2sHash([0; 8]);
 
         assert_eq!(
             verifier.verify(queries, values, decommitment).unwrap_err(),
@@ -139,7 +146,7 @@
     #[test]
     fn test_merkle_invalid_value() {
         let (queries, decommitment, mut values, verifier) = prepare_merkle();
-        values[3].1[6] = BaseField::zero();
+        values[3][6] = BaseField::zero();
 
         assert_eq!(
             verifier.verify(queries, values, decommitment).unwrap_err(),
@@ -150,7 +157,7 @@
     #[test]
     fn test_merkle_witness_too_short() {
         let (queries, mut decommitment, values, verifier) = prepare_merkle();
-        decommitment.witness.pop();
+        decommitment.hash_witness.pop();
 
         assert_eq!(
             verifier.verify(queries, values, decommitment).unwrap_err(),
@@ -161,7 +168,7 @@
     #[test]
     fn test_merkle_column_values_too_long() {
         let (queries, decommitment, mut values, verifier) = prepare_merkle();
-        values[3].1.push(BaseField::zero());
+        values[3].push(BaseField::zero());
 
         assert_eq!(
             verifier.verify(queries, values, decommitment).unwrap_err(),
@@ -172,7 +179,7 @@
     #[test]
     fn test_merkle_column_values_too_short() {
         let (queries, decommitment, mut values, verifier) = prepare_merkle();
-        values[3].1.pop();
+        values[3].pop();
 
         assert_eq!(
             verifier.verify(queries, values, decommitment).unwrap_err(),
@@ -183,7 +190,7 @@
     #[test]
     fn test_merkle_witness_too_long() {
         let (queries, mut decommitment, values, verifier) = prepare_merkle();
-        decommitment.witness.push([0; 8]);
+        decommitment.hash_witness.push(Blake2sHash([0; 8]));
 
         assert_eq!(
             verifier.verify(queries, values, decommitment).unwrap_err(),
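Taken together, the test changes show the reworked flow: queries are grouped per layer in a BTreeMap keyed by log size, decommit now returns the queried column values alongside the hash witness, and MerkleVerifier replaces MerkleTreeVerifier. Below is a minimal usage sketch assembled from those tests, written as if inside the crate; the single shared log size and the helper name commit_and_verify are illustrative assumptions, not part of this commit.

// Minimal sketch of the new commit/decommit/verify flow, based on the tests above.
use std::collections::BTreeMap;

use itertools::Itertools;

use crate::commitment_scheme::blake2_merkle::Blake2Hasher;
use crate::commitment_scheme::prover::MerkleProver;
use crate::commitment_scheme::verifier::MerkleVerifier;
use crate::core::backend::CPUBackend;
use crate::core::fields::m31::BaseField;

fn commit_and_verify(cols: Vec<Vec<BaseField>>, log_size: u32) {
    // Commit to all columns at once (each column assumed to have 1 << log_size rows).
    let merkle = MerkleProver::<CPUBackend, Blake2Hasher>::commit(cols.iter().collect_vec());

    // Queries are grouped per layer: log_size -> sorted, deduplicated row indices.
    let mut queries = BTreeMap::<u32, Vec<usize>>::new();
    queries.insert(log_size, vec![0, 3]);

    // decommit returns the queried values together with the hash witness.
    let (values, decommitment) = merkle.decommit(queries.clone(), cols.iter().collect_vec());

    let verifier = MerkleVerifier {
        root: merkle.root(),
        column_log_sizes: vec![log_size; cols.len()],
    };
    verifier.verify(queries, values, decommitment).unwrap();
}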