Use is_sparse, specialize to SparseTernary inside
Now, use the general NoiseDistribution.is_sparse property to do a different cost estimation for sparse secrets. But when the secret is sparse ternary, use the splitting methods of that noise distribution to get the precise costs.

In the case of lwe_dual, there was only code that worked for SparseTernary, so raise an error if any other distribution is passed in.
In the case of lwe_guess, use an estimate, i.e. the probability of being non-zero is assumed to be the same for all values in the bounded range.

See: malb#127 (comment)
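
Read together, the three diffs below implement a single dispatch rule. A minimal sketch of that rule (the helper secret_cost_path is hypothetical and returns placeholder strings instead of the real cost computations; it assumes the estimator package layout shown in the diffs):

    from estimator.nd import SparseTernary

    def secret_cost_path(Xs):
        # Dispatch introduced by this commit (a sketch, not the committed code).
        if Xs.is_sparse:
            if type(Xs) is SparseTernary:
                # exact combinatorics via split_balanced / split_probability
                return "exact sparse-ternary costing"
            # other sparse secrets: estimated in lwe_guess, rejected in lwe_dual
            return "approximate sparse costing"
        return "generic dense costing"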
ludopulles committed Sep 23, 2024
1 parent 36fa7d9 commit 31957df
Showing 3 changed files with 44 additions and 17 deletions.
19 changes: 13 additions & 6 deletions estimator/lwe_dual.py
@@ -59,12 +59,19 @@ def dual_reduce(
         )

         # Compute new secret distribution
-        if type(params.Xs) is SparseTernary:
+        if params.Xs.is_sparse:
             h = params.Xs.hamming_weight
             if not 0 <= h1 <= h:
                 raise OutOfBoundsError(f"Splitting weight {h1} must be between 0 and h={h}.")
-            # split the +1 and -1 entries in a balanced way.
-            slv_Xs, red_Xs = params.Xs.split_balanced(zeta, h1)
+
+            if type(params.Xs) is SparseTernary:
+                # split the +1 and -1 entries in a balanced way.
+                slv_Xs, red_Xs = params.Xs.split_balanced(zeta, h1)
+            else:
+                # TODO: Implement this for sparse secret that are not SparseTernary,
+                # i.e. DiscreteGaussian with extremely small stddev.
+                raise NotImplementedError(f"Unknown how to exploit sparsity of {params.Xs}")
+
             if h1 == h:
                 # no reason to do lattice reduction if we assume
                 # that the hw on the reduction part is 0
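
For intuition: split_balanced(zeta, h1) splits the length-n sparse ternary secret into a length-zeta part of weight h1 and a length-(n - zeta) part of weight h - h1. Ignoring the balancing of the +1/-1 signs, the probability that a uniformly random weight-h secret admits a given split is hypergeometric. A toy sketch (the helper split_chance is illustrative only; the estimator's own code additionally balances the signs):

    from math import comb

    def split_chance(n, h, zeta, h1):
        # Probability that exactly h1 of the h non-zero coefficients fall
        # into a fixed set of zeta coordinates (signs ignored).
        return comb(zeta, h1) * comb(n - zeta, h - h1) / comb(n, h)

    # Weight-64 secret of length 1024, splitting off zeta = 128 coordinates;
    # the most likely weight on that part is h1 = h * zeta / n = 8.
    print(split_chance(1024, 64, 128, 8))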
@@ -172,7 +179,7 @@ def cost(
         Logging.log("dual", log_level, f"{repr(cost)}")

         rep = 1
-        if type(params.Xs) is SparseTernary:
+        if params.Xs.is_sparse:
             h = params.Xs.hamming_weight
             probability = RR(prob_drop(params.n, h, zeta, h1))
             rep = prob_amplify(success_probability, probability)
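
The repetition count rep follows the standard amplification rule: if a single attempt succeeds with probability p, reaching an overall success probability p* takes about log(1 - p*) / log(1 - p) independent attempts. A one-line sketch, assuming that is what prob_amplify computes (the helper repetitions below is illustrative, not the estimator's implementation):

    from math import ceil, log

    def repetitions(target, p):
        # attempts needed so that 1 - (1 - p)**attempts >= target
        return ceil(log(1 - target) / log(1 - p))

    print(repetitions(0.99, 0.05))  # about 90 attempts for a 5%-likely event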
@@ -309,7 +316,7 @@ def f(beta):
         beta = cost["beta"]

         cost["zeta"] = zeta
-        if type(params.Xs) is SparseTernary:
+        if params.Xs.is_sparse:
             cost["h1"] = h1
         return cost

@@ -424,7 +431,7 @@ def __call__(

         params = params.normalize()

-        if type(params.Xs) is SparseTernary:
+        if params.Xs.is_sparse:
             Cost.register_impermanent(h1=False)

             def _optimize_blocksize(
33 changes: 22 additions & 11 deletions estimator/lwe_guess.py
@@ -239,10 +239,17 @@ def mitm_analytical(self, params: LWEParameters, success_probability=0.99):
         # we could now call self.cost with this k, but using our model below seems
         # about 3x faster and reasonably accurate

-        if type(params.Xs) is SparseTernary:
+        if params.Xs.is_sparse:
             h = params.Xs.hamming_weight
-            # split optimally and compute the probability of this event
-            success_probability_ = params.Xs.split_probability(k)
+            if type(params.Xs) is SparseTernary:
+                # split optimally and compute the probability of this event
+                success_probability_ = params.Xs.split_probability(k)
+            else:
+                split_h = (h * k / n).round('down')
+                # Assume each coefficient is sampled i.i.d.:
+                success_probability_ = (
+                    binomial(k, split_h) * binomial(n - k, h - split_h) / binomial(n, h)
+                )

             logT = RR(h * (log2(n) - log2(h) + log2(sd_rng - 1) + log2(e))) / (2 - delta)
             logT -= RR(log2(h) / 2)
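
The logT line is a Stirling-style estimate: a weight-h secret over a value range of size sd_rng has about C(n, h) * (sd_rng - 1)^h candidates, and log2 C(n, h) ≈ h * (log2 n - log2 h + log2 e) for h ≪ n; dividing by (2 - delta) and subtracting log2(h)/2 then models the MITM split. A quick numeric check of the approximation (toy values, not estimator code):

    from math import comb, e, log2

    n, h, sd_rng = 1024, 64, 3  # toy length, weight and value-range size
    exact = log2(comb(n, h) * (sd_rng - 1) ** h)
    approx = h * (log2(n) - log2(h) + log2(sd_rng - 1) + log2(e))
    print(exact, approx)  # roughly 405 vs 412: close at this scale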
@@ -273,21 +280,25 @@ def cost(
         nd_rng, nd_p = self.X_range(params.Xe)
         delta = nd_rng / params.q  # possible error range scaled

+        sd_rng, sd_p = self.X_range(params.Xs)
+        n = params.n
+
-        if type(params.Xs) is SparseTernary:
+        if params.Xs.is_sparse:
             h = params.Xs.hamming_weight
             # we assume the hamming weight to be distributed evenly across the two parts
             # if not we can rerandomize on the coordinates and try again -> repeat
-            sec_tab, sec_sea = params.Xs.split_balanced(k)
+            if type(params.Xs) is SparseTernary:
+                sec_tab, sec_sea = params.Xs.split_balanced(k)
+                size_tab = sec_tab.support_size()
+                size_sea = sec_sea.support_size()
+            else:
+                # Assume each coefficient is sampled i.i.d.:
+                split_h = (h * k / n).round('down')
+                size_tab = RR((sd_rng - 1) ** split_h * binomial(k, split_h))
+                size_sea = RR((sd_rng - 1) ** (h - split_h) * binomial(n - k, h - split_h))

-            size_tab = sec_tab.support_size()
-            size_sea = sec_sea.support_size()
             success_probability_ = size_tab * size_sea / params.Xs.support_size()

             sd_p = 1.0
         else:
-            sd_rng, sd_p = self.X_range(params.Xs)
-
             size_tab = sd_rng**k
             size_sea = sd_rng ** (n - k)
             success_probability_ = 1
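
The two branches are consistent in spirit: the i.i.d. estimate (sd_rng - 1)^split_h * C(k, split_h) counts all length-k vectors with split_h non-zero entries, while the SparseTernary support size additionally fixes the number of +1s and -1s. A toy comparison for a ternary secret (assumed semantics, not estimator code):

    from math import comb

    n, k, h = 256, 128, 32          # length, table part, hamming weight
    split_h = h * k // n            # expected weight on the table part
    p = split_h // 2                # balanced: half +1s, half -1s

    estimate = 2 ** split_h * comb(k, split_h)     # all 2**split_h sign patterns
    exact = comb(k, p) * comb(k - p, split_h - p)  # only the balanced patterns
    print(estimate, exact)  # estimate / exact = 2**16 / C(16, 8), about 5x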
9 changes: 9 additions & 0 deletions estimator/nd.py
@@ -464,6 +464,15 @@ def split_probability(self, new_n, new_hw=None):
         left, right = self.split_balanced(new_n, new_hw)
         return left.support_size() * right.support_size() / self.support_size()

+    @property
+    def is_sparse(self):
+        """
+        Always say this is a sparse distribution, even if p + m >= n/2, because there is
+        correlation between the coefficients: if you split the distribution into two of
+        half the length, then you expect in each of them to be half the weight.
+        """
+        return True
+
     @property
     def hamming_weight(self):
         return self.p + self.m
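
The docstring's claim can be checked numerically: even when p + m >= n/2, the weight of a fixed half of the coordinates concentrates around h/2, so splitting techniques still pay off. A small hypergeometric check (toy values, mirroring split_probability without the sign bookkeeping):

    from math import comb

    n, h = 256, 160   # p + m = 160 >= n/2: not sparse by a naive density test
    half = n // 2
    pr = comb(half, h // 2) * comb(half, h - h // 2) / comb(n, h)
    print(pr)  # about 0.10: a perfectly balanced split occurs once per ~10 tries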
