diff --git a/docs/algorithms/lwe-bkw.rst b/docs/algorithms/lwe-bkw.rst index 2b089df..5c7975c 100644 --- a/docs/algorithms/lwe-bkw.rst +++ b/docs/algorithms/lwe-bkw.rst @@ -6,7 +6,7 @@ Coded-BKW for LWE We construct an example LWE instance:: from estimator import * - params = LWE.Parameters(n=400, q=7981, Xs=ND.SparseTernary(384, 16), Xe=ND.CenteredBinomial(4), m=800) + params = LWE.Parameters(n=400, q=7981, Xs=ND.SparseTernary(16), Xe=ND.CenteredBinomial(4), m=800) params and estimate the cost of Coded-BKW [C:GuoJohSta15]_, [C:KirFou15]_:: diff --git a/docs/algorithms/lwe-dual.rst b/docs/algorithms/lwe-dual.rst index fe37582..c67a304 100644 --- a/docs/algorithms/lwe-dual.rst +++ b/docs/algorithms/lwe-dual.rst @@ -7,14 +7,14 @@ We construct an (easy) example LWE instance:: from estimator import * from estimator.lwe_dual import dual_hybrid, matzov - params = LWE.Parameters(n=200, q=7981, Xs=ND.SparseTernary(384, 16), Xe=ND.CenteredBinomial(4)) + params = LWE.Parameters(n=200, q=7981, Xs=ND.SparseTernary(16), Xe=ND.CenteredBinomial(4)) params -The simples (and quickest to estimate) algorithm is the "plain" dual attack as described in [PQCBook:MicReg09]_:: +The simplest (and quickest to estimate) algorithm is the "plain" dual attack as described in [PQCBook:MicReg09]_:: LWE.dual(params) -We can improve these results by considering a dual hybrid attack as in [EC:Albrecht17,INDOCRYPT:EspJouKha20]_:: +We can improve these results by considering a dual hybrid attack as in [EC:Albrecht17]_, [INDOCRYPT:EspJouKha20]_:: dual_hybrid(params) diff --git a/docs/algorithms/lwe-primal.rst b/docs/algorithms/lwe-primal.rst index 3e611f4..e4d80f9 100644 --- a/docs/algorithms/lwe-primal.rst +++ b/docs/algorithms/lwe-primal.rst @@ -6,7 +6,7 @@ LWE Primal Attacks We construct an (easy) example LWE instance:: from estimator import * - params = LWE.Parameters(n=200, q=7981, Xs=ND.SparseTernary(384, 16), Xe=ND.CenteredBinomial(4)) + params = LWE.Parameters(n=200, q=7981, Xs=ND.SparseTernary(16), Xe=ND.CenteredBinomial(4)) params The simplest (and quickest to estimate) model is solving via uSVP and assuming the Geometric Series @@ -21,9 +21,7 @@ we optimize β and d separately:: LWE.primal_usvp(params, red_shape_model=Simulator.GSA) -To get a more precise answer we may use the CN11 simulator by Chen and Nguyen [AC:CheNgu11]_ (as -`implemented in FPyLLL -_`):: +To get a more precise answer we may use the CN11 simulator by Chen and Nguyen [AC:CheNgu11]_ (as `implemented in FPyLLL `__):: LWE.primal_usvp(params, red_shape_model=Simulator.CN11) diff --git a/docs/schemes/hes.rst b/docs/schemes/hes.rst index c405638..1b48358 100644 --- a/docs/schemes/hes.rst +++ b/docs/schemes/hes.rst @@ -1,5 +1,5 @@ Homomorphic Encryption Parameters -=============================== +================================= :: diff --git a/estimator/__init__.py b/estimator/__init__.py index 8c9ce46..1c7c11f 100644 --- a/estimator/__init__.py +++ b/estimator/__init__.py @@ -2,11 +2,11 @@ __all__ = ['ND', 'Logging', 'RC', 'Simulator', 'LWE', 'NTRU', 'SIS', 'schemes'] -from .nd import NoiseDistribution as ND from .io import Logging from .reduction import RC from . import simulator as Simulator from . import lwe as LWE from . import ntru as NTRU +from . import nd as ND from . import sis as SIS from . 
import schemes
diff --git a/estimator/gb.py b/estimator/gb.py
index c819aa2..5da17b4 100644
--- a/estimator/gb.py
+++ b/estimator/gb.py
@@ -213,8 +213,7 @@ def __call__(
         rop: ≈2^227.2, dreg: 54, mem: ≈2^227.2, t: 4, m: 1024, tag: arora-gb
         >>> LWE.arora_gb(params.updated(Xs=ND.UniformMod(3), Xe=ND.CenteredBinomial(4), m=1024))
         rop: ≈2^189.9, dreg: 39, mem: ≈2^189.9, t: 4, m: 1024, tag: arora-gb
-        >>> Xs, Xe =ND.SparseTernary(1024, 64, 0), ND.DiscreteGaussian(2**10)
-        >>> LWE.arora_gb(LWE.Parameters(n=1024, q=2**40, Xs=Xs, Xe=Xe))
+        >>> LWE.arora_gb(LWE.Parameters(n=1024, q=2**40, Xs=ND.SparseBinary(64), Xe=ND.DiscreteGaussian(2**10)))
         rop: ≈2^inf, dreg: ≈2^inf, tag: arora-gb

     .. [EPRINT:ACFP14] Martin R. Albrecht, Carlos Cid, Jean-Charles Faugère & Ludovic Perret. (2014).
diff --git a/estimator/lwe_bkw.py b/estimator/lwe_bkw.py
index 3867333..4b64654 100644
--- a/estimator/lwe_bkw.py
+++ b/estimator/lwe_bkw.py
@@ -239,7 +239,7 @@ def sf(x, best):
     # the search cannot fail. It just outputs some X with X["oracle"]>m.
     if best["m"] > params.m:
         raise InsufficientSamplesError(
-            f"Got m≈2^{float(log(params.m, 2.0)):.1f} samples, but require ≈2^{float(log(best['m'],2.0)):.1f}.",
+            f"Got m≈2^{float(log(params.m, 2.0)):.1f} samples, but require ≈2^{float(log(best['m'], 2.0)):.1f}.",
             best["m"],
         )
     return best
diff --git a/estimator/lwe_dual.py b/estimator/lwe_dual.py
index 6aecd9a..18071fe 100644
--- a/estimator/lwe_dual.py
+++ b/estimator/lwe_dual.py
@@ -7,7 +7,6 @@
 """

 from functools import partial
-from dataclasses import replace

 from sage.all import oo, ceil, sqrt, log, cached_function, RR, exp, pi, e, coth, tanh

@@ -19,7 +18,7 @@ from .io import Logging
 from .conf import red_cost_model as red_cost_model_default, mitm_opt as mitm_opt_default
 from .errors import OutOfBoundsError, InsufficientSamplesError
-from .nd import NoiseDistribution
+from .nd import DiscreteGaussian, SparseTernary
 from .lwe_guess import exhaustive_search, mitm, distinguish

@@ -61,22 +60,26 @@ def dual_reduce(
     # Compute new secret distribution
     if params.Xs.is_sparse:
-        h = params.Xs.get_hamming_weight(params.n)
+        h = params.Xs.hamming_weight
         if not 0 <= h1 <= h:
             raise OutOfBoundsError(f"Splitting weight {h1} must be between 0 and h={h}.")
-        # assuming the non-zero entries are uniform
-        p = h1 / 2
-        red_Xs = NoiseDistribution.SparseTernary(params.n - zeta, h / 2 - p)
-        slv_Xs = NoiseDistribution.SparseTernary(zeta, p)
+
+        if type(params.Xs) is SparseTernary:
+            # split the +1 and -1 entries in a balanced way.
+            slv_Xs, red_Xs = params.Xs.split_balanced(zeta, h1)
+        else:
+            # TODO: Implement this for sparse secrets that are not SparseTernary,
+            # i.e. DiscreteGaussian with extremely small stddev.
+            raise NotImplementedError(f"Unknown how to exploit sparsity of {params.Xs}")

         if h1 == h:
             # no reason to do lattice reduction if we assume
             # that the hw on the reduction part is 0
-            return replace(params, Xs=slv_Xs, m=oo), 1
+            return params.updated(Xs=slv_Xs, m=oo), 1
         else:
             # distribution is i.i.d.
for each coordinate - red_Xs = replace(params.Xs, n=params.n - zeta) - slv_Xs = replace(params.Xs, n=zeta) + red_Xs = params.Xs.resize(params.n - zeta) + slv_Xs = params.Xs.resize(zeta) c = red_Xs.stddev * params.q / params.Xe.stddev @@ -91,7 +94,7 @@ def dual_reduce( # Compute new noise as in [INDOCRYPT:EspJouKha20] # ~ sigma_ = rho * red_Xs.stddev * delta ** (m_ + red_Xs.n) / c ** (m_ / (m_ + red_Xs.n)) sigma_ = rho * red_Xs.stddev * delta**d / c ** (m_ / d) - slv_Xe = NoiseDistribution.DiscreteGaussian(params.q * sigma_) + slv_Xe = DiscreteGaussian(params.q * sigma_) slv_params = LWEParameters( n=zeta, @@ -177,7 +180,7 @@ def cost( rep = 1 if params.Xs.is_sparse: - h = params.Xs.get_hamming_weight(params.n) + h = params.Xs.hamming_weight probability = RR(prob_drop(params.n, h, zeta, h1)) rep = prob_amplify(success_probability, probability) # don't need more samples to re-run attack, since we may @@ -210,7 +213,7 @@ def fft_solver(params, success_probability, t=0): probability = sqrt(success_probability) try: - size = params.Xs.support_size(n=params.n, fraction=probability) + size = params.Xs.support_size(probability) size_fft = 2**t except NotImplementedError: # not achieving required probability with search space @@ -367,7 +370,7 @@ def __call__( >>> from estimator import * >>> from estimator.lwe_dual import dual_hybrid - >>> params = LWE.Parameters(n=1024, q = 2**32, Xs=ND.Uniform(0,1), Xe=ND.DiscreteGaussian(3.0)) + >>> params = LWE.Parameters(n=1024, q = 2**32, Xs=ND.Binary, Xe=ND.DiscreteGaussian(3.0)) >>> LWE.dual(params) rop: ≈2^107.0, mem: ≈2^66.4, m: 970, β: 264, d: 1994, ↻: 1, tag: dual >>> dual_hybrid(params) @@ -377,13 +380,13 @@ def __call__( >>> dual_hybrid(params, mitm_optimization="numerical") rop: ≈2^129.0, m: 1145, k: 1, mem: ≈2^131.0, ↻: 1, β: 346, d: 2044, ζ: 125, tag: dual_mitm_hybrid - >>> params = params.updated(Xs=ND.SparseTernary(params.n, 32)) + >>> params = params.updated(Xs=ND.SparseTernary(32)) >>> LWE.dual(params) rop: ≈2^103.4, mem: ≈2^63.9, m: 904, β: 251, d: 1928, ↻: 1, tag: dual >>> dual_hybrid(params) - rop: ≈2^92.1, mem: ≈2^78.2, m: 716, β: 170, d: 1464, ↻: 1989, ζ: 276, h1: 8, tag: dual_hybrid + rop: ≈2^91.6, mem: ≈2^77.2, m: 711, β: 168, d: 1456, ↻: ≈2^11.2, ζ: 279, h1: 8, tag: dual_hybrid >>> dual_hybrid(params, mitm_optimization=True) - rop: ≈2^98.2, mem: ≈2^78.6, m: 728, k: 292, ↻: ≈2^18.7, β: 180, d: 1267, ζ: 485, h1: 17, tag: ... + rop: ≈2^98.7, mem: ≈2^78.6, m: 737, k: 288, ↻: ≈2^19.6, β: 184, d: 1284, ζ: 477, h1: 17, tag: dual_mitm_... 
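+
+        For sparse secrets, ζ is the number of guessed coordinates, h1 the Hamming weight
+        assumed on the guessed part, and ↻ the number of repetitions needed to amplify the
+        success probability of that guess (via ``prob_drop``/``prob_amplify`` above).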
>>> params = params.updated(Xs=ND.CenteredBinomial(8)) >>> LWE.dual(params) @@ -402,7 +405,7 @@ def __call__( rop: ≈2^160.7, mem: ≈2^156.8, m: 1473, k: 25, ↻: 1, β: 456, d: 2472, ζ: 25, tag: dual_mitm_hybrid >>> dual_hybrid(schemes.NTRUHPS2048509Enc) - rop: ≈2^131.7, mem: ≈2^128.5, m: 436, β: 358, d: 906, ↻: 1, ζ: 38, tag: dual_hybrid + rop: ≈2^136.2, mem: ≈2^127.8, m: 434, β: 356, d: 902, ↻: 35, ζ: 40, h1: 19, tag: dual_hybrid >>> LWE.dual(schemes.CHHS_4096_67) rop: ≈2^206.9, mem: ≈2^137.5, m: ≈2^11.8, β: 616, d: 7779, ↻: 1, tag: dual @@ -440,7 +443,7 @@ def _optimize_blocksize( log_level=None, fft=False, ): - h = params.Xs.get_hamming_weight(params.n) + h = params.Xs.hamming_weight h1_min = max(0, h - (params.n - zeta)) h1_max = min(zeta, h) if h1_min == h1_max: diff --git a/estimator/lwe_guess.py b/estimator/lwe_guess.py index fce54ed..0b96faf 100644 --- a/estimator/lwe_guess.py +++ b/estimator/lwe_guess.py @@ -15,7 +15,7 @@ from .lwe_parameters import LWEParameters from .prob import amplify as prob_amplify, drop as prob_drop, amplify_sigma from .util import local_minimum, log2 -from .nd import sigmaf +from .nd import sigmaf, SparseTernary class guess_composition: @@ -102,7 +102,7 @@ def sparse_solve(cls, f, params, log_level=5, **kwds): :param params: LWE parameters. """ base = params.Xs.bounds[1] - params.Xs.bounds[0] # we exclude zero - h = ceil(len(params.Xs) * params.Xs.density) # nr of non-zero entries + h = params.Xs.hamming_weight with local_minimum(0, params.n - 40, log_level=log_level) as it: for zeta in it: @@ -127,13 +127,13 @@ def __call__(self, params, log_level=5, **kwds): >>> from estimator import * >>> from estimator.lwe_guess import guess_composition - >>> guess_composition(LWE.primal_usvp)(schemes.Kyber512.updated(Xs=ND.SparseTernary(512, 16))) - rop: ≈2^99.4, red: ≈2^99.4, δ: 1.008705, β: 113, d: 421, tag: usvp, ↻: ≈2^37.5, ζ: 265, |S|: 1, ... + >>> guess_composition(LWE.primal_usvp)(schemes.Kyber512.updated(Xs=ND.SparseTernary(16))) + rop: ≈2^102.2, red: ≈2^102.2, δ: 1.008011, β: 132, d: 461, tag: usvp, ↻: ≈2^34.9, ζ: 252, |S|: 1, ... Compare:: - >>> LWE.primal_hybrid(schemes.Kyber512.updated(Xs=ND.SparseTernary(512, 16))) - rop: ≈2^85.8, red: ≈2^84.8, svp: ≈2^84.8, β: 105, η: 2, ζ: 366, |S|: ≈2^85.1, d: 315, prob: ≈2^-23.4, ... + >>> LWE.primal_hybrid(schemes.Kyber512.updated(Xs=ND.SparseTernary(16))) + rop: ≈2^85.8, red: ≈2^84.8, svp: ≈2^84.8, β: 105, η: 2, ζ: 366, |S|: ≈2^85.1, d: 315, prob: ≈2^-23.4, ↻:... 
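+
+        The gap is expected: ``primal_hybrid`` folds the guessing into the lattice attack
+        itself (optionally with meet-in-the-middle on the guessed part), whereas
+        ``guess_composition`` treats the underlying estimate as a black box and only
+        multiplies in the cost of guessing.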
""" params = LWEParameters.normalize(params) @@ -161,12 +161,12 @@ def __call__(self, params: LWEParameters, success_probability=0.99, quantum: boo >>> from estimator import * >>> from estimator.lwe_guess import exhaustive_search - >>> params = LWE.Parameters(n=64, q=2**40, Xs=ND.UniformMod(2), Xe=ND.DiscreteGaussian(3.2)) + >>> params = LWE.Parameters(n=64, q=2**40, Xs=ND.Binary, Xe=ND.DiscreteGaussian(3.2)) >>> exhaustive_search(params) rop: ≈2^73.6, mem: ≈2^72.6, m: 397.198 - >>> params = LWE.Parameters(n=1024, q=2**40, Xs=ND.SparseTernary(n=1024, p=32), Xe=ND.DiscreteGaussian(3.2)) + >>> params = LWE.Parameters(n=1024, q=2**40, Xs=ND.SparseTernary(32), Xe=ND.DiscreteGaussian(3.2)) >>> exhaustive_search(params) - rop: ≈2^417.3, mem: ≈2^416.3, m: ≈2^11.2 + rop: ≈2^413.9, mem: ≈2^412.9, m: ≈2^11.1 """ params = LWEParameters.normalize(params) @@ -175,7 +175,7 @@ def __call__(self, params: LWEParameters, success_probability=0.99, quantum: boo probability = sqrt(success_probability) try: - size = params.Xs.support_size(n=params.n, fraction=probability) + size = params.Xs.support_size(probability) except NotImplementedError: # not achieving required probability with search space # given our settings that means the search space is huge @@ -221,7 +221,7 @@ def X_range(self, nd): else: # setting fraction=0 to ensure that support size does not # throw error. we'll take the probability into account later - rng = nd.support_size(n=1, fraction=0.0) + rng = nd.resize(1).support_size(0.0) return rng, nd.gaussian_tail_prob def local_range(self, center): @@ -240,11 +240,16 @@ def mitm_analytical(self, params: LWEParameters, success_probability=0.99): # about 3x faster and reasonably accurate if params.Xs.is_sparse: - h = params.Xs.get_hamming_weight(n=params.n) - split_h = round(h * k / n) - success_probability_ = ( - binomial(k, split_h) * binomial(n - k, h - split_h) / binomial(n, h) - ) + h = params.Xs.hamming_weight + if type(params.Xs) is SparseTernary: + # split optimally and compute the probability of this event + success_probability_ = params.Xs.split_probability(k) + else: + split_h = (h * k / n).round('down') + # Assume each coefficient is sampled i.i.d.: + success_probability_ = ( + binomial(k, split_h) * binomial(n - k, h - split_h) / binomial(n, h) + ) logT = RR(h * (log2(n) - log2(h) + log2(sd_rng - 1) + log2(e))) / (2 - delta) logT -= RR(log2(h) / 2) @@ -279,16 +284,20 @@ def cost( n = params.n if params.Xs.is_sparse: - h = params.Xs.get_hamming_weight(n=n) - + h = params.Xs.hamming_weight # we assume the hamming weight to be distributed evenly across the two parts # if not we can rerandomize on the coordinates and try again -> repeat - split_h = round(h * k / n) - size_tab = RR((sd_rng - 1) ** split_h * binomial(k, split_h)) - size_sea = RR((sd_rng - 1) ** (h - split_h) * binomial(n - k, h - split_h)) - success_probability_ = ( - binomial(k, split_h) * binomial(n - k, h - split_h) / binomial(n, h) - ) + if type(params.Xs) is SparseTernary: + sec_tab, sec_sea = params.Xs.split_balanced(k) + size_tab = sec_tab.support_size() + size_sea = sec_sea.support_size() + else: + # Assume each coefficient is sampled i.i.d.: + split_h = (h * k / n).round('down') + size_tab = RR((sd_rng - 1) ** split_h * binomial(k, split_h)) + size_sea = RR((sd_rng - 1) ** (h - split_h) * binomial(n - k, h - split_h)) + + success_probability_ = size_tab * size_sea / params.Xs.support_size() else: size_tab = sd_rng**k size_sea = sd_rng ** (n - k) @@ -338,16 +347,16 @@ def __call__(self, params: LWEParameters, 
success_probability=0.99, optimization
         >>> from estimator import *
         >>> from estimator.lwe_guess import mitm

-        >>> params = LWE.Parameters(n=64, q=2**40, Xs=ND.UniformMod(2), Xe=ND.DiscreteGaussian(3.2))
+        >>> params = LWE.Parameters(n=64, q=2**40, Xs=ND.Binary, Xe=ND.DiscreteGaussian(3.2))
         >>> mitm(params)
         rop: ≈2^37.0, mem: ≈2^37.2, m: 37, k: 32, ↻: 1
         >>> mitm(params, optimization="numerical")
         rop: ≈2^39.2, m: 36, k: 32, mem: ≈2^39.1, ↻: 1
-        >>> params = LWE.Parameters(n=1024, q=2**40, Xs=ND.SparseTernary(n=1024, p=32), Xe=ND.DiscreteGaussian(3.2))
+        >>> params = LWE.Parameters(n=1024, q=2**40, Xs=ND.SparseTernary(32), Xe=ND.DiscreteGaussian(3.2))
         >>> mitm(params)
-        rop: ≈2^215.4, mem: ≈2^210.2, m: ≈2^13.1, k: 512, ↻: 43
+        rop: ≈2^217.8, mem: ≈2^210.2, m: ≈2^15.5, k: 512, ↻: 226
         >>> mitm(params, optimization="numerical")
-        rop: ≈2^216.0, m: ≈2^13.1, k: 512, mem: ≈2^211.4, ↻: 43
+        rop: ≈2^215.6, m: ≈2^15.5, k: 512, mem: ≈2^208.6, ↻: 226

         """
         Cost.register_impermanent(rop=True, mem=False, m=True, k=False)
@@ -400,7 +409,7 @@ def __call__(self, params: LWEParameters, success_probability=0.99):
         >>> from estimator import *
         >>> from estimator.lwe_guess import distinguish

-        >>> params = LWE.Parameters(n=0, q=2 ** 32, Xs=ND.UniformMod(2), Xe=ND.DiscreteGaussian(2 ** 32))
+        >>> params = LWE.Parameters(n=0, q=2 ** 32, Xs=ND.Binary, Xe=ND.DiscreteGaussian(2 ** 32))
         >>> distinguish(params)
         rop: ≈2^60.0, mem: ≈2^60.0, m: ≈2^60.0
diff --git a/estimator/lwe_parameters.py b/estimator/lwe_parameters.py
index 6822cba..8bb1b04 100644
--- a/estimator/lwe_parameters.py
+++ b/estimator/lwe_parameters.py
@@ -1,10 +1,9 @@
 # -*- coding: utf-8 -*-
 from dataclasses import dataclass
-from copy import copy

 from sage.all import oo, binomial, log, sqrt, ceil

-from .nd import NoiseDistribution
+from .nd import NoiseDistribution, DiscreteGaussian
 from .errors import InsufficientSamplesError

@@ -24,11 +23,9 @@ class LWEParameters:
     tag: str = None  #: a name for the parameter set

     def __post_init__(self, **kwds):
-        self.Xs = copy(self.Xs)
-        self.Xs.n = self.n
+        self.Xs = self.Xs.resize(self.n)
         if self.m < oo:
-            self.Xe = copy(self.Xe)
-            self.Xe.n = self.m
+            self.Xe = self.Xe.resize(self.m)

     @property
     def _homogeneous(self):
@@ -123,12 +120,12 @@ def amplify_m(self, m):
         # -two signs per position (+1,-1)
         # - all "-" and all "+" are the same
         if binomial(self.m, k) * 2**k - 1 >= m:
-            Xe = NoiseDistribution.DiscreteGaussian(float(sqrt(k) * self.Xe.stddev))
+            Xe = DiscreteGaussian(float(sqrt(k) * self.Xe.stddev))
             d["Xe"] = Xe
             d["m"] = ceil(m)
             return LWEParameters(**d)
         else:
-            raise NotImplementedError(f"Cannot amplify to ≈2^{log(m,2):1} using {{+1,-1}} additions.")
+            raise NotImplementedError(f"Cannot amplify to ≈2^{log(m, 2):1} using {{+1,-1}} additions.")

     def switch_modulus(self):
         """
@@ -143,10 +140,11 @@ def switch_modulus(self):
             LWEParameters(n=128, q=5289, Xs=D(σ=0.82), Xe=D(σ=3.08), m=+Infinity, tag=None)

         """
-        n = self.Xs.density * len(self.Xs)
+        # Note: hamming_weight rounds to an integer, which we do not want here.
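+        # Only the (expected) h non-zero coordinates of the secret pick up rounding
+        # noise, so h rather than n enters the noise estimate below.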
+ h = len(self.Xs) * self.Xs._density - # n uniform in -(0.5,0.5) ± stddev(χ_s) - Xr_stddev = sqrt(n / 12) * self.Xs.stddev # rounding noise + # h uniform in -(0.5,0.5) ± stddev(χ_s) + Xr_stddev = sqrt(h / 12) * self.Xs.stddev # rounding noise # χ_r == p/q ⋅ χ_e # we want the rounding noise match the scaled noise p = ceil(Xr_stddev * self.q / self.Xe.stddev) @@ -160,7 +158,7 @@ def switch_modulus(self): self.n, p, Xs=self.Xs, - Xe=NoiseDistribution.DiscreteGaussian(sqrt(2) * self.Xe.stddev * scale), + Xe=DiscreteGaussian(sqrt(2) * self.Xe.stddev * scale), m=self.m, tag=f"{self.tag},scaled" if self.tag else None, ) diff --git a/estimator/lwe_primal.py b/estimator/lwe_primal.py index 87dda2b..6f38b24 100644 --- a/estimator/lwe_primal.py +++ b/estimator/lwe_primal.py @@ -405,7 +405,7 @@ def ssf(x): if zeta: # the number of non-zero entries - h = ceil(len(params.Xs) * params.Xs.density) + h = params.Xs.hamming_weight probability = RR(prob_drop(params.n, h, zeta)) hw = 1 while hw < min(h, zeta): @@ -568,17 +568,18 @@ def __call__( EXAMPLES:: >>> from estimator import * - >>> LWE.primal_hybrid(schemes.Kyber512.updated(Xs=ND.SparseTernary(512, 16)), mitm = False, babai = False) - rop: ≈2^91.5, red: ≈2^90.7, svp: ≈2^90.2, β: 178, η: 21, ζ: 256, |S|: ≈2^56.6, d: 531, ... + >>> params = schemes.Kyber512.updated(Xs=ND.SparseTernary(16)) + >>> LWE.primal_hybrid(params, mitm=False, babai=False) + rop: ≈2^91.5, red: ≈2^90.7, svp: ≈2^90.2, β: 178, η: 21, ζ: 256, |S|: ≈2^56.6, d: 531, prob: 0.003, ↻: 1... - >>> LWE.primal_hybrid(schemes.Kyber512.updated(Xs=ND.SparseTernary(512, 16)), mitm = False, babai = True) - rop: ≈2^88.7, red: ≈2^88.0, svp: ≈2^87.2, β: 98, η: 2, ζ: 323, |S|: ≈2^39.7, d: 346, ... + >>> LWE.primal_hybrid(params, mitm=False, babai=True) + rop: ≈2^88.7, red: ≈2^88.0, svp: ≈2^87.2, β: 98, η: 2, ζ: 323, |S|: ≈2^39.7, d: 346, prob: ≈2^-28.4, ↻: ... - >>> LWE.primal_hybrid(schemes.Kyber512.updated(Xs=ND.SparseTernary(512, 16)), mitm = True, babai = False) - rop: ≈2^74.1, red: ≈2^73.7, svp: ≈2^71.9, β: 104, η: 16, ζ: 320, |S|: ≈2^77.1, d: 359, ... + >>> LWE.primal_hybrid(params, mitm=True, babai=False) + rop: ≈2^74.1, red: ≈2^73.7, svp: ≈2^71.9, β: 104, η: 16, ζ: 320, |S|: ≈2^77.1, d: 359, prob: ≈2^-12.3, ↻... - >>> LWE.primal_hybrid(schemes.Kyber512.updated(Xs=ND.SparseTernary(512, 16)), mitm = True, babai = True) - rop: ≈2^85.8, red: ≈2^84.8, svp: ≈2^84.8, β: 105, η: 2, ζ: 366, |S|: ≈2^85.1, d: 315, ... + >>> LWE.primal_hybrid(params, mitm=True, babai=True) + rop: ≈2^85.8, red: ≈2^84.8, svp: ≈2^84.8, β: 105, η: 2, ζ: 366, |S|: ≈2^85.1, d: 315, prob: ≈2^-23.4, ↻:... TESTS: @@ -622,31 +623,18 @@ def __call__( log_level=log_level + 1, ) - def find_zeta_max(params, red_cost_model): + if zeta is None: + # Find the smallest value for zeta such that the square root of the search space for + # zeta is larger than the number of operations to solve uSVP on the whole LWE instance + # (without guessing). 
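+            # Beyond that point, even a square-root (meet-in-the-middle style) speed-up
+            # over searching the guessed part would cost more than plain uSVP.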
            usvp_cost = primal_usvp(params, red_cost_model=red_cost_model)["rop"]
-        zeta_max = 1
-        while zeta_max < params.n:
-            # TODO: once support_size() is supported for NTRU, remove the below try/except
-            try:
-                if params.Xs.support_size(zeta_max) > usvp_cost:
-                    # double it for mitm
-                    return 2 * zeta_max
-                zeta_max +=1
-            except NotImplementedError:
-                return params.n
-        return params.n
+            zeta_max = 1
+            while zeta_max < params.n and sqrt(params.Xs.resize(zeta_max).support_size()) < usvp_cost:
+                zeta_max += 1

-        if zeta is None:
-            zeta_max = find_zeta_max(params, red_cost_model)
            with local_minimum(0, min(zeta_max, params.n), log_level=log_level) as it:
                for zeta in it:
-                    it.update(
-                        f(
-                            zeta=zeta,
-                            optimize_d=False,
-                            **kwds,
-                        )
-                    )
+                    it.update(f(zeta=zeta, optimize_d=False, **kwds))
                # TODO: this should not be required
                cost = min(it.y, f(0, optimize_d=False, **kwds))
        else:
diff --git a/estimator/nd.py b/estimator/nd.py
index c693e17..7c2fa5e 100644
--- a/estimator/nd.py
+++ b/estimator/nd.py
@@ -1,8 +1,9 @@
 # -*- coding: utf-8 -*-
+from copy import copy
 from dataclasses import dataclass

-from sage.all import binomial, ceil, exp, log, oo, parent, pi, RealField, RR, sqrt
+from sage.all import binomial, ceil, exp, floor, log, oo, parent, pi, QQ, RealField, RR, sqrt


 def stddevf(sigma):
@@ -13,18 +14,16 @@ def stddevf(sigma):

     EXAMPLE::

-        >>> from estimator.nd import stddevf
-        >>> stddevf(64.0)
+        >>> from estimator import *
+        >>> ND.stddevf(64.0)
         25.532...

-        >>> stddevf(64)
+        >>> ND.stddevf(64)
         25.532...

-        >>> stddevf(RealField(256)(64)).prec()
+        >>> ND.stddevf(RealField(256)(64)).prec()
         256
-
     """
-
     try:
         prec = parent(sigma).prec()
     except AttributeError:
@@ -44,18 +43,17 @@ def sigmaf(stddev):

     EXAMPLE::

-        >>> from estimator.nd import stddevf, sigmaf
+        >>> from estimator import *
         >>> n = 64.0
-        >>> sigmaf(stddevf(n))
+        >>> ND.sigmaf(ND.stddevf(n))
         64.000...
-        >>> sigmaf(RealField(128)(1.0))
+        >>> ND.sigmaf(RealField(128)(1.0))
         2.5066282746310005024157652848110452530
-        >>> sigmaf(1.0)
+        >>> ND.sigmaf(1.0)
         2.506628274631...
-        >>> sigmaf(1)
+        >>> ND.sigmaf(1)
         2.506628274631...
-
     """
     RR = parent(stddev)
     # check that we got ourselves a real number type
@@ -71,21 +69,28 @@ class NoiseDistribution:
     """
     All noise distributions are instances of this class.
-
+    It is recommended to pick one of the available implementations below:
+    - DiscreteGaussian
+    - DiscreteGaussianAlpha
+    - CenteredBinomial
+    - Uniform
+    - UniformMod
+    - SparseTernary
+    - SparseBinary
+    - Binary
+    - Ternary
+
+    NOTE:
+    Generally, to generate an LWE parameter set you call one of the above for the secret and error,
+    **without** specifying the dimensions `n` and `m` for secret/error respectively!
+    These are initialized when constructing the LWEParameters object.
     """
-
-    # cut-off for Gaussian distributions
-    gaussian_tail_bound = 2
-
-    # probability that a coefficient falls within the cut-off
-    gaussian_tail_prob = 1 - 2 * exp(-4 * pi)
-
-    stddev: float
-    mean: float = 0
-    n: int = None
-    bounds: tuple = (None, None)
-    density: float = 1.0  # Hamming weight / dimension.
-    tag: str = ""
+    n: int = None  # dimension of noise
+    mean: float = 0  # expectation value
+    stddev: float = 0  # standard deviation (square root of variance)
+    bounds: tuple = (-oo, oo)  # range in which each coefficient is sampled with high probability
+    is_Gaussian_like: bool = False  # whether the distribution "decays like a gaussian"
+    _density: float = 1.0  # proportion of nonzero coefficients in a sample

     def __lt__(self, other):
         """
@@ -93,7 +98,7 @@
         EXAMPLE::

-            >>> from estimator.nd import NoiseDistribution as ND
+            >>> from estimator import *
             >>> ND.DiscreteGaussian(2.0) < ND.CenteredBinomial(18)
             True
             >>> ND.DiscreteGaussian(3.0) < ND.CenteredBinomial(18)
@@ -113,7 +118,7 @@ def __le__(self, other):
         EXAMPLE::

-            >>> from estimator.nd import NoiseDistribution as ND
+            >>> from estimator import *
             >>> ND.DiscreteGaussian(2.0) <= ND.CenteredBinomial(18)
             True
             >>> ND.DiscreteGaussian(3.0) <= ND.CenteredBinomial(18)
@@ -131,7 +136,7 @@ def __str__(self):
         """
         EXAMPLE::

-            >>> from estimator.nd import NoiseDistribution as ND
+            >>> from estimator import *
             >>> ND.DiscreteGaussianAlpha(0.01, 7681)
             D(σ=30.64)

@@ -151,8 +156,8 @@ def __hash__(self):
         """
         EXAMPLE::

-            >>> from estimator.nd import NoiseDistribution as ND
-            >>> hash(ND(3.0, 1.0)) == hash((3.0, 1.0, None))
+            >>> from estimator import *
+            >>> hash(ND.DiscreteGaussian(3.0, 1.0)) == hash((3.0, 1.0, None))
             True

         """
@@ -160,226 +165,370 @@ def __len__(self):
         """
+        Dimension of this noise distribution, i.e. the number of coefficients that get sampled.
+
         EXAMPLE::

-            >>> from estimator.nd import NoiseDistribution as ND
-            >>> D = ND.SparseTernary(1024, p=128, m=128)
-            >>> len(D)
+            >>> from estimator import *
+            >>> len(ND.SparseTernary(128, n=1024))
             1024
-            >>> int(round(len(D) * float(D.density)))
-            256

         """
-        if self.n is not None:
-            return self.n
-        else:
+        if self.n is None:
             raise ValueError("Distribution has no length.")
+        return self.n
+
+    def resize(self, new_n):
+        """
+        Return an altered distribution having dimension `new_n`.
+
+        :param int new_n: new dimension to change to
+        """
+        new_self = copy(self)
+        new_self.n = new_n
+        return new_self

     @property
-    def is_Gaussian_like(self):
-        return ("Gaussian" in self.tag) or ("CenteredBinomial" in self.tag)
+    def hamming_weight(self):
+        """
+        The number of non-zero coefficients in this distribution.
+
+        EXAMPLE::
+
+            >>> from estimator import *
+            >>> ND.SparseTernary(128, n=1024).hamming_weight
+            256
+            >>> ND.SparseTernary(128, 64, 1024).hamming_weight
+            192
+        """
+        return round(len(self) * float(self._density))

     @property
     def is_bounded(self):
+        """
+        Whether the values of the coefficients are bounded.
+        """
         return (self.bounds[1] - self.bounds[0]) < oo

     @property
     def is_sparse(self):
         """
-        We consider a distribution "sparse" if its density is < 1/2.
+        Whether the density of the distribution is < 1/2.
+        Note: 1/2 might be considered somewhat arbitrary.
         """
         # NOTE: somewhat arbitrary
-        return self.density < 0.5
+        return self._density < 0.5
+
+    def support_size(self, fraction=1.0):
+        raise NotImplementedError("support_size")
+
+
+class DiscreteGaussian(NoiseDistribution):
+    """
+    A discrete Gaussian distribution with standard deviation ``stddev`` per component.
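+    Note that ``stddev`` is the standard deviation of the distribution, not the Gaussian
+    width parameter σ; use ``stddevf``/``sigmaf`` above to convert between the two.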
+ + EXAMPLE:: + + >>> from estimator import * + >>> ND.DiscreteGaussian(3.0, 1.0) + D(σ=3.00, μ=1.00) + """ + # cut-off for Gaussian distributions + gaussian_tail_bound: int = 2 + # probability that a coefficient falls within the cut-off + gaussian_tail_prob: float = 1 - 2 * exp(-4 * pi) + + def __init__(self, stddev, mean=0, n=None): + b_val = oo if n is None else ceil(log(n, 2) * stddev) + density = max(0.0, 1 - RR(1 / sigmaf(stddev))) # NOTE: approximation that is accurate for large stddev. + + super().__init__( + n=n, + mean=mean, + stddev=stddev, + bounds=(-b_val, b_val), + _density=density, + is_Gaussian_like=True, + ) - def support_size(self, n=None, fraction=1.0): + def support_size(self, fraction=1.0): """ Compute the size of the support covering the probability given as fraction. EXAMPLE:: - >>> from estimator.nd import NoiseDistribution as ND - >>> D = ND.Uniform(-3,3, 64) - >>> D.support_size(fraction=.99) - 1207562882759477428726191443614714994252339953407098880 - >>> D = ND.SparseTernary(64, 8) - >>> D.support_size() - 32016101348447354880 - """ - if not n: - if not self.n: - raise ValueError(f"Length required to determine support size, but n was {n}.") - n = self.n - - if "SparseTernary" in self.tag: - h = self.get_hamming_weight(n) - # TODO: this is assuming that the non-zero entries are uniform over {-1,1} - # need p and m for more accurate calculation - size = 2**h * binomial(n, h) * RR(fraction) - elif self.is_bounded: - # TODO: this might be suboptimal/inaccurate for binomial distribution - a, b = self.bounds - size = RR(fraction) * (b - a + 1) ** n - else: - # Looks like nd is Gaussian - # -> we'll treat it as bounded (with failure probability) - t = self.gaussian_tail_bound - p = self.gaussian_tail_prob + >>> from estimator import * + >>> ND.DiscreteGaussian(1.0, n=128).support_size(0.99) + 2.68643790357272e174 + """ + # We will treat this noise distribution as bounded with failure probability `1 - fraction`. + n = len(self) + t = self.gaussian_tail_bound + p = self.gaussian_tail_prob + + if p**n < fraction: + raise NotImplementedError( + f"TODO(DiscreteGaussian.support_size): raise t. {RR(p ** n)}, {n}, {fraction}" + ) - if p**n < fraction: - raise NotImplementedError( - f"TODO(nd.support-size): raise t. {RR(p ** n)}, {n}, {fraction}" - ) + b = 2 * t * sigmaf(self.stddev) + 1 + return RR(2.0 * b + 1)**n - b = 2 * t * sigmaf(self.stddev) + 1 - return (2 * b + 1) ** n - return ceil(size) - def get_hamming_weight(self, n=None): - if not n: - if not self.n: - raise ValueError("Length required to determine hamming weight.") - n = self.n +def DiscreteGaussianAlpha(alpha, q, mean=0, n=None): + """ + A discrete Gaussian distribution with standard deviation α⋅q/√(2π) per component. - return round(n * float(self.density)) + EXAMPLE:: - @staticmethod - def DiscreteGaussian(stddev, mean=0, n=None): - """ - A discrete Gaussian distribution with standard deviation ``stddev`` per component. + >>> from estimator import * + >>> alpha, q = 0.001, 2048 + >>> ND.DiscreteGaussianAlpha(alpha, q) + D(σ=0.82) + >>> ND.DiscreteGaussianAlpha(alpha, q) == ND.DiscreteGaussian(ND.stddevf(alpha * q)) + True + """ + return DiscreteGaussian(RR(stddevf(alpha * q)), RR(mean), n) - EXAMPLE:: - >>> from estimator.nd import NoiseDistribution as ND - >>> ND.DiscreteGaussian(3.0, 1.0) - D(σ=3.00, μ=1.00) +class CenteredBinomial(NoiseDistribution): + """ + Sample a_1, …, a_η, b_1, …, b_η uniformly from {0, 1}, and return Σ(a_i - b_i). 
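+    It has mean 0, standard deviation √(η/2), and coefficients bounded by η in absolute value.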
- """ - b_val = oo if n is None else ceil(log(n, 2) * stddev) - return NoiseDistribution( - stddev=RR(stddev), - mean=RR(mean), + EXAMPLE:: + + >>> from estimator import * + >>> ND.CenteredBinomial(8) + D(σ=2.00) + """ + def __init__(self, eta, n=None): + density = 1 - binomial(2 * eta, eta) * 2 ** (-2 * eta) + + super().__init__( n=n, - bounds=(-b_val, b_val), - density=1 - min(RR(1 / (sqrt(2 * pi) * stddev)), 1.0), - tag="DiscreteGaussian", + mean=0, + stddev=RR(sqrt(eta / 2.0)), + bounds=(-eta, eta), + _density=density, + is_Gaussian_like=True, ) - @staticmethod - def DiscreteGaussianAlpha(alpha, q, mean=0, n=None): + def support_size(self, fraction=1.0): """ - A discrete Gaussian distribution with standard deviation α⋅q/√(2π) per component. + Compute the size of the support covering the probability given as fraction. EXAMPLE:: - >>> from estimator.nd import NoiseDistribution as ND - >>> ND.DiscreteGaussianAlpha(0.001, 2048) - D(σ=0.82) + >>> from estimator import * + >>> ND.CenteredBinomial(3, 10).support_size() + 282475249 + >>> ND.CenteredBinomial(3, 10).support_size(0.99) + 279650497 + """ + # TODO: this might be suboptimal/inaccurate for binomial distribution + a, b = self.bounds + return ceil(RR(fraction) * (b - a + 1)**len(self)) + + +class Uniform(NoiseDistribution): + """ + Uniform distribution ∈ ``ZZ ∩ [a, b]``, endpoints inclusive. + EXAMPLE:: + + >>> from estimator import * + >>> ND.Uniform(-3, 3) + D(σ=2.00) + >>> ND.Uniform(-4, 3) + D(σ=2.29, μ=-0.50) + """ + def __init__(self, a, b, n=None): + a, b = int(ceil(a)), int(floor(b)) + if b < a: + raise ValueError(f"upper limit must be larger than lower limit but got: {b} < {a}") + m = b - a + 1 + + super().__init__( + n=n, + mean=RR((a + b) / 2), + stddev=RR(sqrt((m**2 - 1) / 12)), + bounds=(a, b), + _density=(1 - 1 / m if a <= 0 and b >= 0 else 1), + ) + + def __hash__(self): + """ + EXAMPLE:: + + >>> from estimator import * + >>> hash(ND.Uniform(-10, 10)) == hash(("Uniform", (-10, 10), None)) + True """ - stddev = stddevf(alpha * q) - return NoiseDistribution.DiscreteGaussian(stddev=RR(stddev), mean=RR(mean), n=n) + return hash(("Uniform", self.bounds, self.n)) - @staticmethod - def CenteredBinomial(eta, n=None): + def support_size(self, fraction=1.0): """ - Sample a_1, …, a_η, b_1, …, b_η and return Σ(a_i - b_i). + Compute the size of the support covering the probability given as fraction. EXAMPLE:: - >>> from estimator.nd import NoiseDistribution as ND - >>> ND.CenteredBinomial(8) - D(σ=2.00) - + >>> from estimator import * + >>> ND.Uniform(-3, 3, 64).support_size(0.99) + 1207562882759477428726191443614714994252339953407098880 """ - stddev = sqrt(eta / 2.0) + # TODO: this might be suboptimal/inaccurate for binomial distribution + a, b = self.bounds + return ceil(RR(fraction) * (b - a + 1)**len(self)) + + +def UniformMod(q, n=None): + """ + Uniform mod ``q``, with balanced representation, i.e. values in ZZ ∩ [-q/2, q/2). + + EXAMPLE:: + + >>> from estimator import * + >>> ND.UniformMod(7) + D(σ=2.00) + >>> ND.UniformMod(8) + D(σ=2.29, μ=-0.50) + >>> ND.UniformMod(2) == ND.Uniform(-1, 0) + True + """ + a = -(q // 2) + b = a + q - 1 + return Uniform(a, b, n=n) - return NoiseDistribution( - stddev=RR(stddev), - density=1 - binomial(2 * eta, eta) * 2 ** (-2 * eta), - mean=RR(0), + +class SparseTernary(NoiseDistribution): + """ + Distribution of vectors of length ``n`` with ``p`` entries of 1 and ``m`` entries of -1, rest 0. 
+
+    EXAMPLE::
+
+        >>> from estimator import *
+        >>> ND.SparseTernary(10, n=100)
+        D(σ=0.45)
+        >>> ND.SparseTernary(10, 10, 100)
+        D(σ=0.45)
+        >>> ND.SparseTernary(10, 8, 100)
+        D(σ=0.42, μ=0.02)
+        >>> ND.SparseTernary(0, 0, 0).support_size()
+        1
+    """
+    def __init__(self, p, m=None, n=None):
+        p, m = int(p), int(p if m is None else m)
+        self.p, self.m = p, m
+
+        # Yes, n=0 might happen when estimating the cost of the dual attack! Support size is 1
+        if n is None:
+            # Treat it the same as n=0.
+            n = 0
+        mean = 0 if n == 0 else RR((p - m) / n)
+        density = 0 if n == 0 else RR((p + m) / n)
+        stddev = sqrt(density - mean**2)
+
+        super().__init__(
             n=n,
-            bounds=(-eta, eta),
-            tag="CenteredBinomial",
+            mean=mean,
+            stddev=stddev,
+            bounds=(0 if m == 0 else -1, 0 if p == 0 else 1),
+            _density=density,
         )

-    @staticmethod
-    def Uniform(a, b, n=None):
+    def __hash__(self):
         """
-        Uniform distribution ∈ ``[a,b]``, endpoints inclusive.
-
         EXAMPLE::
-
-            >>> from estimator.nd import NoiseDistribution as ND
-            >>> ND.Uniform(-3, 3)
-            D(σ=2.00)
-            >>> ND.Uniform(-4, 3)
-            D(σ=2.29, μ=-0.50)
+            >>> from estimator import *
+            >>> hash(ND.SparseTernary(16, n=128)) == hash(("SparseTernary", 128, 16, 16))
+            True
+        """
+        return hash(("SparseTernary", self.n, self.p, self.m))

+    def resize(self, new_n):
         """
-        if b < a:
-            raise ValueError(f"upper limit must be larger than lower limit but got: {b} < {a}")
-        m = b - a + 1
-        mean = (a + b) / RR(2)
-        stddev = sqrt((m**2 - 1) / RR(12))
+        Return an altered distribution having dimension `new_n`.
+        Assumes `p` and `m` stay the same.
+        """
+        return SparseTernary(self.p, self.m, new_n)

-        if a <= 0 and b >= 0:
-            density = 1.0 - 1.0 / m
-        else:
-            density = 0.0
+    def split_balanced(self, new_n, new_hw=None):
+        """
+        Split the +1 and -1 entries in a balanced way, and return two SparseTernary distributions:
+        one of dimension `new_n` and the other of dimension `n - new_n`.

-        return NoiseDistribution(
-            n=n, stddev=stddev, mean=mean, bounds=(a, b), density=density, tag="Uniform"
+        :param new_n: dimension of the first noise distribution
+        :param new_hw: hamming weight of the first noise distribution. If None, we take the most likely weight.
+        :return: tuple of (SparseTernary, SparseTernary)
+        """
+        n, hw = len(self), self.hamming_weight
+        if new_hw is None:
+            # Most likely split has same density: new_hw / new_n = hw / n.
+            new_hw = int(QQ(hw * new_n / n).round('down'))
+
+        new_p = int((QQ(new_hw * self.p) / hw).round('down'))
+        new_m = new_hw - new_p
+        return (
+            SparseTernary(new_p, new_m, new_n),
+            SparseTernary(self.p - new_p, self.m - new_m, n - new_n)
         )

-    @staticmethod
-    def UniformMod(q, n=None):
+    def split_probability(self, new_n, new_hw=None):
         """
-        Uniform mod ``q``, with balanced representation.
-
-        EXAMPLE::
+        Compute the probability that, when splitting off `new_n` coefficients, they carry `new_hw`
+        of the Hamming weight and the remaining part carries the rest. This is the number of such
+        splits divided by the support size.
+        """
+        left, right = self.split_balanced(new_n, new_hw)
+        return left.support_size() * right.support_size() / self.support_size()

-            >>> from estimator.nd import NoiseDistribution as ND
-            >>> ND.UniformMod(7)
-            D(σ=2.00)
-            >>> ND.UniformMod(8)
-            D(σ=2.29, μ=-0.50)
+    @property
+    def is_sparse(self):
+        """
+        Always say this is a sparse distribution, even if p + m >= n/2, because there is correlation
+        between the coefficients: if you split the distribution into two of half the length, then you
+        expect each of them to carry half the weight.
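+        Treating it as sparse lets attacks exploit this correlation via ``split_balanced``
+        instead of modelling the coefficients as i.i.d.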
+ """ + return True + @property + def hamming_weight(self): + return self.p + self.m + def support_size(self, fraction=1.0): """ - a = -(q // 2) - b = -a -1 if q % 2 == 0 else -a - return NoiseDistribution.Uniform(a, b, n=n) + Compute the size of the support covering the probability given as fraction. + + EXAMPLE:: - @staticmethod - def SparseTernary(n, p, m=None): + >>> from estimator import * + >>> ND.SparseTernary(8, 8, 64).support_size() + 6287341680214194176 """ - Distribution of vectors of length ``n`` with ``p`` entries of 1 and ``m`` entries of -1, rest 0. + n, p, m = len(self), self.p, self.m + return ceil(binomial(n, p) * binomial(n - p, m) * RR(fraction)) - EXAMPLE:: - >>> from estimator.nd import NoiseDistribution as ND - >>> ND.SparseTernary(100, p=10) - D(σ=0.45) - >>> ND.SparseTernary(100, p=10, m=10) - D(σ=0.45) - >>> ND.SparseTernary(100, p=10, m=8) - D(σ=0.42, μ=0.02) - - """ - if m is None: - m = p - - if n == 0: - # this might happen in the dual attack - return NoiseDistribution( - stddev=0, mean=0, density=0, bounds=(-1, 1), tag="SparseTernary", n=0 - ) - mean = RR(p / n - m / n) - stddev = sqrt(p / n * (1 - mean)**2 + - m / n * (-1 - mean)**2 + - (n - (p + m)) / n * (mean)**2) +def SparseBinary(hw, n=None): + """ + Sparse binary noise distribution having `hw` coefficients equal to 1, and the rest zero. - density = RR((p + m) / n) - return NoiseDistribution( - stddev=stddev, mean=mean, density=density, bounds=(-1, 1), tag="SparseTernary", n=n - ) + EXAMPLE:: + + >>> from estimator import * + >>> ND.SparseBinary(10).bounds + (0, 1) + """ + return SparseTernary(hw, 0, n) + + +""" +Binary noise uniform from {0, 1}^n +""" +Binary = Uniform(0, 1) + +""" +Ternary noise uniform from {-1, 0, 1}^n +""" +Ternary = Uniform(-1, 1) diff --git a/estimator/ntru.py b/estimator/ntru.py index 2cd27ef..b4bbb5d 100644 --- a/estimator/ntru.py +++ b/estimator/ntru.py @@ -46,6 +46,7 @@ def rough(self, params, jobs=1, catch_exceptions=True): >>> from estimator import * >>> _ = NTRU.estimate.rough(schemes.NTRUHPS2048509Enc) usvp :: rop: ≈2^109.2, red: ≈2^109.2, δ: 1.004171, β: 374, d: 643, tag: usvp + bdd_hybrid :: rop: ≈2^108.6, red: ≈2^107.7, svp: ≈2^107.5, β: 369, η: 368, ζ: 0, |S|: 1, ... """ params = params.normalize() @@ -61,8 +62,12 @@ def rough(self, params, jobs=1, catch_exceptions=True): ) if params.Xs.is_sparse: - algorithms["hybrid"] = partial( - primal_hybrid, red_cost_model=RC.ADPS16, red_shape_model="zgsa" + algorithms["bdd_hybrid"] = partial( + primal_hybrid, + mitm=False, + babai=False, + red_cost_model=RC.ADPS16, + red_shape_model="ZGSA", ) res_raw = batch_estimate( diff --git a/estimator/ntru_primal.py b/estimator/ntru_primal.py index 3b12732..5786fbb 100644 --- a/estimator/ntru_primal.py +++ b/estimator/ntru_primal.py @@ -363,21 +363,18 @@ def __call__( EXAMPLES:: >>> from estimator import * - >>> NTRU.primal_hybrid(schemes.NTRUHPS2048509Enc.updated(Xs=ND.SparseTernary(508,16)), - ... mitm = False, babai = False) - rop: ≈2^87.8, red: ≈2^87.0, svp: ≈2^86.6, β: 116, η: 21, ζ: 302, |S|: ≈2^39.2, d: 372, prob: ≈2^-22.3, ... + >>> params = schemes.NTRUHPS2048509Enc.updated(Xs=ND.SparseTernary(16)) + >>> NTRU.primal_hybrid(params, mitm=False, babai=False) + rop: ≈2^87.8, red: ≈2^87.0, svp: ≈2^86.6, β: 116, η: 21, ζ: 302, |S|: ≈2^39.2, d: 372, prob: ≈2^-22.3, ↻... - >>> NTRU.primal_hybrid(schemes.NTRUHPS2048509Enc.updated(Xs=ND.SparseTernary(508,16)), - ... 
mitm = False, babai = True) - rop: ≈2^88.0, red: ≈2^87.4, svp: ≈2^86.4, β: 98, η: 2, ζ: 318, |S|: ≈2^39.6, d: 328, prob: ≈2^-27.9, ... + >>> NTRU.primal_hybrid(params, mitm=False, babai=True) + rop: ≈2^88.0, red: ≈2^87.4, svp: ≈2^86.4, β: 98, η: 2, ζ: 318, |S|: ≈2^39.6, d: 328, prob: ≈2^-27.9, ↻: ... - >>> NTRU.primal_hybrid(schemes.NTRUHPS2048509Enc.updated(Xs=ND.SparseTernary(508,16)), - ... mitm = True, babai = False) - rop: ≈2^80.1, red: ≈2^79.7, svp: ≈2^78.3, β: 170, η: 22, ζ: 254, |S|: ≈2^103.7, d: 495, prob: 0.708, ... + >>> NTRU.primal_hybrid(params, mitm=True, babai=False) + rop: ≈2^80.1, red: ≈2^79.7, svp: ≈2^78.3, β: 170, η: 22, ζ: 254, |S|: ≈2^103.7, d: 495, prob: 0.708, ↻: ... - >>> NTRU.primal_hybrid(schemes.NTRUHPS2048509Enc.updated(Xs=ND.SparseTernary(508,16)), - ... mitm = True, babai = True) - rop: ≈2^85.1, red: ≈2^84.1, svp: ≈2^84.0, β: 105, η: 2, ζ: 363, |S|: ≈2^85.0, d: 294, prob: ≈2^-22.9, ... + >>> NTRU.primal_hybrid(params, mitm=True, babai=True) + rop: ≈2^85.1, red: ≈2^84.1, svp: ≈2^84.0, β: 105, η: 2, ζ: 363, |S|: ≈2^85.0, d: 294, prob: ≈2^-22.9, ↻:... TESTS: diff --git a/estimator/reduction.py b/estimator/reduction.py index 04ae761..d49a83b 100644 --- a/estimator/reduction.py +++ b/estimator/reduction.py @@ -515,8 +515,8 @@ def __call__(self, beta, d, B=None): >>> from sage.all import var, find_fit >>> dim = [100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, 210, 220, 230, 240, 250] - >>> nodes = [39.0, 44.0, 49.0, 54.0, 60.0, 66.0, 72.0, 78.0, 84.0, 96.0, \ - 99.0, 105.0, 111.0, 120.0, 127.0, 134.0] + >>> nodes = [39.0, 44.0, 49.0, 54.0, 60.0, 66.0, 72.0, 78.0, 84.0, 96.0 ] + >>> nodes += [ 99.0, 105.0, 111.0, 120.0, 127.0, 134.0] # couldn't use \\ breaks stuff >>> times = [c + log(200,2).n() for c in nodes] >>> T = list(zip(dim, nodes)) >>> var("a,b,c,beta") diff --git a/estimator/schemes.py b/estimator/schemes.py index fd74e1c..c110775 100644 --- a/estimator/schemes.py +++ b/estimator/schemes.py @@ -1,5 +1,5 @@ from sage.all import oo -from .nd import NoiseDistribution, stddevf +from .nd import stddevf, Binary, CenteredBinomial, DiscreteGaussian, SparseTernary, UniformMod from .lwe_parameters import LWEParameters from .ntru_parameters import NTRUParameters from .sis_parameters import SISParameters @@ -19,8 +19,8 @@ Kyber512 = LWEParameters( n=2 * 256, q=3329, - Xs=NoiseDistribution.CenteredBinomial(3), - Xe=NoiseDistribution.CenteredBinomial(3), + Xs=CenteredBinomial(3), + Xe=CenteredBinomial(3), m=2 * 256, tag="Kyber 512", ) @@ -28,8 +28,8 @@ Kyber768 = LWEParameters( n=3 * 256, q=3329, - Xs=NoiseDistribution.CenteredBinomial(2), - Xe=NoiseDistribution.CenteredBinomial(2), + Xs=CenteredBinomial(2), + Xe=CenteredBinomial(2), m=3 * 256, tag="Kyber 768", ) @@ -37,8 +37,8 @@ Kyber1024 = LWEParameters( n=4 * 256, q=3329, - Xs=NoiseDistribution.CenteredBinomial(2), - Xe=NoiseDistribution.CenteredBinomial(2), + Xs=CenteredBinomial(2), + Xe=CenteredBinomial(2), m=4 * 256, tag="Kyber 1024", ) @@ -56,8 +56,8 @@ LightSaber = LWEParameters( n=2 * 256, q=8192, - Xs=NoiseDistribution.CenteredBinomial(5), - Xe=NoiseDistribution.UniformMod(8), + Xs=CenteredBinomial(5), + Xe=UniformMod(8), m=2 * 256, tag="LightSaber", ) @@ -65,8 +65,8 @@ Saber = LWEParameters( n=3 * 256, q=8192, - Xs=NoiseDistribution.CenteredBinomial(4), - Xe=NoiseDistribution.UniformMod(8), + Xs=CenteredBinomial(4), + Xe=UniformMod(8), m=3 * 256, tag="Saber", ) @@ -74,8 +74,8 @@ FireSaber = LWEParameters( n=4 * 256, q=8192, - Xs=NoiseDistribution.CenteredBinomial(3), - 
Xe=NoiseDistribution.UniformMod(8), + Xs=CenteredBinomial(3), + Xe=UniformMod(8), m=4 * 256, tag="FireSaber", ) @@ -88,8 +88,8 @@ NTRUHPS2048509Enc = NTRUParameters( n=508, q=2048, - Xe=NoiseDistribution.SparseTernary(508, 2048 / 16 - 1), - Xs=NoiseDistribution.UniformMod(3), + Xe=SparseTernary(2048 / 16 - 1), + Xs=UniformMod(3), m=508, tag="NTRUHPS2048509Enc", ) @@ -97,8 +97,8 @@ NTRUHPS2048677Enc = NTRUParameters( n=676, q=2048, - Xs=NoiseDistribution.UniformMod(3), - Xe=NoiseDistribution.SparseTernary(676, 2048 / 16 - 1), + Xs=UniformMod(3), + Xe=SparseTernary(2048 / 16 - 1), m=676, tag="NTRUHPS2048677Enc", ) @@ -106,8 +106,8 @@ NTRUHPS4096821Enc = NTRUParameters( n=820, q=4096, - Xs=NoiseDistribution.UniformMod(3), - Xe=NoiseDistribution.SparseTernary(820, 4096 / 16 - 1), + Xs=UniformMod(3), + Xe=SparseTernary(4096 / 16 - 1), m=820, tag="NTRUHPS4096821Enc", ) @@ -115,8 +115,8 @@ NTRUHRSS701Enc = NTRUParameters( n=700, q=8192, - Xs=NoiseDistribution.UniformMod(3), - Xe=NoiseDistribution.UniformMod(3), + Xs=UniformMod(3), + Xe=UniformMod(3), m=700, tag="NTRUHRSS701", ) @@ -214,8 +214,8 @@ Falcon512_SKR = NTRUParameters( n=512, q=12289, - Xs=NoiseDistribution.DiscreteGaussian(4.0532), - Xe=NoiseDistribution.DiscreteGaussian(4.0532), + Xs=DiscreteGaussian(4.0532), + Xe=DiscreteGaussian(4.0532), m=512, ntru_type='circulant', tag="Falcon512_SKR" @@ -233,8 +233,8 @@ Falcon1024_SKR = NTRUParameters( n=1024, q=12289, - Xs=NoiseDistribution.DiscreteGaussian(2.866), - Xe=NoiseDistribution.DiscreteGaussian(2.866), + Xs=DiscreteGaussian(2.866), + Xe=DiscreteGaussian(2.866), m=1024, ntru_type='circulant', tag="Falcon1024_SKR" @@ -246,8 +246,8 @@ Frodo640 = LWEParameters( n=640, q=2**15, - Xs=NoiseDistribution.DiscreteGaussian(2.8), - Xe=NoiseDistribution.DiscreteGaussian(2.8), + Xs=DiscreteGaussian(2.8), + Xe=DiscreteGaussian(2.8), m=640 + 16, tag="Frodo640", ) @@ -255,8 +255,8 @@ Frodo976 = LWEParameters( n=976, q=2**16, - Xs=NoiseDistribution.DiscreteGaussian(2.3), - Xe=NoiseDistribution.DiscreteGaussian(2.3), + Xs=DiscreteGaussian(2.3), + Xe=DiscreteGaussian(2.3), m=976 + 16, tag="Frodo976", ) @@ -264,8 +264,8 @@ Frodo1344 = LWEParameters( n=1344, q=2**16, - Xs=NoiseDistribution.DiscreteGaussian(1.4), - Xe=NoiseDistribution.DiscreteGaussian(1.4), + Xs=DiscreteGaussian(1.4), + Xe=DiscreteGaussian(1.4), m=1344 + 16, tag="Frodo1344", ) @@ -275,8 +275,8 @@ HESv111024128error = LWEParameters( n=1024, q=2**27, - Xs=NoiseDistribution.DiscreteGaussian(3.0), - Xe=NoiseDistribution.DiscreteGaussian(3.0), + Xs=DiscreteGaussian(3.0), + Xe=DiscreteGaussian(3.0), m=1024, tag="HESv11error", ) @@ -284,8 +284,8 @@ HESv111024128ternary = LWEParameters( n=1024, q=2**27, - Xs=NoiseDistribution.UniformMod(3), - Xe=NoiseDistribution.DiscreteGaussian(3.0), + Xs=UniformMod(3), + Xe=DiscreteGaussian(3.0), m=1024, tag="HESv11ternary", ) @@ -301,16 +301,16 @@ TFHE630 = LWEParameters( n=630, q=2**32, - Xs=NoiseDistribution.UniformMod(2), - Xe=NoiseDistribution.DiscreteGaussian(stddev=2 ** (-15) * 2**32), + Xs=Binary, + Xe=DiscreteGaussian(stddev=2 ** (-15) * 2**32), tag="TFHE630", ) # - Bootstrapping key (Ring-LWE) TFHE1024 = LWEParameters( n=1024, q=2**32, - Xs=NoiseDistribution.UniformMod(2), - Xe=NoiseDistribution.DiscreteGaussian(stddev=2 ** (-25) * 2**32), + Xs=Binary, + Xe=DiscreteGaussian(stddev=2 ** (-25) * 2**32), tag="TFHE1024", ) @@ -322,16 +322,16 @@ Concrete_TFHE586 = LWEParameters( n=586, q=2**32, - Xs=NoiseDistribution.UniformMod(2), - Xe=NoiseDistribution.DiscreteGaussian(stddev=2 ** (-13.4) * 2**32), + 
Xs=Binary, + Xe=DiscreteGaussian(stddev=2 ** (-13.4) * 2**32), tag="Concrete_TFHE586", ) # - Bootstrapping key (Ring-LWE) Concrete_TFHE512 = LWEParameters( n=512, q=2**32, - Xs=NoiseDistribution.UniformMod(2), - Xe=NoiseDistribution.DiscreteGaussian(stddev=2 ** (-24.8) * 2**32), + Xs=Binary, + Xe=DiscreteGaussian(stddev=2 ** (-24.8) * 2**32), tag="Concrete_TFHE512", ) @@ -341,16 +341,16 @@ TFHE16_500 = LWEParameters( n=500, q=2**32, - Xs=NoiseDistribution.UniformMod(2), - Xe=NoiseDistribution.DiscreteGaussian(stddev=2.43 * 10 ** (-5) * 2**32), + Xs=Binary, + Xe=DiscreteGaussian(stddev=2.43 * 10 ** (-5) * 2**32), tag="TFHE16_500", ) TFHE16_1024 = LWEParameters( n=1024, q=2**32, - Xs=NoiseDistribution.UniformMod(2), - Xe=NoiseDistribution.DiscreteGaussian(stddev=3.73 * 10 ** (-9) * 2**32), + Xs=Binary, + Xe=DiscreteGaussian(stddev=3.73 * 10 ** (-9) * 2**32), tag="TFHE16_1024", ) @@ -359,16 +359,16 @@ TFHE20_612 = LWEParameters( n=612, q=2**32, - Xs=NoiseDistribution.UniformMod(2), - Xe=NoiseDistribution.DiscreteGaussian(stddev=2 ** (-15) * 2**32), + Xs=Binary, + Xe=DiscreteGaussian(stddev=2 ** (-15) * 2**32), tag="TFHE20_612", ) TFHE20_1024 = LWEParameters( n=1024, q=2**32, - Xs=NoiseDistribution.UniformMod(2), - Xe=NoiseDistribution.DiscreteGaussian(stddev=2 ** (-26) * 2**32), + Xs=Binary, + Xe=DiscreteGaussian(stddev=2 ** (-26) * 2**32), tag="TFHE20_1024", ) @@ -379,8 +379,8 @@ FHEW = LWEParameters( n=500, q=2**32, - Xs=NoiseDistribution.UniformMod(2), - Xe=NoiseDistribution.DiscreteGaussian(stddev=2 ** (-15) * 2**32), + Xs=Binary, + Xe=DiscreteGaussian(stddev=2 ** (-15) * 2**32), tag="FHEW", ) @@ -393,40 +393,40 @@ SEAL20_1024 = LWEParameters( n=1024, q=2**48 - 2**20 + 1, - Xs=NoiseDistribution.UniformMod(3), - Xe=NoiseDistribution.DiscreteGaussian(stddev=3.19), + Xs=UniformMod(3), + Xe=DiscreteGaussian(stddev=3.19), tag="SEAL20_1024", ) SEAL20_2048 = LWEParameters( n=2048, q=2**94 - 2**20 + 1, - Xs=NoiseDistribution.UniformMod(3), - Xe=NoiseDistribution.DiscreteGaussian(stddev=3.19), + Xs=UniformMod(3), + Xe=DiscreteGaussian(stddev=3.19), tag="SEAL20_2048", ) SEAL20_4096 = LWEParameters( n=4096, q=2**190 - 2**30 + 1, - Xs=NoiseDistribution.UniformMod(3), - Xe=NoiseDistribution.DiscreteGaussian(stddev=3.19), + Xs=UniformMod(3), + Xe=DiscreteGaussian(stddev=3.19), tag="SEAL20_4096", ) SEAL20_8192 = LWEParameters( n=8192, q=2**383 - 2**33 + 1, - Xs=NoiseDistribution.UniformMod(3), - Xe=NoiseDistribution.DiscreteGaussian(stddev=3.19), + Xs=UniformMod(3), + Xe=DiscreteGaussian(stddev=3.19), tag="SEAL20_8192", ) SEAL20_16384 = LWEParameters( n=16384, q=2**767 - 2**56 + 1, - Xs=NoiseDistribution.UniformMod(3), - Xe=NoiseDistribution.DiscreteGaussian(stddev=3.19), + Xs=UniformMod(3), + Xe=DiscreteGaussian(stddev=3.19), tag="SEAL20_16384", ) @@ -437,40 +437,40 @@ SEAL22_2048 = LWEParameters( n=2048, q=2**60 - 2**14 + 1, - Xs=NoiseDistribution.UniformMod(3), - Xe=NoiseDistribution.DiscreteGaussian(stddev=3.19), + Xs=UniformMod(3), + Xe=DiscreteGaussian(stddev=3.19), tag="SEAL22_2048", ) SEAL22_4096 = LWEParameters( n=4096, q=2**116 - 2**18 + 1, - Xs=NoiseDistribution.UniformMod(3), - Xe=NoiseDistribution.DiscreteGaussian(stddev=3.19), + Xs=UniformMod(3), + Xe=DiscreteGaussian(stddev=3.19), tag="SEAL22_4096", ) SEAL22_8192 = LWEParameters( n=8192, q=2**226 - 2**26 + 1, - Xs=NoiseDistribution.UniformMod(3), - Xe=NoiseDistribution.DiscreteGaussian(stddev=3.19), + Xs=UniformMod(3), + Xe=DiscreteGaussian(stddev=3.19), tag="SEAL22_8192", ) SEAL22_16384 = LWEParameters( n=16384, q=2**435 - 2**33 + 1, - 
Xs=NoiseDistribution.UniformMod(3), - Xe=NoiseDistribution.DiscreteGaussian(stddev=3.19), + Xs=UniformMod(3), + Xe=DiscreteGaussian(stddev=3.19), tag="SEAL22_16384", ) SEAL22_32768 = LWEParameters( n=32768, q=2**889 - 2**54 - 2**53 - 2**52 + 1, - Xs=NoiseDistribution.UniformMod(3), - Xe=NoiseDistribution.DiscreteGaussian(stddev=3.19), + Xs=UniformMod(3), + Xe=DiscreteGaussian(stddev=3.19), tag="SEAL22_32768", ) @@ -485,24 +485,24 @@ HElib80_1024 = LWEParameters( n=1024, q=2**47, - Xs=NoiseDistribution.SparseTernary(n=1024, p=32), - Xe=NoiseDistribution.DiscreteGaussian(stddev=3.2), + Xs=SparseTernary(32), + Xe=DiscreteGaussian(stddev=3.2), tag="HElib80_1024", ) HElib80_2048 = LWEParameters( n=2048, q=2**87, - Xs=NoiseDistribution.SparseTernary(n=2048, p=32), - Xe=NoiseDistribution.DiscreteGaussian(stddev=3.2), + Xs=SparseTernary(32), + Xe=DiscreteGaussian(stddev=3.2), tag="HElib80_2048", ) HElib80_4096 = LWEParameters( n=4096, q=2**167, - Xs=NoiseDistribution.SparseTernary(n=4096, p=32), - Xe=NoiseDistribution.DiscreteGaussian(stddev=3.2), + Xs=SparseTernary(32), + Xe=DiscreteGaussian(stddev=3.2), tag="HElib80_4096", ) @@ -511,24 +511,24 @@ HElib120_1024 = LWEParameters( n=1024, q=2**38, - Xs=NoiseDistribution.SparseTernary(n=1024, p=32), - Xe=NoiseDistribution.DiscreteGaussian(stddev=3.2), + Xs=SparseTernary(32), + Xe=DiscreteGaussian(stddev=3.2), tag="HElib80_1024", ) HElib120_2048 = LWEParameters( n=2048, q=2**70, - Xs=NoiseDistribution.SparseTernary(n=2048, p=32), - Xe=NoiseDistribution.DiscreteGaussian(stddev=3.2), + Xs=SparseTernary(32), + Xe=DiscreteGaussian(stddev=3.2), tag="HElib80_2048", ) HElib120_4096 = LWEParameters( n=4096, q=2**134, - Xs=NoiseDistribution.SparseTernary(n=4096, p=32), - Xe=NoiseDistribution.DiscreteGaussian(stddev=3.2), + Xs=SparseTernary(32), + Xe=DiscreteGaussian(stddev=3.2), tag="HElib80_4096", ) @@ -540,39 +540,39 @@ CHHS_1024_25 = LWEParameters( n=1024, q=2**25, - Xs=NoiseDistribution.SparseTernary(n=1024, p=32), - Xe=NoiseDistribution.DiscreteGaussian(stddev=stddevf(8)), + Xs=SparseTernary(32), + Xe=DiscreteGaussian(stddev=stddevf(8)), tag="CHHS_1024_25", ) CHHS_2048_38 = LWEParameters( n=2048, q=2**38, - Xs=NoiseDistribution.SparseTernary(n=2048, p=32), - Xe=NoiseDistribution.DiscreteGaussian(stddev=stddevf(8)), + Xs=SparseTernary(32), + Xe=DiscreteGaussian(stddev=stddevf(8)), tag="CHHS_2048_38", ) CHHS_2048_45 = LWEParameters( n=2048, q=2**45, - Xs=NoiseDistribution.SparseTernary(n=2048, p=32), - Xe=NoiseDistribution.DiscreteGaussian(stddev=stddevf(8)), + Xs=SparseTernary(32), + Xe=DiscreteGaussian(stddev=stddevf(8)), tag="CHHS_2048_45", ) CHHS_4096_67 = LWEParameters( n=4096, q=2**67, - Xs=NoiseDistribution.SparseTernary(n=4096, p=32), - Xe=NoiseDistribution.DiscreteGaussian(stddev=stddevf(8)), + Xs=SparseTernary(32), + Xe=DiscreteGaussian(stddev=stddevf(8)), tag="CHHS_4096_67", ) CHHS_4096_82 = LWEParameters( n=4096, q=2**82, - Xs=NoiseDistribution.SparseTernary(n=4096, p=32), - Xe=NoiseDistribution.DiscreteGaussian(stddev=stddevf(8)), + Xs=SparseTernary(32), + Xe=DiscreteGaussian(stddev=stddevf(8)), tag="CHHS_4096_82", ) diff --git a/estimator/util.py b/estimator/util.py index 3ecf34c..f726981 100644 --- a/estimator/util.py +++ b/estimator/util.py @@ -443,8 +443,9 @@ def batch_estimate(params, algorithm, jobs=1, log_level=0, catch_exceptions=True >>> from estimator import LWE >>> from estimator.schemes import Kyber512 - >>> _ = batch_estimate(Kyber512, [LWE.primal_usvp, LWE.primal_bdd]) - >>> _ = batch_estimate(Kyber512, [LWE.primal_usvp, 
LWE.primal_bdd], jobs=2) + >>> from estimator.util import batch_estimate + >>> _ = batch_estimate(Kyber512, [LWE.primal_usvp, LWE.primal_bdd], log_level=1) + >>> _ = batch_estimate(Kyber512, [LWE.primal_usvp, LWE.primal_bdd], jobs=2, log_level=1) """ diff --git a/param_sweep.py b/param_sweep.py index a541b5f..6d22399 100644 --- a/param_sweep.py +++ b/param_sweep.py @@ -22,9 +22,8 @@ import numpy as np from matplotlib import pyplot as plt -from estimator import LWE +from estimator import ND, LWE from estimator.io import Logging -from estimator.nd import NoiseDistribution as ND class ParameterSweep: @@ -71,7 +70,7 @@ def parameter_sweep( EXAMPLE :: - >>> from estimator import LWE, nd + >>> from estimator import LWE, ND >>> from param_sweep import ParameterSweep as PS >>> n_list = [600, 900] >>> e_list = [7, 9] @@ -81,7 +80,7 @@ def parameter_sweep( e=e_list,\ s=2,\ s_log=False,\ - Xs=nd.NoiseDistribution.UniformMod,\ + Xs=ND.UniformMod,\ f=LWE.estimate.rough,\ tag='test',\ log_level=2,\