Change memory usage of dual attack #89

Merged · 8 commits · Oct 31, 2023
3 changes: 2 additions & 1 deletion README.rst
@@ -35,7 +35,7 @@ Quick Start
bdd :: rop: ≈2^140.3, red: ≈2^139.7, svp: ≈2^138.8, β: 391, η: 421, d: 1013, tag: bdd
bdd_hybrid :: rop: ≈2^140.3, red: ≈2^139.7, svp: ≈2^138.8, β: 391, η: 421, ζ: 0, |S|: 1, d: 1016, prob: 1, ↻: 1, tag: hybrid
bdd_mitm_hybrid :: rop: ≈2^260.3, red: ≈2^259.4, svp: ≈2^259.3, β: 405, η: 2, ζ: 102, |S|: ≈2^247.2, d: 923, prob: ≈2^-113.8, ↻: ≈2^116.0, tag: hybrid
- dual :: rop: ≈2^149.9, mem: ≈2^88.0, m: 512, β: 424, d: 1024, ↻: 1, tag: dual
+ dual :: rop: ≈2^149.9, mem: ≈2^97.1, m: 512, β: 424, d: 1024, ↻: 1, tag: dual
dual_hybrid :: rop: ≈2^145.6, mem: ≈2^140.5, m: 512, β: 408, d: 1004, ↻: 1, ζ: 20, tag: dual_hybrid

- `Try it in your browser <https://mybinder.org/v2/gh/malb/lattice-estimator/jupyter-notebooks?labpath=..%2F..%2Ftree%2Fprompt.ipynb>`__.
@@ -95,6 +95,7 @@ At present, this estimator is maintained by Martin Albrecht. Contributors are:
- Hamish Hunt
- James Owen
- Léo Ducas
+ - Ludo Pulles
- Markus Schmidt
- Martin Albrecht
- Michael Walter
2 changes: 1 addition & 1 deletion estimator/gb.py
@@ -220,7 +220,7 @@ def __call__(
.. [EPRINT:ACFP14] Martin R. Albrecht, Carlos Cid, Jean-Charles Faugère & Ludovic Perret. (2014).
Algebraic algorithms for LWE. https://eprint.iacr.org/2014/1018

- .. [ICALP:AroGe11] Sanjeev Aror & Rong Ge. (2011). New algorithms for learning in presence of
+ .. [ICALP:AroGe11] Sanjeev Arora & Rong Ge. (2011). New algorithms for learning in presence of
errors. In L. Aceto, M. Henzinger, & J. Sgall, ICALP 2011, Part I (pp. 403–415).:
Springer, Heidelberg.
"""
2 changes: 1 addition & 1 deletion estimator/lwe.py
@@ -120,7 +120,7 @@ def __call__(
bdd :: rop: ≈2^140.3, red: ≈2^139.7, svp: ≈2^138.8, β: 391, η: 421, d: 1013, tag: bdd
bdd_hybrid :: rop: ≈2^140.3, red: ≈2^139.7, svp: ≈2^138.8, β: 391, η: 421, ζ: 0, |S|: 1, ...
bdd_mitm_hybrid :: rop: ≈2^260.3, red: ≈2^259.4, svp: ≈2^259.3, β: 405, η: 2, ζ: 102, |S|: ≈2^247.2,...
- dual :: rop: ≈2^149.9, mem: ≈2^88.0, m: 512, β: 424, d: 1024, ↻: 1, tag: dual
+ dual :: rop: ≈2^149.9, mem: ≈2^97.1, m: 512, β: 424, d: 1024, ↻: 1, tag: dual
dual_hybrid :: rop: ≈2^145.6, mem: ≈2^140.5, m: 512, β: 408, d: 1004, ↻: 1, ζ: 20, tag: dual_hybrid

"""
66 changes: 32 additions & 34 deletions estimator/lwe_dual.py
@@ -118,7 +118,6 @@ def cost(
t: int = 0,
success_probability: float = 0.99,
red_cost_model=red_cost_model_default,
- use_lll=True,
log_level=None,
):
"""
@@ -132,7 +131,6 @@
:param h1: Number of non-zero components of the secret of the new LWE instance
:param success_probability: The success probability to target
:param red_cost_model: How to cost lattice reduction
- :param use_lll: Use LLL calls to produce more small vectors

.. note :: This function assumes that the instance is normalized. It runs no optimization,
it merely reports costs.
@@ -154,22 +152,23 @@
cost = DualHybrid.fft_solver(params_slv, success_probability, t)
else:
cost = solver(params_slv, success_probability)

Logging.log("dual", log_level + 2, f"solve: {cost!r}")
cost["beta"] = beta

if cost["rop"] == oo or cost["m"] == oo:
cost["beta"] = beta
return cost

d = m_ + params.n - zeta
- cost_red = red_cost_model.short_vectors(beta, d, cost["m"])[1]
+ _, cost_red, N, sieve_dim = red_cost_model.short_vectors(beta, d, cost["m"])
Logging.log("dual", log_level + 2, f"red: {Cost(rop=cost_red)!r}")

+ # Add the runtime cost of sieving in dimension `sieve_dim` possibly multiple times.
cost["rop"] += cost_red

+ # Add the memory cost of storing the `N` dual vectors, using `sieve_dim` many coefficients
+ # (mod q) to represent them. Note that short dual vectors may actually be described by fewer
+ # bits because their coefficients are generally small, so this is really an upper bound here.
+ cost["mem"] += sieve_dim * N
cost["m"] = m_
cost["beta"] = beta
if t:
cost["t"] = t

if d < params.n - zeta:
raise RuntimeError(f"{d} < {params.n - zeta}, {params.n}, {zeta}, {m_}")
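
A note on the accounting change above: the dual attack now charges memory for storing the ``N`` short dual vectors it uses, not just time for finding them. A minimal standalone sketch, assuming only that ``short_vectors(beta, d, M)`` returns ``(rho, cost_red, N, sieve_dim)`` as in this hunk; the toy reduction model below is illustrative and is not the estimator's actual cost model::

    from math import log2

    def fold_reduction_cost(cost, beta, d, short_vectors):
        """Fold lattice-reduction time and memory into a solver cost dict."""
        _, cost_red, N, sieve_dim = short_vectors(beta, d, cost["m"])
        cost["rop"] += cost_red       # time: sieving in dimension sieve_dim
        cost["mem"] += sieve_dim * N  # space: N vectors of sieve_dim coefficients mod q
        return cost

    # Toy model: one sieve in dimension beta outputs (4/3)^(beta/2) vectors
    # at core-SVP cost 2^(0.292 beta) -- common heuristics, for illustration only.
    toy = lambda beta, d, M: (1.0, 2.0 ** (0.292 * beta), (4.0 / 3.0) ** (beta / 2), beta)

    cost = {"rop": 2.0**97, "mem": 2.0**66, "m": 512}
    cost = fold_reduction_cost(cost, beta=424, d=1024, short_vectors=toy)
    print(round(log2(cost["mem"]), 1))  # ~96.7 under this toy model

Because the stored vectors dominate, the extra ``sieve_dim * N`` term is why the reported ``mem`` figures increase throughout this PR.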
@@ -202,6 +201,7 @@ def fft_solver(params, success_probability, t=0):
- ``rop``: Total number of word operations (≈ CPU cycles).
- ``mem``: memory requirement in integers mod q.
- ``m``: Required number of samples to distinguish the correct solution with high probability.
+ - ``t``: the number of secret coordinates to guess mod 2.

.. note :: The parameter t only makes sense in the context of the dual attack,
which is why this function is here and not in the lwe_guess module.
@@ -220,6 +220,10 @@
return Cost(rop=oo, mem=oo, m=1)

sigma = params.Xe.stddev / params.q

+ # Here, assume the Independence Heuristic, cf. [ia.cr/2023/302].
+ # The minimal number of short dual vectors that is required to distinguish the correct
+ # guess with probability at least `probability`:
m_required = RR(
4
* exp(4 * pi * pi * sigma * sigma)
@@ -230,12 +234,19 @@
raise InsufficientSamplesError(
f"Exhaustive search: Need {m_required} samples but only {params.m} available."
)
- else:
- m = m_required

- cost = size * (m + t * size_fft)
+ # Running a fast Walsh--Hadamard transform takes time proportional to t·2^t.
+ runtime_cost = size * (t * size_fft)
+ # Add the cost of updating the FFT tables for all of the enumeration targets.
+ # Use "Efficient Updating of the FFT Input", [MATZOV, §5.4]:
+ runtime_cost += size * (4 * m_required)

- return Cost(rop=cost, mem=cost, m=m)
+ # This is the number of entries the table should have. Note that it should support
+ # (floating point) numbers in the range [-N, N], where ``N`` is the number of dual vectors.
+ # However, 32-bit floats are good enough in practice.
+ memory_cost = size_fft

+ return Cost(rop=runtime_cost, mem=memory_cost, m=m_required, t=t)

@staticmethod
def optimize_blocksize(
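
Taken together, the ``fft_solver`` changes above replace the old single ``cost`` value by an explicit runtime/memory split. A sketch of the new accounting, using only the terms visible in that hunk and assuming ``size_fft = 2**t``::

    def fft_solver_costs(size, t, m_required):
        """Sketch of the revised FFT-distinguisher costing (visible terms only).

        size       -- number of candidate secret guesses to enumerate
        t          -- number of secret coordinates guessed mod 2
        m_required -- short dual vectors needed to single out the right guess
        """
        size_fft = 2**t
        # fast Walsh-Hadamard transform: about t * 2^t operations per target
        runtime = size * (t * size_fft)
        # updating the FFT input per target ([MATZOV, §5.4]): 4 * m_required ops
        runtime += size * (4 * m_required)
        # one FFT table with 2^t entries (32-bit floats suffice in practice)
        memory = size_fft
        return runtime, memory

    rop, mem = fft_solver_costs(size=2**20, t=16, m_required=10**6)

Unlike the old ``cost = size * (m + t * size_fft)``, memory no longer scales with the total runtime, which is the point of this change.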
@@ -245,7 +256,6 @@ def optimize_blocksize(
h1: int = 0,
success_probability: float = 0.99,
red_cost_model=red_cost_model_default,
- use_lll=True,
log_level=5,
opt_step=8,
fft=False,
@@ -259,7 +269,6 @@
:param h1: Number of non-zero components of the secret of the new LWE instance
:param success_probability: The success probability to target
:param red_cost_model: How to cost lattice reduction
- :param use_lll: Use LLL calls to produce more small vectors
:param opt_step: control robustness of optimizer
:param fft: use the FFT distinguisher from [AC:GuoJoh21]_

@@ -275,7 +284,6 @@
h1=h1,
success_probability=success_probability,
red_cost_model=red_cost_model,
- use_lll=use_lll,
log_level=log_level,
)

@@ -316,7 +324,6 @@ def __call__(
params: LWEParameters,
success_probability: float = 0.99,
red_cost_model=red_cost_model_default,
- use_lll=True,
opt_step=8,
log_level=1,
fft=False,
@@ -328,14 +335,13 @@
the cost function for the dual hybrid might only be convex in an approximate
sense, the parameter ``opt_step`` allows to make the optimization procedure more
robust against local irregularities (higher value) at the cost of a longer
- running time. In a nutshell, if the cost of the dual hybrid seems suspiciosly
+ running time. In a nutshell, if the cost of the dual hybrid seems suspiciously
high, try a larger ``opt_step`` (e.g. 4 or 8).

:param solver: Algorithm for solving the reduced instance
:param params: LWE parameters
:param success_probability: The success probability to target
:param red_cost_model: How to cost lattice reduction
- :param use_lll: use LLL calls to produce more small vectors [EC:Albrecht17]_
:param opt_step: control robustness of optimizer
:param fft: use the FFT distinguisher from [AC:GuoJoh21]_. (ignored for sparse secrets)

@@ -363,7 +369,7 @@ def __call__(
>>> from estimator import *
>>> params = LWE.Parameters(n=1024, q = 2**32, Xs=ND.Uniform(0,1), Xe=ND.DiscreteGaussian(3.0))
>>> LWE.dual(params)
- rop: ≈2^107.0, mem: ≈2^58.0, m: 970, β: 264, d: 1994, ↻: 1, tag: dual
+ rop: ≈2^107.0, mem: ≈2^66.4, m: 970, β: 264, d: 1994, ↻: 1, tag: dual
>>> LWE.dual_hybrid(params)
rop: ≈2^103.2, mem: ≈2^97.4, m: 937, β: 250, d: 1919, ↻: 1, ζ: 42, tag: dual_hybrid
>>> LWE.dual_hybrid(params, mitm_optimization=True)
@@ -373,23 +379,23 @@

>>> params = params.updated(Xs=ND.SparseTernary(params.n, 32))
>>> LWE.dual(params)
- rop: ≈2^103.4, mem: ≈2^55.4, m: 904, β: 251, d: 1928, ↻: 1, tag: dual
+ rop: ≈2^103.4, mem: ≈2^63.9, m: 904, β: 251, d: 1928, ↻: 1, tag: dual
>>> LWE.dual_hybrid(params)
rop: ≈2^92.1, mem: ≈2^78.2, m: 716, β: 170, d: 1464, ↻: 1989, ζ: 276, h1: 8, tag: dual_hybrid
>>> LWE.dual_hybrid(params, mitm_optimization=True)
rop: ≈2^98.2, mem: ≈2^78.6, m: 728, k: 292, ↻: ≈2^18.7, β: 180, d: 1267, ζ: 485, h1: 17, tag: ...

>>> params = params.updated(Xs=ND.CenteredBinomial(8))
>>> LWE.dual(params)
- rop: ≈2^114.5, mem: ≈2^61.0, m: 1103, β: 291, d: 2127, ↻: 1, tag: dual
+ rop: ≈2^114.5, mem: ≈2^71.8, m: 1103, β: 291, d: 2127, ↻: 1, tag: dual
>>> LWE.dual_hybrid(params)
rop: ≈2^113.6, mem: ≈2^103.5, m: 1096, β: 288, d: 2110, ↻: 1, ζ: 10, tag: dual_hybrid
>>> LWE.dual_hybrid(params, mitm_optimization=True)
rop: ≈2^155.5, mem: ≈2^146.2, m: 1414, k: 34, ↻: 1, β: 438, d: 2404, ζ: 34, tag: dual_mitm_hybrid

>>> params = params.updated(Xs=ND.DiscreteGaussian(3.0))
>>> LWE.dual(params)
- rop: ≈2^116.5, mem: ≈2^64.0, m: 1140, β: 298, d: 2164, ↻: 1, tag: dual
+ rop: ≈2^116.5, mem: ≈2^73.2, m: 1140, β: 298, d: 2164, ↻: 1, tag: dual
>>> LWE.dual_hybrid(params)
rop: ≈2^116.2, mem: ≈2^100.4, m: 1137, β: 297, d: 2155, ↻: 1, ζ: 6, tag: dual_hybrid
>>> LWE.dual_hybrid(params, mitm_optimization=True)
@@ -399,10 +405,11 @@
rop: ≈2^131.7, mem: ≈2^128.5, m: 436, β: 358, d: 906, ↻: 1, ζ: 38, tag: dual_hybrid

>>> LWE.dual(schemes.CHHS_4096_67)
- rop: ≈2^206.9, mem: ≈2^126.0, m: ≈2^11.8, β: 616, d: 7779, ↻: 1, tag: dual
+ rop: ≈2^206.9, mem: ≈2^137.5, m: ≈2^11.8, β: 616, d: 7779, ↻: 1, tag: dual

>>> LWE.dual_hybrid(schemes.Kyber512, red_cost_model=RC.GJ21, fft=True)
- rop: ≈2^149.6, mem: ≈2^145.7, m: 510, β: 399, t: 76, d: 1000, ↻: 1, ζ: 22, tag: dual_hybrid
+ rop: ≈2^149.8, mem: ≈2^92.1, m: 510, t: 76, β: 399, d: 1000, ↻: 1, ζ: 22, tag: dual_hybrid

"""

Cost.register_impermanent(
@@ -430,7 +437,6 @@ def _optimize_blocksize(
zeta: int = 0,
success_probability: float = 0.99,
red_cost_model=red_cost_model_default,
- use_lll=True,
log_level=None,
fft=False,
):
@@ -450,7 +456,6 @@
zeta=zeta,
success_probability=success_probability,
red_cost_model=red_cost_model,
- use_lll=use_lll,
log_level=log_level + 2,
)
it.update(cost)
@@ -465,7 +470,6 @@
params=params,
success_probability=success_probability,
red_cost_model=red_cost_model,
- use_lll=use_lll,
log_level=log_level + 1,
fft=fft,
)
@@ -488,15 +492,13 @@ def dual(
params: LWEParameters,
success_probability: float = 0.99,
red_cost_model=red_cost_model_default,
- use_lll=True,
):
"""
Dual hybrid attack as in [PQCBook:MicReg09]_.

:param params: LWE parameters.
:param success_probability: The success probability to target.
:param red_cost_model: How to cost lattice reduction.
- :param use_lll: use LLL calls to produce more small vectors [EC:Albrecht17]_.

The returned cost dictionary has the following entries:

@@ -527,7 +529,6 @@ def dual(
h1=0,
success_probability=success_probability,
red_cost_model=red_cost_model,
- use_lll=use_lll,
log_level=1,
)
del ret["zeta"]
@@ -541,7 +542,6 @@ def dual_hybrid(
params: LWEParameters,
success_probability: float = 0.99,
red_cost_model=red_cost_model_default,
- use_lll=True,
mitm_optimization=False,
opt_step=8,
fft=False,
@@ -552,7 +552,6 @@
:param params: LWE parameters.
:param success_probability: The success probability to target.
:param red_cost_model: How to cost lattice reduction.
- :param use_lll: Use LLL calls to produce more small vectors [EC:Albrecht17]_.
:param mitm_optimization: One of "analytical" or "numerical". If ``True`` a default from the
``conf`` module is picked, ``False`` disables MITM.
:param opt_step: Control robustness of optimizer.
@@ -586,7 +585,6 @@ def dual_hybrid(
params=params,
success_probability=success_probability,
red_cost_model=red_cost_model,
- use_lll=use_lll,
opt_step=opt_step,
fft=fft,
)
16 changes: 7 additions & 9 deletions estimator/lwe_guess.py
@@ -195,15 +195,13 @@ def __call__(self, params: LWEParameters, success_probability=0.99, quantum: boo
raise InsufficientSamplesError(
f"Exhaustive search: Need {m_required} samples but only {params.m} available."
)
- else:
- m = m_required

# we can compute A*s for all candidate s in time 2*size*m using
# (the generalization [ia.cr/2021/152] of) the recursive algorithm
# from [ia.cr/2020/515]
- cost = 2 * size * m
+ cost = 2 * size * m_required

- ret = Cost(rop=cost, mem=cost / 2, m=m)
+ ret = Cost(rop=cost, mem=cost / 2, m=m_required)
return ret.sanity_check()

__name__ = "exhaustive_search"
@@ -253,15 +251,15 @@ def mitm_analytical(self, params: LWEParameters, success_probability=0.99):
success_probability_ = 1.0
logT = k * log(sd_rng, 2)

- m_ = max(1, round(logT + log2(logT)))
- if params.m < m_:
+ m_required = max(1, round(logT + log2(logT)))
+ if params.m < m_required:
raise InsufficientSamplesError(
f"MITM: Need {m_} samples but only {params.m} available."
f"MITM: Need {m_required} samples but only {params.m} available."
)

# since m = logT + loglogT and rop = T*m, we have rop=2^m
- ret = Cost(rop=RR(2**m_), mem=2**logT * m_, m=m_, k=ZZ(k))
- repeat = prob_amplify(success_probability, sd_p**n * nd_p**m_ * success_probability_)
+ ret = Cost(rop=RR(2**m_required), mem=2**logT * m_required, m=m_required, k=ZZ(k))
+ repeat = prob_amplify(success_probability, sd_p**n * nd_p**m_required * success_probability_)
return ret.repeat(times=repeat)

def cost(
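
A simplified sketch of the analytical MITM balance above, assuming the table ranges over ``k`` secret coordinates, each taking ``sd_rng`` values, so that ``log2(T) = k * log2(sd_rng)``; the probability and repeat factors are omitted::

    from math import log2

    def mitm_analytical_sketch(k, sd_rng):
        logT = k * log2(sd_rng)
        # choosing m = log2(T) + log2(log2(T)) makes rop = T * m equal to 2^m
        m = max(1, round(logT + log2(logT)))
        rop = 2.0**m         # build the table, then look up each target
        mem = 2.0**logT * m  # T entries of m words each
        return rop, mem, m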
2 changes: 1 addition & 1 deletion estimator/prob.py
@@ -77,7 +77,7 @@ def amplify(target_success_probability, success_probability, majority=False):

:param target_success_probability: targeted success probability < 1
:param success_probability: success probability of a single trial < 1
- :param majority: if `True` amplify a deicsional problem, not a computational one
+ :param majority: if `True` amplify a decisional problem, not a computational one
if `False` then we assume that we can check solutions, so one success suffices

:returns: number of required trials to amplify
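
For the computational case (``majority=False``) the trial count follows from independent repetitions: ``n`` trials succeed at least once with probability 1 - (1 - p)^n. A sketch under that assumption::

    from math import ceil, log

    def amplify_trials(target, p):
        # smallest n with 1 - (1 - p)**n >= target; solutions are checkable,
        # so one success suffices (the majority/decisional case needs a
        # Chernoff-style bound instead and is not sketched here)
        return ceil(log(1 - target) / log(1 - p))

    print(amplify_trials(0.99, 0.01))  # 459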