Cherry pick latest changes from #1179 (#1351)
lehins authored Dec 27, 2024
2 parents a50e092 + f2d134d commit f393abe
Showing 10 changed files with 146 additions and 114 deletions.
@@ -10,13 +10,12 @@ module Test.Consensus.Genesis.Setup.GenChains (
, genChainsWithExtraHonestPeers
) where

import Cardano.Slotting.Time (SlotLength, getSlotLength,
slotLengthFromSec)
import Cardano.Slotting.Time (slotLengthFromSec)
import Control.Monad (replicateM)
import qualified Control.Monad.Except as Exn
import Data.List as List (foldl')
import Data.Proxy (Proxy (..))
import Data.Time.Clock (DiffTime, secondsToDiffTime)
import Data.Time.Clock (DiffTime)
import qualified Data.Vector.Unboxed as Vector
import Data.Word (Word8)
import Ouroboros.Consensus.Block.Abstract hiding (Header)
@@ -110,7 +109,7 @@ genChains = genChainsWithExtraHonestPeers (pure 0)
-- However, in the future it could also be used to generate "short forks" near the tip of the trunk.
genChainsWithExtraHonestPeers :: QC.Gen Word -> QC.Gen Word -> QC.Gen (GenesisTest TestBlock ())
genChainsWithExtraHonestPeers genNumExtraHonest genNumForks = do
(asc, honestRecipe, someHonestChainSchema) <- genHonestChainSchema
(_, honestRecipe, someHonestChainSchema) <- genHonestChainSchema

H.SomeHonestChainSchema _ _ honestChainSchema <- pure someHonestChainSchema
let ChainSchema _ vH = honestChainSchema
@@ -128,8 +127,8 @@ genChainsWithExtraHonestPeers genNumExtraHonest genNumForks = do
gtGenesisWindow = GenesisWindow (fromIntegral scg),
gtForecastRange = ForecastRange (fromIntegral scg), -- REVIEW: Do we want to generate those randomly?
gtDelay = delta,
gtSlotLength,
gtChainSyncTimeouts = chainSyncTimeouts gtSlotLength asc,
gtSlotLength = slotLengthFromSec 20,
gtChainSyncTimeouts = chainSyncTimeouts,
gtBlockFetchTimeouts = blockFetchTimeouts,
gtLoPBucketParams = LoPBucketParams { lbpCapacity = 50, lbpRate = 10 },
-- These values give little enough leeway (5s) so that some adversaries get disconnected
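
A worked check of the "(5s)" figure in that comment, assuming the LoP bucket drains at lbpRate tokens per second and is refilled only by useful headers (a sketch, not part of the commit):

-- A peer that never sends a useful header empties the bucket in
-- capacity / rate seconds; with the parameters above that is exactly
-- the quoted leeway.
lopLeewaySeconds :: Rational
lopLeewaySeconds = 50 / 10  -- lbpCapacity / lbpRate = 5
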
@@ -143,8 +142,6 @@ genChainsWithExtraHonestPeers genNumExtraHonest genNumForks = do
}

where
gtSlotLength = slotLengthFromSec 20

genAdversarialFragment :: [TestBlock] -> Int -> (Int, [S]) -> AnchoredFragment TestBlock
genAdversarialFragment goodBlocks forkNo (prefixCount, slotsA)
= mkTestFragment (mkTestBlocks prefix slotsA forkNo)
@@ -169,11 +166,8 @@ genChainsWithExtraHonestPeers genNumExtraHonest genNumForks = do
incSlot :: SlotNo -> TestBlock -> TestBlock
incSlot n b = b { tbSlot = tbSlot b + n }

chainSyncTimeouts ::
SlotLength ->
Asc ->
ChainSyncTimeout
chainSyncTimeouts t f =
chainSyncTimeouts :: ChainSyncTimeout
chainSyncTimeouts =
ChainSyncTimeout
{ canAwaitTimeout,
intersectTimeout,
@@ -186,21 +180,22 @@ chainSyncTimeouts t f =
intersectTimeout :: Maybe DiffTime
intersectTimeout = shortWait
idleTimeout :: Maybe DiffTime
idleTimeout = Just 3673 -- taken from Ouroboros.Consensus.Node.stdChainSyncTimeout
-- | The following timeout is derived from the average length of a streak of
-- empty slots. If the probability of the election of a leader is @f@ and
-- @Y@ is a probability, then a streak of empty slots will be shorter than
-- @log (1 - Y) / log (1 - f)@ with probability @Y@. Main net nodes pick a
-- random value for @Y@ between 99.9% and 99.999%. For our use case, we
-- choose the tightest bound of 99.9%.
-- | The default from 'Ouroboros.Consensus.Node.stdChainSyncTimeout' is
-- 3673s, which is virtually infinite, so let us make it actually infinite
-- for our test environment.
idleTimeout = Nothing
-- | The 'mustReplyTimeout' must be disabled in our context, because the
-- chains are finite, and therefore an honest peer can only serve it all,
-- then send 'MsgAwaitReply' (therefore entering 'StMustReply'), and then
-- stall forever, and it must not be killed for it.
--
-- Note that this allows the adversaries to stall us forever in that same
-- situation. However, that peer is only allowed to send 'MsgAwaitReply'
-- when they have served their tip, which leaves them fully vulnerable to
-- the Genesis Density Disconnection (GDD) logic. A bug related to this
-- disabled timeout is in fact either a bug in the GDD or in the tests.
mustReplyTimeout :: Maybe DiffTime
mustReplyTimeout =
Just $
secondsToDiffTime $
round $
realToFrac (getSlotLength t)
* log (1 - 0.999)
/ log (1 - ascVal f)
mustReplyTimeout = Nothing

blockFetchTimeouts :: BlockFetchTimeout
blockFetchTimeouts =
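
For scale, a back-of-the-envelope evaluation of the deleted mustReplyTimeout formula. A minimal sketch, assuming an illustrative active-slot coefficient ascVal f = 0.05 (the generated value is not shown in this diff) and the 20s slot length now fixed in genChainsWithExtraHonestPeers:

-- The removed code computed: slotLength * log (1 - Y) / log (1 - f),
-- with Y = 0.999 the probability bound on the length of a streak of
-- empty slots.
oldMustReplyTimeout :: Double -> Double -> Double
oldMustReplyTimeout slotLen f = slotLen * log (1 - 0.999) / log (1 - f)

-- oldMustReplyTimeout 20 0.05 ≈ 2694: log 0.001 / log 0.95 is about 135
-- slots, so the old timeout already came to roughly 45 minutes, far beyond
-- any test run; replacing it with Nothing removes no protection the tests
-- actually relied on.
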
@@ -211,7 +211,7 @@ prop_leashingAttackStalling :: Property
prop_leashingAttackStalling =
forAllGenesisTest

(disableBoringTimeouts <$> genChains (QC.choose (1, 4)) `enrichedWith` genLeashingSchedule)
(genChains (QC.choose (1, 4)) `enrichedWith` genLeashingSchedule)

defaultSchedulerConfig
{ scTrace = False
@@ -260,9 +260,7 @@ prop_leashingAttackTimeLimited :: Property
prop_leashingAttackTimeLimited =
forAllGenesisTest

(disableCanAwaitTimeout . disableBoringTimeouts <$>
genChains (QC.choose (1, 4)) `enrichedWith` genTimeLimitedSchedule
)
(genChains (QC.choose (1, 4)) `enrichedWith` genTimeLimitedSchedule)

defaultSchedulerConfig
{ scTrace = False
@@ -336,15 +334,6 @@ prop_leashingAttackTimeLimited =
fromTipPoint (t, ScheduleTipPoint bp) = Just (t, bp)
fromTipPoint _ = Nothing

disableCanAwaitTimeout :: GenesisTest blk schedule -> GenesisTest blk schedule
disableCanAwaitTimeout gt =
gt
{ gtChainSyncTimeouts =
(gtChainSyncTimeouts gt)
{ canAwaitTimeout = Nothing
}
}

headCallStack :: HasCallStack => [a] -> a
headCallStack = \case
x:_ -> x
@@ -398,7 +387,7 @@ prop_loeStalling =
prop_downtime :: Property
prop_downtime = forAllGenesisTest

(disableBoringTimeouts <$> genChains (QC.choose (1, 4)) `enrichedWith` \ gt ->
(genChains (QC.choose (1, 4)) `enrichedWith` \ gt ->
ensureScheduleDuration gt <$> stToGen (uniformPoints (pointsGeneratorParams gt) (gtBlockTree gt)))

defaultSchedulerConfig
@@ -434,7 +423,7 @@ prop_downtime = forAllGenesisTest
prop_blockFetchLeashingAttack :: Property
prop_blockFetchLeashingAttack =
forAllGenesisTest
(disableBoringTimeouts <$> genChains (pure 0) `enrichedWith` genBlockFetchLeashingSchedule)
(genChains (pure 0) `enrichedWith` genBlockFetchLeashingSchedule)
defaultSchedulerConfig
{ scEnableLoE = True,
scEnableLoP = True,
@@ -481,13 +470,3 @@ prop_blockFetchLeashingAttack =
-- adversarial peer.
addGracePeriodDelay :: Int -> Time -> Time
addGracePeriodDelay adversaryCount = addTime (fromIntegral adversaryCount * 10)

disableBoringTimeouts :: GenesisTest blk schedule -> GenesisTest blk schedule
disableBoringTimeouts gt =
gt
{ gtChainSyncTimeouts =
(gtChainSyncTimeouts gt)
{ mustReplyTimeout = Nothing
, idleTimeout = Nothing
}
}
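
After the GenChains change above, this helper only rewrote fields that are already Nothing, so dropping it from the call sites is behaviour-preserving. A QuickCheck-style statement of that fact, as a sketch against the generator and record fields from this diff (this property is not part of the commit):

import Test.QuickCheck (Property, choose, forAllBlind, (.&&.), (===))
import Ouroboros.Network.Protocol.ChainSync.Codec (ChainSyncTimeout (..))

-- Every generated GenesisTest now has the boring timeouts disabled by
-- construction, making disableBoringTimeouts the identity.
prop_boringTimeoutsOffByConstruction :: Property
prop_boringTimeoutsOffByConstruction =
  forAllBlind (genChains (choose (1, 4))) $ \gt ->
    mustReplyTimeout (gtChainSyncTimeouts gt) === Nothing
    .&&. idleTimeout (gtChainSyncTimeouts gt) === Nothing
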
@@ -165,7 +165,7 @@ runChainSyncClient
res <-
try $
runPipelinedPeerWithLimits
nullTracer
(Tracer $ traceWith tracer . TraceChainSyncSendRecvEvent peerId "Client")
codecChainSyncId
chainSyncNoSizeLimits
(timeLimitsChainSync chainSyncTimeouts)
@@ -218,8 +218,8 @@ runChainSyncServer ::
ChainSyncServer (Header blk) (Point blk) (Tip blk) m () ->
Channel m (AnyMessage (ChainSync (Header blk) (Point blk) (Tip blk))) ->
m ()
runChainSyncServer _tracer peerId StateViewTracers {svtPeerSimulatorResultsTracer} server channel =
(try $ runPeer nullTracer codecChainSyncId channel (chainSyncServerPeer server)) >>= \case
runChainSyncServer tracer peerId StateViewTracers {svtPeerSimulatorResultsTracer} server channel =
(try $ runPeer sendRecvTracer codecChainSyncId channel (chainSyncServerPeer server)) >>= \case
Right ((), msgRes) -> traceWith svtPeerSimulatorResultsTracer $
PeerSimulatorResult peerId $ SomeChainSyncServerResult $ Right msgRes
Left exn -> do
@@ -228,3 +228,5 @@ runChainSyncServer _tracer peerId StateViewTracers {svtPeerSimulatorResultsTracer
-- NOTE: here we are able to trace exceptions, as what is done in `runChainSyncClient`
case fromException exn of
(_ :: Maybe SomeException) -> pure ()
where
sendRecvTracer = Tracer $ traceWith tracer . TraceChainSyncSendRecvEvent peerId "Server"
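
Both replacements of nullTracer rely on Tracer being contravariant: Tracer $ traceWith tracer . f feeds every protocol message through the wrapping constructor f before the ambient tracer sees it, here tagging messages with the peer id and a "Client"/"Server" label. A minimal self-contained sketch of the same idiom against contra-tracer's classic API (the String payload and labels are illustrative, not from this commit):

import Control.Tracer (Tracer (..), stdoutTracer, traceWith)

-- Re-tag each traced message with its origin before handing it to the
-- downstream tracer, mirroring the shape of the tracers above.
labelled :: String -> Tracer IO String
labelled label = Tracer $ traceWith stdoutTracer . ((label <> ": ") <>)

main :: IO ()
main = traceWith (labelled "Server") "MsgRollForward"
-- prints: Server: MsgRollForward
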
@@ -9,15 +9,16 @@ module Test.Consensus.PeerSimulator.Tests.LinkedThreads (tests) where
import Control.Monad.Class.MonadAsync (AsyncCancelled (..))
import Control.Monad.Class.MonadTime.SI (Time (Time))
import Data.Functor (($>))
import Data.Maybe (fromJust)
import Ouroboros.Consensus.Util.IOLike (DiffTime, fromException)
import Ouroboros.Consensus.Util.IOLike (fromException)
import qualified Ouroboros.Network.AnchoredFragment as AF
import Ouroboros.Network.Driver.Limits
(ProtocolLimitFailure (ExceededTimeLimit))
import Ouroboros.Network.Protocol.ChainSync.Codec (mustReplyTimeout)
import Test.Consensus.BlockTree (BlockTree (..))
import Test.Consensus.Genesis.Setup
import Test.Consensus.PeerSimulator.Run (defaultSchedulerConfig)
import Test.Consensus.PeerSimulator.Run
(SchedulerConfig (scEnableChainSyncTimeouts),
defaultSchedulerConfig)
import Test.Consensus.PeerSimulator.StateView
import Test.Consensus.PointSchedule
import Test.Consensus.PointSchedule.Peers (peersOnlyHonest)
@@ -39,13 +40,15 @@ tests = testProperty "ChainSync kills BlockFetch" prop_chainSyncKillsBlockFetch
prop_chainSyncKillsBlockFetch :: Property
prop_chainSyncKillsBlockFetch = do
forAllGenesisTest
(do gt@GenesisTest{gtChainSyncTimeouts} <- genChains (pure 0)
let schedule = dullSchedule gt (fromJust $ mustReplyTimeout gtChainSyncTimeouts)
pure $ gt $> schedule
(do gt@GenesisTest{gtBlockTree} <- genChains (pure 0)
pure $ enableMustReplyTimeout $ gt $> dullSchedule (btTrunk gtBlockTree)
)
defaultSchedulerConfig

defaultSchedulerConfig {scEnableChainSyncTimeouts = True}

-- No shrinking because the schedule is tiny and hand-crafted
(\_ _ -> [])

( \_ stateView@StateView {svTipBlock} ->
svTipBlock == Nothing
&& case exceptionsByComponent ChainSyncClient stateView of
@@ -62,9 +65,11 @@ prop_chainSyncKillsBlockFetch = do
_ -> False
)
where
dullSchedule :: GenesisTest blk () -> DiffTime -> PointSchedule blk
dullSchedule GenesisTest {gtBlockTree} timeout =
let (firstBlock, secondBlock) = case AF.toOldestFirst $ btTrunk gtBlockTree of
timeout = 10

dullSchedule :: AF.AnchoredFragment blk -> PointSchedule blk
dullSchedule trunk =
let (firstBlock, secondBlock) = case AF.toOldestFirst trunk of
b1 : b2 : _ -> (b1, b2)
_ -> error "block tree must have two blocks"
psSchedule = peersOnlyHonest $
Expand All @@ -73,3 +78,6 @@ prop_chainSyncKillsBlockFetch = do
]
psMinEndTime = Time $ timeout + 1
in PointSchedule {psSchedule, psStartOrder = [], psMinEndTime}

enableMustReplyTimeout :: GenesisTest blk schedule -> GenesisTest blk schedule
enableMustReplyTimeout gt = gt { gtChainSyncTimeouts = (gtChainSyncTimeouts gt) { mustReplyTimeout = Just timeout } }
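
The rewrite above pins timeout = 10 in a where-binding, threads it into the schedule, and re-enables only mustReplyTimeout via enableMustReplyTimeout, now that genChains disables it by default. The hand-crafted schedule depends on one timing invariant, sketched below (the helper is hypothetical, not part of the commit):

import Control.Monad.Class.MonadTime.SI (Time (Time))
import Data.Time.Clock (DiffTime)

-- The simulation runs until psMinEndTime; the ChainSync timeout (and the
-- BlockFetch kill linked to it) is only observable if the schedule ends
-- strictly after the timeout fires.
timeoutObservable :: DiffTime -> Time -> Bool
timeoutObservable timeout (Time end) = end > timeout

-- timeoutObservable 10 (Time (10 + 1)) == True, matching psMinEndTime above.
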
@@ -5,7 +5,6 @@
module Test.Consensus.PeerSimulator.Tests.Timeouts (tests) where

import Data.Functor (($>))
import Data.Maybe (fromJust)
import Ouroboros.Consensus.Util.Condense
import Ouroboros.Consensus.Util.IOLike (DiffTime, Time (Time),
fromException)
@@ -15,7 +14,9 @@ import Ouroboros.Network.Driver.Limits
import Ouroboros.Network.Protocol.ChainSync.Codec (mustReplyTimeout)
import Test.Consensus.BlockTree (btTrunk)
import Test.Consensus.Genesis.Setup
import Test.Consensus.PeerSimulator.Run (defaultSchedulerConfig)
import Test.Consensus.PeerSimulator.Run
(SchedulerConfig (scEnableChainSyncTimeouts),
defaultSchedulerConfig)
import Test.Consensus.PeerSimulator.StateView
import Test.Consensus.PointSchedule
import Test.Consensus.PointSchedule.Peers (peersOnlyAdversary,
@@ -38,12 +39,11 @@ prop_timeouts :: Bool -> Property
prop_timeouts mustTimeout = do
forAllGenesisTest

(do gt@GenesisTest{gtChainSyncTimeouts, gtBlockTree} <- genChains (pure 0)
let schedule = dullSchedule (fromJust $ mustReplyTimeout gtChainSyncTimeouts) (btTrunk gtBlockTree)
pure $ gt $> schedule
(do gt@GenesisTest{gtBlockTree} <- genChains (pure 0)
pure $ enableMustReplyTimeout $ gt $> dullSchedule (btTrunk gtBlockTree)
)
-- Timeouts are enabled by default
defaultSchedulerConfig

defaultSchedulerConfig {scEnableChainSyncTimeouts = True}

-- Here we can't shrink because we exploit the properties of the point schedule to wait
-- at the end of the test for the adversaries to get disconnected, by adding an extra point.
@@ -60,9 +60,11 @@ prop_timeouts mustTimeout = do
)

where
dullSchedule :: AF.HasHeader blk => DiffTime -> AF.AnchoredFragment blk -> PointSchedule blk
dullSchedule _ (AF.Empty _) = error "requires a non-empty block tree"
dullSchedule timeout (_ AF.:> tipBlock) =
timeout = 10

dullSchedule :: AF.HasHeader blk => AF.AnchoredFragment blk -> PointSchedule blk
dullSchedule (AF.Empty _) = error "requires a non-empty block tree"
dullSchedule (_ AF.:> tipBlock) =
let offset :: DiffTime = if mustTimeout then 1 else -1
psSchedule = (if mustTimeout then peersOnlyAdversary else peersOnlyHonest) $ [
(Time 0, scheduleTipPoint tipBlock),
Expand All @@ -72,3 +74,6 @@ prop_timeouts mustTimeout = do
-- This keeps the test running long enough to pass the timeout by 'offset'.
psMinEndTime = Time $ timeout + offset
in PointSchedule {psSchedule, psStartOrder = [], psMinEndTime}

enableMustReplyTimeout :: GenesisTest blk schedule -> GenesisTest blk schedule
enableMustReplyTimeout gt = gt { gtChainSyncTimeouts = (gtChainSyncTimeouts gt) { mustReplyTimeout = Just timeout } }
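
The ± offset is what turns one schedule into both halves of this parametrised test. A worked check of the arithmetic with the timeout = 10 fixed above (the helper name is hypothetical, mirroring dullSchedule's offset logic):

import Control.Monad.Class.MonadTime.SI (Time (Time))

-- mustTimeout = True  -> Time 11: the adversary's run outlives the 10s
--                        mustReplyTimeout, so ExceededTimeLimit must fire;
-- mustTimeout = False -> Time 9: the honest run stops short of it, so no
--                        exception may occur.
psMinEndTimeFor :: Bool -> Time
psMinEndTimeFor mustTimeout = Time $ 10 + (if mustTimeout then 1 else -1)
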