Skip to content

Commit

Permalink
Fix ConnectionManager finally handle
Browse files Browse the repository at this point in the history
This includes changes to the way a node behaves when its InboundGovernor
dies: changed `link` to `linkOnly (const True)` to correctly propagate
AsyncCancelled exceptions to the ConnectionManager and thus terminate it
as well.

The ConnectionManager needs to take care of closing the connection
handlers of each connection in a way that does not make us go beyond the
enforced timeouts. Since closing a connection is a blocking operation, we
need to mask exceptions during the exception handler; and since doing it
sequentially can lead to waiting on `atomically` blocks, we close each
connection in parallel.
  • Loading branch information
bolt12 committed Jan 28, 2022
1 parent 704f14d commit d5b7352
Show file tree
Hide file tree
Showing 2 changed files with 27 additions and 10 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ import Data.Proxy (Proxy (..))
import Data.Typeable (Typeable)
import GHC.Stack (CallStack, HasCallStack, callStack)

import Data.Map (Map, traverseWithKey)
import Data.Map (Map)
import qualified Data.Map as Map
import qualified Data.Set as Set

Expand Down Expand Up @@ -652,11 +652,18 @@ withConnectionManager ConnectionManagerArguments {
}

k connectionManager
`finally` do
-- Since this exception handler is blocking it might receive exceptions
-- during its execution, which we want to avoid, so we wrap it in
-- 'uninterruptibleMask_'.
`finally` uninterruptibleMask_ (do
traceWith tracer TrShutdown

state <- atomically $ readTMVar stateVar
void $ traverseWithKey
-- Spawning one thread for each connection cleanup avoids spending time
-- waiting for locks and cleanup logic that could delay closing the
-- connections, which would cause us to violate certain timeouts.
asyncs <- Map.elems
<$> Map.traverseMaybeWithKey
(\peerAddr MutableConnState { connVar } -> do
-- cleanup handler for that thread will close socket associated
-- with the thread. We put each connection in 'TerminatedState' to
Expand All @@ -679,11 +686,20 @@ withConnectionManager ConnectionManagerArguments {

when shouldTrace $
traceWith trTracer tr
-- using 'cancel' here, since we want to block until connection
-- handler thread terminates.
traverse_ cancel (getConnThread connState)
)
state
-- using 'throwTo' here, since we want to block only until the connection
-- handler thread receives an exception, so as to not take up extra
-- time and exceed the timeout schedules.
traverse
(\thread -> do
throwTo (asyncThreadId thread) AsyncCancelled
pure thread
)
(getConnThread connState)
) state

atomically $ runLastToFinishM
$ foldMap (LastToFinishM . void <$> waitCatchSTM) asyncs
)
where
traceCounters :: StrictTMVar m (ConnectionManagerState peerAddr handle handleError version m) -> m ()
traceCounters stateVar = do
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1680,7 +1680,7 @@ multinodeExperiment inboundTrTracer trTracer cmTracer inboundTracer
timeLimitsHandshake
acceptedConnLimit
( \ connectionManager _ serverAsync -> do
link serverAsync
linkOnly (const True) serverAsync
connectionLoop SingInitiatorResponderMode localAddr cc connectionManager Map.empty connVar
return Nothing
)
Expand Down Expand Up @@ -2478,7 +2478,8 @@ prop_timeouts_enforced serverAcc (ArbDataFlow dataFlow)
. getTraceEvents
$ trace

in verifyAllTimeouts transitionSignal
in counterexample (ppTrace trace)
$ verifyAllTimeouts transitionSignal
where
verifyAllTimeouts :: [[(Time , AbstractTransitionTrace SimAddr)]]
-> Property
Expand Down

0 comments on commit d5b7352

Please sign in to comment.