fix logout notification, fix potential tailscaled panic when removing
Signed-off-by: Kristoffer Dalby <[email protected]>
kradalby committed Dec 6, 2023
1 parent af3c097 commit 3ca953a
Showing 4 changed files with 68 additions and 45 deletions.
13 changes: 13 additions & 0 deletions hscontrol/auth.go
@@ -500,6 +500,19 @@ func (h *Headscale) handleNodeLogOut(
 		return
 	}
 
+	stateUpdate := types.StateUpdate{
+		Type: types.StatePeerChangedPatch,
+		ChangePatches: []*tailcfg.PeerChange{
+			{
+				NodeID:    tailcfg.NodeID(node.ID),
+				KeyExpiry: &now,
+			},
+		},
+	}
+	if stateUpdate.Valid() {
+		h.nodeNotifier.NotifyWithIgnore(stateUpdate, node.MachineKey.String())
+	}
+
 	resp.AuthURL = ""
 	resp.MachineAuthorized = false
 	resp.NodeKeyExpired = true
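The auth.go hunk is the logout-notification fix: when a node logs out, every other node receives a lightweight peer-change patch carrying the new key expiry, while the logging-out node itself is skipped. A minimal sketch of that notify-with-ignore fan-out follows; the channel map, key strings, and StateUpdate shape are illustrative assumptions, not headscale's actual notifier internals.

package main

import "fmt"

// StateUpdate is a simplified stand-in for headscale's types.StateUpdate.
type StateUpdate struct {
	Type string
}

// Notifier fans updates out to connected nodes, keyed by machine key.
type Notifier struct {
	nodes map[string]chan StateUpdate
}

// NotifyWithIgnore sends update to every node except the listed keys;
// on logout, that is the node whose key just expired.
func (n *Notifier) NotifyWithIgnore(update StateUpdate, ignore ...string) {
	for key, ch := range n.nodes {
		skip := false
		for _, ig := range ignore {
			if key == ig {
				skip = true
			}
		}
		if skip {
			continue
		}
		ch <- update
	}
}

func main() {
	a := make(chan StateUpdate, 1)
	b := make(chan StateUpdate, 1)
	n := &Notifier{nodes: map[string]chan StateUpdate{"mkey-a": a, "mkey-b": b}}
	n.NotifyWithIgnore(StateUpdate{Type: "PeerChangedPatch"}, "mkey-a")
	fmt.Println(len(a), len(b)) // 0 1 — only mkey-b was notified
}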
84 changes: 40 additions & 44 deletions hscontrol/db/node.go
@@ -856,58 +856,54 @@ func (hsdb *HSDatabase) ExpireExpiredNodes(lastCheck time.Time) time.Time {
 	// checked everything.
 	started := time.Now()
 
-	users, err := hsdb.listUsers()
+	expired := make([]*tailcfg.PeerChange, 0)
+
+	nodes, err := hsdb.listNodes()
 	if err != nil {
-		log.Error().Err(err).Msg("Error listing users")
+		log.Error().
+			Err(err).
+			Msg("Error listing nodes to find expired nodes")
 
 		return time.Unix(0, 0)
 	}
+	for index, node := range nodes {
+		if node.IsExpired() &&
+			// TODO(kradalby): Replace this, it is very spammy
+			// It will notify about all nodes that have been expired.
+			// It should only notify about expired nodes since _last check_.
+			node.Expiry.After(lastCheck) {
+			expired = append(expired, &tailcfg.PeerChange{
+				NodeID:    tailcfg.NodeID(node.ID),
+				KeyExpiry: node.Expiry,
+			})
 
-	for _, user := range users {
-		nodes, err := hsdb.listNodesByUser(user.Name)
-		if err != nil {
-			log.Error().
-				Err(err).
-				Str("user", user.Name).
-				Msg("Error listing nodes in user")
-
-			return time.Unix(0, 0)
-		}
-
-		expired := make([]tailcfg.NodeID, 0)
-		for index, node := range nodes {
-			if node.IsExpired() &&
-				node.Expiry.After(lastCheck) {
-				expired = append(expired, tailcfg.NodeID(node.ID))
-
-				now := time.Now()
-				// Do not use setNodeExpiry as that has a notifier hook, which
-				// can cause a deadlock; we are updating all changed nodes later
-				// and there is no point in notifying twice.
-				if err := hsdb.db.Model(nodes[index]).Updates(types.Node{
-					Expiry: &now,
-				}).Error; err != nil {
-					log.Error().
-						Err(err).
-						Str("node", node.Hostname).
-						Str("name", node.GivenName).
-						Msg("🤮 Cannot expire node")
-				} else {
-					log.Info().
-						Str("node", node.Hostname).
-						Str("name", node.GivenName).
-						Msg("Node successfully expired")
-				}
-			}
-		}
+			now := time.Now()
+			// Do not use setNodeExpiry as that has a notifier hook, which
+			// can cause a deadlock; we are updating all changed nodes later
+			// and there is no point in notifying twice.
+			if err := hsdb.db.Model(nodes[index]).Updates(types.Node{
+				Expiry: &now,
+			}).Error; err != nil {
+				log.Error().
+					Err(err).
+					Str("node", node.Hostname).
+					Str("name", node.GivenName).
+					Msg("🤮 Cannot expire node")
+			} else {
+				log.Info().
+					Str("node", node.Hostname).
+					Str("name", node.GivenName).
+					Msg("Node successfully expired")
+			}
+		}
+	}
 
-		// TODO(kradalby): Should these be removed or changed(expired)?
-		if len(expired) > 0 {
-			hsdb.notifier.NotifyAll(types.StateUpdate{
-				Type:    types.StatePeerRemoved,
-				Removed: expired,
-			})
-		}
-	}
+	stateUpdate := types.StateUpdate{
+		Type:          types.StatePeerChangedPatch,
+		ChangePatches: expired,
+	}
+	if stateUpdate.Valid() {
+		hsdb.notifier.NotifyAll(stateUpdate)
+	}
 
 	return started
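The comment in the hunk above explains the ordering: each node's expiry is written straight to the database inside the loop (setNodeExpiry would fire a notifier hook and risk a deadlock), and subscribers are told once, in a single batched update, after the loop. A sketch of that collect-then-notify-once pattern, with simplified stand-in types rather than headscale's real ones:

package main

import (
	"fmt"
	"time"
)

// PeerChange is a simplified stand-in for tailcfg.PeerChange.
type PeerChange struct {
	NodeID    int
	KeyExpiry *time.Time
}

func main() {
	nodeIDs := []int{1, 2, 3}
	now := time.Now()

	// Collect every change without notifying; notifying inside the loop
	// could re-enter the notifier (the deadlock the comment warns about)
	// and would send one update per node instead of one in total.
	expired := make([]*PeerChange, 0, len(nodeIDs))
	for _, id := range nodeIDs {
		// The per-node database write would happen here, silently.
		expired = append(expired, &PeerChange{NodeID: id, KeyExpiry: &now})
	}

	// One batched notification after all writes are done.
	if len(expired) > 0 {
		fmt.Printf("NotifyAll: %d key-expiry patches\n", len(expired))
	}
}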
11 changes: 10 additions & 1 deletion hscontrol/mapper/mapper.go
@@ -409,14 +409,23 @@ func (m *Mapper) PeerRemovedResponse(
 	m.mu.Lock()
 	defer m.mu.Unlock()
 
+	// Some nodes might have been removed already,
+	// so we don't want to ask downstream to remove
+	// them twice; that can cause a panic in tailscaled.
+	notYetRemoved := []tailcfg.NodeID{}
+
 	// remove from our internal map
 	for _, id := range removed {
+		if _, ok := m.peers[uint64(id)]; ok {
+			notYetRemoved = append(notYetRemoved, id)
+		}
+
 		delete(m.peers, uint64(id))
 		delete(m.patches, uint64(id))
 	}
 
 	resp := m.baseMapResponse()
-	resp.PeersRemoved = removed
+	resp.PeersRemoved = notYetRemoved
 
 	return m.marshalMapResponse(mapRequest, &resp, node, mapRequest.Compress)
 }
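The mapper.go hunk is the panic fix from the commit title: a peer that was already dropped from the mapper's internal map must not be reported to the client as removed a second time. A sketch of the same guard in isolation, using simplified types in place of the Mapper's fields:

package main

import "fmt"

// filterNotYetRemoved keeps only the IDs we still track, so a peer is
// never announced as removed twice, and drops every ID from the map
// either way.
func filterNotYetRemoved(peers map[uint64]struct{}, removed []uint64) []uint64 {
	notYetRemoved := []uint64{}
	for _, id := range removed {
		if _, ok := peers[id]; ok {
			notYetRemoved = append(notYetRemoved, id)
		}
		delete(peers, id)
	}
	return notYetRemoved
}

func main() {
	peers := map[uint64]struct{}{1: {}, 2: {}}
	// 3 was already removed earlier, so only 1 is forwarded downstream.
	fmt.Println(filterNotYetRemoved(peers, []uint64{1, 3})) // [1]
}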
5 changes: 5 additions & 0 deletions integration/scenario.go
@@ -15,6 +15,7 @@ import (
 	"github.com/juanfont/headscale/integration/tsic"
 	"github.com/ory/dockertest/v3"
 	"github.com/puzpuzpuz/xsync/v3"
+	"github.com/samber/lo"
 	"golang.org/x/sync/errgroup"
 )
 
@@ -296,11 +297,13 @@ func (s *Scenario) CreateTailscaleNodesInUser(
 	opts ...tsic.Option,
 ) error {
 	if user, ok := s.users[userStr]; ok {
+		var versions []string
 		for i := 0; i < count; i++ {
 			version := requestedVersion
 			if requestedVersion == "all" {
 				version = MustTestVersions[i%len(MustTestVersions)]
 			}
+			versions = append(versions, version)
 
 			headscale, err := s.Headscale()
 			if err != nil {
@@ -350,6 +353,8 @@
 			return err
 		}
 
+		log.Printf("testing versions %v", lo.Uniq(versions))
+
 		return nil
 	}
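lo.Uniq from github.com/samber/lo, used in the new log line, returns the slice with duplicates removed while preserving first-seen order, so the log prints each tested version once. A minimal usage example (the version strings are made up for illustration):

package main

import (
	"fmt"

	"github.com/samber/lo"
)

func main() {
	versions := []string{"1.52.1", "1.50.0", "1.52.1", "head"}
	fmt.Println(lo.Uniq(versions)) // [1.52.1 1.50.0 head]
}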
