fix: added tests and fixed the case when server is changed inside a secret

Signed-off-by: Lukas Wöhrl <[email protected]>
woehrl01 committed Feb 13, 2024
1 parent 4fa4cc2 commit 81c920d
Showing 3 changed files with 75 additions and 6 deletions.
2 changes: 1 addition & 1 deletion controller/cache/cache.go
@@ -751,7 +751,7 @@ func (c *liveStateCache) handleAddEvent(cluster *appv1.Cluster) {
}

func (c *liveStateCache) handleModEvent(oldCluster *appv1.Cluster, newCluster *appv1.Cluster) {
-	c.clusterSharding.Update(newCluster)
+	c.clusterSharding.Update(oldCluster, newCluster)
	c.lock.Lock()
	cluster, ok := c.clusters[newCluster.Server]
	c.lock.Unlock()
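For context on this hunk: when the server URL stored in a cluster Secret changes, the informer's mod event is the only place where the old URL is still visible, so passing only the new cluster left the sharding cache keyed by the stale URL. A minimal, self-contained sketch of that situation, using simplified stand-in types rather than the actual Argo CD structs:

package main

import "fmt"

type cluster struct {
	ID     string
	Server string
}

// shardingCache stands in for ClusterSharding: entries are keyed by
// server URL, so a renamed server must be deleted under its old key.
type shardingCache struct {
	clusters map[string]*cluster
}

func (s *shardingCache) update(old, new *cluster) {
	// Mirrors the fix: purge the old key before storing the new one.
	if old.Server != new.Server {
		delete(s.clusters, old.Server)
	}
	s.clusters[new.Server] = new
}

func main() {
	s := &shardingCache{clusters: map[string]*cluster{}}
	before := &cluster{ID: "1", Server: "https://kubernetes.default.svc"}
	s.clusters[before.Server] = before

	// The Secret is edited so that the server URL changes; the mod
	// event hands handleModEvent both the old and the new cluster.
	after := &cluster{ID: "1", Server: "https://server2"}
	s.update(before, after)

	fmt.Println(len(s.clusters)) // 1: no stale entry under the old URL
}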
17 changes: 12 additions & 5 deletions controller/sharding/cache.go
@@ -12,7 +12,7 @@ type ClusterShardingCache interface {
	Init(clusters *v1alpha1.ClusterList)
	Add(c *v1alpha1.Cluster)
	Delete(clusterServer string)
-	Update(c *v1alpha1.Cluster)
+	Update(oldCluster *v1alpha1.Cluster, newCluster *v1alpha1.Cluster)
	IsManagedCluster(c *v1alpha1.Cluster) bool
	GetDistribution() map[string]int
}
@@ -97,13 +97,16 @@ func (sharding *ClusterSharding) Delete(clusterServer string) {
	}
}

-func (sharding *ClusterSharding) Update(c *v1alpha1.Cluster) {
+func (sharding *ClusterSharding) Update(oldCluster *v1alpha1.Cluster, newCluster *v1alpha1.Cluster) {
	sharding.lock.Lock()
	defer sharding.lock.Unlock()

-	old, ok := sharding.Clusters[c.Server]
-	sharding.Clusters[c.Server] = c
-	if !ok || hasShardingUpdates(old, c) {
+	if _, ok := sharding.Clusters[oldCluster.Server]; ok && oldCluster.Server != newCluster.Server {
+		delete(sharding.Clusters, oldCluster.Server)
+		delete(sharding.Shards, oldCluster.Server)
+	}
+	sharding.Clusters[newCluster.Server] = newCluster
+	if hasShardingUpdates(oldCluster, newCluster) {
		sharding.updateDistribution()
	} else {
		log.Debugf("Skipping sharding distribution update. No relevant changes")
@@ -159,6 +162,10 @@ func hasShardingUpdates(old, new *v1alpha1.Cluster) bool {
		return true
	}

+	if old.Server != new.Server {
+		return true
+	}
+
	// return false if the shard field has not been modified
	if old.Shard == nil && new.Shard == nil {
		return false
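For orientation, here is a hedged reconstruction of hasShardingUpdates after this patch. Only the new Server comparison, the nil-shard comment, and a few surrounding lines are visible in the hunk; the remaining checks are assumptions inferred from the visible return statements, and the simplified clusterSpec type stands in for v1alpha1.Cluster:

package main

import "fmt"

type clusterSpec struct {
	Server string
	Shard  *int64
}

func hasShardingUpdates(old, new *clusterSpec) bool {
	// Assumed: a missing side always forces a redistribution.
	if old == nil || new == nil {
		return true
	}
	// New in this commit: a changed server URL moves the cluster to a
	// different map key, so the distribution must be recomputed.
	if old.Server != new.Server {
		return true
	}
	// Visible in the hunk: unchanged nil shards mean nothing moved.
	if old.Shard == nil && new.Shard == nil {
		return false
	}
	// Assumed: one side nil, or differing values, count as a change.
	if old.Shard == nil || new.Shard == nil {
		return true
	}
	return *old.Shard != *new.Shard
}

func main() {
	shard := int64(2)
	fmt.Println(hasShardingUpdates(
		&clusterSpec{Server: "https://server1", Shard: &shard},
		&clusterSpec{Server: "https://server2", Shard: &shard},
	)) // true, matching the "Server has changed" test case below
}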
62 changes: 62 additions & 0 deletions controller/sharding/cache_test.go
@@ -174,6 +174,9 @@ func TestClusterSharding_Update(t *testing.T) {
	assert.Equal(t, 0, distributionA)

	sharding.Update(&v1alpha1.Cluster{
+		ID:     "1",
+		Server: "https://kubernetes.default.svc",
+	}, &v1alpha1.Cluster{
		ID:     "4",
		Server: "https://kubernetes.default.svc",
	})
@@ -186,6 +189,51 @@ func TestClusterSharding_Update(t *testing.T) {
	assert.Equal(t, 1, distributionA)
}

+func TestClusterSharding_UpdateServerName(t *testing.T) {
+	shard := 1
+	replicas := 2
+	sharding := setupTestSharding(shard, replicas)
+
+	sharding.Init(
+		&v1alpha1.ClusterList{
+			Items: []v1alpha1.Cluster{
+				{
+					ID:     "2",
+					Server: "https://127.0.0.1:6443",
+				},
+				{
+					ID:     "1",
+					Server: "https://kubernetes.default.svc",
+				},
+			},
+		},
+	)
+
+	distributionBefore := sharding.GetDistribution()
+	assert.Equal(t, 2, len(distributionBefore))
+
+	distributionA, ok := distributionBefore["https://kubernetes.default.svc"]
+	assert.True(t, ok)
+	assert.Equal(t, 0, distributionA)
+
+	sharding.Update(&v1alpha1.Cluster{
+		ID:     "1",
+		Server: "https://kubernetes.default.svc",
+	}, &v1alpha1.Cluster{
+		ID:     "1",
+		Server: "https://server2",
+	})
+
+	distributionAfter := sharding.GetDistribution()
+	assert.Equal(t, 2, len(distributionAfter))
+
+	_, ok = distributionAfter["https://kubernetes.default.svc"]
+	assert.False(t, ok) // the old server name should not be present anymore
+
+	_, ok = distributionAfter["https://server2"]
+	assert.True(t, ok) // the new server name should be present
+}

func TestClusterSharding_IsManagedCluster(t *testing.T) {
	replicas := 2
	sharding0 := setupTestSharding(0, replicas)
@@ -403,6 +451,20 @@ func TestHasShardingUpdates(t *testing.T) {
			},
			expected: true,
		},
+		{
+			name: "Server has changed",
+			old: &v1alpha1.Cluster{
+				ID:     "1",
+				Server: "https://server1",
+				Shard:  Int64Ptr(2),
+			},
+			new: &v1alpha1.Cluster{
+				ID:     "1",
+				Server: "https://server2",
+				Shard:  Int64Ptr(2),
+			},
+			expected: true,
+		},
	}

	for _, tc := range testCases {
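Assuming a standard Go toolchain and a checkout of the repository, the new coverage can be exercised from the repository root with: go test ./controller/sharding -run 'TestClusterSharding_UpdateServerName|TestHasShardingUpdates'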
