Skip to content

Commit

Permalink
fix: remove condition where header routes can stay directed at empty service in preemption (#3898)
Browse files Browse the repository at this point in the history

* fix: remove condition where header routes can stay directed at empty service in preemption

Signed-off-by: Dylan Schlager <[email protected]>

* add unit test

Signed-off-by: Dylan Schlager <[email protected]>

* lint

Signed-off-by: Dylan Schlager <[email protected]>

---------

Signed-off-by: Dylan Schlager <[email protected]>
  • Loading branch information
schlags authored Nov 3, 2024
1 parent 15c723f commit 53c4f12
Show file tree
Hide file tree
Showing 2 changed files with 97 additions and 0 deletions.
9 changes: 9 additions & 0 deletions rollout/trafficrouting.go
Original file line number Diff line number Diff line change
Expand Up @@ -198,6 +198,15 @@ func (c *rolloutContext) reconcileTrafficRouting() error {
} else if c.newRS == nil || c.newRS.Status.AvailableReplicas == 0 {
// when newRS is not available or replicas num is 0. never weight to canary
weightDestinations = append(weightDestinations, c.calculateWeightDestinationsFromExperiment()...)
// If a user changes their mind in the middle of an V1 -> V2 update, and then applies a V3
// there might have been a V2 ReplicaSet that was scaled up, but is now defunct.
// During the V2 rollout, managed routes could have been setup and would continue
// to direct traffic to the canary service which is now in front of 0 available replicas.
// We want to remove these managed routes alongside the safety here of never weighting to the canary.
err := reconciler.RemoveManagedRoutes()
if err != nil {
return err
}
} else if c.rollout.Status.PromoteFull {
// on a promote full, desired stable weight should be 0 (100% to canary),
// But we can only increase canary weight according to available replica counts of the canary.
Expand Down
88 changes: 88 additions & 0 deletions rollout/trafficrouting_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,8 +33,10 @@ import (
traefikMocks "github.com/argoproj/argo-rollouts/rollout/trafficrouting/traefik/mocks"
testutil "github.com/argoproj/argo-rollouts/test/util"
"github.com/argoproj/argo-rollouts/utils/conditions"
ingressutil "github.com/argoproj/argo-rollouts/utils/ingress"
istioutil "github.com/argoproj/argo-rollouts/utils/istio"
logutil "github.com/argoproj/argo-rollouts/utils/log"
replicasetutil "github.com/argoproj/argo-rollouts/utils/replicaset"
timeutil "github.com/argoproj/argo-rollouts/utils/time"
)

Expand Down Expand Up @@ -1343,3 +1345,89 @@ func TestDontWeightToZeroWhenDynamicallyRollingBackToStable(t *testing.T) {
rs1Updated := f.getUpdatedReplicaSet(scaleUpIndex)
assert.Equal(t, int32(10), *rs1Updated.Spec.Replicas)
}

// TestDontWeightOrHaveManagedRoutesDuringInterruptedUpdate builds off of TestCanaryDontScaleDownOldRsDuringInterruptedUpdate
// in canary_test when we scale down an intermediate V2 ReplicaSet when applying a V3 spec in the middle of updating.
// We want to make sure that traffic routing is cleared in both weight AND managed routes when the V2 rs is
// nil or has 0 available replicas.
// TestDontWeightOrHaveManagedRoutesDuringInterruptedUpdate builds off of TestCanaryDontScaleDownOldRsDuringInterruptedUpdate
// in canary_test when we scale down an intermediate V2 ReplicaSet when applying a V3 spec in the middle of updating.
// We want to make sure that traffic routing is cleared in both weight AND managed routes when the V2 rs is
// nil or has 0 available replicas.
func TestDontWeightOrHaveManagedRoutesDuringInterruptedUpdate(t *testing.T) {
	f := newFixture(t)
	defer f.Close()

	// Canary steps: install a managed header route first, then shift weight, then pause.
	steps := []v1alpha1.CanaryStep{
		{
			SetHeaderRoute: &v1alpha1.SetHeaderRoute{
				Name: "test-header",
				Match: []v1alpha1.HeaderRoutingMatch{
					{
						HeaderName: "test",
						HeaderValue: &v1alpha1.StringMatch{
							Exact: "test",
						},
					},
				},
			},
		},
		{
			SetWeight: pointer.Int32(90),
		},
		{
			Pause: &v1alpha1.RolloutPause{},
		},
	}
	// pointer.Int32 (not the deprecated Int32Ptr alias) for consistency with the steps above.
	r1 := newCanaryRollout("foo", 5, nil, steps, pointer.Int32(1), intstr.FromInt(1), intstr.FromInt(0))
	r1.Spec.Strategy.Canary.TrafficRouting = &v1alpha1.RolloutTrafficRouting{
		ALB: &v1alpha1.ALBTrafficRouting{
			Ingress: "test-ingress",
		},
		// NOTE: MangedRoutes is the (misspelled) field name in the v1alpha1 API.
		ManagedRoutes: []v1alpha1.MangedRoutes{
			{Name: "test-header"},
		},
	}

	r1.Spec.Strategy.Canary.StableService = "stable-svc"
	r1.Spec.Strategy.Canary.CanaryService = "canary-svc"
	r2 := bumpVersion(r1)
	r3 := bumpVersion(r2)

	stableSvc := newService("stable-svc", 80, map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: r1.Status.CurrentPodHash}, r1)
	canarySvc := newService("canary-svc", 80, map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: r3.Status.CurrentPodHash}, r3)
	r3.Status.StableRS = r1.Status.CurrentPodHash

	ingress := newIngress("test-ingress", canarySvc, stableSvc)
	ingress.Spec.Rules[0].HTTP.Paths[0].Backend.ServiceName = stableSvc.Name

	// V1 (stable) and V2 (defunct intermediate) are fully available;
	// V3, the new desired version, has 0 available replicas.
	rs1 := newReplicaSetWithStatus(r1, 5, 5)
	rs2 := newReplicaSetWithStatus(r2, 5, 5)
	rs3 := newReplicaSetWithStatus(r3, 5, 0)
	// Simulate leftover status weights still pointing at the defunct V2 ReplicaSet.
	r3.Status.Canary.Weights = &v1alpha1.TrafficWeights{
		Canary: v1alpha1.WeightDestination{
			PodTemplateHash: replicasetutil.GetPodTemplateHash(rs2),
		},
	}

	f.objects = append(f.objects, r3)
	f.kubeobjects = append(f.kubeobjects, rs1, rs2, rs3, canarySvc, stableSvc, ingress)
	f.replicaSetLister = append(f.replicaSetLister, rs1, rs2, rs3)
	f.serviceLister = append(f.serviceLister, canarySvc, stableSvc)
	f.ingressLister = append(f.ingressLister, ingressutil.NewLegacyIngress(ingress))

	// First reconcile: controller observes the interrupted update.
	f.expectPatchRolloutAction(r3)
	f.run(getKey(r3, t))

	// Re-point the recorded weights at the unavailable V3 ReplicaSet
	// (Weight is intentionally left at its zero value) and reconcile again.
	r3.Status.Canary.Weights = &v1alpha1.TrafficWeights{
		Canary: v1alpha1.WeightDestination{
			PodTemplateHash: replicasetutil.GetPodTemplateHash(rs3),
		},
	}

	f.expectUpdateReplicaSetAction(rs3)
	f.run(getKey(r3, t))

	// Make sure that our weight is zero and targets the V3 hash.
	assert.Equal(t, int32(0), r3.Status.Canary.Weights.Canary.Weight)
	assert.Equal(t, replicasetutil.GetPodTemplateHash(rs3), r3.Status.Canary.Weights.Canary.PodTemplateHash)
	// Make sure that RemoveManagedRoutes was called — the core of the fix:
	// managed routes must not keep steering traffic at an empty canary service.
	f.fakeTrafficRouting.AssertCalled(t, "RemoveManagedRoutes", mock.Anything, mock.Anything)
}

0 comments on commit 53c4f12

Please sign in to comment.