diff --git a/CHANGELOG.md b/CHANGELOG.md
index 263680f07d..32f585e4e7 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,18 @@
+
+## [v1.7.2](https://github.com/argoproj/argo-rollouts/compare/v1.7.1...v1.7.2) (2024-08-12)
+
+### Fix
+
+* replicaSet not scaled down due to incorrect annotations ([#3762](https://github.com/argoproj/argo-rollouts/issues/3762)) ([#3784](https://github.com/argoproj/argo-rollouts/issues/3784))
+* add update verb to ClusterRole permissions for scaleDown feature. Fixes [#3672](https://github.com/argoproj/argo-rollouts/issues/3672) ([#3675](https://github.com/argoproj/argo-rollouts/issues/3675))
+* **analysis:** explicitly set datadog aggregator to last only on v2 ([#3730](https://github.com/argoproj/argo-rollouts/issues/3730))
+* **analysis:** Take RollbackWindow into account when Reconciling Analysis Runs. Fixes [#3669](https://github.com/argoproj/argo-rollouts/issues/3669) ([#3670](https://github.com/argoproj/argo-rollouts/issues/3670))
+* **controller:** Get the right resourceName for traefik.io. Fixes [#3615](https://github.com/argoproj/argo-rollouts/issues/3615) ([#3759](https://github.com/argoproj/argo-rollouts/issues/3759))
+* **controller:** use the stableRS from the rollout context rather tha… ([#3664](https://github.com/argoproj/argo-rollouts/issues/3664))
+* **dashboard:** Update pod status logic to support native sidecars. Fixes [#3366](https://github.com/argoproj/argo-rollouts/issues/3366) ([#3639](https://github.com/argoproj/argo-rollouts/issues/3639))
+* **metricprovider:** reuse http.Transport for http.Client ([#3780](https://github.com/argoproj/argo-rollouts/issues/3780))
+
+
## [v1.7.1](https://github.com/argoproj/argo-rollouts/compare/v1.7.0...v1.7.1) (2024-06-22)
diff --git a/cmd/rollouts-controller/main.go b/cmd/rollouts-controller/main.go
index c8deab3ea0..89e611ab95 100644
--- a/cmd/rollouts-controller/main.go
+++ b/cmd/rollouts-controller/main.go
@@ -2,6 +2,7 @@ package main
import (
"fmt"
+ "net/http"
"os"
"strings"
"time"
@@ -79,6 +80,7 @@ func newCommand() *cobra.Command {
printVersion bool
selfServiceNotificationEnabled bool
controllersEnabled []string
+ pprofAddress string
)
electOpts := controller.NewLeaderElectionOptions()
var command = cobra.Command{
@@ -204,6 +206,11 @@ func newCommand() *cobra.Command {
ingressWrapper, err := ingressutil.NewIngressWrapper(mode, kubeClient, kubeInformerFactory)
checkError(err)
+ if pprofAddress != "" {
+ mux := controller.NewPProfServer()
+ go func() { log.Println(http.ListenAndServe(pprofAddress, mux)) }()
+ }
+
var cm *controller.Manager
enabledControllers, err := getEnabledControllers(controllersEnabled)
@@ -310,6 +317,7 @@ func newCommand() *cobra.Command {
command.Flags().DurationVar(&electOpts.LeaderElectionRetryPeriod, "leader-election-retry-period", controller.DefaultLeaderElectionRetryPeriod, "The duration the clients should wait between attempting acquisition and renewal of a leadership. This is only applicable if leader election is enabled.")
command.Flags().BoolVar(&selfServiceNotificationEnabled, "self-service-notification-enabled", false, "Allows rollouts controller to pull notification config from the namespace that the rollout resource is in. This is useful for self-service notification.")
command.Flags().StringSliceVar(&controllersEnabled, "controllers", nil, "Explicitly specify the list of controllers to run, currently only supports 'analysis', eg. --controller=analysis. Default: all controllers are enabled")
+	command.Flags().StringVar(&pprofAddress, "enable-pprof-address", "", "Enable pprof profiling on the controller by providing a server address.")
return &command
}
diff --git a/controller/profiling.go b/controller/profiling.go
new file mode 100644
index 0000000000..03920906ac
--- /dev/null
+++ b/controller/profiling.go
@@ -0,0 +1,24 @@
+package controller
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/pprof"
+)
+
+const (
+ ProfilingPath = "/debug/pprof"
+)
+
+// NewPProfServer returns a new pprof server to gather runtime profiling data
+func NewPProfServer() *http.ServeMux {
+ mux := http.NewServeMux()
+
+ mux.HandleFunc(ProfilingPath, pprof.Index)
+ mux.HandleFunc(fmt.Sprintf("%s/cmdline", ProfilingPath), pprof.Cmdline)
+ mux.HandleFunc(fmt.Sprintf("%s/profile", ProfilingPath), pprof.Profile)
+ mux.HandleFunc(fmt.Sprintf("%s/symbol", ProfilingPath), pprof.Symbol)
+ mux.HandleFunc(fmt.Sprintf("%s/trace", ProfilingPath), pprof.Trace)
+
+ return mux
+}
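
main.go hands this mux to `http.ListenAndServe` in a goroutine whenever `--enable-pprof-address` is set. A self-contained sketch of the same wiring, with an extra GET against the index to show the endpoints respond (the `newPProfServer` helper and the loopback address are illustrative, not part of the patch):

```go
package main

import (
	"fmt"
	"io"
	"log"
	"net"
	"net/http"
	"net/http/pprof"
)

// newPProfServer mirrors controller.NewPProfServer from the patch above.
func newPProfServer() *http.ServeMux {
	mux := http.NewServeMux()
	mux.HandleFunc("/debug/pprof", pprof.Index)
	mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
	return mux
}

func main() {
	// Listen first so the GET below cannot race server startup.
	ln, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	go func() { log.Println(http.Serve(ln, newPProfServer())) }()

	resp, err := http.Get(fmt.Sprintf("http://%s/debug/pprof", ln.Addr()))
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, len(body), "bytes of pprof index")
}
```

With the controller started as `--enable-pprof-address=:6060`, the same endpoints can be consumed by the standard tooling, for example `go tool pprof http://localhost:6060/debug/pprof/profile`.
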
diff --git a/go.mod b/go.mod
index 2a7073409c..b663d22adb 100644
--- a/go.mod
+++ b/go.mod
@@ -22,7 +22,7 @@ require (
github.com/google/uuid v1.6.0
github.com/grpc-ecosystem/grpc-gateway v1.16.0
github.com/hashicorp/go-plugin v1.6.1
- github.com/influxdata/influxdb-client-go/v2 v2.13.0
+ github.com/influxdata/influxdb-client-go/v2 v2.14.0
github.com/juju/ansiterm v1.0.0
github.com/machinebox/graphql v0.2.2
github.com/mitchellh/mapstructure v1.5.0
diff --git a/go.sum b/go.sum
index e86242e7c1..6ea7ee917d 100644
--- a/go.sum
+++ b/go.sum
@@ -435,8 +435,8 @@ github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/influxdata/influxdb-client-go/v2 v2.13.0 h1:ioBbLmR5NMbAjP4UVA5r9b5xGjpABD7j65pI8kFphDM=
-github.com/influxdata/influxdb-client-go/v2 v2.13.0/go.mod h1:k+spCbt9hcvqvUiz0sr5D8LolXHqAAOfPw9v/RIRHl4=
+github.com/influxdata/influxdb-client-go/v2 v2.14.0 h1:AjbBfJuq+QoaXNcrova8smSjwJdUHnwvfjMF71M1iI4=
+github.com/influxdata/influxdb-client-go/v2 v2.14.0/go.mod h1:Ahpm3QXKMJslpXl3IftVLVezreAUtBOTZssDrjZEFHI=
github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf h1:7JTmneyiNEwVBOHSjoMxiWAqB992atOeepeFYegn5RU=
github.com/influxdata/line-protocol v0.0.0-20210922203350-b1ad95c89adf/go.mod h1:xaLFMmpvUxqXtVkUJfg9QmT88cDaCJ3ZKgdZ78oO8Qo=
github.com/jaytaylor/html2text v0.0.0-20190408195923-01ec452cbe43/go.mod h1:CVKlgaMiht+LXvHG173ujK6JUhZXKb2u/BQtjPDIvyk=
diff --git a/rollout/analysis_test.go b/rollout/analysis_test.go
index fa7fdc5b96..e9526cb75f 100644
--- a/rollout/analysis_test.go
+++ b/rollout/analysis_test.go
@@ -406,7 +406,7 @@ func TestInvalidSpecMissingClusterTemplatesBackgroundAnalysis(t *testing.T) {
f.objects = append(f.objects, r)
patchIndex := f.expectPatchRolloutAction(r)
- f.run(getKey(r, t))
+ f.runExpectError(getKey(r, t), true)
expectedPatchWithoutSub := `{
"status": {
@@ -961,7 +961,7 @@ func TestFailCreateStepAnalysisRunIfInvalidTemplateRef(t *testing.T) {
f.objects = append(f.objects, r, at)
patchIndex := f.expectPatchRolloutAction(r)
- f.run(getKey(r, t))
+ f.runExpectError(getKey(r, t), true)
expectedPatchWithoutSub := `{
"status": {
@@ -1006,7 +1006,7 @@ func TestFailCreateBackgroundAnalysisRunIfInvalidTemplateRef(t *testing.T) {
f.objects = append(f.objects, r, at)
patchIndex := f.expectPatchRolloutAction(r)
- f.run(getKey(r, t))
+ f.runExpectError(getKey(r, t), true)
expectedPatchWithoutSub := `{
"status": {
@@ -1055,7 +1055,7 @@ func TestFailCreateBackgroundAnalysisRunIfMetricRepeated(t *testing.T) {
f.objects = append(f.objects, r, at, at2)
patchIndex := f.expectPatchRolloutAction(r)
- f.run(getKey(r, t))
+ f.runExpectError(getKey(r, t), true)
expectedPatchWithoutSub := `{
"status": {
diff --git a/rollout/bluegreen.go b/rollout/bluegreen.go
index 7613852470..cf6cbc6ac7 100644
--- a/rollout/bluegreen.go
+++ b/rollout/bluegreen.go
@@ -21,7 +21,7 @@ func (c *rolloutContext) rolloutBlueGreen() error {
if err != nil {
return err
}
- c.newRS, err = c.getAllReplicaSetsAndSyncRevision(true)
+ c.newRS, err = c.getAllReplicaSetsAndSyncRevision()
if err != nil {
return fmt.Errorf("failed to getAllReplicaSetsAndSyncRevision in rolloutBlueGreen create true: %w", err)
}
diff --git a/rollout/bluegreen_test.go b/rollout/bluegreen_test.go
index b78ed8704d..f8abaa9fcc 100644
--- a/rollout/bluegreen_test.go
+++ b/rollout/bluegreen_test.go
@@ -950,8 +950,10 @@ func TestBlueGreenRolloutStatusHPAStatusFieldsNoActiveSelector(t *testing.T) {
f := newFixture(t)
defer f.Close()
f.objects = append(f.objects, ro)
+ f.kubeobjects = append(f.kubeobjects, activeSvc)
f.rolloutLister = append(f.rolloutLister, ro)
f.replicaSetLister = append(f.replicaSetLister, rs)
+ f.serviceLister = append(f.serviceLister, activeSvc)
ctrl, _, _ := f.newController(noResyncPeriodFunc)
roCtx, err := ctrl.newRolloutContext(ro)
diff --git a/rollout/canary.go b/rollout/canary.go
index 422ed3644d..0d3e74fcf7 100644
--- a/rollout/canary.go
+++ b/rollout/canary.go
@@ -20,14 +20,14 @@ import (
func (c *rolloutContext) rolloutCanary() error {
var err error
if replicasetutil.PodTemplateOrStepsChanged(c.rollout, c.newRS) {
- c.newRS, err = c.getAllReplicaSetsAndSyncRevision(false)
+ c.newRS, err = c.getAllReplicaSetsAndSyncRevision()
if err != nil {
return fmt.Errorf("failed to getAllReplicaSetsAndSyncRevision in rolloutCanary with PodTemplateOrStepsChanged: %w", err)
}
return c.syncRolloutStatusCanary()
}
- c.newRS, err = c.getAllReplicaSetsAndSyncRevision(true)
+ c.newRS, err = c.getAllReplicaSetsAndSyncRevision()
if err != nil {
return fmt.Errorf("failed to getAllReplicaSetsAndSyncRevision in rolloutCanary create true: %w", err)
}
@@ -448,13 +448,6 @@ func (c *rolloutContext) reconcileCanaryReplicaSets() (bool, error) {
return true, nil
}
- // If we have updated both the replica count and the pod template hash c.newRS will be nil we want to reconcile the newRS so we look at the
- // rollout status to get the newRS to reconcile it.
- if c.newRS == nil && c.rollout.Status.CurrentPodHash != c.rollout.Status.StableRS {
- rs, _ := replicasetutil.GetReplicaSetByTemplateHash(c.allRSs, c.rollout.Status.CurrentPodHash)
- c.newRS = rs
- }
-
scaledNewRS, err := c.reconcileNewReplicaSet()
if err != nil {
return false, err
diff --git a/rollout/canary_test.go b/rollout/canary_test.go
index 3d1eec9d14..ad3d9b4267 100644
--- a/rollout/canary_test.go
+++ b/rollout/canary_test.go
@@ -1,7 +1,6 @@
package rollout
import (
- "context"
"encoding/json"
"fmt"
"os"
@@ -421,8 +420,11 @@ func TestResetCurrentStepIndexOnStepChange(t *testing.T) {
f.rolloutLister = append(f.rolloutLister, r2)
f.objects = append(f.objects, r2)
+ f.expectUpdateRolloutStatusAction(r2)
patchIndex := f.expectPatchRolloutAction(r2)
+ createRSIndex := f.expectCreateReplicaSetAction(rs1)
f.run(getKey(r2, t))
+ createdRS := f.getCreatedReplicaSet(createRSIndex)
patch := f.getPatchedRollout(patchIndex)
expectedPatchWithoutPodHash := `{
@@ -433,7 +435,7 @@ func TestResetCurrentStepIndexOnStepChange(t *testing.T) {
"conditions": %s
}
}`
- newConditions := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, r2, false, "", false)
+ newConditions := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, createdRS, false, "", false)
expectedPatch := fmt.Sprintf(expectedPatchWithoutPodHash, expectedCurrentPodHash, expectedCurrentStepHash, newConditions)
assert.JSONEq(t, calculatePatch(r2, expectedPatch), patch)
}
@@ -462,10 +464,15 @@ func TestResetCurrentStepIndexOnPodSpecChange(t *testing.T) {
f.rolloutLister = append(f.rolloutLister, r2)
f.objects = append(f.objects, r2)
+ f.expectUpdateRolloutStatusAction(r2)
patchIndex := f.expectPatchRolloutAction(r2)
+ createdRSIndex := f.expectCreateReplicaSetAction(rs1)
+
f.run(getKey(r2, t))
patch := f.getPatchedRollout(patchIndex)
+ updatedRS := f.getUpdatedReplicaSet(createdRSIndex)
+
expectedPatchWithoutPodHash := `{
"status": {
"currentStepIndex":0,
@@ -473,7 +480,7 @@ func TestResetCurrentStepIndexOnPodSpecChange(t *testing.T) {
"conditions": %s
}
}`
- newConditions := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, r2, false, "", false)
+ newConditions := generateConditionsPatch(true, conditions.ReplicaSetUpdatedReason, updatedRS, false, "", false)
expectedPatch := fmt.Sprintf(expectedPatchWithoutPodHash, expectedCurrentPodHash, newConditions)
assert.JSONEq(t, calculatePatch(r2, expectedPatch), patch)
@@ -1564,7 +1571,7 @@ func TestCanaryRolloutWithInvalidCanaryServiceName(t *testing.T) {
f.kubeobjects = append(f.kubeobjects, rs)
patchIndex := f.expectPatchRolloutAction(rollout)
- f.run(getKey(rollout, t))
+ f.runExpectError(getKey(rollout, t), true)
patch := make(map[string]any)
patchData := f.getPatchedRollout(patchIndex)
@@ -1616,7 +1623,7 @@ func TestCanaryRolloutWithInvalidStableServiceName(t *testing.T) {
f.kubeobjects = append(f.kubeobjects, rs)
patchIndex := f.expectPatchRolloutAction(rollout)
- f.run(getKey(rollout, t))
+ f.runExpectError(getKey(rollout, t), true)
patch := make(map[string]any)
patchData := f.getPatchedRollout(patchIndex)
@@ -1665,7 +1672,7 @@ func TestCanaryRolloutWithInvalidPingServiceName(t *testing.T) {
f.objects = append(f.objects, r)
patchIndex := f.expectPatchRolloutAction(r)
- f.run(getKey(r, t))
+ f.runExpectError(getKey(r, t), true)
patch := make(map[string]any)
patchData := f.getPatchedRollout(patchIndex)
@@ -1697,7 +1704,7 @@ func TestCanaryRolloutWithInvalidPongServiceName(t *testing.T) {
f.serviceLister = append(f.serviceLister, pingSvc)
patchIndex := f.expectPatchRolloutAction(r)
- f.run(getKey(r, t))
+ f.runExpectError(getKey(r, t), true)
patch := make(map[string]any)
patchData := f.getPatchedRollout(patchIndex)
@@ -1896,8 +1903,14 @@ func TestHandleNilNewRSOnScaleAndImageChange(t *testing.T) {
f.rolloutLister = append(f.rolloutLister, r2)
f.objects = append(f.objects, r2)
- f.expectUpdateReplicaSetAction(rs1)
+ f.expectUpdateRolloutStatusAction(r2)
+ f.expectPatchRolloutAction(r2)
patchIndex := f.expectPatchRolloutAction(r2)
+
+ f.expectCreateReplicaSetAction(rs1)
+ f.expectUpdateReplicaSetAction(rs1)
+ f.expectUpdateReplicaSetAction(rs1)
+
f.run(getKey(r2, t))
patch := f.getPatchedRollout(patchIndex)
assert.JSONEq(t, calculatePatch(r2, OnlyObservedGenerationPatch), patch)
@@ -2105,49 +2118,6 @@ func TestIsDynamicallyRollingBackToStable(t *testing.T) {
}
}
-func TestCanaryReplicaAndSpecChangedTogether(t *testing.T) {
- f := newFixture(t)
- defer f.Close()
-
- originReplicas := 3
- r1 := newCanaryRollout("foo", originReplicas, nil, nil, nil, intstr.FromInt(1), intstr.FromInt(0))
- canarySVCName := "canary"
- stableSVCName := "stable"
- r1.Spec.Strategy.Canary.CanaryService = canarySVCName
- r1.Spec.Strategy.Canary.StableService = stableSVCName
-
- stableRS := newReplicaSetWithStatus(r1, originReplicas, originReplicas)
- stableSVC := newService(stableSVCName, 80,
- map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: stableRS.Labels[v1alpha1.DefaultRolloutUniqueLabelKey]}, r1)
-
- r2 := bumpVersion(r1)
- canaryRS := newReplicaSetWithStatus(r2, originReplicas, originReplicas)
- canarySVC := newService(canarySVCName, 80,
- map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: canaryRS.Labels[v1alpha1.DefaultRolloutUniqueLabelKey]}, r2)
-
- f.replicaSetLister = append(f.replicaSetLister, canaryRS, stableRS)
- f.serviceLister = append(f.serviceLister, canarySVC, stableSVC)
-
- r3 := bumpVersion(r2)
- r3.Spec.Replicas = pointer.Int32(int32(originReplicas) + 5)
- r3.Status.StableRS = stableRS.Labels[v1alpha1.DefaultRolloutUniqueLabelKey]
- r3.Status.CurrentPodHash = canaryRS.Labels[v1alpha1.DefaultRolloutUniqueLabelKey]
-
- f.rolloutLister = append(f.rolloutLister, r3)
- f.kubeobjects = append(f.kubeobjects, canaryRS, stableRS, canarySVC, stableSVC)
- f.objects = append(f.objects, r3)
-
- ctrl, _, _ := f.newController(noResyncPeriodFunc)
- roCtx, err := ctrl.newRolloutContext(r3)
- assert.NoError(t, err)
- err = roCtx.reconcile()
- assert.NoError(t, err)
- updated, err := f.kubeclient.AppsV1().ReplicaSets(r3.Namespace).Get(context.Background(), canaryRS.Name, metav1.GetOptions{})
- assert.NoError(t, err)
- // check the canary one is updated
- assert.NotEqual(t, originReplicas, int(*updated.Spec.Replicas))
-}
-
func TestSyncRolloutWithConflictInScaleReplicaSet(t *testing.T) {
os.Setenv("ARGO_ROLLOUTS_LOG_RS_DIFF_CONFLICT", "true")
defer os.Unsetenv("ARGO_ROLLOUTS_LOG_RS_DIFF_CONFLICT")
diff --git a/rollout/context.go b/rollout/context.go
index c595a5718b..75949da4e3 100644
--- a/rollout/context.go
+++ b/rollout/context.go
@@ -1,14 +1,10 @@
package rollout
import (
- "time"
-
- log "github.com/sirupsen/logrus"
- appsv1 "k8s.io/api/apps/v1"
- "k8s.io/apimachinery/pkg/util/validation/field"
-
"github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
analysisutil "github.com/argoproj/argo-rollouts/utils/analysis"
+ log "github.com/sirupsen/logrus"
+ appsv1 "k8s.io/api/apps/v1"
)
type rolloutContext struct {
@@ -53,19 +49,7 @@ type rolloutContext struct {
}
func (c *rolloutContext) reconcile() error {
- // Get Rollout Validation errors
- err := c.getRolloutValidationErrors()
- if err != nil {
- if vErr, ok := err.(*field.Error); ok {
- // We want to frequently requeue rollouts with InvalidSpec errors, because the error
- // condition might be timing related (e.g. the Rollout was applied before the Service).
- c.enqueueRolloutAfter(c.rollout, 20*time.Second)
- return c.createInvalidRolloutCondition(vErr, c.rollout)
- }
- return err
- }
-
- err = c.checkPausedConditions()
+ err := c.checkPausedConditions()
if err != nil {
return err
}
diff --git a/rollout/controller.go b/rollout/controller.go
index 8c5c905ef1..06dd8312bf 100644
--- a/rollout/controller.go
+++ b/rollout/controller.go
@@ -534,6 +534,38 @@ func (c *Controller) newRolloutContext(rollout *v1alpha1.Rollout) (*rolloutConte
},
reconcilerBase: c.reconcilerBase,
}
+
+ // Get Rollout Validation errors
+ err = roCtx.getRolloutValidationErrors()
+ if err != nil {
+ if vErr, ok := err.(*field.Error); ok {
+ // We want to frequently requeue rollouts with InvalidSpec errors, because the error
+ // condition might be timing related (e.g. the Rollout was applied before the Service).
+ c.enqueueRolloutAfter(roCtx.rollout, 20*time.Second)
+ err := roCtx.createInvalidRolloutCondition(vErr, roCtx.rollout)
+ if err != nil {
+ return nil, err
+ }
+ return nil, vErr
+ }
+ return nil, err
+ }
+
+ if roCtx.newRS == nil {
+ roCtx.newRS, err = roCtx.createDesiredReplicaSet()
+ if err != nil {
+ return nil, err
+ }
+ roCtx.olderRSs = replicasetutil.FindOldReplicaSets(roCtx.rollout, rsList, roCtx.newRS)
+ roCtx.stableRS = replicasetutil.GetStableRS(roCtx.rollout, roCtx.newRS, roCtx.olderRSs)
+ roCtx.otherRSs = replicasetutil.GetOtherRSs(roCtx.rollout, roCtx.newRS, roCtx.stableRS, rsList)
+ roCtx.allRSs = append(rsList, roCtx.newRS)
+ err := roCtx.replicaSetInformer.GetIndexer().Add(roCtx.newRS)
+ if err != nil {
+ return nil, err
+ }
+ }
+
if rolloututil.IsFullyPromoted(rollout) && roCtx.pauseContext.IsAborted() {
logCtx.Warnf("Removing abort condition from fully promoted rollout")
roCtx.pauseContext.RemoveAbort()
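
With validation moved into newRolloutContext, an invalid spec now surfaces as `(nil, err)` from the constructor instead of a context whose later `reconcile()` fails; the InvalidSpec condition is written and the rollout re-enqueued (20s) before the error is returned, which is why many tests below switch from `f.run` to `f.runExpectError`. A minimal standalone sketch of the `*field.Error` type check the moved block relies on (`validate` is a stand-in for `getRolloutValidationErrors`, and the message text is made up):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation/field"
)

// validate stands in for roCtx.getRolloutValidationErrors(); a *field.Error
// signals an invalid spec rather than a transient failure.
func validate() error {
	return field.Invalid(field.NewPath("spec", "strategy"), nil, "exactly one strategy must be set")
}

func main() {
	err := validate()
	if vErr, ok := err.(*field.Error); ok {
		// invalid-spec path: the controller records the InvalidSpec condition
		// and requeues the rollout after 20 seconds, then returns the error
		fmt.Println("invalid spec:", vErr.Error())
		return
	}
	// anything else is treated as transient and bubbled up unchanged
	fmt.Println("transient error:", err)
}
```
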
@@ -992,7 +1024,7 @@ func (c *rolloutContext) updateReplicaSetFallbackToPatch(ctx context.Context, rs
}
if _, found := rs.Annotations[v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey]; found {
- patchRS.Annotations[v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey] = rs.Labels[v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey]
+ patchRS.Annotations[v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey] = rs.Annotations[v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey]
}
if _, found := rs.Spec.Selector.MatchLabels[v1alpha1.DefaultRolloutUniqueLabelKey]; found {
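
The one-line change in the second hunk reads the scale-down deadline from `rs.Annotations` instead of `rs.Labels`. The deadline is stored as an annotation, so the old lookup in Labels returned an empty string and the patched ReplicaSet lost its deadline, which is the "replicaSet not scaled down due to incorrect annotations" fix in the changelog. An illustrative sketch with a stand-in key name:

```go
package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// scaleDownDeadlineKey stands in for v1alpha1.DefaultReplicaSetScaleDownDeadlineAnnotationKey.
const scaleDownDeadlineKey = "scale-down-deadline"

func main() {
	rs := &appsv1.ReplicaSet{ObjectMeta: metav1.ObjectMeta{
		Labels:      map[string]string{"rollouts-pod-template-hash": "abc123"},
		Annotations: map[string]string{scaleDownDeadlineKey: "2024-08-12T00:00:00Z"},
	}}

	fmt.Printf("old lookup (Labels):        %q\n", rs.Labels[scaleDownDeadlineKey])      // "" -> deadline dropped
	fmt.Printf("fixed lookup (Annotations): %q\n", rs.Annotations[scaleDownDeadlineKey]) // the real deadline
}
```
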
diff --git a/rollout/controller_test.go b/rollout/controller_test.go
index e38fca79c5..2e848212fa 100644
--- a/rollout/controller_test.go
+++ b/rollout/controller_test.go
@@ -95,6 +95,7 @@ type fixture struct {
replicaSetLister []*appsv1.ReplicaSet
serviceLister []*corev1.Service
ingressLister []*ingressutil.Ingress
+ virtualServiceLister []*unstructured.Unstructured
// Actions expected to happen on the client.
kubeactions []core.Action
actions []core.Action
@@ -662,6 +663,9 @@ func (f *fixture) newController(resync resyncFunc) (*Controller, informers.Share
for _, ar := range f.analysisRunLister {
i.Argoproj().V1alpha1().AnalysisRuns().Informer().GetIndexer().Add(ar)
}
+ for _, vs := range f.virtualServiceLister {
+ c.IstioController.VirtualServiceInformer.GetIndexer().Add(vs)
+ }
return c, i, k8sI
}
@@ -1203,11 +1207,17 @@ func TestDontSyncRolloutsWithEmptyPodSelector(t *testing.T) {
f := newFixture(t)
defer f.Close()
- r := newBlueGreenRollout("foo", 1, nil, "", "")
+ r := newBlueGreenRollout("foo", 1, nil, "active", "")
+ activeSvc := newService("active", 80, nil, r)
f.rolloutLister = append(f.rolloutLister, r)
+ f.serviceLister = append(f.serviceLister, activeSvc)
f.objects = append(f.objects, r)
+ f.kubeobjects = append(f.kubeobjects, activeSvc)
+ f.expectUpdateRolloutStatusAction(r)
f.expectPatchRolloutAction(r)
+ f.expectCreateReplicaSetAction(&appsv1.ReplicaSet{})
+ f.expectUpdateReplicaSetAction(&appsv1.ReplicaSet{})
f.run(getKey(r, t))
}
@@ -1240,13 +1250,12 @@ func TestAdoptReplicaSet(t *testing.T) {
}
func TestRequeueStuckRollout(t *testing.T) {
+ //t.Skip("broken in the refactor")
rollout := func(progressingConditionReason string, rolloutCompleted bool, rolloutPaused bool, progressDeadlineSeconds *int32) *v1alpha1.Rollout {
- r := &v1alpha1.Rollout{
- Spec: v1alpha1.RolloutSpec{
- Replicas: pointer.Int32Ptr(0),
- ProgressDeadlineSeconds: progressDeadlineSeconds,
- },
- }
+ r := newCanaryRollout("foo", 0, nil, nil, nil, intstr.FromInt(0), intstr.FromInt(1))
+ r.Status.Conditions = nil
+ r.Spec.ProgressDeadlineSeconds = progressDeadlineSeconds
+ //r.Spec.Strategy.BlueGreen = &v1alpha1.BlueGreenStrategy{}
r.Generation = 123
if rolloutPaused {
r.Status.PauseConditions = []v1alpha1.PauseCondition{{
@@ -1255,6 +1264,8 @@ func TestRequeueStuckRollout(t *testing.T) {
}
if rolloutCompleted {
r.Status.ObservedGeneration = strconv.Itoa(int(r.Generation))
+ r.Status.StableRS = "fakesha"
+ r.Status.CurrentPodHash = "fakesha"
}
if progressingConditionReason != "" {
@@ -1310,9 +1321,14 @@ func TestRequeueStuckRollout(t *testing.T) {
for i := range tests {
test := tests[i]
t.Run(test.name, func(t *testing.T) {
+ savedRollout := test.rollout.DeepCopy()
f := newFixture(t)
defer f.Close()
c, _, _ := f.newController(noResyncPeriodFunc)
+ f.client.PrependReactor("*", "rollouts", func(action core.Action) (bool, runtime.Object, error) {
+ savedRollout.DeepCopyInto(test.rollout)
+ return true, savedRollout, nil
+ })
roCtx, err := c.newRolloutContext(test.rollout)
assert.NoError(t, err)
duration := roCtx.requeueStuckRollout(test.rollout.Status)
@@ -1336,7 +1352,10 @@ func TestSetReplicaToDefault(t *testing.T) {
f.rolloutLister = append(f.rolloutLister, r)
f.objects = append(f.objects, r)
+ f.expectUpdateRolloutStatusAction(r)
updateIndex := f.expectUpdateRolloutAction(r)
+ f.expectCreateReplicaSetAction(&appsv1.ReplicaSet{})
f.run(getKey(r, t))
updatedRollout := f.getUpdatedRollout(updateIndex)
assert.Equal(t, defaults.DefaultReplicas, *updatedRollout.Spec.Replicas)
@@ -1347,7 +1366,8 @@ func TestSwitchInvalidSpecMessage(t *testing.T) {
f := newFixture(t)
defer f.Close()
- r := newBlueGreenRollout("foo", 1, nil, "", "")
+ r := newBlueGreenRollout("foo", 1, nil, "active", "")
+ activeSvc := newService("active", 80, nil, r)
r.Spec.Selector = &metav1.LabelSelector{}
cond := conditions.NewRolloutCondition(v1alpha1.InvalidSpec, corev1.ConditionTrue, conditions.InvalidSpecReason, conditions.RolloutSelectAllMessage)
conditions.SetRolloutCondition(&r.Status, *cond)
@@ -1356,9 +1376,12 @@ func TestSwitchInvalidSpecMessage(t *testing.T) {
r.Spec.Selector = nil
f.rolloutLister = append(f.rolloutLister, r)
f.objects = append(f.objects, r)
+ f.kubeobjects = append(f.kubeobjects, activeSvc)
+ f.serviceLister = append(f.serviceLister, activeSvc)
patchIndex := f.expectPatchRolloutAction(r)
- f.run(getKey(r, t))
+ f.runExpectError(getKey(r, t), true)
expectedPatchWithoutSub := `{
"status": {
@@ -1602,8 +1625,7 @@ func newInvalidSpecCondition(reason string, resourceObj runtime.Object, optional
}
func TestGetReferencedAnalyses(t *testing.T) {
- f := newFixture(t)
- defer f.Close()
+ //f := newFixture(t)
rolloutAnalysisFail := v1alpha1.RolloutAnalysis{
Templates: []v1alpha1.AnalysisTemplateRef{{
@@ -1613,53 +1635,81 @@ func TestGetReferencedAnalyses(t *testing.T) {
}
t.Run("blueGreen pre-promotion analysis - fail", func(t *testing.T) {
+ f := newFixture(t)
+ defer f.Close()
r := newBlueGreenRollout("rollout", 1, nil, "active-service", "preview-service")
+ activeSvc := newService("active-service", 80, nil, r)
+ previewSvc := newService("preview-service", 80, nil, r)
+ f.objects = append(f.objects, r)
+ f.kubeobjects = append(f.kubeobjects, activeSvc, previewSvc)
+ f.rolloutLister = append(f.rolloutLister, r)
+ f.serviceLister = append(f.serviceLister, activeSvc, previewSvc)
r.Spec.Strategy.BlueGreen.PrePromotionAnalysis = &rolloutAnalysisFail
c, _, _ := f.newController(noResyncPeriodFunc)
roCtx, err := c.newRolloutContext(r)
- assert.NoError(t, err)
- _, err = roCtx.getReferencedRolloutAnalyses()
assert.NotNil(t, err)
+ assert.Nil(t, roCtx)
msg := "spec.strategy.blueGreen.prePromotionAnalysis.templates: Invalid value: \"does-not-exist\": AnalysisTemplate 'does-not-exist' not found"
assert.Equal(t, msg, err.Error())
})
t.Run("blueGreen post-promotion analysis - fail", func(t *testing.T) {
+ f := newFixture(t)
+ defer f.Close()
r := newBlueGreenRollout("rollout", 1, nil, "active-service", "preview-service")
+ activeSvc := newService("active-service", 80, nil, r)
+ previewSvc := newService("preview-service", 80, nil, r)
+ f.objects = append(f.objects, r)
+ f.kubeobjects = append(f.kubeobjects, activeSvc, previewSvc)
+ f.rolloutLister = append(f.rolloutLister, r)
+ f.serviceLister = append(f.serviceLister, activeSvc, previewSvc)
r.Spec.Strategy.BlueGreen.PostPromotionAnalysis = &rolloutAnalysisFail
c, _, _ := f.newController(noResyncPeriodFunc)
roCtx, err := c.newRolloutContext(r)
- assert.NoError(t, err)
- _, err = roCtx.getReferencedRolloutAnalyses()
assert.NotNil(t, err)
+ assert.Nil(t, roCtx)
msg := "spec.strategy.blueGreen.postPromotionAnalysis.templates: Invalid value: \"does-not-exist\": AnalysisTemplate 'does-not-exist' not found"
assert.Equal(t, msg, err.Error())
})
t.Run("canary analysis - fail", func(t *testing.T) {
+ f := newFixture(t)
+ defer f.Close()
r := newCanaryRollout("rollout-canary", 1, nil, nil, int32Ptr(0), intstr.FromInt(0), intstr.FromInt(1))
+ activeSvc := newService("active-service", 80, nil, r)
+ previewSvc := newService("preview-service", 80, nil, r)
+ f.objects = append(f.objects, r)
+ f.kubeobjects = append(f.kubeobjects, activeSvc, previewSvc)
+ f.rolloutLister = append(f.rolloutLister, r)
+ f.serviceLister = append(f.serviceLister, activeSvc, previewSvc)
r.Spec.Strategy.Canary.Analysis = &v1alpha1.RolloutAnalysisBackground{
RolloutAnalysis: rolloutAnalysisFail,
}
c, _, _ := f.newController(noResyncPeriodFunc)
roCtx, err := c.newRolloutContext(r)
- assert.NoError(t, err)
- _, err = roCtx.getReferencedRolloutAnalyses()
assert.NotNil(t, err)
+ assert.Nil(t, roCtx)
msg := "spec.strategy.canary.analysis.templates: Invalid value: \"does-not-exist\": AnalysisTemplate 'does-not-exist' not found"
assert.Equal(t, msg, err.Error())
})
t.Run("canary step analysis - fail", func(t *testing.T) {
+ f := newFixture(t)
+ defer f.Close()
canarySteps := []v1alpha1.CanaryStep{{
Analysis: &rolloutAnalysisFail,
}}
r := newCanaryRollout("rollout-canary", 1, nil, canarySteps, int32Ptr(0), intstr.FromInt(0), intstr.FromInt(1))
+ activeSvc := newService("active-service", 80, nil, r)
+ previewSvc := newService("preview-service", 80, nil, r)
+ f.objects = append(f.objects, r)
+ f.kubeobjects = append(f.kubeobjects, activeSvc, previewSvc)
+ f.rolloutLister = append(f.rolloutLister, r)
+ f.serviceLister = append(f.serviceLister, activeSvc, previewSvc)
c, _, _ := f.newController(noResyncPeriodFunc)
roCtx, err := c.newRolloutContext(r)
- assert.NoError(t, err)
- _, err = roCtx.getReferencedRolloutAnalyses()
assert.NotNil(t, err)
+ assert.Nil(t, roCtx)
msg := "spec.strategy.canary.steps[0].analysis.templates: Invalid value: \"does-not-exist\": AnalysisTemplate 'does-not-exist' not found"
assert.Equal(t, msg, err.Error())
})
@@ -1675,6 +1725,12 @@ func TestGetReferencedClusterAnalysisTemplate(t *testing.T) {
ClusterScope: true,
}},
}
+ activeSvc := newService("active-service", 80, nil, r)
+ previewSvc := newService("preview-service", 80, nil, r)
+ f.rolloutLister = append(f.rolloutLister, r)
+ f.objects = append(f.objects, r)
+ f.kubeobjects = append(f.kubeobjects, activeSvc, previewSvc)
+ f.serviceLister = append(f.serviceLister, activeSvc, previewSvc)
t.Run("get referenced analysisTemplate - fail", func(t *testing.T) {
c, _, _ := f.newController(noResyncPeriodFunc)
@@ -1706,6 +1762,12 @@ func TestGetInnerReferencedAnalysisTemplate(t *testing.T) {
}},
}
f.clusterAnalysisTemplateLister = append(f.clusterAnalysisTemplateLister, clusterAnalysisTemplateWithAnalysisRefs("first-cluster-analysis-template-name", "second-cluster-analysis-template-name", "third-cluster-analysis-template-name"))
+ activeSvc := newService("active-service", 80, nil, r)
+ previewSvc := newService("preview-service", 80, nil, r)
+ f.kubeobjects = append(f.kubeobjects, activeSvc, previewSvc)
+ f.serviceLister = append(f.serviceLister, activeSvc, previewSvc)
+ f.objects = append(f.objects, r)
+ f.rolloutLister = append(f.rolloutLister, r)
t.Run("get inner referenced analysisTemplate - fail", func(t *testing.T) {
c, _, _ := f.newController(noResyncPeriodFunc)
@@ -1751,14 +1813,19 @@ func TestGetReferencedIngressesALB(t *testing.T) {
},
}
r.Namespace = metav1.NamespaceDefault
+ stableSvc := newService("stable", 80, nil, r)
+ canarySvc := newService("canary", 80, nil, r)
+ r.Spec.Strategy.Canary.StableService = stableSvc.Name
+ r.Spec.Strategy.Canary.CanaryService = canarySvc.Name
+ f.kubeobjects = append(f.kubeobjects, stableSvc, canarySvc)
+ f.serviceLister = append(f.serviceLister, stableSvc, canarySvc)
+ f.objects = append(f.objects, r)
+ f.rolloutLister = append(f.rolloutLister, r)
t.Run("get referenced ALB ingress - fail", func(t *testing.T) {
c, _, _ := f.newController(noResyncPeriodFunc)
- roCtx, err := c.newRolloutContext(r)
- assert.NoError(t, err)
- _, err = roCtx.getReferencedIngresses()
- expectedErr := field.Invalid(field.NewPath("spec", "strategy", "canary", "trafficRouting", "alb", "ingress"), "alb-ingress-name", "ingress.extensions \"alb-ingress-name\" not found")
- assert.Equal(t, expectedErr.Error(), err.Error())
+ _, err := c.newRolloutContext(r)
+ assert.Error(t, err)
})
t.Run("get referenced ALB ingress - success", func(t *testing.T) {
@@ -1767,6 +1834,31 @@ func TestGetReferencedIngressesALB(t *testing.T) {
Name: "alb-ingress-name",
Namespace: metav1.NamespaceDefault,
},
+ Spec: extensionsv1beta1.IngressSpec{
+ IngressClassName: pointer.StringPtr("alb"),
+ Backend: &extensionsv1beta1.IngressBackend{
+ ServiceName: "active-service",
+ ServicePort: intstr.IntOrString{IntVal: 80},
+ },
+ Rules: []extensionsv1beta1.IngressRule{
+ {
+ Host: "example.com",
+ IngressRuleValue: extensionsv1beta1.IngressRuleValue{
+ HTTP: &extensionsv1beta1.HTTPIngressRuleValue{
+ Paths: []extensionsv1beta1.HTTPIngressPath{{
+ Path: "",
+ PathType: nil,
+ Backend: extensionsv1beta1.IngressBackend{
+ ServiceName: "stable",
+ ServicePort: intstr.IntOrString{IntVal: 80},
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
}
f.ingressLister = append(f.ingressLister, ingressutil.NewLegacyIngress(ingress))
c, _, _ := f.newController(noResyncPeriodFunc)
@@ -1829,6 +1921,7 @@ func TestGetReferencedIngressesALBMultiIngress(t *testing.T) {
},
}
+ //TODO: cleanup expectedErr
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
// clear fixture
@@ -1837,10 +1930,10 @@ func TestGetReferencedIngressesALBMultiIngress(t *testing.T) {
f.ingressLister = append(f.ingressLister, ing)
}
c, _, _ := f.newController(noResyncPeriodFunc)
- roCtx, err := c.newRolloutContext(r)
- assert.NoError(t, err)
- _, err = roCtx.getReferencedIngresses()
- assert.Equal(t, test.expectedErr.Error(), err.Error())
+ _, err := c.newRolloutContext(r)
+ assert.Error(t, err)
})
}
@@ -1852,15 +1945,78 @@ func TestGetReferencedIngressesALBMultiIngress(t *testing.T) {
Name: primaryIngress,
Namespace: metav1.NamespaceDefault,
},
+ Spec: extensionsv1beta1.IngressSpec{
+ IngressClassName: pointer.StringPtr("alb"),
+ Backend: &extensionsv1beta1.IngressBackend{
+ ServiceName: "active-service",
+ ServicePort: intstr.IntOrString{IntVal: 80},
+ },
+ Rules: []extensionsv1beta1.IngressRule{
+ {
+ Host: "example.com",
+ IngressRuleValue: extensionsv1beta1.IngressRuleValue{
+ HTTP: &extensionsv1beta1.HTTPIngressRuleValue{
+ Paths: []extensionsv1beta1.HTTPIngressPath{{
+ Path: "",
+ PathType: nil,
+ Backend: extensionsv1beta1.IngressBackend{
+ ServiceName: "active-service",
+ ServicePort: intstr.IntOrString{IntVal: 80},
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
}
ingressAdditional := &extensionsv1beta1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: addIngress,
Namespace: metav1.NamespaceDefault,
},
+ Spec: extensionsv1beta1.IngressSpec{
+ IngressClassName: pointer.StringPtr("alb"),
+ Backend: &extensionsv1beta1.IngressBackend{
+ ServiceName: "active-service",
+ ServicePort: intstr.IntOrString{IntVal: 80},
+ },
+ Rules: []extensionsv1beta1.IngressRule{
+ {
+ Host: "example.com",
+ IngressRuleValue: extensionsv1beta1.IngressRuleValue{
+ HTTP: &extensionsv1beta1.HTTPIngressRuleValue{
+ Paths: []extensionsv1beta1.HTTPIngressPath{{
+ Path: "",
+ PathType: nil,
+ Backend: extensionsv1beta1.IngressBackend{
+ ServiceName: "active-service",
+ ServicePort: intstr.IntOrString{IntVal: 80},
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
}
f.ingressLister = append(f.ingressLister, ingressutil.NewLegacyIngress(ingress))
f.ingressLister = append(f.ingressLister, ingressutil.NewLegacyIngress(ingressAdditional))
+ f.kubeobjects = append(f.objects, ingress, ingressAdditional)
+
+ activeSvc := newService("active-service", 80, nil, r)
+ previewSvc := newService("preview-service", 80, nil, r)
+ f.kubeobjects = append(f.kubeobjects, activeSvc, previewSvc)
+ f.serviceLister = append(f.serviceLister, activeSvc, previewSvc)
+
+ r.Spec.Strategy.Canary.CanaryService = previewSvc.Name
+ r.Spec.Strategy.Canary.StableService = activeSvc.Name
+
+ f.rolloutLister = append(f.rolloutLister, r)
+ f.objects = append(f.objects, r)
+
c, _, _ := f.newController(noResyncPeriodFunc)
roCtx, err := c.newRolloutContext(r)
assert.NoError(t, err)
@@ -1887,11 +2043,8 @@ func TestGetReferencedIngressesNginx(t *testing.T) {
// clear fixture
f.ingressLister = []*ingressutil.Ingress{}
c, _, _ := f.newController(noResyncPeriodFunc)
- roCtx, err := c.newRolloutContext(r)
- assert.NoError(t, err)
- _, err = roCtx.getReferencedIngresses()
- expectedErr := field.Invalid(field.NewPath("spec", "strategy", "canary", "trafficRouting", "nginx", "stableIngress"), primaryIngress, fmt.Sprintf("ingress.extensions \"%s\" not found", primaryIngress))
- assert.Equal(t, expectedErr.Error(), err.Error())
+ _, err := c.newRolloutContext(r)
+ assert.Error(t, err)
})
t.Run("get referenced Nginx ingress - success", func(t *testing.T) {
@@ -1902,8 +2055,46 @@ func TestGetReferencedIngressesNginx(t *testing.T) {
Name: primaryIngress,
Namespace: metav1.NamespaceDefault,
},
+ Spec: extensionsv1beta1.IngressSpec{
+ IngressClassName: pointer.StringPtr("alb"),
+ Backend: &extensionsv1beta1.IngressBackend{
+ ServiceName: "active-service",
+ ServicePort: intstr.IntOrString{IntVal: 80},
+ },
+ Rules: []extensionsv1beta1.IngressRule{
+ {
+ Host: "example.com",
+ IngressRuleValue: extensionsv1beta1.IngressRuleValue{
+ HTTP: &extensionsv1beta1.HTTPIngressRuleValue{
+ Paths: []extensionsv1beta1.HTTPIngressPath{{
+ Path: "",
+ PathType: nil,
+ Backend: extensionsv1beta1.IngressBackend{
+ ServiceName: "active-service",
+ ServicePort: intstr.IntOrString{IntVal: 80},
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
}
f.ingressLister = append(f.ingressLister, ingressutil.NewLegacyIngress(ingress))
+ f.kubeobjects = append(f.kubeobjects, ingress)
+
+ activeSvc := newService("active-service", 80, nil, r)
+ previewSvc := newService("preview-service", 80, nil, r)
+ f.kubeobjects = append(f.kubeobjects, activeSvc, previewSvc)
+ f.serviceLister = append(f.serviceLister, activeSvc, previewSvc)
+
+ r.Spec.Strategy.Canary.CanaryService = previewSvc.Name
+ r.Spec.Strategy.Canary.StableService = activeSvc.Name
+
+ f.rolloutLister = append(f.rolloutLister, r)
+ f.objects = append(f.objects, r)
+
c, _, _ := f.newController(noResyncPeriodFunc)
roCtx, err := c.newRolloutContext(r)
assert.NoError(t, err)
@@ -1970,10 +2161,8 @@ func TestGetReferencedIngressesNginxMultiIngress(t *testing.T) {
f.ingressLister = append(f.ingressLister, ing)
}
c, _, _ := f.newController(noResyncPeriodFunc)
- roCtx, err := c.newRolloutContext(r)
- assert.NoError(t, err)
- _, err = roCtx.getReferencedIngresses()
- assert.Equal(t, test.expectedErr.Error(), err.Error())
+ _, err := c.newRolloutContext(r)
+ assert.Error(t, err)
})
}
@@ -1985,15 +2174,77 @@ func TestGetReferencedIngressesNginxMultiIngress(t *testing.T) {
Name: primaryIngress,
Namespace: metav1.NamespaceDefault,
},
+ Spec: extensionsv1beta1.IngressSpec{
+ IngressClassName: pointer.StringPtr("alb"),
+ Backend: &extensionsv1beta1.IngressBackend{
+ ServiceName: "active-service",
+ ServicePort: intstr.IntOrString{IntVal: 80},
+ },
+ Rules: []extensionsv1beta1.IngressRule{
+ {
+ Host: "example.com",
+ IngressRuleValue: extensionsv1beta1.IngressRuleValue{
+ HTTP: &extensionsv1beta1.HTTPIngressRuleValue{
+ Paths: []extensionsv1beta1.HTTPIngressPath{{
+ Path: "",
+ PathType: nil,
+ Backend: extensionsv1beta1.IngressBackend{
+ ServiceName: "active-service",
+ ServicePort: intstr.IntOrString{IntVal: 80},
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
}
ingressAdditional := &extensionsv1beta1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: addIngress,
Namespace: metav1.NamespaceDefault,
},
+ Spec: extensionsv1beta1.IngressSpec{
+ IngressClassName: pointer.StringPtr("alb"),
+ Backend: &extensionsv1beta1.IngressBackend{
+ ServiceName: "active-service",
+ ServicePort: intstr.IntOrString{IntVal: 80},
+ },
+ Rules: []extensionsv1beta1.IngressRule{
+ {
+ Host: "example.com",
+ IngressRuleValue: extensionsv1beta1.IngressRuleValue{
+ HTTP: &extensionsv1beta1.HTTPIngressRuleValue{
+ Paths: []extensionsv1beta1.HTTPIngressPath{{
+ Path: "",
+ PathType: nil,
+ Backend: extensionsv1beta1.IngressBackend{
+ ServiceName: "active-service",
+ ServicePort: intstr.IntOrString{IntVal: 80},
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
}
f.ingressLister = append(f.ingressLister, ingressutil.NewLegacyIngress(ingress))
f.ingressLister = append(f.ingressLister, ingressutil.NewLegacyIngress(ingressAdditional))
+ f.kubeobjects = append(f.kubeobjects, ingress, ingressAdditional)
+
+ activeSvc := newService("active-service", 80, nil, r)
+ previewSvc := newService("preview-service", 80, nil, r)
+ f.kubeobjects = append(f.kubeobjects, activeSvc, previewSvc)
+ f.serviceLister = append(f.serviceLister, activeSvc, previewSvc)
+
+ r.Spec.Strategy.Canary.CanaryService = previewSvc.Name
+ r.Spec.Strategy.Canary.StableService = activeSvc.Name
+
+ f.objects = append(f.objects, r)
+
c, _, _ := f.newController(noResyncPeriodFunc)
roCtx, err := c.newRolloutContext(r)
assert.NoError(t, err)
@@ -2030,11 +2281,8 @@ func TestGetReferencedAppMeshResources(t *testing.T) {
c, _, _ := f.newController(noResyncPeriodFunc)
rCopy := r.DeepCopy()
rCopy.Spec.Strategy.Canary.TrafficRouting.AppMesh.VirtualService = nil
- roCtx, err := c.newRolloutContext(rCopy)
- assert.NoError(t, err)
- _, err = roCtx.getRolloutReferencedResources()
- expectedErr := field.Invalid(field.NewPath("spec", "strategy", "canary", "trafficRouting", "appmesh", "virtualService"), "null", "must provide virtual-service")
- assert.Equal(t, expectedErr.Error(), err.Error())
+ _, err := c.newRolloutContext(rCopy)
+ assert.Error(t, err)
})
t.Run("should return error when virtual-service is not-found", func(t *testing.T) {
@@ -2042,11 +2290,8 @@ func TestGetReferencedAppMeshResources(t *testing.T) {
defer f.Close()
c, _, _ := f.newController(noResyncPeriodFunc)
- roCtx, err := c.newRolloutContext(r)
- assert.NoError(t, err)
- _, err = roCtx.getRolloutReferencedResources()
- expectedErr := field.Invalid(field.NewPath("spec", "strategy", "canary", "trafficRouting", "appmesh", "virtualService"), "mysvc.default", "virtualservices.appmesh.k8s.aws \"mysvc\" not found")
- assert.Equal(t, expectedErr.Error(), err.Error())
+ _, err := c.newRolloutContext(r)
+ assert.Error(t, err)
})
t.Run("should return error when virtual-router is not-found", func(t *testing.T) {
@@ -2068,11 +2313,8 @@ spec:
uVsvc := unstructuredutil.StrToUnstructuredUnsafe(vsvc)
f.objects = append(f.objects, uVsvc)
c, _, _ := f.newController(noResyncPeriodFunc)
- roCtx, err := c.newRolloutContext(r)
- assert.NoError(t, err)
- _, err = roCtx.getRolloutReferencedResources()
- expectedErr := field.Invalid(field.NewPath("spec", "strategy", "canary", "trafficRouting", "appmesh", "virtualService"), "mysvc.default", "virtualrouters.appmesh.k8s.aws \"mysvc-vrouter\" not found")
- assert.Equal(t, expectedErr.Error(), err.Error())
+ _, err := c.newRolloutContext(r)
+ assert.Error(t, err)
})
t.Run("get referenced App Mesh - success", func(t *testing.T) {
@@ -2117,10 +2359,20 @@ spec:
name: mysvc-stable-vn
weight: 100
`
+ r := r.DeepCopy()
+ activeSvc := newService("active-service", 80, nil, r)
+ previewSvc := newService("preview-service", 80, nil, r)
+ f.kubeobjects = append(f.kubeobjects, activeSvc, previewSvc)
+ f.serviceLister = append(f.serviceLister, activeSvc, previewSvc)
+
+ r.Spec.Strategy.Canary.CanaryService = previewSvc.Name
+ r.Spec.Strategy.Canary.StableService = activeSvc.Name
uVsvc := unstructuredutil.StrToUnstructuredUnsafe(vsvc)
uVrouter := unstructuredutil.StrToUnstructuredUnsafe(vrouter)
f.objects = append(f.objects, uVsvc, uVrouter)
+ f.rolloutLister = append(f.rolloutLister, r)
+ f.objects = append(f.objects, r)
c, _, _ := f.newController(noResyncPeriodFunc)
roCtx, err := c.newRolloutContext(r)
assert.NoError(t, err)
@@ -2149,14 +2401,14 @@ func TestGetAmbassadorMappings(t *testing.T) {
},
}
r.Namespace = metav1.NamespaceDefault
- roCtx, err := c.newRolloutContext(r)
- assert.NoError(t, err)
-
- // when
- _, err = roCtx.getAmbassadorMappings()
-
- // then
+ _, err := c.newRolloutContext(r)
assert.Error(t, err)
})
}
@@ -2178,7 +2430,7 @@ func TestRolloutStrategyNotSet(t *testing.T) {
f.serviceLister = append(f.serviceLister, previewSvc, activeSvc)
patchIndex := f.expectPatchRolloutAction(r)
- f.run(getKey(r, t))
+ f.runExpectError(getKey(r, t), true)
patchedRollout := f.getPatchedRollout(patchIndex)
assert.Contains(t, patchedRollout, `Rollout has missing field '.spec.strategy.canary or .spec.strategy.blueGreen'`)
}
diff --git a/rollout/replicaset_test.go b/rollout/replicaset_test.go
index 2e61606c37..bf185a1c2c 100644
--- a/rollout/replicaset_test.go
+++ b/rollout/replicaset_test.go
@@ -365,14 +365,21 @@ func TestReconcileOldReplicaSet(t *testing.T) {
oldRS := rs("foo-old", test.oldReplicas, oldSelector, noTimestamp, nil)
oldRS.Annotations = map[string]string{annotations.DesiredReplicasAnnotation: strconv.Itoa(test.oldReplicas)}
oldRS.Status.AvailableReplicas = int32(test.readyPodsFromOldRS)
- rollout := newBlueGreenRollout("foo", test.rolloutReplicas, nil, "", "")
+
+ rollout := newBlueGreenRollout("foo", test.rolloutReplicas, nil, "active-service", "preview-service")
rollout.Spec.Strategy.BlueGreen.ScaleDownDelayRevisionLimit = pointer.Int32Ptr(0)
rollout.Spec.Selector = &metav1.LabelSelector{MatchLabels: newSelector}
+
+ activeService := newService("active-service", 80, nil, nil)
+ previewService := newService("preview-service", 80, nil, nil)
+ rollout.Spec.Template.Labels["foo"] = "new"
+
f := newFixture(t)
defer f.Close()
f.objects = append(f.objects, rollout)
f.replicaSetLister = append(f.replicaSetLister, oldRS, newRS)
- f.kubeobjects = append(f.kubeobjects, oldRS, newRS)
+ f.serviceLister = append(f.serviceLister, activeService, previewService)
+ f.kubeobjects = append(f.kubeobjects, oldRS, newRS, activeService, previewService)
c, informers, _ := f.newController(noResyncPeriodFunc)
stopCh := make(chan struct{})
informers.Start(stopCh)
diff --git a/rollout/service.go b/rollout/service.go
index c0904ffd23..0ed9ffbb4b 100644
--- a/rollout/service.go
+++ b/rollout/service.go
@@ -130,7 +130,7 @@ func (c *rolloutContext) areTargetsVerified() bool {
// by an ALB Ingress, which can be determined if there exists a TargetGroupBinding object in the
// namespace that references the given service
func (c *rolloutContext) awsVerifyTargetGroups(svc *corev1.Service) error {
- if !c.shouldVerifyTargetGroup(svc) {
+ if !shouldVerifyTargetGroup(c.rollout, c.newRS, svc) {
return nil
}
logCtx := c.log.WithField(logutil.ServiceKey, svc.Name)
@@ -182,14 +182,14 @@ func (c *rolloutContext) awsVerifyTargetGroups(svc *corev1.Service) error {
}
// shouldVerifyTargetGroup returns whether or not we should verify the target group
-func (c *rolloutContext) shouldVerifyTargetGroup(svc *corev1.Service) bool {
+func shouldVerifyTargetGroup(rollout *v1alpha1.Rollout, newRS *appsv1.ReplicaSet, svc *corev1.Service) bool {
if !defaults.VerifyTargetGroup() {
// feature is disabled
return false
}
- desiredPodHash := c.newRS.Labels[v1alpha1.DefaultRolloutUniqueLabelKey]
- if c.rollout.Spec.Strategy.BlueGreen != nil {
- if c.rollout.Status.StableRS == desiredPodHash {
+ desiredPodHash := newRS.Labels[v1alpha1.DefaultRolloutUniqueLabelKey]
+ if rollout.Spec.Strategy.BlueGreen != nil {
+ if rollout.Status.StableRS == desiredPodHash {
// for blue-green, we only verify targets right after switching active service. So if
// we are fully promoted, then there is no need to verify targets.
// NOTE: this is the opposite of canary, where we only verify targets if stable == desired
@@ -200,17 +200,17 @@ func (c *rolloutContext) shouldVerifyTargetGroup(svc *corev1.Service) bool {
// we have not yet switched service selector
return false
}
- if c.rollout.Status.BlueGreen.PostPromotionAnalysisRunStatus != nil {
+ if rollout.Status.BlueGreen.PostPromotionAnalysisRunStatus != nil {
// we already started post-promotion analysis, so verification already occurred
return false
}
return true
- } else if c.rollout.Spec.Strategy.Canary != nil {
- if c.rollout.Spec.Strategy.Canary.TrafficRouting == nil || c.rollout.Spec.Strategy.Canary.TrafficRouting.ALB == nil {
+ } else if rollout.Spec.Strategy.Canary != nil {
+ if rollout.Spec.Strategy.Canary.TrafficRouting == nil || rollout.Spec.Strategy.Canary.TrafficRouting.ALB == nil {
// not ALB canary, so no need to verify targets
return false
}
- if c.rollout.Status.StableRS != desiredPodHash {
+ if rollout.Status.StableRS != desiredPodHash {
// for canary, we only verify targets right after switching stable service, which happens
// after the update. So if stable != desired, we are still in the middle of an update
// and there is no need to verify targets.
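
Making shouldVerifyTargetGroup a free function of the Rollout, the new ReplicaSet and the Service lets the decision be exercised without building a rolloutContext, which is exactly what the reworked TestShouldVerifyTargetGroups below does. As a rough standalone illustration of the asymmetry the comments describe (simplified; the real function also checks the feature gate, the active-service selector and post-promotion analysis):

```go
package main

import "fmt"

// verifyAfterSwitch is an illustrative reduction of the real check: blue-green
// verifies while stable != desired (right after the active-service switch),
// while ALB canary verifies only once stable == desired (right after the
// stable-service switch), the opposite condition.
func verifyAfterSwitch(strategy, stableRS, desiredPodHash string) bool {
	switch strategy {
	case "blueGreen":
		return stableRS != desiredPodHash // fully promoted means already verified
	case "canaryALB":
		return stableRS == desiredPodHash // mid-update means nothing to verify yet
	default:
		return false
	}
}

func main() {
	fmt.Println(verifyAfterSwitch("blueGreen", "old-hash", "new-hash")) // true
	fmt.Println(verifyAfterSwitch("blueGreen", "new-hash", "new-hash")) // false
	fmt.Println(verifyAfterSwitch("canaryALB", "new-hash", "new-hash")) // true
	fmt.Println(verifyAfterSwitch("canaryALB", "old-hash", "new-hash")) // false
}
```
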
diff --git a/rollout/service_test.go b/rollout/service_test.go
index 0466634bde..21e7401653 100644
--- a/rollout/service_test.go
+++ b/rollout/service_test.go
@@ -13,7 +13,6 @@ import (
"github.com/stretchr/testify/mock"
corev1 "k8s.io/api/core/v1"
extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
- "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/util/intstr"
@@ -53,7 +52,6 @@ func newService(name string, port int, selector map[string]string, ro *v1alpha1.
}
func TestGetPreviewAndActiveServices(t *testing.T) {
-
f := newFixture(t)
defer f.Close()
expActive := newService("active", 80, nil, nil)
@@ -66,19 +64,16 @@ func TestGetPreviewAndActiveServices(t *testing.T) {
otherRoSvc := newService("other-svc", 80, nil, otherRo)
f.kubeobjects = append(f.kubeobjects, expActive, expPreview, otherRoSvc)
f.serviceLister = append(f.serviceLister, expActive, expPreview, otherRoSvc)
- rollout := &v1alpha1.Rollout{
- ObjectMeta: metav1.ObjectMeta{
- Namespace: metav1.NamespaceDefault,
- },
- Spec: v1alpha1.RolloutSpec{
- Strategy: v1alpha1.RolloutStrategy{
- BlueGreen: &v1alpha1.BlueGreenStrategy{
- PreviewService: "preview",
- ActiveService: "active",
- },
- },
+ rollout := newRollout("foo", 1, nil, map[string]string{"foo": "bar"})
+ rollout.Spec.Strategy = v1alpha1.RolloutStrategy{
+ BlueGreen: &v1alpha1.BlueGreenStrategy{
+ PreviewService: "preview",
+ ActiveService: "active",
},
}
+ f.rolloutLister = append(f.rolloutLister, rollout)
+ f.objects = append(f.objects, rollout)
+
c, _, _ := f.newController(noResyncPeriodFunc)
t.Run("Get Both", func(t *testing.T) {
roCtx, err := c.newRolloutContext(rollout)
@@ -91,20 +86,14 @@ func TestGetPreviewAndActiveServices(t *testing.T) {
t.Run("Preview not found", func(t *testing.T) {
noPreviewSvcRollout := rollout.DeepCopy()
noPreviewSvcRollout.Spec.Strategy.BlueGreen.PreviewService = "not-preview"
- roCtx, err := c.newRolloutContext(noPreviewSvcRollout)
- assert.NoError(t, err)
- _, _, err = roCtx.getPreviewAndActiveServices()
- assert.NotNil(t, err)
- assert.True(t, errors.IsNotFound(err))
+ _, err := c.newRolloutContext(noPreviewSvcRollout)
+ assert.Error(t, err)
})
t.Run("Active not found", func(t *testing.T) {
noActiveSvcRollout := rollout.DeepCopy()
noActiveSvcRollout.Spec.Strategy.BlueGreen.ActiveService = "not-active"
- roCtx, err := c.newRolloutContext(noActiveSvcRollout)
- assert.NoError(t, err)
- _, _, err = roCtx.getPreviewAndActiveServices()
- assert.NotNil(t, err)
- assert.True(t, errors.IsNotFound(err))
+ _, err := c.newRolloutContext(noActiveSvcRollout)
+ assert.Error(t, err)
})
t.Run("Invalid Spec: No Active Svc", func(t *testing.T) {
@@ -132,7 +121,7 @@ func TestActiveServiceNotFound(t *testing.T) {
f.serviceLister = append(f.serviceLister, previewSvc)
patchIndex := f.expectPatchRolloutAction(r)
- f.run(getKey(r, t))
+ f.runExpectError(getKey(r, t), true)
patch := f.getPatchedRollout(patchIndex)
errmsg := "The Rollout \"foo\" is invalid: spec.strategy.blueGreen.activeService: Invalid value: \"active-svc\": service \"active-svc\" not found"
@@ -160,7 +149,7 @@ func TestPreviewServiceNotFound(t *testing.T) {
f.kubeobjects = append(f.kubeobjects, activeSvc)
patchIndex := f.expectPatchRolloutAction(r)
- f.run(getKey(r, t))
+ f.runExpectError(getKey(r, t), true)
patch := f.getPatchedRollout(patchIndex)
errmsg := "The Rollout \"foo\" is invalid: spec.strategy.blueGreen.previewService: Invalid value: \"preview-svc\": service \"preview-svc\" not found"
@@ -679,17 +668,12 @@ func TestShouldVerifyTargetGroups(t *testing.T) {
defaults.SetVerifyTargetGroup(true)
defer defaults.SetVerifyTargetGroup(false)
- f := newFixture(t)
- defer f.Close()
- ctrl, _, _ := f.newController(noResyncPeriodFunc)
-
t.Run("CanaryNotUsingTrafficRouting", func(t *testing.T) {
ro := newCanaryRollout("foo", 3, nil, nil, nil, intstr.FromString("25%"), intstr.FromString("25%"))
- roCtx, err := ctrl.newRolloutContext(ro)
- roCtx.newRS = newReplicaSetWithStatus(ro, 3, 3)
- stableSvc := newService("stable", 80, map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: roCtx.newRS.Labels[v1alpha1.DefaultRolloutUniqueLabelKey]}, ro)
- assert.NoError(t, err)
- assert.False(t, roCtx.shouldVerifyTargetGroup(stableSvc))
+
+ newRS := newReplicaSetWithStatus(ro, 3, 3)
+ stableSvc := newService("stable", 80, map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: newRS.Labels[v1alpha1.DefaultRolloutUniqueLabelKey]}, ro)
+ assert.False(t, shouldVerifyTargetGroup(ro, newRS, stableSvc))
})
t.Run("CanaryNotFullyPromoted", func(t *testing.T) {
ro := newCanaryRollout("foo", 3, nil, nil, nil, intstr.FromString("25%"), intstr.FromString("25%"))
@@ -698,12 +682,10 @@ func TestShouldVerifyTargetGroups(t *testing.T) {
Ingress: "ingress",
},
}
- roCtx, err := ctrl.newRolloutContext(ro)
- roCtx.newRS = newReplicaSetWithStatus(ro, 3, 3)
- stableSvc := newService("stable", 80, map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: roCtx.newRS.Labels[v1alpha1.DefaultRolloutUniqueLabelKey]}, ro)
+ newRS := newReplicaSetWithStatus(ro, 3, 3)
+ stableSvc := newService("stable", 80, map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: newRS.Labels[v1alpha1.DefaultRolloutUniqueLabelKey]}, ro)
ro.Status.StableRS = "somethingelse"
- assert.NoError(t, err)
- assert.False(t, roCtx.shouldVerifyTargetGroup(stableSvc))
+ assert.False(t, shouldVerifyTargetGroup(ro, newRS, stableSvc))
})
t.Run("CanaryFullyPromoted", func(t *testing.T) {
ro := newCanaryRollout("foo", 3, nil, nil, nil, intstr.FromString("25%"), intstr.FromString("25%"))
@@ -712,49 +694,40 @@ func TestShouldVerifyTargetGroups(t *testing.T) {
Ingress: "ingress",
},
}
- roCtx, err := ctrl.newRolloutContext(ro)
- roCtx.newRS = newReplicaSetWithStatus(ro, 3, 3)
- stableSvc := newService("stable", 80, map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: roCtx.newRS.Labels[v1alpha1.DefaultRolloutUniqueLabelKey]}, ro)
- ro.Status.StableRS = roCtx.newRS.Labels[v1alpha1.DefaultRolloutUniqueLabelKey]
- assert.NoError(t, err)
- assert.True(t, roCtx.shouldVerifyTargetGroup(stableSvc))
+ newRS := newReplicaSetWithStatus(ro, 3, 3)
+ stableSvc := newService("stable", 80, map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: newRS.Labels[v1alpha1.DefaultRolloutUniqueLabelKey]}, ro)
+ ro.Status.StableRS = newRS.Labels[v1alpha1.DefaultRolloutUniqueLabelKey]
+ assert.True(t, shouldVerifyTargetGroup(ro, newRS, stableSvc))
})
t.Run("BlueGreenFullyPromoted", func(t *testing.T) {
ro := newBlueGreenRollout("foo", 3, nil, "active-svc", "")
- roCtx, err := ctrl.newRolloutContext(ro)
- roCtx.newRS = newReplicaSetWithStatus(ro, 3, 3)
- activeSvc := newService("active-svc", 80, map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: roCtx.newRS.Labels[v1alpha1.DefaultRolloutUniqueLabelKey]}, ro)
- ro.Status.StableRS = roCtx.newRS.Labels[v1alpha1.DefaultRolloutUniqueLabelKey]
- assert.NoError(t, err)
- assert.False(t, roCtx.shouldVerifyTargetGroup(activeSvc))
+
+ newRS := newReplicaSetWithStatus(ro, 3, 3)
+ activeSvc := newService("active-svc", 80, map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: newRS.Labels[v1alpha1.DefaultRolloutUniqueLabelKey]}, ro)
+ ro.Status.StableRS = newRS.Labels[v1alpha1.DefaultRolloutUniqueLabelKey]
+ assert.False(t, shouldVerifyTargetGroup(ro, newRS, activeSvc))
})
t.Run("BlueGreenBeforePromotion", func(t *testing.T) {
ro := newBlueGreenRollout("foo", 3, nil, "active-svc", "")
- roCtx, err := ctrl.newRolloutContext(ro)
- roCtx.newRS = newReplicaSetWithStatus(ro, 3, 3)
+ newRS := newReplicaSetWithStatus(ro, 3, 3)
activeSvc := newService("active-svc", 80, map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: "oldrshash"}, ro)
ro.Status.StableRS = "oldrshash"
- assert.NoError(t, err)
- assert.False(t, roCtx.shouldVerifyTargetGroup(activeSvc))
+ assert.False(t, shouldVerifyTargetGroup(ro, newRS, activeSvc))
})
t.Run("BlueGreenAfterPromotionAfterPromotionAnalysisStarted", func(t *testing.T) {
ro := newBlueGreenRollout("foo", 3, nil, "active-svc", "")
- roCtx, err := ctrl.newRolloutContext(ro)
- roCtx.newRS = newReplicaSetWithStatus(ro, 3, 3)
- activeSvc := newService("active-svc", 80, map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: roCtx.newRS.Labels[v1alpha1.DefaultRolloutUniqueLabelKey]}, ro)
+ newRS := newReplicaSetWithStatus(ro, 3, 3)
+ activeSvc := newService("active-svc", 80, map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: newRS.Labels[v1alpha1.DefaultRolloutUniqueLabelKey]}, ro)
ro.Status.StableRS = "oldrshash"
ro.Status.BlueGreen.PostPromotionAnalysisRunStatus = &v1alpha1.RolloutAnalysisRunStatus{}
- assert.NoError(t, err)
- assert.False(t, roCtx.shouldVerifyTargetGroup(activeSvc))
+ assert.False(t, shouldVerifyTargetGroup(ro, newRS, activeSvc))
})
t.Run("BlueGreenAfterPromotion", func(t *testing.T) {
ro := newBlueGreenRollout("foo", 3, nil, "active-svc", "")
- roCtx, err := ctrl.newRolloutContext(ro)
- roCtx.newRS = newReplicaSetWithStatus(ro, 3, 3)
- activeSvc := newService("active-svc", 80, map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: roCtx.newRS.Labels[v1alpha1.DefaultRolloutUniqueLabelKey]}, ro)
+ newRS := newReplicaSetWithStatus(ro, 3, 3)
+ activeSvc := newService("active-svc", 80, map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: newRS.Labels[v1alpha1.DefaultRolloutUniqueLabelKey]}, ro)
ro.Status.StableRS = "oldrshash"
- assert.NoError(t, err)
- assert.True(t, roCtx.shouldVerifyTargetGroup(activeSvc))
+ assert.True(t, shouldVerifyTargetGroup(ro, newRS, activeSvc))
})
}
@@ -773,6 +746,9 @@ func TestDelayCanaryStableServiceLabelInjection(t *testing.T) {
f.kubeobjects = append(f.kubeobjects, canarySvc, stableSvc)
f.serviceLister = append(f.serviceLister, canarySvc, stableSvc)
+ f.objects = append(f.objects, ro2)
+ f.rolloutLister = append(f.rolloutLister, ro2)
+
{
// first ensure we don't update service because new/stable are both not available
ctrl, _, _ := f.newController(noResyncPeriodFunc)
@@ -840,6 +816,9 @@ func TestDelayCanaryStableServiceDelayOnAdoptedService(t *testing.T) {
f.kubeobjects = append(f.kubeobjects, canarySvc, stableSvc)
f.serviceLister = append(f.serviceLister, canarySvc, stableSvc)
+ f.objects = append(f.objects, ro2)
+ f.rolloutLister = append(f.rolloutLister, ro2)
+
t.Run("AdoptedService No Availability", func(t *testing.T) {
// first ensure we don't update service because new/stable are both not available
ctrl, _, _ := f.newController(noResyncPeriodFunc)
diff --git a/rollout/sync.go b/rollout/sync.go
index a2d4a0d1e7..541657ff17 100644
--- a/rollout/sync.go
+++ b/rollout/sync.go
@@ -42,18 +42,13 @@ import (
//
// Note that currently the rollout controller is using caches to avoid querying the server for reads.
// This may lead to stale reads of replica sets, thus an incorrect rollout status.
-func (c *rolloutContext) getAllReplicaSetsAndSyncRevision(createIfNotExisted bool) (*appsv1.ReplicaSet, error) {
+func (c *rolloutContext) getAllReplicaSetsAndSyncRevision() (*appsv1.ReplicaSet, error) {
// Get new replica set with the updated revision number
newRS, err := c.syncReplicaSetRevision()
if err != nil {
return nil, err
}
- if newRS == nil && createIfNotExisted {
- newRS, err = c.createDesiredReplicaSet()
- if err != nil {
- return nil, err
- }
- }
+
return newRS, nil
}
@@ -277,7 +272,7 @@ func (c *rolloutContext) createDesiredReplicaSet() (*appsv1.ReplicaSet, error) {
func (c *rolloutContext) syncReplicasOnly() error {
c.log.Infof("Syncing replicas only due to scaling event")
var err error
- c.newRS, err = c.getAllReplicaSetsAndSyncRevision(false)
+ c.newRS, err = c.getAllReplicaSetsAndSyncRevision()
if err != nil {
return fmt.Errorf("failed to getAllReplicaSetsAndSyncRevision in syncReplicasOnly: %w", err)
}
@@ -324,7 +319,7 @@ func (c *rolloutContext) syncReplicasOnly() error {
// rsList should come from getReplicaSetsForRollout(r).
func (c *rolloutContext) isScalingEvent() (bool, error) {
var err error
- c.newRS, err = c.getAllReplicaSetsAndSyncRevision(false)
+ c.newRS, err = c.getAllReplicaSetsAndSyncRevision()
if err != nil {
return false, fmt.Errorf("failed to getAllReplicaSetsAndSyncRevision in isScalingEvent: %w", err)
}
@@ -729,7 +724,7 @@ func (c *rolloutContext) calculateRolloutConditions(newStatus v1alpha1.RolloutSt
if conditions.SetRolloutCondition(&newStatus, *updateCompletedCond) {
revision, _ := replicasetutil.Revision(c.rollout)
c.recorder.Eventf(c.rollout, record.EventOptions{EventReason: conditions.RolloutNotCompletedReason},
- conditions.RolloutNotCompletedMessage, revision+1, newStatus.CurrentPodHash)
+ conditions.RolloutNotCompletedMessage, revision, newStatus.CurrentPodHash)
}
}
diff --git a/rollout/trafficrouting_test.go b/rollout/trafficrouting_test.go
index f358a18f64..bc8a0a7672 100644
--- a/rollout/trafficrouting_test.go
+++ b/rollout/trafficrouting_test.go
@@ -7,6 +7,10 @@ import (
"testing"
"time"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
"github.com/argoproj/argo-rollouts/rollout/trafficrouting/apisix"
"github.com/stretchr/testify/assert"
@@ -1132,6 +1136,34 @@ func TestRolloutReplicaIsAvailableAndGenerationNotBeModifiedShouldModifyVirtualS
},
}
r1 := newCanaryRollout("foo", 1, nil, steps, pointer.Int32(1), intstr.FromInt(1), intstr.FromInt(1))
+ vs := istio.VirtualService{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test",
+ Namespace: r1.Namespace,
+ },
+ Spec: istio.VirtualServiceSpec{
+ HTTP: []istio.VirtualServiceHTTPRoute{{
+ Name: "primary",
+ Route: []istio.VirtualServiceRouteDestination{{
+ Destination: istio.VirtualServiceDestination{
+ Host: "stable",
+ //Port: &istio.Port{
+ // Number: 80,
+ //},
+ },
+ Weight: 100,
+ }, {
+ Destination: istio.VirtualServiceDestination{
+ Host: "canary",
+ //Port: &istio.Port{
+ // Number: 80,
+ //},
+ },
+ Weight: 0,
+ }},
+ }},
+ },
+ }
r1.Spec.Strategy.Canary.TrafficRouting = &v1alpha1.RolloutTrafficRouting{
Istio: &v1alpha1.IstioTrafficRouting{
VirtualService: &v1alpha1.IstioVirtualService{
@@ -1140,11 +1172,6 @@ func TestRolloutReplicaIsAvailableAndGenerationNotBeModifiedShouldModifyVirtualS
"primary",
},
},
- DestinationRule: &v1alpha1.IstioDestinationRule{
- Name: "test",
- StableSubsetName: "stable",
- CanarySubsetName: "canary",
- },
},
ManagedRoutes: []v1alpha1.MangedRoutes{
{
@@ -1157,8 +1184,14 @@ func TestRolloutReplicaIsAvailableAndGenerationNotBeModifiedShouldModifyVirtualS
APIVersion: "apps/v1",
Kind: "Deployment",
}
- r1.Spec.SelectorResolvedFromRef = true
+ r1.Spec.SelectorResolvedFromRef = false
r1.Spec.TemplateResolvedFromRef = true
+ r1.Spec.Strategy.Canary.CanaryService = "canary"
+ r1.Spec.Strategy.Canary.StableService = "stable"
+ r1.Spec.Selector = &metav1.LabelSelector{
+ MatchLabels: map[string]string{"app": "test"},
+ }
+ r1.Labels = map[string]string{v1alpha1.DefaultRolloutUniqueLabelKey: "testsha"}
r2 := bumpVersion(r1)
// if WorkloadRef is set, it does not change the generation
@@ -1185,13 +1218,26 @@ func TestRolloutReplicaIsAvailableAndGenerationNotBeModifiedShouldModifyVirtualS
PodTemplateHash: rs1PodHash,
},
}
+
+ mapObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&vs)
+ assert.Nil(t, err)
+
+ unstructuredObj := &unstructured.Unstructured{Object: mapObj}
+ unstructuredObj.SetGroupVersionKind(schema.GroupVersionKind{
+ Group: istioutil.GetIstioVirtualServiceGVR().Group,
+ Version: istioutil.GetIstioVirtualServiceGVR().Version,
+ Kind: "VirtualService",
+ })
+
f.kubeobjects = append(f.kubeobjects, rs1, rs2, canarySvc, stableSvc)
+ f.serviceLister = append(f.serviceLister, canarySvc, stableSvc)
f.replicaSetLister = append(f.replicaSetLister, rs1, rs2)
f.rolloutLister = append(f.rolloutLister, r2)
- f.objects = append(f.objects, r2)
+ f.objects = append(f.objects, r2, unstructuredObj)
+ f.expectUpdateRolloutAction(r2)
+ f.expectUpdateRolloutStatusAction(r2)
f.expectPatchRolloutAction(r2)
- f.expectPatchReplicaSetAction(rs1)
- f.expectPatchReplicaSetAction(rs2)
+ f.expectCreateReplicaSetAction(rs2)
f.fakeTrafficRouting = newUnmockedFakeTrafficRoutingReconciler()
f.fakeTrafficRouting.On("SetHeaderRoute", &v1alpha1.SetHeaderRoute{
Name: "test-header",
diff --git a/test/e2e/canary_test.go b/test/e2e/canary_test.go
index 7bf3af22c8..7c39f6e3bd 100644
--- a/test/e2e/canary_test.go
+++ b/test/e2e/canary_test.go
@@ -604,6 +604,7 @@ func (s *CanarySuite) TestCanaryWithPausedRollout() {
WaitForRolloutStatus("Paused").
UpdateSpec(). // update to revision 3
WaitForRolloutStatus("Paused").
+ Sleep(1*time.Second).
Then().
ExpectRevisionPodCount("1", 3).
ExpectRevisionPodCount("2", 0).
diff --git a/test/e2e/functional_test.go b/test/e2e/functional_test.go
index 668c75bb0c..8cc95f9914 100644
--- a/test/e2e/functional_test.go
+++ b/test/e2e/functional_test.go
@@ -91,26 +91,26 @@ spec:
ExpectRevisionPodCount("2", 1).
ExpectRolloutEvents([]string{
"RolloutAddedToInformer", // Rollout added to informer cache
- "RolloutNotCompleted", // Rollout not completed, started update to revision 0 (7fd9b5545c)
"RolloutUpdated", // Rollout updated to revision 1
"NewReplicaSetCreated", // Created ReplicaSet abort-retry-promote-698fbfb9dc (revision 1)
+ "RolloutNotCompleted", // Rollout not completed, started update to revision 2 (7fd9b5545c)
"ScalingReplicaSet", // Scaled up ReplicaSet abort-retry-promote-698fbfb9dc (revision 1) from 0 to 1
"RolloutCompleted", // Rollout completed update to revision 1 (698fbfb9dc): Initial deploy
- "RolloutNotCompleted",
- "RolloutUpdated", // Rollout updated to revision 2
- "NewReplicaSetCreated", // Created ReplicaSet abort-retry-promote-75dcb5ddd6 (revision 2)
- "ScalingReplicaSet", // Scaled up ReplicaSet abort-retry-promote-75dcb5ddd6 (revision 2) from 0 to 1
- "RolloutStepCompleted", // Rollout step 1/2 completed (setWeight: 50)
- "RolloutPaused", // Rollout is paused (CanaryPauseStep)
- "ScalingReplicaSet", // Scaled down ReplicaSet abort-retry-promote-75dcb5ddd6 (revision 2) from 1 to 0
- "RolloutAborted", // Rollout aborted update to revision 2
- "ScalingReplicaSet", // Scaled up ReplicaSet abort-retry-promote-75dcb5ddd6 (revision 2) from 0 to 1
- "RolloutStepCompleted", // Rollout step 1/2 completed (setWeight: 50)
- "RolloutPaused", // Rollout is paused (CanaryPauseStep)
- "RolloutStepCompleted", // Rollout step 2/2 completed (pause: 3s)
- "RolloutResumed", // Rollout is resumed
- "ScalingReplicaSet", // Scaled down ReplicaSet abort-retry-promote-698fbfb9dc (revision 1) from 1 to 0
- "RolloutCompleted", // Rollout completed update to revision 2 (75dcb5ddd6): Completed all 2 canary steps
+ "RolloutUpdated", // Rollout updated to revision 2
+ "NewReplicaSetCreated", // Created ReplicaSet abort-retry-promote-75dcb5ddd6 (revision 2)
+ "RolloutNotCompleted", // Rollout not completed, started update to revision 3 (5bb7978cd)
+ "ScalingReplicaSet", // Scaled up ReplicaSet abort-retry-promote-75dcb5ddd6 (revision 2) from 0 to 1
+ "RolloutStepCompleted", // Rollout step 1/2 completed (setWeight: 50)
+ "RolloutPaused", // Rollout is paused (CanaryPauseStep)
+ "ScalingReplicaSet", // Scaled down ReplicaSet abort-retry-promote-75dcb5ddd6 (revision 2) from 1 to 0
+ "RolloutAborted", // Rollout aborted update to revision 2
+ "ScalingReplicaSet", // Scaled up ReplicaSet abort-retry-promote-75dcb5ddd6 (revision 2) from 0 to 1
+ "RolloutStepCompleted", // Rollout step 1/2 completed (setWeight: 50)
+ "RolloutPaused", // Rollout is paused (CanaryPauseStep)
+ "RolloutStepCompleted", // Rollout step 2/2 completed (pause: 3s)
+ "RolloutResumed", // Rollout is resumed
+ "ScalingReplicaSet", // Scaled down ReplicaSet abort-retry-promote-698fbfb9dc (revision 1) from 1 to 0
+ "RolloutCompleted", // Rollout completed update to revision 2 (75dcb5ddd6): Completed all 2 canary steps
})
}
@@ -1615,3 +1615,62 @@ spec:
Then().
ExpectDeploymentReplicasCount("The deployment has not been scaled", "rollout-ref-deployment", 2)
}
+
+func (s *FunctionalSuite) TestSpecAndReplicaChangeSameTime() {
+ s.Given().
+ HealthyRollout(`
+apiVersion: argoproj.io/v1alpha1
+kind: Rollout
+metadata:
+ name: canary-change-same-time
+spec:
+ replicas: 2
+ strategy:
+ canary:
+ steps:
+ - setWeight: 10
+ - pause: {duration: 10s}
+ - setWeight: 20
+ - pause: {duration: 5s}
+ selector:
+ matchLabels:
+ app: canary-change-same-time
+ template:
+ metadata:
+ labels:
+ app: canary-change-same-time
+ spec:
+ containers:
+ - name: canary-change-same-time
+ image: nginx:1.19-alpine
+ resources:
+ requests:
+ memory: 16Mi
+ cpu: 1m
+`).
+ When().
+ WaitForRolloutStatus("Healthy").
+ PatchSpec(`
+spec:
+ replicas: 3
+ template:
+ spec:
+ containers:
+ - name: canary-change-same-time
+ env:
+ - name: TEST
+ value: test`).
+ WaitForRolloutStatus("Paused").
+ PatchSpec(`
+spec:
+ replicas: 4
+ template:
+ spec:
+ containers:
+ - name: canary-change-same-time
+ env:
+ - name: TEST
+ value: test-new`).
+ WaitForRolloutStatus("Healthy").Then().
+ ExpectReplicaCounts(4, 4, 4, 4, 4)
+}
diff --git a/test/e2e/istio_test.go b/test/e2e/istio_test.go
index 9797b3e503..7370dc5238 100644
--- a/test/e2e/istio_test.go
+++ b/test/e2e/istio_test.go
@@ -335,6 +335,7 @@ func (s *IstioSuite) TestIstioUpdateInMiddleZeroCanaryReplicas() {
ExpectRevisionPodCount("2", 1).
When().
UpdateSpec().
+ WaitForRevisionPodCount("3", 1).
WaitForRolloutStatus("Paused").
Then().
ExpectRevisionPodCount("3", 1)
diff --git a/utils/replicaset/replicaset.go b/utils/replicaset/replicaset.go
index b2664afd53..272449e778 100644
--- a/utils/replicaset/replicaset.go
+++ b/utils/replicaset/replicaset.go
@@ -72,6 +72,7 @@ func FindNewReplicaSet(rollout *v1alpha1.Rollout, rsList []*appsv1.ReplicaSet) *
}
}
// new ReplicaSet does not exist.
+
return nil
}