diff --git a/analysis/analysis.go b/analysis/analysis.go
index 496dcd7846..1f17c85fd1 100644
--- a/analysis/analysis.go
+++ b/analysis/analysis.go
@@ -62,7 +62,7 @@ func generateMetricTasks(run *v1alpha1.AnalysisRun) []metricTask {
 			continue
 		}
 		lastMeasurement := analysisutil.LastMeasurement(run, metric.Name)
-		if lastMeasurement != nil && lastMeasurement.FinishedAt.IsZero() {
+		if lastMeasurement != nil && lastMeasurement.FinishedAt == nil {
 			// last measurement is still in-progress. need to complete it
 			log.WithField("metric", metric.Name).Infof("resuming in-progress measurement")
 			tasks = append(tasks, metricTask{
@@ -217,25 +217,28 @@ func assessMetricStatus(metric *v1alpha1.Metric, result *v1alpha1.MetricResult,
 }
 
 // calculateNextReconcileTime calculates the next time that this AnalysisRun should be reconciled,
-// based on the earliest time of all metrics intervals and their finishedAt timestamps
+// based on the earliest time of all metric intervals, counts, and their finishedAt timestamps
 func calculateNextReconcileTime(run *v1alpha1.AnalysisRun) *time.Time {
 	log := logutil.WithAnalysisRun(run)
 	var reconcileTime *time.Time
 	for _, metric := range run.Spec.AnalysisSpec.Metrics {
 		if analysisutil.MetricCompleted(run, metric.Name) {
+			// NOTE: this also covers the case where metric.Count is reached
 			continue
 		}
 		lastMeasurement := analysisutil.LastMeasurement(run, metric.Name)
 		if lastMeasurement == nil {
-			// no measurement was started. we should not get here
+			// no measurement was started. we should never get here
 			log.WithField("metric", metric.Name).Warnf("metric never started. not factored into enqueue time")
 			continue
 		}
-		if lastMeasurement.FinishedAt.IsZero() {
+		if lastMeasurement.FinishedAt == nil {
+			// unfinished in-flight measurement.
+			// TODO(jessesuen) perhaps ask provider for an appropriate time to poll?
 			continue
 		}
 		if metric.Interval == nil {
-			// a measurement was already taken, and reoccurrence was not desired. no need to re-enqueue
+			// a measurement was already taken, and recurrence was not desired
 			continue
 		}
 		// Take the earliest time of all metrics
@@ -244,5 +247,8 @@ func calculateNextReconcileTime(run *v1alpha1.AnalysisRun) *time.Time {
 			reconcileTime = &metricReconcileTime
 		}
 	}
-	return reconcileTime
+	if reconcileTime != nil {
+		return reconcileTime
+	}
+	return nil
 }
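Reviewer note (commentary, not part of the patch): the FinishedAt == nil checks above only become expressible because the patch later changes Measurement.StartedAt/FinishedAt from metav1.Time to *metav1.Time. With a value type, a measurement that never finished is indistinguishable from one that finished exactly at the zero time, so IsZero() had to stand in for "unset". A minimal self-contained sketch of the distinction, using a local stand-in type rather than the project's:

    package main

    import (
    	"fmt"
    	"time"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // measurement mirrors the shape of v1alpha1.Measurement for illustration only.
    type measurement struct {
    	FinishedAt *metav1.Time // nil means the measurement is still in-flight
    }

    func inProgress(m measurement) bool {
    	// nil is unambiguous; a value-typed field would force an IsZero() check
    	// that conflates "never finished" with "finished at the zero time".
    	return m.FinishedAt == nil
    }

    func main() {
    	done := metav1.NewTime(time.Now())
    	fmt.Println(inProgress(measurement{}))                  // true
    	fmt.Println(inProgress(measurement{FinishedAt: &done})) // false
    }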
diff --git a/analysis/analysis_test.go b/analysis/analysis_test.go
index 7f6c11f4bc..af36d3fdff 100644
--- a/analysis/analysis_test.go
+++ b/analysis/analysis_test.go
@@ -11,6 +11,10 @@ import (
 	"github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
 )
 
+func timePtr(t metav1.Time) *metav1.Time {
+	return &t
+}
+
 func TestGenerateMetricTasksInterval(t *testing.T) {
 	run := &v1alpha1.AnalysisRun{
 		Spec: v1alpha1.AnalysisRunSpec{
@@ -32,8 +36,8 @@ func TestGenerateMetricTasksInterval(t *testing.T) {
 					{
 						Value:      "99",
 						Status:     v1alpha1.AnalysisStatusSuccessful,
-						StartedAt:  metav1.NewTime(time.Now().Add(-50 * time.Second)),
-						FinishedAt: metav1.NewTime(time.Now().Add(-50 * time.Second)),
+						StartedAt:  timePtr(metav1.NewTime(time.Now().Add(-50 * time.Second))),
+						FinishedAt: timePtr(metav1.NewTime(time.Now().Add(-50 * time.Second))),
 					},
 				},
 			},
@@ -48,8 +52,8 @@ func TestGenerateMetricTasksInterval(t *testing.T) {
 	{
 		// ensure we do take measurements when outside interval
 		successRate := run.Status.MetricResults["success-rate"]
-		successRate.Measurements[0].StartedAt = metav1.NewTime(time.Now().Add(-61 * time.Second))
-		successRate.Measurements[0].FinishedAt = metav1.NewTime(time.Now().Add(-61 * time.Second))
+		successRate.Measurements[0].StartedAt = timePtr(metav1.NewTime(time.Now().Add(-61 * time.Second)))
+		successRate.Measurements[0].FinishedAt = timePtr(metav1.NewTime(time.Now().Add(-61 * time.Second)))
 		run.Status.MetricResults["success-rate"] = successRate
 		tasks := generateMetricTasks(run)
 		assert.Equal(t, 1, len(tasks))
@@ -108,8 +112,8 @@ func TestGenerateMetricTasksNoInterval(t *testing.T) {
 					{
 						Value:      "99",
 						Status:     v1alpha1.AnalysisStatusSuccessful,
-						StartedAt:  metav1.NewTime(time.Now().Add(-50 * time.Second)),
-						FinishedAt: metav1.NewTime(time.Now().Add(-50 * time.Second)),
+						StartedAt:  timePtr(metav1.NewTime(time.Now().Add(-50 * time.Second))),
+						FinishedAt: timePtr(metav1.NewTime(time.Now().Add(-50 * time.Second))),
 					},
 				},
 			},
@@ -151,7 +155,7 @@ func TestGenerateMetricTasksIncomplete(t *testing.T) {
 					{
 						Value:     "99",
 						Status:    v1alpha1.AnalysisStatusSuccessful,
-						StartedAt: metav1.NewTime(time.Now().Add(-50 * time.Second)),
+						StartedAt: timePtr(metav1.NewTime(time.Now().Add(-50 * time.Second))),
 					},
 				},
 			},
@@ -234,13 +238,13 @@ func TestAssessMetricStatusInFlightMeasurement(t *testing.T) {
 			{
 				Value:      "99",
 				Status:     v1alpha1.AnalysisStatusSuccessful,
-				StartedAt:  metav1.NewTime(time.Now().Add(-60 * time.Second)),
-				FinishedAt: metav1.NewTime(time.Now().Add(-60 * time.Second)),
+				StartedAt:  timePtr(metav1.NewTime(time.Now().Add(-60 * time.Second))),
+				FinishedAt: timePtr(metav1.NewTime(time.Now().Add(-60 * time.Second))),
 			},
 			{
 				Value:     "99",
 				Status:    v1alpha1.AnalysisStatusRunning,
-				StartedAt: metav1.NewTime(time.Now()),
+				StartedAt: timePtr(metav1.NewTime(time.Now())),
 			},
 		},
 	}
@@ -258,8 +262,8 @@ func TestAssessMetricStatusMaxFailures(t *testing.T) { // max failures
 			{
 				Value:      "99",
 				Status:     v1alpha1.AnalysisStatusFailed,
-				StartedAt:  metav1.NewTime(time.Now().Add(-60 * time.Second)),
-				FinishedAt: metav1.NewTime(time.Now().Add(-60 * time.Second)),
+				StartedAt:  timePtr(metav1.NewTime(time.Now().Add(-60 * time.Second))),
+				FinishedAt: timePtr(metav1.NewTime(time.Now().Add(-60 * time.Second))),
 			},
 		},
 	}
@@ -274,28 +278,28 @@ func TestAssessMetricStatusConsecutiveErrors(t *testing.T) {
 		Measurements: []v1alpha1.Measurement{
 			{
 				Status:     v1alpha1.AnalysisStatusError,
-				StartedAt:  metav1.NewTime(time.Now().Add(-60 * time.Second)),
-				FinishedAt: metav1.NewTime(time.Now().Add(-60 * time.Second)),
+				StartedAt:  timePtr(metav1.NewTime(time.Now().Add(-60 * time.Second))),
+				FinishedAt: timePtr(metav1.NewTime(time.Now().Add(-60 * time.Second))),
 			},
 			{
 				Status:     v1alpha1.AnalysisStatusError,
-				StartedAt:  metav1.NewTime(time.Now().Add(-50 * time.Second)),
-				FinishedAt: metav1.NewTime(time.Now().Add(-50 * time.Second)),
+				StartedAt:  timePtr(metav1.NewTime(time.Now().Add(-50 * time.Second))),
+				FinishedAt: timePtr(metav1.NewTime(time.Now().Add(-50 * time.Second))),
 			},
 			{
 				Status:     v1alpha1.AnalysisStatusError,
-				StartedAt:  metav1.NewTime(time.Now().Add(-40 * time.Second)),
-				FinishedAt: metav1.NewTime(time.Now().Add(-40 * time.Second)),
+				StartedAt:  timePtr(metav1.NewTime(time.Now().Add(-40 * time.Second))),
+				FinishedAt: timePtr(metav1.NewTime(time.Now().Add(-40 * time.Second))),
 			},
 			{
 				Status:     v1alpha1.AnalysisStatusError,
-				StartedAt:  metav1.NewTime(time.Now().Add(-30 * time.Second)),
-				FinishedAt: metav1.NewTime(time.Now().Add(-30 * time.Second)),
+				StartedAt:  timePtr(metav1.NewTime(time.Now().Add(-30 * time.Second))),
+				FinishedAt: timePtr(metav1.NewTime(time.Now().Add(-30 * time.Second))),
 			},
 			{
 				Status:     v1alpha1.AnalysisStatusError,
-				StartedAt:  metav1.NewTime(time.Now().Add(-20 * time.Second)),
-				FinishedAt: metav1.NewTime(time.Now().Add(-20 * time.Second)),
+				StartedAt:  timePtr(metav1.NewTime(time.Now().Add(-20 * time.Second))),
+				FinishedAt: timePtr(metav1.NewTime(time.Now().Add(-20 * time.Second))),
 			},
 		},
 	}
@@ -303,8 +307,8 @@ func TestAssessMetricStatusConsecutiveErrors(t *testing.T) {
 	assert.Equal(t, v1alpha1.AnalysisStatusError, assessMetricStatus(metric, result, true))
 	result.Measurements[2] = v1alpha1.Measurement{
 		Status:     v1alpha1.AnalysisStatusSuccessful,
-		StartedAt:  metav1.NewTime(time.Now().Add(-40 * time.Second)),
-		FinishedAt: metav1.NewTime(time.Now().Add(-40 * time.Second)),
+		StartedAt:  timePtr(metav1.NewTime(time.Now().Add(-40 * time.Second))),
+		FinishedAt: timePtr(metav1.NewTime(time.Now().Add(-40 * time.Second))),
 	}
 	assert.Equal(t, v1alpha1.AnalysisStatusSuccessful, assessMetricStatus(metric, result, true))
 	assert.Equal(t, v1alpha1.AnalysisStatusRunning, assessMetricStatus(metric, result, false))
@@ -322,8 +326,8 @@ func TestAssessMetricStatusCountReached(t *testing.T) {
 			{
 				Value:      "99",
 				Status:     v1alpha1.AnalysisStatusSuccessful,
-				StartedAt:  metav1.NewTime(time.Now().Add(-60 * time.Second)),
-				FinishedAt: metav1.NewTime(time.Now().Add(-60 * time.Second)),
+				StartedAt:  timePtr(metav1.NewTime(time.Now().Add(-60 * time.Second))),
+				FinishedAt: timePtr(metav1.NewTime(time.Now().Add(-60 * time.Second))),
 			},
 		},
 	}
@@ -332,3 +336,140 @@ func TestAssessMetricStatusCountReached(t *testing.T) {
 	result.Inconclusive = 5
 	assert.Equal(t, v1alpha1.AnalysisStatusInconclusive, assessMetricStatus(metric, result, false))
 }
+
+func TestCalculateNextReconcileTimeInterval(t *testing.T) {
+	now := metav1.Now()
+	nowMinus30 := metav1.NewTime(now.Add(time.Second * -30))
+	run := &v1alpha1.AnalysisRun{
+		Spec: v1alpha1.AnalysisRunSpec{
+			AnalysisSpec: v1alpha1.AnalysisTemplateSpec{
+				Metrics: []v1alpha1.Metric{
+					{
+						Name:     "success-rate",
+						Interval: pointer.Int32Ptr(60),
+					},
+				},
+			},
+		},
+		Status: &v1alpha1.AnalysisRunStatus{
+			Status: v1alpha1.AnalysisStatusRunning,
+			MetricResults: map[string]v1alpha1.MetricResult{
+				"success-rate": {
+					Status: v1alpha1.AnalysisStatusRunning,
+					Measurements: []v1alpha1.Measurement{
+						{
+							Value:      "99",
+							Status:     v1alpha1.AnalysisStatusSuccessful,
+							StartedAt:  &nowMinus30,
+							FinishedAt: &nowMinus30,
+						},
+					},
+				},
+			},
+		},
+	}
+	// ensure we requeue at correct interval
+	assert.Equal(t, now.Add(time.Second*30), *calculateNextReconcileTime(run))
+	// an in-flight measurement (finishedAt not yet set) should not requeue
+	run.Status.MetricResults["success-rate"].Measurements[0].FinishedAt = nil
+	run.Status.MetricResults["success-rate"].Measurements[0].Status = v1alpha1.AnalysisStatusRunning
+	assert.Nil(t, calculateNextReconcileTime(run))
+	// do not requeue completed metrics
+	nowMinus120 := metav1.NewTime(now.Add(time.Second * -120))
+	run.Status.MetricResults["success-rate"] = v1alpha1.MetricResult{
+		Status: v1alpha1.AnalysisStatusSuccessful,
+		Measurements: []v1alpha1.Measurement{
+			{
+				Value:      "99",
+				Status:     v1alpha1.AnalysisStatusSuccessful,
+				StartedAt:  &nowMinus120,
+				FinishedAt: &nowMinus120,
+			},
+		},
+	}
+	assert.Nil(t, calculateNextReconcileTime(run))
+}
+
+func TestCalculateNextReconcileTimeNoInterval(t *testing.T) {
+	now := metav1.Now()
+	run := &v1alpha1.AnalysisRun{
+		Spec: v1alpha1.AnalysisRunSpec{
+			AnalysisSpec: v1alpha1.AnalysisTemplateSpec{
+				Metrics: []v1alpha1.Metric{
+					{
+						Name:  "success-rate",
+						Count: 1,
+					},
+				},
+			},
+		},
+		Status: &v1alpha1.AnalysisRunStatus{
+			Status: v1alpha1.AnalysisStatusRunning,
+			MetricResults: map[string]v1alpha1.MetricResult{
+				"success-rate": {
+					Status: v1alpha1.AnalysisStatusSuccessful,
+					Measurements: []v1alpha1.Measurement{
+						{
+							Value:      "99",
+							Status:     v1alpha1.AnalysisStatusSuccessful,
+							StartedAt:  &now,
+							FinishedAt: &now,
+						},
+					},
+				},
+			},
+		},
+	}
+	assert.Nil(t, calculateNextReconcileTime(run))
+}
+
+func TestCalculateNextReconcileEarliestMetric(t *testing.T) {
+	now := metav1.Now()
+	nowMinus30 := metav1.NewTime(now.Add(time.Second * -30))
+	nowMinus50 := metav1.NewTime(now.Add(time.Second * -50))
+	run := &v1alpha1.AnalysisRun{
+		Spec: v1alpha1.AnalysisRunSpec{
+			AnalysisSpec: v1alpha1.AnalysisTemplateSpec{
+				Metrics: []v1alpha1.Metric{
+					{
+						Name:     "success-rate",
+						Interval: pointer.Int32Ptr(60),
+					},
+					{
+						Name:     "latency",
+						Interval: pointer.Int32Ptr(60),
+					},
+				},
+			},
+		},
+		Status: &v1alpha1.AnalysisRunStatus{
+			Status: v1alpha1.AnalysisStatusRunning,
+			MetricResults: map[string]v1alpha1.MetricResult{
+				"success-rate": {
+					Status: v1alpha1.AnalysisStatusRunning,
+					Measurements: []v1alpha1.Measurement{
+						{
+							Value:      "99",
+							Status:     v1alpha1.AnalysisStatusSuccessful,
+							StartedAt:  &nowMinus30,
+							FinishedAt: &nowMinus30,
+						},
+					},
+				},
+				"latency": {
+					Status: v1alpha1.AnalysisStatusRunning,
+					Measurements: []v1alpha1.Measurement{
+						{
+							Value:      "1",
+							Status:     v1alpha1.AnalysisStatusSuccessful,
+							StartedAt:  &nowMinus50,
+							FinishedAt: &nowMinus50,
+						},
+					},
+				},
+			},
+		},
+	}
+	// ensure we requeue at the earliest metric interval
+	assert.Equal(t, now.Add(time.Second*10), *calculateNextReconcileTime(run))
+}
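Reviewer note: a worked example of the arithmetic TestCalculateNextReconcileEarliestMetric pins down. Both metrics use a 60-second interval; success-rate last finished 30s ago, so it is next due at now+30s, while latency finished 50s ago and is due at now+10s. The earlier deadline, now+10s, wins. The selection inside calculateNextReconcileTime reduces to a fold like this sketch (the helper name and signature are assumed for illustration; this is not the PR's code):

    // earliest returns the sooner of the running candidate and a metric's next
    // due time, i.e. its last finishedAt plus its interval in seconds.
    func earliest(candidate *time.Time, finishedAt time.Time, intervalSeconds int32) *time.Time {
    	next := finishedAt.Add(time.Duration(intervalSeconds) * time.Second)
    	if candidate == nil || next.Before(*candidate) {
    		return &next
    	}
    	return candidate
    }

Folding this over every incomplete metric whose last measurement has finished yields the run's next reconcile time, or nil when no metric needs to be re-measured.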
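Reviewer note: the timePtr helper added at the top of analysis_test.go exists because Go forbids taking the address of a function's return value, so &metav1.NewTime(...) does not compile inside the composite literals the tests build. Routing the value through a parameter makes it addressable:

    func timePtr(t metav1.Time) *metav1.Time {
    	return &t // t is a local copy; returning its address is safe in Go
    }

    // usage, as in the updated tests:
    m := v1alpha1.Measurement{
    	StartedAt:  timePtr(metav1.NewTime(time.Now())),
    	FinishedAt: timePtr(metav1.NewTime(time.Now())),
    }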
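Reviewer note: the tests mix two mutation styles around run.Status.MetricResults, and both follow from Go's addressability rules. A struct field of a map value cannot be assigned directly, so TestGenerateMetricTasksInterval copies the MetricResult out, mutates it, and writes it back; TestCalculateNextReconcileTimeInterval instead assigns through a slice element inside the map value, which is legal because the slice header points at an addressable backing array. A sketch of both cases:

    results := map[string]v1alpha1.MetricResult{
    	"success-rate": {Measurements: make([]v1alpha1.Measurement, 1)},
    }

    // results["success-rate"].Status = ... // does not compile: map values are not addressable
    r := results["success-rate"] // copy out,
    r.Status = v1alpha1.AnalysisStatusRunning
    results["success-rate"] = r // then write back

    // fine: indexing the slice dereferences its backing-array pointer
    results["success-rate"].Measurements[0].FinishedAt = nil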
diff --git a/manifests/crds/analysis-run-crd.yaml b/manifests/crds/analysis-run-crd.yaml
index 016a0b70cc..5b59f9e652 100644
--- a/manifests/crds/analysis-run-crd.yaml
+++ b/manifests/crds/analysis-run-crd.yaml
@@ -115,6 +115,8 @@ spec:
                       - status
                       type: object
                     type: array
+                  message:
+                    type: string
                   name:
                     type: string
                   status:
diff --git a/manifests/install.yaml b/manifests/install.yaml
index 2d7ce74847..54b800cd21 100644
--- a/manifests/install.yaml
+++ b/manifests/install.yaml
@@ -116,6 +116,8 @@ spec:
                       - status
                       type: object
                     type: array
+                  message:
+                    type: string
                   name:
                     type: string
                   status:
diff --git a/manifests/namespace-install.yaml b/manifests/namespace-install.yaml
index 996b74ee61..266f983da5 100644
--- a/manifests/namespace-install.yaml
+++ b/manifests/namespace-install.yaml
@@ -116,6 +116,8 @@ spec:
                       - status
                       type: object
                     type: array
+                  message:
+                    type: string
                   name:
                     type: string
                   status:
diff --git a/pkg/apis/rollouts/v1alpha1/analysis_types.go b/pkg/apis/rollouts/v1alpha1/analysis_types.go
index 6521fc95f2..b118124923 100644
--- a/pkg/apis/rollouts/v1alpha1/analysis_types.go
+++ b/pkg/apis/rollouts/v1alpha1/analysis_types.go
@@ -161,9 +161,9 @@ type Measurement struct {
 	// Status is the status of this single measurement
 	Status AnalysisStatus `json:"status"`
 	// StartedAt is the timestamp in which this measurement started to be measured
-	StartedAt metav1.Time `json:"startedAt,omitempty"`
+	StartedAt *metav1.Time `json:"startedAt,omitempty"`
 	// FinishedAt is the timestamp in which this measurement completed and value was collected
-	FinishedAt metav1.Time `json:"finishedAt,omitempty"`
+	FinishedAt *metav1.Time `json:"finishedAt,omitempty"`
 	// Value is the measured value of the metric
 	Value string `json:"value,omitempty"`
 	// Metadata stores additional metadata about this metric result, used by the different providers
diff --git a/pkg/apis/rollouts/v1alpha1/openapi_generated.go b/pkg/apis/rollouts/v1alpha1/openapi_generated.go
index b80c73f19a..f6ddd5e914 100644
--- a/pkg/apis/rollouts/v1alpha1/openapi_generated.go
+++ b/pkg/apis/rollouts/v1alpha1/openapi_generated.go
@@ -985,9 +985,16 @@ func schema_pkg_apis_rollouts_v1alpha1_MetricResult(ref common.ReferenceCallback
 						},
 					},
 				},
+				"message": {
+					SchemaProps: spec.SchemaProps{
+						Description: "Message contains a message describing current condition (e.g. error messages)",
+						Type:        []string{"string"},
+						Format:      "",
+					},
+				},
 				"count": {
 					SchemaProps: spec.SchemaProps{
-						Description: "Count is the total number of measurements that have been taken",
+						Description: "Count is the number of times the metric was measured without Error. This is equal to the sum of Successful, Failed, Inconclusive",
 						Type:        []string{"integer"},
 						Format:      "int32",
 					},
@@ -1006,16 +1013,16 @@ func schema_pkg_apis_rollouts_v1alpha1_MetricResult(ref common.ReferenceCallback
 						Format:      "int32",
 					},
 				},
-				"error": {
+				"inconclusive": {
 					SchemaProps: spec.SchemaProps{
-						Description: "Error is the number of times an error was encountered during measurement",
+						Description: "Inconclusive is the number of times the metric was measured Inconclusive",
 						Type:        []string{"integer"},
 						Format:      "int32",
 					},
 				},
-				"inconclusive": {
+				"error": {
 					SchemaProps: spec.SchemaProps{
-						Description: "Inconclusive is the number of times the metric was measured Inconclusive",
+						Description: "Error is the number of times an error was encountered during measurement",
 						Type:        []string{"integer"},
 						Format:      "int32",
 					},
diff --git a/pkg/apis/rollouts/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/rollouts/v1alpha1/zz_generated.deepcopy.go
index 346df02c82..06e5bfa8ce 100644
--- a/pkg/apis/rollouts/v1alpha1/zz_generated.deepcopy.go
+++ b/pkg/apis/rollouts/v1alpha1/zz_generated.deepcopy.go
@@ -530,8 +530,14 @@ func (in *ExperimentStatus) DeepCopy() *ExperimentStatus {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *Measurement) DeepCopyInto(out *Measurement) {
 	*out = *in
-	in.StartedAt.DeepCopyInto(&out.StartedAt)
-	in.FinishedAt.DeepCopyInto(&out.FinishedAt)
+	if in.StartedAt != nil {
+		in, out := &in.StartedAt, &out.StartedAt
+		*out = (*in).DeepCopy()
+	}
+	if in.FinishedAt != nil {
+		in, out := &in.FinishedAt, &out.FinishedAt
+		*out = (*in).DeepCopy()
+	}
 	if in.Metadata != nil {
 		in, out := &in.Metadata, &out.Metadata
 		*out = make(map[string]string, len(*in))
diff --git a/utils/analysis/analysis.go b/utils/analysis/analysis.go
index b3cfdd0f95..6464b32f9d 100644
--- a/utils/analysis/analysis.go
+++ b/utils/analysis/analysis.go
@@ -11,7 +11,12 @@ func ValidateAnalysisTemplateSpec(spec v1alpha1.AnalysisTemplateSpec) error {
 	if len(spec.Metrics) == 0 {
 		return fmt.Errorf("no metrics specified")
 	}
+	duplicateNames := make(map[string]bool)
 	for i, metric := range spec.Metrics {
+		if _, ok := duplicateNames[metric.Name]; ok {
+			return fmt.Errorf("metrics[%d]: duplicate name '%s'", i, metric.Name)
+		}
+		duplicateNames[metric.Name] = true
 		if err := ValidateMetric(metric); err != nil {
 			return fmt.Errorf("metrics[%d]: %v", i, err)
 		}
@@ -24,6 +29,9 @@ func ValidateMetric(metric v1alpha1.Metric) error {
 	if metric.Count < metric.MaxFailures {
 		return fmt.Errorf("count must be >= maxFailures")
 	}
+	if metric.Count > 1 && metric.Interval == nil {
+		return fmt.Errorf("interval must be specified when count > 1")
+	}
 	return nil
 }
diff --git a/utils/analysis/analysis_test.go b/utils/analysis/analysis_test.go
index b75da2d5fb..9bb5191ae5 100644
--- a/utils/analysis/analysis_test.go
+++ b/utils/analysis/analysis_test.go
@@ -4,6 +4,7 @@ import (
 	"testing"
 
 	"github.com/stretchr/testify/assert"
+	"k8s.io/utils/pointer"
 
 	"github.com/argoproj/argo-rollouts/pkg/apis/rollouts/v1alpha1"
 )
@@ -13,19 +14,22 @@ func TestValidateMetrics(t *testing.T) {
 		spec := v1alpha1.AnalysisTemplateSpec{
 			Metrics: []v1alpha1.Metric{
 				{
+					Name:        "success-rate",
 					Count:       1,
 					MaxFailures: 2,
 				},
 			},
 		}
 		err := ValidateAnalysisTemplateSpec(spec)
-		assert.Error(t, err)
+		assert.EqualError(t, err, "metrics[0]: count must be >= maxFailures")
 	}
 	{
 		spec := v1alpha1.AnalysisTemplateSpec{
 			Metrics: []v1alpha1.Metric{
 				{
+					Name:        "success-rate",
 					Count:       2,
+					Interval:    pointer.Int32Ptr(60),
 					MaxFailures: 2,
 				},
 			},
 		}
@@ -38,7 +42,33 @@ func TestValidateMetrics(t *testing.T) {
 			Metrics: []v1alpha1.Metric{},
 		}
 		err := ValidateAnalysisTemplateSpec(spec)
-		assert.Error(t, err)
+		assert.EqualError(t, err, "no metrics specified")
+	}
+	{
+		spec := v1alpha1.AnalysisTemplateSpec{
+			Metrics: []v1alpha1.Metric{
+				{
+					Name:  "success-rate",
+					Count: 2,
+				},
+			},
+		}
+		err := ValidateAnalysisTemplateSpec(spec)
+		assert.EqualError(t, err, "metrics[0]: interval must be specified when count > 1")
+	}
+	{
+		spec := v1alpha1.AnalysisTemplateSpec{
+			Metrics: []v1alpha1.Metric{
+				{
+					Name: "success-rate",
+				},
+				{
+					Name: "success-rate",
+				},
+			},
+		}
+		err := ValidateAnalysisTemplateSpec(spec)
+		assert.EqualError(t, err, "metrics[1]: duplicate name 'success-rate'")
 	}
 }
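Reviewer note: the duplicate-name guard in ValidateAnalysisTemplateSpec is a single-pass seen-set over the metric names. An equivalent sketch with map[string]struct{}, whose values occupy no memory, would look like this (an alternative for consideration, not a requested change):

    seen := make(map[string]struct{}, len(spec.Metrics))
    for i, metric := range spec.Metrics {
    	if _, ok := seen[metric.Name]; ok {
    		return fmt.Errorf("metrics[%d]: duplicate name '%s'", i, metric.Name)
    	}
    	seen[metric.Name] = struct{}{}
    }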