diff --git a/cmd/olm/main.go b/cmd/olm/main.go
index 313de3530b7..9324020e951 100644
--- a/cmd/olm/main.go
+++ b/cmd/olm/main.go
@@ -12,7 +12,6 @@ import (
 	log "github.com/sirupsen/logrus"
 	v1 "k8s.io/api/core/v1"
 	k8serrors "k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	configv1 "github.com/openshift/api/config/v1"
@@ -26,12 +25,14 @@ import (
 	"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/signals"
 	"github.com/operator-framework/operator-lifecycle-manager/pkg/metrics"
 	olmversion "github.com/operator-framework/operator-lifecycle-manager/pkg/version"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/discovery"
 	"k8s.io/client-go/tools/clientcmd"
 )
 
 const (
 	defaultWakeupInterval = 5 * time.Minute
-	defaultOperatorName   = "operator-lifecycle-manager"
+	defaultOperatorName   = ""
 )
 
 // config flags defined globally so that they appear on the test binary as well
@@ -131,84 +132,91 @@ func main() {
 	<-ready
 
 	if *writeStatusName != "" {
-		existing, err := configClient.ClusterOperators().Get(*writeStatusName, metav1.GetOptions{})
-		if meta.IsNoMatchError(err) {
-			log.Infof("ClusterOperator api not present, skipping update")
-		} else if k8serrors.IsNotFound(err) {
-			log.Info("Existing cluster operator not found, creating")
-			created, err := configClient.ClusterOperators().Create(&configv1.ClusterOperator{
-				ObjectMeta: metav1.ObjectMeta{
-					Name: *writeStatusName,
-				},
-			})
-			if err != nil {
-				log.Fatalf("ClusterOperator create failed: %v\n", err)
-			}
-
-			created.Status = configv1.ClusterOperatorStatus{
-				Conditions: []configv1.ClusterOperatorStatusCondition{
-					configv1.ClusterOperatorStatusCondition{
-						Type:               configv1.OperatorProgressing,
-						Status:             configv1.ConditionFalse,
-						Message:            fmt.Sprintf("Done deploying %s.", olmversion.OLMVersion),
-						LastTransitionTime: metav1.Now(),
-					},
-					configv1.ClusterOperatorStatusCondition{
-						Type:               configv1.OperatorFailing,
-						Status:             configv1.ConditionFalse,
-						Message:            fmt.Sprintf("Done deploying %s.", olmversion.OLMVersion),
-						LastTransitionTime: metav1.Now(),
-					},
-					configv1.ClusterOperatorStatusCondition{
-						Type:               configv1.OperatorAvailable,
-						Status:             configv1.ConditionTrue,
-						Message:            fmt.Sprintf("Done deploying %s.", olmversion.OLMVersion),
-						LastTransitionTime: metav1.Now(),
-					},
-				},
-				Versions: []configv1.OperandVersion{{
-					Name:    "operator",
-					Version: olmversion.Full(),
-				}},
-			}
-			_, err = configClient.ClusterOperators().UpdateStatus(created)
-			if err != nil {
-				log.Fatalf("ClusterOperator update status failed: %v", err)
-			}
-		} else if err != nil {
-			log.Fatalf("ClusterOperators get failed: %v", err)
-		} else {
-			clusteroperatorv1helpers.SetStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{
-				Type:               configv1.OperatorProgressing,
-				Status:             configv1.ConditionFalse,
-				Message:            fmt.Sprintf("Done deploying %s.", olmversion.OLMVersion),
-				LastTransitionTime: metav1.Now(),
-			})
-			clusteroperatorv1helpers.SetStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{
-				Type:               configv1.OperatorFailing,
-				Status:             configv1.ConditionFalse,
-				Message:            fmt.Sprintf("Done deploying %s.", olmversion.OLMVersion),
-				LastTransitionTime: metav1.Now(),
-			})
-			clusteroperatorv1helpers.SetStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{
-				Type:               configv1.OperatorAvailable,
-				Status:             configv1.ConditionTrue,
-				Message:            fmt.Sprintf("Done deploying %s.", olmversion.OLMVersion),
-				LastTransitionTime: metav1.Now(),
-			})
-
-			olmOperandVersion := configv1.OperandVersion{Name: "operator", Version: olmversion.Full()}
-			// look for operator version, even though in OLM's case should only be one
-			for _, item := range existing.Status.Versions {
-				if item.Name == "operator" && item != olmOperandVersion {
-					// if a cluster wide upgrade has occurred, hopefully any existing operator statuses have been deleted
-					log.Infof("Updating version from %v to %v\n", item.Version, olmversion.Full())
-				}
-			}
-			operatorv1helpers.SetOperandVersion(&existing.Status.Versions, olmOperandVersion)
-			_, err = configClient.ClusterOperators().UpdateStatus(existing)
-			if err != nil {
-				log.Fatalf("ClusterOperator update status failed: %v", err)
-			}
-		}
+		opStatusGV := schema.GroupVersion{
+			Group:   "config.openshift.io",
+			Version: "v1",
+		}
+		err := discovery.ServerSupportsVersion(opClient.KubernetesInterface().Discovery(), opStatusGV)
+		if err != nil {
+			log.Infof("ClusterOperator api not present, skipping update (%v)", err)
+		} else {
+			existing, err := configClient.ClusterOperators().Get(*writeStatusName, metav1.GetOptions{})
+			if k8serrors.IsNotFound(err) {
+				log.Info("Existing operator status not found, creating")
+				created, err := configClient.ClusterOperators().Create(&configv1.ClusterOperator{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: *writeStatusName,
+					},
+				})
+				if err != nil {
+					log.Fatalf("ClusterOperator create failed: %v\n", err)
+				}
+
+				created.Status = configv1.ClusterOperatorStatus{
+					Conditions: []configv1.ClusterOperatorStatusCondition{
+						configv1.ClusterOperatorStatusCondition{
+							Type:               configv1.OperatorProgressing,
+							Status:             configv1.ConditionFalse,
+							Message:            fmt.Sprintf("Done deploying %s.", olmversion.OLMVersion),
+							LastTransitionTime: metav1.Now(),
+						},
+						configv1.ClusterOperatorStatusCondition{
+							Type:               configv1.OperatorFailing,
+							Status:             configv1.ConditionFalse,
+							Message:            fmt.Sprintf("Done deploying %s.", olmversion.OLMVersion),
+							LastTransitionTime: metav1.Now(),
+						},
+						configv1.ClusterOperatorStatusCondition{
+							Type:               configv1.OperatorAvailable,
+							Status:             configv1.ConditionTrue,
+							Message:            fmt.Sprintf("Done deploying %s.", olmversion.OLMVersion),
+							LastTransitionTime: metav1.Now(),
+						},
+					},
+					Versions: []configv1.OperandVersion{{
+						Name:    "operator",
+						Version: olmversion.Full(),
+					}},
+				}
+				_, err = configClient.ClusterOperators().UpdateStatus(created)
+				if err != nil {
+					log.Fatalf("ClusterOperator update status failed: %v", err)
+				}
+			} else if err != nil {
+				log.Fatalf("ClusterOperators get failed: %v", err)
+			} else {
+				clusteroperatorv1helpers.SetStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{
+					Type:               configv1.OperatorProgressing,
+					Status:             configv1.ConditionFalse,
+					Message:            fmt.Sprintf("Done deploying %s.", olmversion.OLMVersion),
+					LastTransitionTime: metav1.Now(),
+				})
+				clusteroperatorv1helpers.SetStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{
+					Type:               configv1.OperatorFailing,
+					Status:             configv1.ConditionFalse,
+					Message:            fmt.Sprintf("Done deploying %s.", olmversion.OLMVersion),
+					LastTransitionTime: metav1.Now(),
+				})
+				clusteroperatorv1helpers.SetStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{
+					Type:               configv1.OperatorAvailable,
+					Status:             configv1.ConditionTrue,
+					Message:            fmt.Sprintf("Done deploying %s.", olmversion.OLMVersion),
+					LastTransitionTime: metav1.Now(),
+				})
+
+				olmOperandVersion := configv1.OperandVersion{Name: "operator", Version: olmversion.Full()}
+				// look for operator version, even though in OLM's case should only be one
+				for _, item := range existing.Status.Versions {
+					if item.Name == "operator" && item != olmOperandVersion {
+						// if a cluster wide upgrade has occurred, hopefully any existing operator statuses have been deleted
+						log.Infof("Updating version from %v to %v\n", item.Version, olmversion.Full())
+					}
+				}
+				operatorv1helpers.SetOperandVersion(&existing.Status.Versions, olmOperandVersion)
+				_, err = configClient.ClusterOperators().UpdateStatus(existing)
+				if err != nil {
+					log.Fatalf("ClusterOperator update status failed: %v", err)
+				}
+			}
 		}
 	}
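Note on the key change: the old code could only detect a missing ClusterOperator API after the fact, by testing the error from the initial Get with meta.IsNoMatchError. The new code asks the discovery endpoint up front whether the server serves config.openshift.io/v1 and skips the entire status block when it does not, so OLM runs cleanly on non-OpenShift clusters. Below is a minimal standalone sketch of that gate; the client construction is simplified for illustration (the real main.go threads its existing opClient through), but discovery.ServerSupportsVersion and the group/version being probed are exactly what the patch uses.

```go
package main

import (
	"log"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a rest.Config; with empty flags this falls back to the
	// in-cluster config. The real main.go wires this up differently.
	config, err := clientcmd.BuildConfigFromFlags("", "")
	if err != nil {
		log.Fatalf("failed to build config: %v", err)
	}

	// A discovery client answers "which API groups/versions does this
	// server serve?" without needing a typed clientset for the group.
	dc, err := discovery.NewDiscoveryClientForConfig(config)
	if err != nil {
		log.Fatalf("failed to build discovery client: %v", err)
	}

	// ServerSupportsVersion returns nil only if config.openshift.io/v1
	// is served, so vanilla Kubernetes clusters take the skip path
	// instead of erroring on a missing ClusterOperator API.
	gv := schema.GroupVersion{Group: "config.openshift.io", Version: "v1"}
	if err := discovery.ServerSupportsVersion(dc, gv); err != nil {
		log.Printf("ClusterOperator api not present, skipping update (%v)", err)
		return
	}
	log.Print("ClusterOperator api present, safe to report status")
}
```

The companion change of defaultOperatorName from "operator-lifecycle-manager" to "" presumably makes status reporting opt-in: unless the flag backing writeStatusName is set to a non-empty name, the gate above is never reached.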