fix(olm): properly detect cluster operator API
This was causing vanilla upstream Kubernetes to crash by attempting to
write to an API that is not present. Also, disable writing operator
status by default, since it is OpenShift-specific.
Jeff Peeler authored and dmesser committed Feb 22, 2019
1 parent e40d4f4 commit 8cbdcf5
1 changed file: cmd/olm/main.go (85 additions, 77 deletions)
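The heart of the fix, visible in the hunks below, is to ask the discovery client whether the config.openshift.io/v1 group/version is served at all before touching the ClusterOperator API. A minimal standalone sketch of that detection pattern (the in-cluster client construction here is illustrative; the commit itself reuses OLM's existing opClient):

package main

import (
	"log"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		log.Fatalf("config: %v", err)
	}
	dc, err := discovery.NewDiscoveryClientForConfig(cfg)
	if err != nil {
		log.Fatalf("discovery client: %v", err)
	}

	// ServerSupportsVersion returns nil only when the API server
	// actually serves the requested group/version; on vanilla
	// Kubernetes the OpenShift config group is absent, so this
	// returns an error instead of letting a later write crash.
	gv := schema.GroupVersion{Group: "config.openshift.io", Version: "v1"}
	if err := discovery.ServerSupportsVersion(dc, gv); err != nil {
		log.Printf("ClusterOperator API not present, skipping update (%v)", err)
		return
	}
	log.Print("ClusterOperator API present; safe to write operator status")
}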
@@ -12,7 +12,6 @@ import (
 	log "github.com/sirupsen/logrus"
 	v1 "k8s.io/api/core/v1"
 	k8serrors "k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	configv1 "github.com/openshift/api/config/v1"
@@ -26,12 +25,14 @@ import (
 	"github.com/operator-framework/operator-lifecycle-manager/pkg/lib/signals"
 	"github.com/operator-framework/operator-lifecycle-manager/pkg/metrics"
 	olmversion "github.com/operator-framework/operator-lifecycle-manager/pkg/version"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/discovery"
 	"k8s.io/client-go/tools/clientcmd"
 )
 
 const (
 	defaultWakeupInterval = 5 * time.Minute
-	defaultOperatorName   = "operator-lifecycle-manager"
+	defaultOperatorName   = ""
 )
 
 // config flags defined globally so that they appear on the test binary as well
@@ -131,84 +132,91 @@ func main() {
 	<-ready
 
 	if *writeStatusName != "" {
-		existing, err := configClient.ClusterOperators().Get(*writeStatusName, metav1.GetOptions{})
-		if meta.IsNoMatchError(err) {
-			log.Infof("ClusterOperator api not present, skipping update")
-		} else if k8serrors.IsNotFound(err) {
-			log.Info("Existing cluster operator not found, creating")
-			created, err := configClient.ClusterOperators().Create(&configv1.ClusterOperator{
-				ObjectMeta: metav1.ObjectMeta{
-					Name: *writeStatusName,
-				},
-			})
-			if err != nil {
-				log.Fatalf("ClusterOperator create failed: %v\n", err)
-			}
-
-			created.Status = configv1.ClusterOperatorStatus{
-				Conditions: []configv1.ClusterOperatorStatusCondition{
-					configv1.ClusterOperatorStatusCondition{
-						Type:               configv1.OperatorProgressing,
-						Status:             configv1.ConditionFalse,
-						Message:            fmt.Sprintf("Done deploying %s.", olmversion.OLMVersion),
-						LastTransitionTime: metav1.Now(),
-					},
-					configv1.ClusterOperatorStatusCondition{
-						Type:               configv1.OperatorFailing,
-						Status:             configv1.ConditionFalse,
-						Message:            fmt.Sprintf("Done deploying %s.", olmversion.OLMVersion),
-						LastTransitionTime: metav1.Now(),
-					},
-					configv1.ClusterOperatorStatusCondition{
-						Type:               configv1.OperatorAvailable,
-						Status:             configv1.ConditionTrue,
-						Message:            fmt.Sprintf("Done deploying %s.", olmversion.OLMVersion),
-						LastTransitionTime: metav1.Now(),
-					},
-				},
-				Versions: []configv1.OperandVersion{{
-					Name:    "operator",
-					Version: olmversion.Full(),
-				}},
-			}
-			_, err = configClient.ClusterOperators().UpdateStatus(created)
-			if err != nil {
-				log.Fatalf("ClusterOperator update status failed: %v", err)
-			}
-		} else if err != nil {
-			log.Fatalf("ClusterOperators get failed: %v", err)
-		} else {
-			clusteroperatorv1helpers.SetStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{
-				Type:               configv1.OperatorProgressing,
-				Status:             configv1.ConditionFalse,
-				Message:            fmt.Sprintf("Done deploying %s.", olmversion.OLMVersion),
-				LastTransitionTime: metav1.Now(),
-			})
-			clusteroperatorv1helpers.SetStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{
-				Type:               configv1.OperatorFailing,
-				Status:             configv1.ConditionFalse,
-				Message:            fmt.Sprintf("Done deploying %s.", olmversion.OLMVersion),
-				LastTransitionTime: metav1.Now(),
-			})
-			clusteroperatorv1helpers.SetStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{
-				Type:               configv1.OperatorAvailable,
-				Status:             configv1.ConditionTrue,
-				Message:            fmt.Sprintf("Done deploying %s.", olmversion.OLMVersion),
-				LastTransitionTime: metav1.Now(),
-			})
-
-			olmOperandVersion := configv1.OperandVersion{Name: "operator", Version: olmversion.Full()}
-			// look for operator version, even though in OLM's case should only be one
-			for _, item := range existing.Status.Versions {
-				if item.Name == "operator" && item != olmOperandVersion {
-					// if a cluster wide upgrade has occurred, hopefully any existing operator statuses have been deleted
-					log.Infof("Updating version from %v to %v\n", item.Version, olmversion.Full())
-				}
-			}
-			operatorv1helpers.SetOperandVersion(&existing.Status.Versions, olmOperandVersion)
-			_, err = configClient.ClusterOperators().UpdateStatus(existing)
-			if err != nil {
-				log.Fatalf("ClusterOperator update status failed: %v", err)
-			}
-		}
+		opStatusGV := schema.GroupVersion{
+			Group:   "config.openshift.io",
+			Version: "v1",
+		}
+		err := discovery.ServerSupportsVersion(opClient.KubernetesInterface().Discovery(), opStatusGV)
+		if err != nil {
+			log.Infof("ClusterOperator api not present, skipping update (%v)", err)
+		} else {
+			existing, err := configClient.ClusterOperators().Get(*writeStatusName, metav1.GetOptions{})
+			if k8serrors.IsNotFound(err) {
+				log.Info("Existing operator status not found, creating")
+				created, err := configClient.ClusterOperators().Create(&configv1.ClusterOperator{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: *writeStatusName,
+					},
+				})
+				if err != nil {
+					log.Fatalf("ClusterOperator create failed: %v\n", err)
+				}
+
+				created.Status = configv1.ClusterOperatorStatus{
+					Conditions: []configv1.ClusterOperatorStatusCondition{
+						configv1.ClusterOperatorStatusCondition{
+							Type:               configv1.OperatorProgressing,
+							Status:             configv1.ConditionFalse,
+							Message:            fmt.Sprintf("Done deploying %s.", olmversion.OLMVersion),
+							LastTransitionTime: metav1.Now(),
+						},
+						configv1.ClusterOperatorStatusCondition{
+							Type:               configv1.OperatorFailing,
+							Status:             configv1.ConditionFalse,
+							Message:            fmt.Sprintf("Done deploying %s.", olmversion.OLMVersion),
+							LastTransitionTime: metav1.Now(),
+						},
+						configv1.ClusterOperatorStatusCondition{
+							Type:               configv1.OperatorAvailable,
+							Status:             configv1.ConditionTrue,
+							Message:            fmt.Sprintf("Done deploying %s.", olmversion.OLMVersion),
+							LastTransitionTime: metav1.Now(),
+						},
+					},
+					Versions: []configv1.OperandVersion{{
+						Name:    "operator",
+						Version: olmversion.Full(),
+					}},
+				}
+				_, err = configClient.ClusterOperators().UpdateStatus(created)
+				if err != nil {
+					log.Fatalf("ClusterOperator update status failed: %v", err)
+				}
+			} else if err != nil {
+				log.Fatalf("ClusterOperators get failed: %v", err)
+			} else {
+				clusteroperatorv1helpers.SetStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{
+					Type:               configv1.OperatorProgressing,
+					Status:             configv1.ConditionFalse,
+					Message:            fmt.Sprintf("Done deploying %s.", olmversion.OLMVersion),
+					LastTransitionTime: metav1.Now(),
+				})
+				clusteroperatorv1helpers.SetStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{
+					Type:               configv1.OperatorFailing,
+					Status:             configv1.ConditionFalse,
+					Message:            fmt.Sprintf("Done deploying %s.", olmversion.OLMVersion),
+					LastTransitionTime: metav1.Now(),
+				})
+				clusteroperatorv1helpers.SetStatusCondition(&existing.Status.Conditions, configv1.ClusterOperatorStatusCondition{
+					Type:               configv1.OperatorAvailable,
+					Status:             configv1.ConditionTrue,
+					Message:            fmt.Sprintf("Done deploying %s.", olmversion.OLMVersion),
+					LastTransitionTime: metav1.Now(),
+				})
+
+				olmOperandVersion := configv1.OperandVersion{Name: "operator", Version: olmversion.Full()}
+				// look for operator version, even though in OLM's case should only be one
+				for _, item := range existing.Status.Versions {
+					if item.Name == "operator" && item != olmOperandVersion {
+						// if a cluster wide upgrade has occurred, hopefully any existing operator statuses have been deleted
+						log.Infof("Updating version from %v to %v\n", item.Version, olmversion.Full())
+					}
+				}
+				operatorv1helpers.SetOperandVersion(&existing.Status.Versions, olmOperandVersion)
+				_, err = configClient.ClusterOperators().UpdateStatus(existing)
+				if err != nil {
+					log.Fatalf("ClusterOperator update status failed: %v", err)
+				}
+			}
+		}
 	}
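One consequence of the defaultOperatorName change above is that status reporting becomes opt-in: with the default empty, the whole block is skipped unless the binary is started with a status name (on OpenShift, presumably the old default, operator-lifecycle-manager). A minimal sketch of that gating, assuming the value is wired up as a writeStatusName flag; the actual flag registration is outside the hunks shown here:

package main

import (
	"flag"
	"fmt"
)

// Mirrors the new default in this commit: empty means "do not write
// operator status", which is the safe behavior on vanilla Kubernetes.
const defaultOperatorName = ""

func main() {
	// The flag name is an assumption for illustration only.
	writeStatusName := flag.String("writeStatusName", defaultOperatorName,
		"ClusterOperator name on which to write status (empty disables)")
	flag.Parse()

	if *writeStatusName == "" {
		fmt.Println("operator status reporting disabled")
		return
	}
	fmt.Printf("would write ClusterOperator status to %q\n", *writeStatusName)
}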