fix scale client error handling #19275

Merged: 6 commits, Apr 11, 2018
2 changes: 0 additions & 2 deletions contrib/completions/bash/oc

Some generated files are not rendered by default.

2 changes: 0 additions & 2 deletions contrib/completions/zsh/oc

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion pkg/apps/strategy/recreate/recreate.go
@@ -230,7 +230,7 @@ func (s *RecreateDeploymentStrategy) scaleAndWait(deployment *kapi.ReplicationCo
}
var scaleErr error
err := wait.PollImmediate(1*time.Second, 30*time.Second, func() (bool, error) {
- scaleErr = s.scaler.Scale(deployment.Namespace, deployment.Name, uint(replicas), &kubectl.ScalePrecondition{Size: -1, ResourceVersion: ""}, retry, retryParams)
+ scaleErr = s.scaler.Scale(deployment.Namespace, deployment.Name, uint(replicas), &kubectl.ScalePrecondition{Size: -1, ResourceVersion: ""}, retry, retryParams, kapi.Resource("replicationcontrollers"))
if scaleErr == nil {
return true, nil
}
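For context on the hunk above: the scale call is wrapped in wait.PollImmediate so that transient API errors (for example the namespace lifecycle admission plugin rejecting requests while a project is still initializing, which is what the FakeLaggedScaler below simulates) are retried instead of failing the rollout on the first attempt. A minimal standalone sketch of that retry shape follows; scaleFunc, scaleWithRetry, and the values in main are hypothetical stand-ins, not types from this repository.

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// scaleFunc stands in for a call like s.scaler.Scale(...); it is a
// hypothetical placeholder, not a type from this PR.
type scaleFunc func() error

// scaleWithRetry polls until the scale call succeeds or 30 seconds elapse,
// mirroring the shape of scaleAndWait in recreate.go above.
func scaleWithRetry(scale scaleFunc) error {
	var scaleErr error
	err := wait.PollImmediate(1*time.Second, 30*time.Second, func() (bool, error) {
		scaleErr = scale()
		if scaleErr == nil {
			return true, nil
		}
		// Swallow transient errors (e.g. "not yet ready to handle request")
		// so the next poll attempt can retry them.
		return false, nil
	})
	if err == wait.ErrWaitTimeout {
		// Report the last scale error rather than the generic timeout.
		return scaleErr
	}
	return err
}

func main() {
	attempts := 0
	err := scaleWithRetry(func() error {
		attempts++
		if attempts < 3 {
			return fmt.Errorf("not yet ready to handle request")
		}
		return nil
	})
	fmt.Printf("scaled after %d attempts, err=%v\n", attempts, err)
}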
3 changes: 2 additions & 1 deletion pkg/apps/strategy/rolling/rolling.go
@@ -93,6 +93,7 @@ func NewRollingDeploymentStrategy(namespace string, client kclientset.Interface,
if errOut == nil {
errOut = ioutil.Discard
}

return &RollingDeploymentStrategy{
out: out,
errOut: errOut,
@@ -105,7 +106,7 @@ func NewRollingDeploymentStrategy(namespace string, client kclientset.Interface,
apiRetryPeriod: defaultApiRetryPeriod,
apiRetryTimeout: defaultApiRetryTimeout,
rollingUpdate: func(config *kubectl.RollingUpdaterConfig) error {
- updater := kubectl.NewRollingUpdater(namespace, client.Core(), client.Core())
+ updater := kubectl.NewRollingUpdater(namespace, client.Core(), client.Core(), appsutil.NewReplicationControllerV1ScaleClient(client))
return updater.Update(config)
},
hookExecutor: stratsupport.NewHookExecutor(client.Core(), tags, client.Core(), os.Stdout, decoder),
10 changes: 5 additions & 5 deletions pkg/apps/util/test/support.go
@@ -17,12 +17,12 @@ type ScaleEvent struct {
Size uint
}

- func (t *FakeScaler) Scale(namespace, name string, newSize uint, preconditions *kubectl.ScalePrecondition, retry, wait *kubectl.RetryParams) error {
+ func (t *FakeScaler) Scale(namespace, name string, newSize uint, preconditions *kubectl.ScalePrecondition, retry, wait *kubectl.RetryParams, resource schema.GroupResource) error {
t.Events = append(t.Events, ScaleEvent{name, newSize})
return nil
}

- func (t *FakeScaler) ScaleSimple(namespace, name string, preconditions *kubectl.ScalePrecondition, newSize uint) (string, error) {
+ func (t *FakeScaler) ScaleSimple(namespace, name string, preconditions *kubectl.ScalePrecondition, newSize uint, resource schema.GroupResource) (string, error) {
return "", fmt.Errorf("unexpected call to ScaleSimple")
}

@@ -31,17 +31,17 @@ type FakeLaggedScaler struct {
RetryCount int
}

- func (t *FakeLaggedScaler) Scale(namespace, name string, newSize uint, preconditions *kubectl.ScalePrecondition, retry, wait *kubectl.RetryParams) error {
+ func (t *FakeLaggedScaler) Scale(namespace, name string, newSize uint, preconditions *kubectl.ScalePrecondition, retry, wait *kubectl.RetryParams, resource schema.GroupResource) error {
if t.RetryCount != 2 {
t.RetryCount += 1
// This is faking a real error from the
// "k8s.io/apiserver/pkg/admission/plugin/namespace/lifecycle" package.
- return errors.NewForbidden(schema.GroupResource{Resource: "ReplicationController"}, name, fmt.Errorf("%s: not yet ready to handle request", name))
+ return errors.NewForbidden(resource, name, fmt.Errorf("%s: not yet ready to handle request", name))
}
t.Events = append(t.Events, ScaleEvent{name, newSize})
return nil
}

- func (t *FakeLaggedScaler) ScaleSimple(namespace, name string, preconditions *kubectl.ScalePrecondition, newSize uint) (string, error) {
+ func (t *FakeLaggedScaler) ScaleSimple(namespace, name string, preconditions *kubectl.ScalePrecondition, newSize uint, resource schema.GroupResource) (string, error) {
return "", nil
}
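The notable change in these fakes is the extra resource argument: the simulated Forbidden error is now built for whichever GroupResource the caller passes in, instead of a hard-coded ReplicationController kind. A small standalone sketch of that idea (fakeScale and the names in main are illustrative, not code from this repository):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// fakeScale mimics the lagging behaviour above: while not ready, it returns
// a Forbidden error scoped to whatever resource the caller is scaling.
func fakeScale(resource schema.GroupResource, name string, ready bool) error {
	if !ready {
		return errors.NewForbidden(resource, name, fmt.Errorf("%s: not yet ready to handle request", name))
	}
	return nil
}

func main() {
	rc := schema.GroupResource{Resource: "replicationcontrollers"}
	err := fakeScale(rc, "frontend-1", false)
	fmt.Println(errors.IsForbidden(err), err) // true, plus the forbidden message
	fmt.Println(fakeScale(rc, "frontend-1", true))
}

Callers can still detect the condition with errors.IsForbidden regardless of which resource was being scaled.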
11 changes: 5 additions & 6 deletions pkg/apps/util/util.go
@@ -37,12 +37,11 @@ import (
)

func NewReplicationControllerV1Scaler(client kclientset.Interface) kubectl.Scaler {
- return kubectl.ScalerFor(
- kapi.Kind("ReplicationController"),
- nil,
- scaleclient.New(client.Core().RESTClient(), rcv1mapper{}, dynamic.LegacyAPIPathResolverFunc, rcv1mapper{}),
- kapi.Resource("replicationcontrollers"),
- )
+ return kubectl.NewScaler(NewReplicationControllerV1ScaleClient(client))
}

+ func NewReplicationControllerV1ScaleClient(client kclientset.Interface) scaleclient.ScalesGetter {
+ return scaleclient.New(client.Core().RESTClient(), rcv1mapper{}, dynamic.LegacyAPIPathResolverFunc, rcv1mapper{})
+ }

// rcv1mapper pins preferred version to v1 and scale kind to autoscaling/v1 Scale
2 changes: 1 addition & 1 deletion pkg/cmd/infra/deployer/deployer.go
@@ -238,7 +238,7 @@ func (d *Deployer) Deploy(namespace, rcName string) error {
}
// Scale the deployment down to zero.
retryWaitParams := kubectl.NewRetryParams(1*time.Second, 120*time.Second)
- if err := d.scaler.Scale(candidate.Namespace, candidate.Name, uint(0), &kubectl.ScalePrecondition{Size: -1, ResourceVersion: ""}, retryWaitParams, retryWaitParams); err != nil {
+ if err := d.scaler.Scale(candidate.Namespace, candidate.Name, uint(0), &kubectl.ScalePrecondition{Size: -1, ResourceVersion: ""}, retryWaitParams, retryWaitParams, kapi.Resource("replicationcontrollers")); err != nil {
fmt.Fprintf(d.errOut, "error: Couldn't scale down prior deployment %s: %v\n", appsutil.LabelForDeployment(candidate), err)
} else {
fmt.Fprintf(d.out, "--> Scaled older deployment %s down\n", candidate.Name)
@@ -8,6 +8,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"

appsutil "github.com/openshift/origin/pkg/apps/util"
"github.com/openshift/origin/pkg/cmd/server/bootstrappolicy"
newproject "github.com/openshift/origin/pkg/oc/admin/project"
appscmd "github.com/openshift/origin/pkg/oc/cli/deploymentconfigs"
@@ -82,7 +83,7 @@ func (d *AppCreate) cleanupApp() {
d.out.Debug("DCluAC043", fmt.Sprintf("%s: Deleting components of app '%s' if present.", now(), d.appName))

// reap the DC's deployments first
- if err := appscmd.NewDeploymentConfigReaper(d.AppsClient, d.KubeClient).Stop(d.project, d.appName, time.Duration(1)*time.Second, nil); err != nil {
+ if err := appscmd.NewDeploymentConfigReaper(d.AppsClient, d.KubeClient, appsutil.NewReplicationControllerV1ScaleClient(d.KubeClient)).Stop(d.project, d.appName, time.Duration(1)*time.Second, nil); err != nil {
errs = append(errs, err)
}

27 changes: 18 additions & 9 deletions pkg/oc/cli/deploymentconfigs/delete.go
@@ -8,6 +8,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
scaleclient "k8s.io/client-go/scale"
kapi "k8s.io/kubernetes/pkg/apis/core"
kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/kubectl"
@@ -18,14 +19,15 @@ import (
)

// NewDeploymentConfigReaper returns a new reaper for deploymentConfigs
- func NewDeploymentConfigReaper(appsClient appsclient.Interface, kc kclientset.Interface) kubectl.Reaper {
- return &DeploymentConfigReaper{appsClient: appsClient, kc: kc, pollInterval: kubectl.Interval, timeout: kubectl.Timeout}
+ func NewDeploymentConfigReaper(appsClient appsclient.Interface, kc kclientset.Interface, scaleClient scaleclient.ScalesGetter) kubectl.Reaper {
Contributor:
@mfojtik can we finally drop the reapers? We've had GC since 3.6. We should do that upstream as well.

Contributor:
We can't entirely drop the reapers, but I need to start doing this upstream. So upstream gets rid of them first, and then we'll do it during or after the next rebase. That reminds me I need to sync with @deads2k (be prepared, my list of topics is growing 😉).

Contributor:
> We can't entirely drop the reapers

@soltysh I am interested in those reasons.

> but I need to start doing this upstream.

+1

soltysh (Apr 10, 2018):
> @soltysh I am interested in those reasons

Reapers, aside from removing a resource, also scale it down (e.g. deployments). That bit of functionality cannot be removed, and I doubt the GC handles that.

tnozicka (Apr 10, 2018):
@soltysh isn't the resource marked as deleted so it can't create new subresources? No need for scale-down...

Contributor:
That's something I'm not 100% sure of, and as usual I'd prefer to double-check it.

Contributor:
Controllers are not allowed to operate on resources with deletionTimestamp != nil, to avoid racing with GC.

Contributor:
Yup.

Contributor:
I am quite positive, but double-checking doesn't hurt :)

+ return &DeploymentConfigReaper{appsClient: appsClient, kc: kc, scaleClient: scaleClient, pollInterval: kubectl.Interval, timeout: kubectl.Timeout}
}

// DeploymentConfigReaper implements the Reaper interface for deploymentConfigs
type DeploymentConfigReaper struct {
appsClient appsclient.Interface
kc kclientset.Interface
+ scaleClient scaleclient.ScalesGetter
pollInterval, timeout time.Duration
}

@@ -85,10 +87,6 @@ func (reaper *DeploymentConfigReaper) Stop(namespace, name string, timeout time.
if err != nil {
return err
}
- rcReaper, err := kubectl.ReaperFor(kapi.Kind("ReplicationController"), reaper.kc)
- if err != nil {
- return err
- }

// If there is neither a config nor any deployments, nor any deployer pods, we can return NotFound.
deployments := rcList.Items
@@ -98,9 +96,20 @@ func (reaper *DeploymentConfigReaper) Stop(namespace, name string, timeout time.
}

for _, rc := range deployments {
- if err = rcReaper.Stop(rc.Namespace, rc.Name, timeout, gracePeriod); err != nil {
- // Better not error out here...
- glog.Infof("Cannot delete ReplicationController %s/%s for deployment config %s/%s: %v", rc.Namespace, rc.Name, namespace, name, err)
+ // this is unnecessary since the ownership is present
+ if reaper.scaleClient != nil {
+ rcReaper, err := kubectl.ReaperFor(kapi.Kind("ReplicationController"), reaper.kc, reaper.scaleClient)
+ if err != nil {
+ return err
+ }
+ if err = rcReaper.Stop(rc.Namespace, rc.Name, timeout, gracePeriod); err != nil {
+ // Better not error out here...
+ glog.Infof("Cannot delete ReplicationController %s/%s for deployment config %s/%s: %v", rc.Namespace, rc.Name, namespace, name, err)
+ }
+ } else {
+ if err := reaper.kc.Core().ReplicationControllers(rc.Namespace).Delete(rc.Name, nil); err != nil {
+ glog.Infof("Cannot delete ReplicationController %s/%s for deployment config %s/%s: %v", rc.Namespace, rc.Name, namespace, name, err)
+ }
}

// Only remove deployer pods when the deployment was failed. For completed
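As a side note to the review thread above: the convention being referenced (controllers must not act on objects whose deletionTimestamp is already set, so they never race with the garbage collector) can be shown with a tiny, self-contained sketch. This is illustrative only and not part of this PR; syncRC and the object names are made up.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// syncRC is a toy reconcile function: it bails out as soon as it sees a
// deletionTimestamp, leaving teardown to the garbage collector (or a reaper).
func syncRC(meta *metav1.ObjectMeta) {
	if meta.DeletionTimestamp != nil {
		fmt.Printf("%s/%s is being deleted; skipping reconcile\n", meta.Namespace, meta.Name)
		return
	}
	fmt.Printf("reconciling %s/%s\n", meta.Namespace, meta.Name)
}

func main() {
	now := metav1.Now()
	syncRC(&metav1.ObjectMeta{Namespace: "demo", Name: "frontend-1"})
	syncRC(&metav1.ObjectMeta{Namespace: "demo", Name: "frontend-1", DeletionTimestamp: &now})
}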