Skip to content

Commit

Permalink
Rename deletePolicy to remoteDeletePolicy and fix code and logic
Browse files Browse the repository at this point in the history
  • Loading branch information
AMecea committed Mar 18, 2019
1 parent 80194c8 commit 63f2138
Show file tree
Hide file tree
Showing 13 changed files with 187 additions and 75 deletions.
4 changes: 2 additions & 2 deletions .drone.yml
Original file line number Diff line number Diff line change
Expand Up @@ -153,7 +153,7 @@ pipeline:
#

start-kubernetes:
image: quay.io/presslabs/kluster-toolbox
image: quay.io/presslabs/bfc
group: publish
secrets:
- GOOGLE_CREDENTIALS
Expand Down Expand Up @@ -199,7 +199,7 @@ pipeline:
event: push

stop-kubernetes:
image: quay.io/presslabs/kluster-toolbox
image: quay.io/presslabs/bfc
secrets:
- GOOGLE_CREDENTIALS
environment:
Expand Down
6 changes: 3 additions & 3 deletions config/crds/mysql_v1alpha1_mysqlbackup.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -45,9 +45,9 @@ spec:
clusterName:
description: ClusterName represents the cluster for which to take backup
type: string
deletePolicy:
description: DeletePolicy the deletion policy that specify how to treat
the data from remote storage. By default it's used softDelete.
remoteDeletePolicy:
description: RemoteDeletePolicy is the deletion policy that specifies how
to treat the data from remote storage. By default, softDelete is used.
type: string
required:
- clusterName
Expand Down
3 changes: 3 additions & 0 deletions examples/example-backup.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -15,3 +15,6 @@ spec:
## specify a secret where to find credentials to access the
## bucket
# backupSecretName: backup-secret

## specify the remote deletion policy. It can be one of ["retain", "delete"]
# remoteDeletePolicy: retain
4 changes: 4 additions & 0 deletions examples/example-cluster-secret.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -6,3 +6,7 @@ type: Opaque
data:
# root password is required to be specified
ROOT_PASSWORD: bXlwYXNz
## application credentials that will be created at cluster bootstrap
# DATABASE:
# USER:
# PASSWORD:
4 changes: 2 additions & 2 deletions pkg/apis/mysql/v1alpha1/mysqlbackup_defaults.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ package v1alpha1
// SetDefaults_MysqlBackup sets the defaults for a mysqlbackup object
// nolint: golint
func SetDefaults_MysqlBackup(b *MysqlBackup) {
if len(b.Spec.DeletePolicy) == 0 {
b.Spec.DeletePolicy = SoftDelete
if len(b.Spec.RemoteDeletePolicy) == 0 {
b.Spec.RemoteDeletePolicy = Retain
}
}
15 changes: 8 additions & 7 deletions pkg/apis/mysql/v1alpha1/mysqlbackup_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -46,10 +46,10 @@ type MysqlBackupSpec struct {
// +optional
BackupSecretName string `json:"backupSecretName,omitempty"`

// DeletePolicy the deletion policy that specify how to treat the data from remote storage. By
// RemoteDeletePolicy is the deletion policy that specifies how to treat the data from remote storage. By
// default it's used softDelete.
// +optional
DeletePolicy DeletePolicy `json:"deletePolicy,omitempty"`
RemoteDeletePolicy DeletePolicy `json:"remoteDeletePolicy,omitempty"`
}

// BackupCondition defines condition struct for backup resource
Expand Down Expand Up @@ -81,11 +81,12 @@ const (
type DeletePolicy string

const (
// HardDelete when used it will delete the backup from remote storage then will remove the
// MysqlBackup resource from Kubernetes.
HardDelete DeletePolicy = "hardDelete"
// SoftDelete when used it will delete only the MysqlBackup resource from Kubernetes.
SoftDelete DeletePolicy = "softDelete"
// Delete when used it will try to delete the backup from remote storage then will remove the
// MysqlBackup resource from Kubernetes. The remote deletion is not guaranteed to succeed.
Delete DeletePolicy = "delete"
// Retain when used it will delete only the MysqlBackup resource from Kubernetes and will keep the backup
// on remote storage.
Retain DeletePolicy = "retain"
)

// MysqlBackupStatus defines the observed state of MysqlBackup
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,34 +17,46 @@ limitations under the License.
package syncer

import (
"fmt"
"strings"

"github.com/imdario/mergo"
"github.com/presslabs/controller-util/mergo/transformers"
"github.com/presslabs/controller-util/syncer"
"github.com/presslabs/mysql-operator/pkg/internal/mysqlbackup"
batch "k8s.io/api/batch/v1"
core "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"

api "github.com/presslabs/mysql-operator/pkg/apis/mysql/v1alpha1"
"github.com/presslabs/mysql-operator/pkg/internal/mysqlbackup"
"github.com/presslabs/mysql-operator/pkg/internal/mysqlcluster"
"github.com/presslabs/mysql-operator/pkg/options"
)

const (
// RemoteStorageFinalizer is the finalizer name used when hardDelete policy is used
RemoteStorageFinalizer = "backups.mysql.presslabs.org/remote-storage"
RemoteStorageFinalizer = "backups.mysql.presslabs.org/remote-storage-cleanup"

// RemoteDeletionFailedEvent is the event that is set on the cluster when the cleanup job fails
RemoteDeletionFailedEvent = "RemoteDeletionFailed"
)

type deletionJobSyncer struct {
backup *mysqlbackup.MysqlBackup
opt *options.Options
backup *mysqlbackup.MysqlBackup
cluster *mysqlcluster.MysqlCluster
opt *options.Options
schema *runtime.Scheme
recorder record.EventRecorder
}

// NewRemoteJobSyncer returns a job syncer for hard deletion job. The job which removes the backup
// NewDeleteJobSyncer returns a job syncer for hard deletion job. The job which removes the backup
// from remote storage.
func NewRemoteJobSyncer(c client.Client, s *runtime.Scheme,
backup *mysqlbackup.MysqlBackup, opt *options.Options) syncer.Interface {
func NewDeleteJobSyncer(c client.Client, s *runtime.Scheme, backup *mysqlbackup.MysqlBackup,
cluster *mysqlcluster.MysqlCluster, opt *options.Options, r record.EventRecorder) syncer.Interface {

job := &batch.Job{
ObjectMeta: metav1.ObjectMeta{
Expand All @@ -54,17 +66,21 @@ func NewRemoteJobSyncer(c client.Client, s *runtime.Scheme,
}

jobSyncer := deletionJobSyncer{
backup: backup,
opt: opt,
cluster: cluster,
backup: backup,
opt: opt,
schema: s,
recorder: r,
}

return syncer.NewObjectSyncer("Backup", backup.Unwrap(), job, c, s, jobSyncer.SyncFn)
return syncer.NewObjectSyncer("BackupCleaner", nil, job, c, s, jobSyncer.SyncFn)
}

// nolint: gocyclo
func (s *deletionJobSyncer) SyncFn(in runtime.Object) error {
out := in.(*batch.Job)

if s.backup.Spec.DeletePolicy == api.SoftDelete {
if s.backup.Spec.RemoteDeletePolicy == api.Retain {
// do nothing
return syncer.ErrIgnore
}
Expand All @@ -83,6 +99,10 @@ func (s *deletionJobSyncer) SyncFn(in runtime.Object) error {
return syncer.ErrIgnore
}

if len(s.backup.Spec.BackupURL) == 0 {
return fmt.Errorf("empty .spec.backupURL")
}

// check if the job is created and if not create it
if out.ObjectMeta.CreationTimestamp.IsZero() {
out.Labels = map[string]string{
Expand All @@ -95,19 +115,32 @@ func (s *deletionJobSyncer) SyncFn(in runtime.Object) error {
if err != nil {
return err
}

// explicitly set the owner reference on the job because the owner already has deletionTimestamp set at this point, and
// the syncer will not set it
err = controllerutil.SetControllerReference(s.backup.Unwrap(), out, s.schema)
if err != nil {
return err
}
}

completed, failed := getJobStatus(out)
if completed && !failed {
if completed {
removeFinalizer(s.backup.Unwrap(), RemoteStorageFinalizer)
}

// announce the cluster if deletion from remote storage failed
if failed {
s.recordWEventOnCluster(RemoteDeletionFailedEvent, "job failed")
}

return nil
}

func (s *deletionJobSyncer) ensurePodSpec() core.PodSpec {
return core.PodSpec{
Containers: s.ensureContainers(),
RestartPolicy: core.RestartPolicyNever,
Containers: s.ensureContainers(),
ImagePullSecrets: []core.LocalObjectReference{
{Name: s.opt.ImagePullSecretName},
},
Expand All @@ -122,12 +155,29 @@ func (s *deletionJobSyncer) ensureContainers() []core.Container {
ImagePullPolicy: s.opt.ImagePullPolicy,
Args: []string{
"rclone", "--config=/etc/rclone.conf", "delete",
s.backup.Status.BackupURI,
bucketForRclone(s.backup.Spec.BackupURL),
},
EnvFrom: []core.EnvFromSource{
{
SecretRef: &core.SecretEnvSource{
LocalObjectReference: core.LocalObjectReference{
Name: s.backup.Spec.BackupSecretName,
},
},
},
},
},
}
}

func (s *deletionJobSyncer) recordWEventOnCluster(reason, msg string) {
s.recorder.Eventf(s.cluster, "Warning", reason, msg)
}

func bucketForRclone(name string) string {
return strings.Replace(name, "://", ":", 1)
}

func getJobStatus(job *batch.Job) (bool, bool) {
completed := false
if completCond := jobCondition(batch.JobComplete, job); completCond != nil {
Expand Down
35 changes: 26 additions & 9 deletions pkg/controller/mysqlbackup/internal/syncer/deletionjob_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,8 @@ import (
batch "k8s.io/api/batch/v1"
core "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/tools/record"

api "github.com/presslabs/mysql-operator/pkg/apis/mysql/v1alpha1"
"github.com/presslabs/mysql-operator/pkg/internal/mysqlbackup"
Expand All @@ -36,14 +38,16 @@ import (

var _ = Describe("MysqlBackup remove job syncer", func() {
var (
cluster *mysqlcluster.MysqlCluster
backup *mysqlbackup.MysqlBackup
syncer *deletionJobSyncer
cluster *mysqlcluster.MysqlCluster
backup *mysqlbackup.MysqlBackup
syncer *deletionJobSyncer
recorder *record.FakeRecorder
)

BeforeEach(func() {
clusterName := fmt.Sprintf("cluster-%d", rand.Int31())
name := fmt.Sprintf("backup-%d", rand.Int31())
recorder = record.NewFakeRecorder(100)
ns := "default"

two := int32(2)
Expand All @@ -64,18 +68,21 @@ var _ = Describe("MysqlBackup remove job syncer", func() {
})

syncer = &deletionJobSyncer{
backup: backup,
opt: options.GetOptions(),
cluster: cluster,
backup: backup,
opt: options.GetOptions(),
recorder: recorder,
schema: scheme.Scheme,
}
})

It("should skip job creation when no needed", func() {
delJob := &batch.Job{}
backup.Spec.DeletePolicy = api.SoftDelete
backup.Spec.RemoteDeletePolicy = api.Retain
// skip job creation because backup remote delete policy is set to retain
Expect(syncer.SyncFn(delJob)).To(Equal(syncerpkg.ErrIgnore))

backup.Spec.DeletePolicy = api.HardDelete
backup.Spec.RemoteDeletePolicy = api.Delete
// skip job creation because backup is not deleted
Expect(syncer.SyncFn(delJob)).To(Equal(syncerpkg.ErrIgnore))
Expect(backup.Finalizers).To(ContainElement(RemoteStorageFinalizer))
Expand All @@ -88,9 +95,9 @@ var _ = Describe("MysqlBackup remove job syncer", func() {
Expect(backup.Finalizers).ToNot(ContainElement(RemoteStorageFinalizer))
})

It("should create the job", func() {
It("should create the job and update backup finalizer", func() {
delJob := &batch.Job{}
backup.Spec.DeletePolicy = api.HardDelete
backup.Spec.RemoteDeletePolicy = api.Delete
deletionTime := metav1.NewTime(time.Now())
backup.DeletionTimestamp = &deletionTime
Expect(syncer.SyncFn(delJob)).To(Succeed())
Expand All @@ -110,6 +117,16 @@ var _ = Describe("MysqlBackup remove job syncer", func() {
},
}
Expect(syncer.SyncFn(delJob)).To(Succeed())
Expect(backup.Finalizers).ToNot(ContainElement(RemoteStorageFinalizer))
Expect(recorder.Events).To(Receive(ContainSubstring(RemoteDeletionFailedEvent)))

delJob.Status.Conditions = []batch.JobCondition{
batch.JobCondition{
Type: batch.JobComplete,
Status: core.ConditionFalse,
},
}
Expect(syncer.SyncFn(delJob)).To(Succeed())
Expect(backup.Finalizers).To(ContainElement(RemoteStorageFinalizer))

delJob.Status.Conditions = []batch.JobCondition{
Expand Down
18 changes: 8 additions & 10 deletions pkg/controller/mysqlbackup/internal/syncer/job.go
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,12 @@ func NewJobSyncer(c client.Client, s *runtime.Scheme, backup *mysqlbackup.MysqlB
func (s *jobSyncer) SyncFn(in runtime.Object) error {
out := in.(*batch.Job)

if s.backup.Status.Completed {
log.V(1).Info("backup already completed", "name", s.backup.Name)
// skip doing anything
return syncer.ErrIgnore
}

if len(s.backup.GetBackupURL(s.cluster)) == 0 {
log.Info("can't get bucketURI", "cluster", s.cluster, "backup", s.backup)
return fmt.Errorf("can't get bucketURI")
Expand All @@ -82,14 +88,6 @@ func (s *jobSyncer) SyncFn(in runtime.Object) error {
return nil
}

func (s *jobSyncer) getBackupSecretName() string {
if len(s.backup.Spec.BackupSecretName) > 0 {
return s.backup.Spec.BackupSecretName
}

return s.cluster.Spec.BackupSecretName
}

// getBackupCandidate returns the hostname of the first not-lagged and
// replicating slave node, else returns the master node.
func (s *jobSyncer) getBackupCandidate() string {
Expand Down Expand Up @@ -163,12 +161,12 @@ func (s *jobSyncer) ensurePodSpec(in core.PodSpec) core.PodSpec {
},
}

if len(s.getBackupSecretName()) != 0 {
if len(s.backup.Spec.BackupSecretName) != 0 {
in.Containers[0].EnvFrom = []core.EnvFromSource{
core.EnvFromSource{
SecretRef: &core.SecretEnvSource{
LocalObjectReference: core.LocalObjectReference{
Name: s.getBackupSecretName(),
Name: s.backup.Spec.BackupSecretName,
},
},
},
Expand Down
Loading

0 comments on commit 63f2138

Please sign in to comment.