Add support for OpenShift DeploymentConfig (#714)

Fix: #161

hossainemruz authored and tamalsaha committed Apr 2, 2019
1 parent 8c2b81b commit 47cf2dd
Showing 52 changed files with 1,802 additions and 2,184 deletions.
8 changes: 1 addition & 7 deletions apis/util.go
@@ -4,13 +4,6 @@ var (
EnableStatusSubresource bool
)

-const (
-ModificationTypeInitContainerInjection = "InitContainerInjection"
-ModificationTypeInitContainerDeletion = "InitContainerDeletion"
-ModificationTypeSidecarInjection = "SidecarInjection"
-ModificationTypeSidecarDeletion = "SidecarDeletion"
-)

const (
StashKey = "stash.appscode.com"
VersionTag = StashKey + "/tag"
@@ -24,4 +17,5 @@ const (
KindDaemonSet = "DaemonSet"
KindPersistentVolumeClaim = "PersistentVolumeClaim"
KindAppBinding = "AppBinding"
+KindDeploymentConfig = "DeploymentConfig"
)
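For orientation, the new KindDeploymentConfig constant is consumed by the target-kind switches later in this commit (see pkg/backup/backupsession.go below). A condensed, hypothetical sketch of that dispatch pattern (needsLeaderElection is not part of the commit, only an illustration):

// needsLeaderElection sketches how the workload-kind constants are used:
// replica-style workloads (now including OpenShift DeploymentConfig) share one
// backup target, so only an elected leader pod runs the BackupSession watcher.
func needsLeaderElection(kind string) bool {
	switch kind {
	case KindDeployment, KindReplicaSet, KindReplicationController, KindDeploymentConfig:
		return true
	default:
		// DaemonSet and StatefulSet pods back up their own data,
		// so every pod runs its own watcher.
		return false
	}
}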
23 changes: 23 additions & 0 deletions docs/examples/workloads/deploymentconfig/backupconfiguration.yaml
@@ -0,0 +1,23 @@
apiVersion: stash.appscode.com/v1beta1
kind: BackupConfiguration
metadata:
  name: deploymentconfig-backup
  namespace: demo
spec:
  repository:
    name: local-repo
  schedule: "* * * * *"
  target:
    ref:
      apiVersion: apps.openshift.io/v1
      kind: DeploymentConfig
      name: stash-demo
    volumeMounts:
    - name: source-data
      mountPath: /source/data
    directories:
    - /source/data
  retentionPolicy:
    name: 'keep-last-5'
    keepLast: 5
    prune: true
45 changes: 45 additions & 0 deletions docs/examples/workloads/deploymentconfig/deploymentconfig.yaml
@@ -0,0 +1,45 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: stash-sample-data
  namespace: demo
data:
  file1.txt: "This is test file 1"
  file2.txt: "This is file 2"
---
apiVersion: apps.openshift.io/v1
kind: DeploymentConfig
metadata:
  labels:
    app: stash-demo
  name: stash-demo
  namespace: demo
spec:
  replicas: 3
  selector:
    app: stash-demo
  template:
    metadata:
      labels:
        app: stash-demo
      name: busybox
    spec:
      containers:
      - args:
        - sleep
        - "3600"
        image: busybox
        imagePullPolicy: IfNotPresent
        name: busybox
        volumeMounts:
        - mountPath: /source/data
          name: source-data
      restartPolicy: Always
      volumes:
      - name: source-data
        configMap:
          name: stash-sample-data
  triggers:
  - type: "ConfigChange"
  strategy:
    type: Rolling
49 changes: 49 additions & 0 deletions
@@ -0,0 +1,49 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: demo-pvc
  namespace: demo
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi

---
apiVersion: apps.openshift.io/v1
kind: DeploymentConfig
metadata:
  labels:
    app: stash-demo
  name: stash-recovered
  namespace: demo
spec:
  replicas: 3
  selector:
    app: stash-demo
  template:
    metadata:
      labels:
        app: stash-demo
      name: busybox
    spec:
      containers:
      - args:
        - sleep
        - "3600"
        image: busybox
        imagePullPolicy: IfNotPresent
        name: busybox
        volumeMounts:
        - mountPath: /source/data
          name: source-data
      restartPolicy: Always
      volumes:
      - name: source-data
        persistentVolumeClaim:
          claimName: demo-pvc
  triggers:
  - type: "ConfigChange"
  strategy:
    type: Rolling
19 changes: 19 additions & 0 deletions docs/examples/workloads/deploymentconfig/restoresession.yaml
@@ -0,0 +1,19 @@
apiVersion: stash.appscode.com/v1beta1
kind: RestoreSession
metadata:
  name: deploymentconfig-restore
  namespace: demo
spec:
  repository:
    name: local-repo
  rules:
  - paths:
    - /source/data
  target: # target indicates where the recovered data will be stored
    ref:
      apiVersion: apps.openshift.io/v1
      kind: DeploymentConfig
      name: stash-recovered
    volumeMounts:
    - name: source-data
      mountPath: /source/data
16 changes: 14 additions & 2 deletions docs/examples/workloads/local_repository.yaml
@@ -7,6 +7,18 @@ data:
  RESTIC_PASSWORD: bm90QHNlY3JldA==
type: Opaque
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: repo-pvc
  namespace: demo
spec:
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
---
apiVersion: stash.appscode.com/v1alpha1
kind: Repository
metadata:
@@ -16,6 +28,6 @@ spec:
  backend:
    local:
      mountPath: /safe/data
-      hostPath:
-        path: /data/stash-test/restic-repo
+      persistentVolumeClaim:
+        claimName: repo-pvc
    storageSecretName: local-secret
19 changes: 14 additions & 5 deletions glide.lock

(Generated file; diff not rendered.)

2 changes: 2 additions & 0 deletions glide.yaml
@@ -102,3 +102,5 @@ import:
  version: master
- package: kmodules.xyz/custom-resources
  version: release-10.0
- package: kmodules.xyz/openshift
  version: release-10.0
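The new kmodules.xyz/openshift dependency pulls in client machinery for the apps.openshift.io/v1 API group. As a rough, hypothetical sketch of reading a DeploymentConfig with a typed client (this uses the upstream github.com/openshift/client-go package rather than the kmodules wrapper the commit actually vendors, and the kubeconfig path is illustrative):

package main

import (
	"fmt"
	"log"

	occ "github.com/openshift/client-go/apps/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build client config from a kubeconfig file (path is illustrative).
	cfg, err := clientcmd.BuildConfigFromFlags("", "/home/user/.kube/config")
	if err != nil {
		log.Fatalln(err)
	}

	// Typed clientset for the apps.openshift.io API group.
	client, err := occ.NewForConfig(cfg)
	if err != nil {
		log.Fatalln(err)
	}

	// Fetch the DeploymentConfig used in the docs examples above.
	dc, err := client.AppsV1().DeploymentConfigs("demo").Get("stash-demo", metav1.GetOptions{})
	if err != nil {
		log.Fatalln(err)
	}
	fmt.Println(dc.Name, dc.Spec.Replicas)
}

A client along these lines is what lets the operator get, watch, and patch DeploymentConfigs, which is what the RBAC rules added in hack/deploy/rbac-list.yaml below permit.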
19 changes: 19 additions & 0 deletions hack/deploy/mutating-webhook.yaml
@@ -102,3 +102,22 @@ webhooks:
    - replicasets
  failurePolicy: Fail
  ${STASH_WEBHOOK_SIDE_EFFECTS}
- name: deploymentconfig.admission.stash.appscode.com
  clientConfig:
    service:
      namespace: default
      name: kubernetes
      path: /apis/admission.stash.appscode.com/v1alpha1/deploymentconfigs
    caBundle: ${KUBE_CA}
  rules:
  - operations:
    - CREATE
    - UPDATE
    apiGroups:
    - apps.openshift.io
    apiVersions:
    - "*"
    resources:
    - deploymentconfigs
  failurePolicy: Fail
  ${STASH_WEBHOOK_SIDE_EFFECTS}
6 changes: 6 additions & 0 deletions hack/deploy/rbac-list.yaml
@@ -95,6 +95,12 @@ rules:
  - roles
  - rolebindings
  verbs: ["get", "create", "delete", "patch"]
- apiGroups:
  - apps.openshift.io
  resources:
  - deploymentconfigs
  verbs: ["get", "list", "watch", "patch"]

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
6 changes: 6 additions & 0 deletions hack/docker/setup.sh
@@ -82,13 +82,19 @@ build_docker() {
cat >Dockerfile <<EOL
FROM alpine:3.8
# add our user and group first to make sure their IDs get assigned consistently, regardless of whatever dependencies get added
RUN addgroup -g 1005 stash \
&& adduser -u 1005 -G stash -D stash
RUN set -x \
&& apk add --update --no-cache ca-certificates
COPY restic /bin/restic
COPY restic_${NEW_RESTIC_VER} /bin/restic_${NEW_RESTIC_VER}
COPY stash /bin/stash
USER stash
ENTRYPOINT ["/bin/stash"]
EXPOSE 56789
EOL
58 changes: 27 additions & 31 deletions pkg/backup/backupsession.go
@@ -73,7 +73,7 @@ func (c *BackupSessionController) RunBackup() error {
// for Deployment, ReplicaSet, ReplicationController and DeploymentConfig, run the BackupSession watcher only in the leader pod.
// for other workloads, i.e. DaemonSet and StatefulSet, run the BackupSession watcher in all pods.
switch backupConfiguration.Spec.Target.Ref.Kind {
-case apis.KindDeployment, apis.KindReplicaSet, apis.KindReplicationController:
+case apis.KindDeployment, apis.KindReplicaSet, apis.KindReplicationController, apis.KindDeploymentConfig:
if err := c.electLeaderPod(backupConfiguration, stopCh); err != nil {
return err
}
@@ -175,7 +175,7 @@ func (c *BackupSessionController) processBackupSession(key string) error {
// locked by only one pod. So, we need a leader election to determine who will take backup first. Once backup is complete, the leader pod will
// step down from leadership so that another replica can acquire leadership and start taking backup.
switch backupConfiguration.Spec.Target.Ref.Kind {
-case apis.KindDeployment, apis.KindReplicaSet, apis.KindReplicationController:
+case apis.KindDeployment, apis.KindReplicaSet, apis.KindReplicationController, apis.KindDeploymentConfig:
return c.backup(backupSession, backupConfiguration)
default:
return c.electBackupLeader(backupSession, backupConfiguration)
@@ -275,37 +275,33 @@ func (c *BackupSessionController) electLeaderPod(backupConfiguration *api_v1beta
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

-	go func() {
-		// start the leader election code loop
-		leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
-			Lock: resLock,
-			LeaseDuration: 15 * time.Second,
-			RenewDeadline: 10 * time.Second,
-			RetryPeriod: 2 * time.Second,
-			Callbacks: leaderelection.LeaderCallbacks{
-				OnStartedLeading: func(ctx context.Context) {
-					log.Infoln("Got leadership, preparing starting BackupSession controller")
-					// this pod is now leader. run BackupSession controller.
-					err := c.runBackupSessionController(backupConfiguration, stopCh)
-					if err != nil {
-						e2 := c.HandleBackupFailure(err)
-						if e2 != nil {
-							err = errors.NewAggregate([]error{err, e2})
-						}
-						// step down from leadership so that other replicas can try to start BackupSession controller
-						cancel()
-						// fail the container so that it restart and re-try this process.
-						log.Fatalln("failed to start BackupSession controller. Reason: ", err.Error())
+	// start the leader election code loop
+	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
+		Lock: resLock,
+		LeaseDuration: 15 * time.Second,
+		RenewDeadline: 10 * time.Second,
+		RetryPeriod: 2 * time.Second,
+		Callbacks: leaderelection.LeaderCallbacks{
+			OnStartedLeading: func(ctx context.Context) {
+				log.Infoln("Got leadership, preparing starting BackupSession controller")
+				// this pod is now leader. run BackupSession controller.
+				err := c.runBackupSessionController(backupConfiguration, stopCh)
+				if err != nil {
+					e2 := c.HandleBackupFailure(err)
+					if e2 != nil {
+						err = errors.NewAggregate([]error{err, e2})
					}
-				},
-				OnStoppedLeading: func() {
-					log.Infoln("Lost leadership")
-				},
+					// step down from leadership so that other replicas can try to start BackupSession controller
+					cancel()
+					// fail the container so that it restart and re-try this process.
+					log.Fatalln("failed to start BackupSession controller. Reason: ", err.Error())
+				}
			},
-		})
-	}()
-	// wait until stop signal is sent.
-	<-stopCh
+			OnStoppedLeading: func() {
+				log.Infoln("Lost leadership")
+			},
+		},
+	})
	return nil
}
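The hunk above works because leaderelection.RunOrDie itself blocks until the context is cancelled or leadership is lost, so the old goroutine plus <-stopCh wait was redundant. A minimal, hypothetical sketch of the resulting pattern, where lock and runController stand in for the resource lock and BackupSession controller wiring in the real code:

import (
	"context"
	"log"
	"time"

	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

// runWhenLeader blocks inside RunOrDie; the leader-only task starts from the
// OnStartedLeading callback, and a fatal exit makes the pod restart and rejoin
// the election, mirroring the refactored code above.
func runWhenLeader(ctx context.Context, lock resourcelock.Interface, runController func(ctx context.Context) error) {
	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock:          lock,
		LeaseDuration: 15 * time.Second,
		RenewDeadline: 10 * time.Second,
		RetryPeriod:   2 * time.Second,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) {
				if err := runController(ctx); err != nil {
					log.Fatalln("leader task failed:", err)
				}
			},
			OnStoppedLeading: func() {
				log.Println("lost leadership")
			},
		},
	})
}

Calling log.Fatalln inside OnStartedLeading deliberately crashes the container so that Kubernetes restarts it and the pod rejoins the election, which is the retry behavior the original code relies on.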

(The remaining changed files in this commit are not shown here.)