Fix VolumeSnapshot Issues (#912)
suaas21 authored and tamalsaha committed Oct 4, 2019
1 parent 7f3b225 commit 404840e
Showing 6 changed files with 12 additions and 11 deletions.
8 changes: 4 additions & 4 deletions pkg/cmds/restore_volumesnapshot.go
@@ -85,7 +85,7 @@ func (opt *VSoption) restoreVolumeSnapshot() (*restic.RestoreOutput, error) {
return nil, fmt.Errorf("no target has been specified for RestoreSession %s/%s", restoreSession.Namespace, restoreSession.Name)
}

- pvcList := make([]core.PersistentVolumeClaim, 0)
+ var pvcList []core.PersistentVolumeClaim
// if replica field is specified, then use it. otherwise, default it to 1
replicas := int32(1)
if restoreSession.Spec.Target.Replicas != nil {
@@ -117,7 +117,7 @@ func (opt *VSoption) restoreVolumeSnapshot() (*restic.RestoreOutput, error) {
Phase: api_v1beta1.HostRestoreFailed,
Error: fmt.Sprintf("VolumeSnapshot %s/%s does not exist", pvcList[i].Namespace, pvcList[i].Spec.DataSource.Name),
})
- // continue to process next pvc
+ // continue to process next VolumeSnapshot
continue
} else {
return nil, err
@@ -126,7 +126,7 @@ func (opt *VSoption) restoreVolumeSnapshot() (*restic.RestoreOutput, error) {
}

// now, create the PVC
- _, err := opt.kubeClient.CoreV1().PersistentVolumeClaims(opt.namespace).Create(&pvcList[i])
+ pvc, err := opt.kubeClient.CoreV1().PersistentVolumeClaims(opt.namespace).Create(&pvcList[i])
if err != nil {
if kerr.IsAlreadyExists(err) {
restoreOutput.HostRestoreStats = append(restoreOutput.HostRestoreStats, api_v1beta1.HostRestoreStats{
@@ -141,7 +141,7 @@
}
}
// PVC has been created successfully. store it's definition so that we can wait for it to be initialized
- createdPVCs = append(createdPVCs, pvcList[i])
+ createdPVCs = append(createdPVCs, *pvc)
}

// now, wait for the PVCs to be initialized from respective VolumeSnapshot
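The last two hunks above go together: instead of discarding the object returned by Create, the restore path now waits on the PVC the API server actually stored, which carries server-populated fields (UID, resourceVersion, defaulted spec values) that the locally built template lacks. A minimal sketch of that pattern, assuming the pre-1.18 client-go Create signature shown in the hunk; the helper name and variables are hypothetical:

package sketch

import (
	core "k8s.io/api/core/v1"
	kerr "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/client-go/kubernetes"
)

// createAndTrack creates a PVC from a locally built template and keeps the
// object returned by the API server, so the later wait loop watches the real
// cluster object rather than the in-memory template.
func createAndTrack(kc kubernetes.Interface, ns string, tmpl core.PersistentVolumeClaim, created *[]core.PersistentVolumeClaim) error {
	pvc, err := kc.CoreV1().PersistentVolumeClaims(ns).Create(&tmpl)
	if err != nil {
		if kerr.IsAlreadyExists(err) {
			// the commit records a failed host stat here and moves on to the next PVC
			return nil
		}
		return err
	}
	*created = append(*created, *pvc) // track the server-side copy, not tmpl
	return nil
}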
2 changes: 1 addition & 1 deletion pkg/controller/backup_session.go
@@ -136,7 +136,7 @@ func (c *StashController) runBackupSessionProcessor(key string) error {

// skip if backup model is sidecar.
// for sidecar model controller inside sidecar will take care of it.
- if backupConfig.Spec.Target != nil && util.BackupModel(backupConfig.Spec.Target.Ref.Kind) == util.ModelSidecar {
+ if backupConfig.Spec.Target != nil && backupConfig.Spec.Driver != api_v1beta1.VolumeSnapshotter && util.BackupModel(backupConfig.Spec.Target.Ref.Kind) == util.ModelSidecar {
log.Infof("Skipping processing BackupSession %s/%s. Reason: Backup model is sidecar. Controller inside sidecar will take care of it.", backupSession.Namespace, backupSession.Name)
return c.setBackupSessionRunning(backupSession)
}
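The amended condition above means BackupSessions driven by the VolumeSnapshotter are no longer handed off to a workload sidecar even when the target kind would normally use the sidecar model; the controller processes them itself. A sketch of the resulting predicate, reusing the api_v1beta1 and util aliases from the hunk (the helper name is hypothetical):

// shouldLeaveToSidecar reports whether this controller should skip the
// BackupSession and let the workload sidecar handle it: only when a target is
// set, the driver is NOT the VolumeSnapshotter, and the target kind maps to
// the sidecar backup model.
func shouldLeaveToSidecar(bc *api_v1beta1.BackupConfiguration) bool {
	return bc.Spec.Target != nil &&
		bc.Spec.Driver != api_v1beta1.VolumeSnapshotter &&
		util.BackupModel(bc.Spec.Target.Ref.Kind) == util.ModelSidecar
}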
2 changes: 1 addition & 1 deletion pkg/controller/sidecar.go
@@ -326,7 +326,7 @@ func isPodOwnedByWorkload(w *wapi.Workload, pod core.Pod) bool {
}

func (c *StashController) handleSidecarInjectionFailure(ref *core.ObjectReference, err error) error {
log.Warningf("Failed to inject stash sidecar inot %s %s/%s. Reason: %v", ref.Kind, ref.Namespace, ref.Name, err)
log.Warningf("Failed to inject stash sidecar into %s %s/%s. Reason: %v", ref.Kind, ref.Namespace, ref.Name, err)

// write event to respective resource
_, err2 := eventer.CreateEvent(
5 changes: 3 additions & 2 deletions pkg/resolve/task.go
@@ -142,11 +142,12 @@ func GetPVCFromVolumeClaimTemplates(ordinal int32, claimTemplates []core.Persist
for i := range claimTemplates {
inputs := make(map[string]string)
inputs[util.KeyPodOrdinal] = strconv.Itoa(int(ordinal))
- err := ResolvePVCSpec(&claimTemplates[i], inputs)
+ claim := claimTemplates[i].DeepCopy()
+ err := ResolvePVCSpec(claim, inputs)
if err != nil {
return pvcList, err
}
- pvcList = append(pvcList, claimTemplates[i])
+ pvcList = append(pvcList, *claim)
}
return pvcList, nil
}
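The DeepCopy above matters because the claim templates are shared across pod ordinals: resolving the ordinal placeholder in place would leave the template already substituted, so every ordinal after the first would silently reuse the first ordinal's values. A standalone sketch of that aliasing hazard, with a plain string replacement standing in for ResolvePVCSpec and an illustrative placeholder name:

package main

import (
	"fmt"
	"strconv"
	"strings"

	core "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// one shared template, as in a StatefulSet's volumeClaimTemplates
	templates := []core.PersistentVolumeClaim{
		{ObjectMeta: metav1.ObjectMeta{Name: "data-${POD_ORDINAL}"}},
	}
	for ordinal := int32(0); ordinal < 2; ordinal++ {
		claim := templates[0].DeepCopy() // copy first, as the fix does
		// stand-in for ResolvePVCSpec: substitute the ordinal into the copy only
		claim.Name = strings.Replace(claim.Name, "${POD_ORDINAL}", strconv.Itoa(int(ordinal)), -1)
		fmt.Println(claim.Name) // data-0, then data-1; templates[0] keeps its placeholder
	}
}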
2 changes: 1 addition & 1 deletion pkg/restic/restic_test.go
@@ -58,7 +58,7 @@ func setupTest(tempDir string) (*ResticWrapper, error) {

setupOpt := SetupOptions{
Provider: storage.ProviderLocal,
- Bucket: localRepoDir,
+ Bucket: localRepoDir,
SecretDir: secretDir,
ScratchDir: scratchDir,
EnableCache: false,
4 changes: 2 additions & 2 deletions pkg/status/status.go
@@ -67,7 +67,7 @@ func (o UpdateStatusOptions) UpdatePostBackupStatus(backupOutput *restic.BackupO
// add or update entry for each host in BackupSession status + create event
for _, hostStats := range backupOutput.HostBackupStats {
log.Infof("Updating status of BackupSession: %s/%s for host: %s", backupSession.Namespace, backupSession.Name, hostStats.Hostname)
- _, err = stash_util_v1beta1.UpdateBackupSessionStatusForHost(o.StashClient.StashV1beta1(), backupSession, hostStats)
+ backupSession, err = stash_util_v1beta1.UpdateBackupSessionStatusForHost(o.StashClient.StashV1beta1(), backupSession, hostStats)
if err != nil {
return err
}
@@ -150,7 +150,7 @@ func (o UpdateStatusOptions) UpdatePostRestoreStatus(restoreOutput *restic.Resto
// add or update entry for each host in RestoreSession status
for _, hostStats := range restoreOutput.HostRestoreStats {
log.Infof("Updating status of RestoreSession: %s/%s for host: %s", restoreSession.Namespace, restoreSession.Name, hostStats.Hostname)
- _, err = stash_util_v1beta1.UpdateRestoreSessionStatusForHost(o.StashClient.StashV1beta1(), restoreSession, hostStats)
+ restoreSession, err = stash_util_v1beta1.UpdateRestoreSessionStatusForHost(o.StashClient.StashV1beta1(), restoreSession, hostStats)
if err != nil {
return err
}
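Both hunks in this file follow the same pattern: each per-host update returns the session object as the API server stored it, and carrying that copy into the next iteration means subsequent updates start from the current resourceVersion instead of a stale one. A compact sketch of the carry-forward pattern, using the v1beta1 BackupSession type referenced by this commit; the helper and its callback slice are hypothetical:

// applyHostUpdates applies a series of status updates, feeding each call the
// object returned by the previous one so later updates never work on a stale copy.
func applyHostUpdates(
	sess *api_v1beta1.BackupSession,
	updates []func(*api_v1beta1.BackupSession) (*api_v1beta1.BackupSession, error),
) (*api_v1beta1.BackupSession, error) {
	for _, update := range updates {
		out, err := update(sess)
		if err != nil {
			return nil, err
		}
		sess = out // keep the latest copy, as the fix does with backupSession/restoreSession
	}
	return sess, nil
}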
