Skip to content

Commit

Permalink
Include namespace resource needed by namespaced-scope resource in bac…
Browse files Browse the repository at this point in the history
…kup.

For some use cases, namespaced-scope resources are included in the backup,
but the namespaces that contain them are not, due to the filter settings.
Add a function to add those namespaces at the end of the backup.

Signed-off-by: Xun Jiang <[email protected]>
Signed-off-by: Xun Jiang <[email protected]>
  • Loading branch information
Xun Jiang authored and Xun Jiang committed Jun 1, 2023
1 parent 9467d7c commit 613dd4a
Show file tree
Hide file tree
Showing 3 changed files with 130 additions and 0 deletions.
1 change: 1 addition & 0 deletions changelogs/unreleased/6320-blackpiglet
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
Include namespaces needed by namespaced-scope resources in backup.
51 changes: 51 additions & 0 deletions pkg/backup/backup.go
Original file line number Diff line number Diff line change
Expand Up @@ -407,6 +407,12 @@ func (kb *kubernetesBackupper) BackupWithResolvers(log logrus.FieldLogger,
// no more progress updates will be sent on the 'update' channel
quit <- struct{}{}

// For some use cases, e.g. use label to filter resources in backup,
// namespace-scoped resources are included, but the namespaces that
// contain those resources are not. Add the function to include the
// namespaces if needed.
kb.backupAdditionalNamespaces(items, itemBackupper, log)

// back up CRD(this is a CRD definition of the resource, it's a CRD instance) for resource if found.
// We should only need to do this if we've backed up at least one item for the resource
// and the CRD type(this is the CRD type itself) is neither included or excluded.
Expand Down Expand Up @@ -714,3 +720,48 @@ type tarWriter interface {
Write([]byte) (int, error)
WriteHeader(*tar.Header) error
}

// backupAdditionalNamespaces backs up namespaces that contain already
// backed-up namespace-scoped resources but are not themselves present in
// the backup (e.g. when a label selector matched the resources but not
// their namespaces).
//
// items is the list of resources already backed up; itemBackupper performs
// the per-item backup. Errors are logged rather than returned: a failure to
// resolve the namespace resource or to build a client aborts the pass, but
// a failure fetching one namespace does not prevent the remaining ones from
// being attempted.
func (kb *kubernetesBackupper) backupAdditionalNamespaces(items []*kubernetesResource, itemBackupper *itemBackupper, log logrus.FieldLogger) {
	// Sets of namespace names: those already backed up as namespace items,
	// and those referenced by namespaced items. Presence is all that
	// matters, so use empty-struct sets instead of maps to allocated values.
	namespaceAlreadyIncluded := make(map[string]struct{})
	namespaceNeeded := make(map[string]struct{})
	for _, item := range items {
		if item.groupResource == kuberesource.Namespaces {
			namespaceAlreadyIncluded[item.name] = struct{}{}
		}

		if item.namespace != "" {
			namespaceNeeded[item.namespace] = struct{}{}
		}
	}

	gvr, apiResource, err := kb.discoveryHelper.ResourceFor(kuberesource.Namespaces.WithVersion(""))
	if err != nil {
		log.WithError(errors.WithStack(err)).Errorf("Error getting resolved resource for %s", kuberesource.Namespaces)
		return
	}
	log.Debugf("Got server preferred API version %s for %s", gvr.Version, kuberesource.Namespaces)

	log.Debugf("Getting dynamic client for %s", gvr.String())
	nsClient, err := kb.dynamicFactory.ClientForGroupVersionResource(gvr.GroupVersion(), apiResource, "")
	if err != nil {
		log.WithError(errors.WithStack(err)).Errorf("Error getting dynamic client for %s", kuberesource.Namespaces)
		return
	}

	for namespace := range namespaceNeeded {
		if _, ok := namespaceAlreadyIncluded[namespace]; ok {
			continue
		}

		unstructured, err := nsClient.Get(namespace, metav1.GetOptions{})
		if err != nil {
			// Log and keep going: map iteration order is random, so
			// bailing out here would skip an arbitrary subset of the
			// remaining namespaces.
			log.WithError(errors.WithStack(err)).Errorf("Error getting namespace %s", namespace)
			continue
		}

		log.Infof("Found additional namespace %s to add to backup", namespace)

		kb.backupItem(log, gvr.GroupResource(), itemBackupper, unstructured, gvr)
	}
}
78 changes: 78 additions & 0 deletions pkg/backup/backup_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -4101,3 +4101,81 @@ func TestBackupNewResourceFiltering(t *testing.T) {
})
}
}

// TestBackupAdditionalNamespaces verifies that namespaces containing
// backed-up namespace-scoped resources are added to the backup even when
// the backup's label selectors did not select the namespaces themselves.
func TestBackupAdditionalNamespaces(t *testing.T) {
	tests := []struct {
		name         string
		backup       *velerov1.Backup
		apiResources []*test.APIResource
		want         []string // expected file paths inside the backup tarball
	}{
		{
			name: "LabelSelector test",
			backup: defaultBackup().LabelSelector(&metav1.LabelSelector{MatchLabels: map[string]string{"a": "b"}}).
				Result(),
			apiResources: []*test.APIResource{
				test.Namespaces(
					builder.ForNamespace("ns-1").Result(),
					builder.ForNamespace("ns-2").Result(),
					builder.ForNamespace("ns-3").Result(),
				),
				test.Deployments(
					builder.ForDeployment("ns-1", "deploy-1").ObjectMeta(builder.WithLabels("a", "b")).Result(),
				),
			},
			// ns-1 is expected even though the selector only matches the
			// deployment; ns-2 and ns-3 hold no matching resources.
			want: []string{
				"resources/namespaces/cluster/ns-1.json",
				"resources/namespaces/v1-preferredversion/cluster/ns-1.json",
				"resources/deployments.apps/namespaces/ns-1/deploy-1.json",
				"resources/deployments.apps/v1-preferredversion/namespaces/ns-1/deploy-1.json",
			},
		},
		{
			name: "OrLabelSelector test",
			backup: defaultBackup().OrLabelSelector([]*metav1.LabelSelector{
				{MatchLabels: map[string]string{"a": "b"}},
				{MatchLabels: map[string]string{"c": "d"}},
			}).
				Result(),
			apiResources: []*test.APIResource{
				test.Namespaces(
					builder.ForNamespace("ns-1").Result(),
					builder.ForNamespace("ns-2").Result(),
					builder.ForNamespace("ns-3").Result(),
				),
				test.Deployments(
					builder.ForDeployment("ns-1", "deploy-1").ObjectMeta(builder.WithLabels("a", "b")).Result(),
					builder.ForDeployment("ns-2", "deploy-2").ObjectMeta(builder.WithLabels("c", "d")).Result(),
				),
			},
			// Both ns-1 and ns-2 are pulled in by their deployments; ns-3
			// stays out.
			want: []string{
				"resources/namespaces/cluster/ns-1.json",
				"resources/namespaces/v1-preferredversion/cluster/ns-1.json",
				"resources/namespaces/cluster/ns-2.json",
				"resources/namespaces/v1-preferredversion/cluster/ns-2.json",
				"resources/deployments.apps/namespaces/ns-1/deploy-1.json",
				"resources/deployments.apps/v1-preferredversion/namespaces/ns-1/deploy-1.json",
				"resources/deployments.apps/namespaces/ns-2/deploy-2.json",
				"resources/deployments.apps/v1-preferredversion/namespaces/ns-2/deploy-2.json",
			},
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			var (
				h          = newHarness(t)
				req        = &Request{Backup: tc.backup}
				backupFile = bytes.NewBuffer([]byte{})
			)

			for _, resource := range tc.apiResources {
				h.addItems(t, resource)
			}

			// The backup itself must succeed before inspecting its contents;
			// the original ignored this error, which could mask failures.
			if err := h.backupper.Backup(h.log, req, backupFile, nil, nil); err != nil {
				t.Fatalf("Backup failed: %v", err)
			}

			assertTarballContents(t, backupFile, append(tc.want, "metadata/version")...)
		})
	}
}

0 comments on commit 613dd4a

Please sign in to comment.