diff --git a/.goreleaser.yml b/.goreleaser.yml index 9ccf4629b8a..a0977a01790 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -25,24 +25,29 @@ builds: - amd64 - arm - arm64 + - ppc64le ignore: # don't build arm/arm64 for darwin or windows - goos: darwin goarch: arm - goos: darwin goarch: arm64 + - goos: darwin + goarch: ppc64le - goos: windows goarch: arm - goos: windows goarch: arm64 + - goos: windows + goarch: ppc64le ldflags: - -X "github.com/heptio/velero/pkg/buildinfo.Version={{ .Tag }}" -X "github.com/heptio/velero/pkg/buildinfo.GitSHA={{ .FullCommit }}" -X "github.com/heptio/velero/pkg/buildinfo.GitTreeState={{ .Env.GIT_TREE_STATE }}" -archive: - name_template: "{{ .ProjectName }}-{{ .Tag }}-{{ .Os }}-{{ .Arch }}" - wrap_in_directory: true - files: - - LICENSE - - examples/**/* +archives: + - name_template: "{{ .ProjectName }}-{{ .Tag }}-{{ .Os }}-{{ .Arch }}" + wrap_in_directory: true + files: + - LICENSE + - examples/**/* checksum: name_template: 'CHECKSUM' release: @@ -50,3 +55,4 @@ release: owner: heptio name: velero draft: true + prerelease: auto diff --git a/CHANGELOG.md b/CHANGELOG.md index b0b30b69fb8..40088d36f76 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,10 +1,11 @@ ## Current release: - * [CHANGELOG-1.0.md][10] + * [CHANGELOG-1.1.md][11] ## Development release: * [Unreleased Changes][0] ## Older releases: + * [CHANGELOG-1.0.md][10] * [CHANGELOG-0.11.md][9] * [CHANGELOG-0.10.md][8] * [CHANGELOG-0.9.md][7] @@ -16,6 +17,7 @@ * [CHANGELOG-0.3.md][1] +[11]: https://github.com/heptio/velero/blob/master/changelogs/CHANGELOG-1.1.md [10]: https://github.com/heptio/velero/blob/master/changelogs/CHANGELOG-1.0.md [9]: https://github.com/heptio/velero/blob/master/changelogs/CHANGELOG-0.11.md [8]: https://github.com/heptio/velero/blob/master/changelogs/CHANGELOG-0.10.md diff --git a/Dockerfile-fsfreeze-pause-ppc64le b/Dockerfile-fsfreeze-pause-ppc64le new file mode 100644 index 00000000000..055e12fdf40 --- /dev/null +++ b/Dockerfile-fsfreeze-pause-ppc64le @@ -0,0 +1,19 @@ +# Copyright 2019 the Velero contributors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM ubuntu:bionic + +LABEL maintainer="Steve Kriss " + +ENTRYPOINT ["/bin/bash", "-c", "while true; do sleep 10000; done"] diff --git a/Dockerfile-velero-ppc64le b/Dockerfile-velero-ppc64le new file mode 100644 index 00000000000..e3bf188510f --- /dev/null +++ b/Dockerfile-velero-ppc64le @@ -0,0 +1,32 @@ +# Copyright 2019 the Velero contributors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +FROM ubuntu:bionic + +LABEL maintainer="Steve Kriss " + +RUN apt-get update && \ + apt-get install -y --no-install-recommends ca-certificates wget && \ + wget --quiet https://oplab9.parqtec.unicamp.br/pub/ppc64el/restic/restic-0.9.4 && \ + mv restic-0.9.4 /usr/bin/restic && \ + chmod +x /usr/bin/restic && \ + apt-get remove -y wget && \ + rm -rf /var/lib/apt/lists/* + + +ADD /bin/linux/ppc64le/velero /velero + +USER nobody:nobody + +ENTRYPOINT ["/velero"] diff --git a/Dockerfile-velero-restic-restore-helper-ppc64le b/Dockerfile-velero-restic-restore-helper-ppc64le new file mode 100644 index 00000000000..dcf4804c655 --- /dev/null +++ b/Dockerfile-velero-restic-restore-helper-ppc64le @@ -0,0 +1,23 @@ +# Copyright 2019 the Velero contributors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +FROM ubuntu:bionic + +LABEL maintainer="Steve Kriss " + +ADD /bin/linux/ppc64le/velero-restic-restore-helper . + +USER nobody:nobody + +ENTRYPOINT [ "/velero-restic-restore-helper" ] diff --git a/Gopkg.lock b/Gopkg.lock index 2aefedc41b3..a7c91fa84be 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -180,6 +180,20 @@ revision = "b5d812f8a3706043e23a9cd5babf2e5423744d30" version = "v1.3.1" +[[projects]] + digest = "1:1d1cbf539d9ac35eb3148129f96be5537f1a1330cadcc7e3a83b4e72a59672a3" + name = "github.com/google/go-cmp" + packages = [ + "cmp", + "cmp/internal/diff", + "cmp/internal/flags", + "cmp/internal/function", + "cmp/internal/value", + ] + pruneopts = "NUT" + revision = "2d0692c2e9617365a95b295612ac0d4415ba4627" + version = "v0.3.1" + [[projects]] branch = "master" digest = "1:52c5834e2bebac9030c97cc0798ac11c3aa8a39f098aeb419f142533da6cd3cc" @@ -678,7 +692,7 @@ revision = "eb3733d160e74a9c7e442f435eb3bea458e1d19f" [[projects]] - digest = "1:a937ed4322409fa22924f02124fd0727c19662f73cf15406646d19bdce972df2" + digest = "1:760e08df99c3c3b53764ef7c41c03ea9d90e8594d9df42364d9209e99a0352e1" name = "k8s.io/api" packages = [ "admissionregistration/v1beta1", @@ -719,22 +733,22 @@ "storage/v1beta1", ] pruneopts = "NUT" - revision = "40a48860b5abbba9aa891b02b32da429b08d96a0" - version = "kubernetes-1.14.0" + revision = "3544db3b9e4494309507e02eced5cd9dcff47e6a" + version = "kubernetes-1.15.3" [[projects]] - digest = "1:1d6160800196e00fc394f13ca8c1c0cdc360a170c1b6a9db0f0a1f9f1c4e9342" + digest = "1:668bafb576081b9dd26c0cac69296549ad65e3e3af789a8ad1d9b9114b2f55c1" name = "k8s.io/apiextensions-apiserver" packages = [ "pkg/apis/apiextensions", "pkg/apis/apiextensions/v1beta1", ] pruneopts = "NUT" - revision = "53c4693659ed354d76121458fb819202dd1635fa" - version = "kubernetes-1.14.0" + revision = "0dbe462fe92dfa8b56cc9facf0658a17d0c70fc5" + version = "kubernetes-1.15.3" [[projects]] - digest = "1:f249ae79e492647bb0640d656ccf70fd272359a75a35ac5b9748bd19ac42c1f0" + digest = "1:b1e28ead203a2ac2c1d82980760e26bdb34dbd47428cfe76f50c81b7d761b39e" name = "k8s.io/apimachinery" packages = [ "pkg/api/equality", @@ -787,11 +801,11 @@ "third_party/forked/golang/reflect", ] pruneopts = "NUT" - revision = 
"d7deff9243b165ee192f5551710ea4285dcfd615" - version = "kubernetes-1.14.0" + revision = "f2f3a405f61d6c2cdc0d00687c1b5d90de91e9f0" + version = "kubernetes-1.15.3" [[projects]] - digest = "1:c225d3ffade76b27498a24472eeaa3e7c516f0c0bbaaacfaa2a1824e1ead7952" + digest = "1:07c45445bd6c49a1b12cef2def45a866a440c55dda0ef62e93a012bffaff22f8" name = "k8s.io/client-go" packages = [ "discovery", @@ -992,8 +1006,8 @@ "util/workqueue", ] pruneopts = "NUT" - revision = "6ee68ca5fd8355d024d02f9db0b3b667e8357a0f" - version = "kubernetes-1.14.0" + revision = "e14f31a72a77f7aa82a95eaf542d1194fb027d04" + version = "kubernetes-1.15.3" [[projects]] digest = "1:2c16dda1c44c2564a7818fbacb701323c16d77c21b969987c1bec08d3ee0b050" @@ -1012,20 +1026,21 @@ revision = "d83b052f768a50a309c692a9c271da3f3276ff88" [[projects]] - digest = "1:c2ad4e18f35cf651af430e4115e9d26bdd266e61b4076cb76d23a15078c5d58e" + digest = "1:da84676239a79340c7ef4d8587097bf07ecde0d72cd339dd6a4d3f5fedce7cd3" name = "k8s.io/kubernetes" packages = ["pkg/printers"] pruneopts = "NUT" - revision = "b7394102d6ef778017f2ca4046abbaa23b88c290" - version = "v1.14.1" + revision = "2d3c76f9091b6bec110a5e63777c332469e0cba2" + version = "v1.15.3" [[projects]] branch = "master" - digest = "1:14e8a3b53e6d8cb5f44783056b71bb2ca1ac7e333939cc97f3e50b579c920845" + digest = "1:5011e453320ffdc30c5a06516f4de57df6e1c094b2923a1c7e699ea3f4364bc9" name = "k8s.io/utils" packages = [ "buffer", "integer", + "pointer", "trace", ] pruneopts = "NUT" @@ -1090,15 +1105,16 @@ "google.golang.org/grpc/codes", "google.golang.org/grpc/status", "k8s.io/api/apps/v1", - "k8s.io/api/apps/v1beta1", "k8s.io/api/batch/v1", "k8s.io/api/core/v1", "k8s.io/api/rbac/v1", "k8s.io/api/rbac/v1beta1", + "k8s.io/api/storage/v1", "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1", "k8s.io/apimachinery/pkg/api/equality", "k8s.io/apimachinery/pkg/api/errors", "k8s.io/apimachinery/pkg/api/meta", + "k8s.io/apimachinery/pkg/api/resource", "k8s.io/apimachinery/pkg/apis/meta/v1", "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured", "k8s.io/apimachinery/pkg/labels", @@ -1127,6 +1143,7 @@ "k8s.io/client-go/kubernetes/typed/core/v1", "k8s.io/client-go/kubernetes/typed/rbac/v1", "k8s.io/client-go/kubernetes/typed/rbac/v1beta1", + "k8s.io/client-go/kubernetes/typed/storage/v1", "k8s.io/client-go/listers/core/v1", "k8s.io/client-go/plugin/pkg/client/auth/azure", "k8s.io/client-go/plugin/pkg/client/auth/gcp", diff --git a/Gopkg.toml b/Gopkg.toml index 4d4f0c0afd6..599b1f861df 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -31,25 +31,25 @@ [[constraint]] name = "k8s.io/kubernetes" - version = "~1.14" + version = "~1.15" [[constraint]] name = "k8s.io/client-go" - version = "kubernetes-1.14.0" + version = "kubernetes-1.15.3" [[constraint]] name = "k8s.io/apimachinery" - version = "kubernetes-1.14.0" + version = "kubernetes-1.15.3" [[constraint]] name = "k8s.io/api" - version = "kubernetes-1.14.0" + version = "kubernetes-1.15.3" [[constraint]] name = "k8s.io/apiextensions-apiserver" - version = "kubernetes-1.14.0" + version = "kubernetes-1.15.3" -# k8s.io/client-go kubernetes-1.14.0 uses v1.1.4 +# k8s.io/client-go kubernetes-1.15.3 uses v1.1.4 [[override]] name = "github.com/json-iterator/go" version = "=1.1.4" @@ -65,7 +65,7 @@ name = "github.com/Azure/azure-sdk-for-go" version = "~21.4.0" -# k8s.io/client-go kubernetes-1.14.0 uses v11.1.2 +# k8s.io/client-go kubernetes-1.15.3 uses v11.1.2 [[constraint]] name = "github.com/Azure/go-autorest" version = "11.1.2" diff --git a/README.md b/README.md index 
3d6d41503f1..47949a83af8 100644 --- a/README.md +++ b/README.md @@ -7,8 +7,8 @@ Velero (formerly Heptio Ark) gives you tools to back up and restore your Kubernetes cluster resources and persistent volumes. Velero lets you: * Take backups of your cluster and restore in case of loss. -* Copy cluster resources to other clusters. -* Replicate your production environment for development and testing environments. +* Migrate cluster resources to other clusters. +* Replicate your production cluster to development and testing clusters. Velero consists of: @@ -19,8 +19,8 @@ You can run Velero in clusters on a cloud provider or on-premises. For detailed ## Installation -We strongly recommend that you use an [official release][6] of Velero. The tarballs for each release contain the -`velero` command-line client. Follow the instructions under the **Install** section of [our documentation][29] to get started. +We strongly recommend that you use an [official release][6] of Velero. The tarballs for each release contain the +`velero` command-line client. Follow the [installation instructions][28] to get started. _The code and sample YAML files in the master branch of the Velero repository are under active development and are not guaranteed to be stable. Use them at your own risk!_ @@ -70,11 +70,11 @@ See [the list of releases][6] to find out about feature changes. [24]: https://groups.google.com/forum/#!forum/projectvelero [25]: https://kubernetes.slack.com/messages/velero -[26]: https://velero.io/docs/v1.0.0/zenhub +[26]: https://velero.io/docs/zenhub +[28]: https://velero.io/docs/install-overview +[29]: https://velero.io/docs/ +[30]: https://velero.io/docs/troubleshooting -[29]: https://velero.io/docs/v1.0.0/ -[30]: https://velero.io/docs/v1.0.0/troubleshooting - -[99]: https://velero.io/docs/v1.0.0/support-matrix +[99]: https://velero.io/docs/support-matrix [100]: /site/docs/master/img/velero.png diff --git a/changelogs/CHANGELOG-1.1.md b/changelogs/CHANGELOG-1.1.md new file mode 100644 index 00000000000..dc8d5676446 --- /dev/null +++ b/changelogs/CHANGELOG-1.1.md @@ -0,0 +1,100 @@ +## v1.1.0 +#### 2019-08-22 + +### Download +- https://github.com/heptio/velero/releases/tag/v1.1.0 + +### Container Image +`gcr.io/heptio-images/velero:v1.1.0` + +### Documentation +https://velero.io/docs/v1.1.0/ + +### Upgrading + +**If you are running Velero in a non-default namespace**, i.e. any namespace other than `velero`, manual intervention is required when upgrading to v1.1. See [upgrading to v1.1](https://velero.io/docs/v1.1.0/upgrade-to-1.1/) for details. + +### Highlights + +#### Improved Restic Support + +A big focus of our work this cycle was continuing to improve support for restic. To that end, we’ve fixed the following bugs: + + +- Prior to version 1.1, restic backups could be delayed or failed due to long-lived locks on the repository. Now, Velero removes stale locks from restic repositories every 5 minutes, ensuring they do not interrupt normal operations. +- Previously, the PodVolumeBackup custom resources that represented a restic backup within a cluster were not synchronized between clusters, making it unclear what restic volumes were available to restore into a new cluster. In version 1.1, these resources are synced into clusters, so they are more visible to you when you are trying to restore volumes. +- Originally, Velero would not validate the host path in which volumes were mounted on a given node. If a node did not expose the filesystem correctly, you wouldn’t know about it until a backup failed. 
Now, Velero’s restic server will validate that the directory structure is correct on startup, providing earlier feedback when it’s not. +- Velero’s restic support is intended to work on a broad range of volume types. With the general release of the [Container Storage Interface API](https://kubernetes.io/blog/2019/01/15/container-storage-interface-ga/), Velero can now use restic to back up CSI volumes. + +Along with our bug fixes, we’ve provided an easier way to move restic backups between storage providers. Different providers often have different StorageClasses, requiring user intervention to make restores complete successfully. + +To make cross-provider moves simpler, we’ve introduced a StorageClass remapping plug-in. It allows you to automatically translate one StorageClass on PersistentVolumeClaims and PersistentVolumes to another. You can read more about it in our [documentation](https://velero.io/docs/v1.1.0/restore-reference/#changing-pv-pvc-storage-classes). + +#### Quality-of-Life Improvements + +We’ve also made several other enhancements to Velero that should benefit all users. + +Users sometimes ask about recommendations for Velero’s resource allocation within their cluster. To help with this concern, we’ve added default resource requirements to the Velero Deployment and restic init containers, along with configurable requests and limits for the restic DaemonSet. All these values can be adjusted if your environment requires it. + +We’ve also taken some time to improve Velero for the future by updating the Deployment and DaemonSet to use the apps/v1 API group, which will be the [default in Kubernetes 1.16](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.16.md#action-required-3). This change means that `velero install` and the `velero plugin` commands will require Kubernetes 1.9 or later to work. Existing Velero installs will continue to work without needing changes, however. + +In order to help you better understand what resources have been backed up, we’ve added a list of resources in the `velero backup describe --details` command. This change makes it easier to inspect a backup without having to download and extract it. + +In the same vein, we’ve added the ability to put custom tags on cloud-provider snapshots. This approach should provide a better way to keep track of the resources being created in your cloud account. To add a label to a snapshot at backup time, use the `--labels` argument in the `velero backup create` command. + +Our final change for increasing visibility into your Velero installation is the `velero plugin get` command. This command will report all the plug-ins within the Velero deployment. + +Velero has previously used a restore-only flag on the server to control whether a cluster could write backups to object storage. With Velero 1.1, we’ve now moved the restore-only behavior into read-only BackupStorageLocations. This move means that the Velero server can use a BackupStorageLocation as a source to restore from, but not for backups, while still retaining the ability to back up to other configured locations. In the future, the `--restore-only` flag will be removed in favor of configuring read-only BackupStorageLocations. + +#### Community Contributions + +We appreciate all community contributions, whether they be pull requests, bug reports, feature requests, or just questions.
With this release, we wanted to draw attention to a few contributions in particular: + +For users of node-based IAM authentication systems such as kube2iam, `velero install` now supports the `--pod-annotations` argument for applying necessary annotations at install time. This support should make `velero install` more flexible for scenarios that do not use Secrets for access to their cloud buckets and volumes. You can read more about how to use this new argument in our [AWS documentation](https://velero.io/docs/v1.1.0/aws-config/#alternative-setup-permissions-using-kube2iam). Huge thanks to [Traci Kamp](https://github.com/tlkamp) for this contribution. + +Structured logging is important for any application, and Velero is no different. Starting with version 1.1, the Velero server can now output its logs in a JSON format, allowing easier parsing and ingestion. Thank you to [Donovan Carthew](https://github.com/carthewd) for this feature. + +AWS supports multiple profiles for accessing object storage, but in the past Velero only used the default. With v1.1, you can set the `profile` key on your BackupStorageLocation to specify an alternate profile. If no profile is set, the default one is used, making this change backward compatible. Thanks to [Pranav Gaikwad](https://github.com/pranavgaikwad) for this change. + +Finally, thanks to testing by [Dylan Murray](https://github.com/dymurray) and [Scott Seago](https://github.com/sseago), an issue with running Velero in non-default namespaces was found in our beta version for this release. If you’re running Velero in a namespace other than `velero`, please follow the [upgrade instructions](https://velero.io/docs/v1.1.0/upgrade-to-1.1/). + +### All Changes + * Add the prefix to BSL config map so that object stores can use it when initializing (#1767, @betta1) + * Use `VELERO_NAMESPACE` to determine what namespace Velero server is running in. For any v1.0 installations using a different namespace, the `VELERO_NAMESPACE` environment variable will need to be set to the correct namespace. (#1748, @nrb) + * support setting CPU/memory requests with unbounded limits using velero install (#1745, @prydonius) + * sort output of resource list in `velero backup describe --details` (#1741, @prydonius) + * adds the ability to define custom tags to be added to snapshots by specifying custom labels on the Backup CR with the velero backup create --labels flag (#1729, @prydonius) + * Restore restic volumes from PodVolumeBackups CRs (#1723, @carlisia) + * properly restore PVs backed up with restic and a reclaim policy of "Retain" (#1713, @skriss) + * Make `--secret-file` flag on `velero install` optional, add `--no-secret` flag for explicit confirmation (#1699, @nrb) + * Add low cpu/memory limits to the restic init container. This allows for restoration into namespaces with quotas defined. (#1677, @nrb) + * Adds configurable CPU/memory requests and limits to the restic DaemonSet generated by velero install. (#1710, @prydonius) + * remove any stale locks from restic repositories every 5m (#1708, @skriss) + * error if backup storage location's Bucket field also contains a prefix, and gracefully handle leading/trailing slashes on Bucket and Prefix fields. (#1694, @skriss) + * enhancement: allow option to choose JSON log output (#1654, @carthewd) + * Adds configurable CPU/memory requests and limits to the Velero Deployment generated by velero install. (#1678, @prydonius) + * Store restic PodVolumeBackups in obj storage & use that as source of truth like regular backups.
(#1577, @carlisia) + * Update Velero Deployment to use apps/v1 API group. `velero install` and `velero plugin add/remove` commands will now require Kubernetes 1.9+ (#1673, @nrb) + * Respect the --kubecontext and --kubeconfig arguments for `velero install`. (#1656, @nrb) + * add plugin for updating PV & PVC storage classes on restore based on a config map (#1621, @skriss) + * Add restic support for CSI volumes (#1615, @nrb) + * bug fix: Fixed namespace usage with cli command 'version' (#1630, @jwmatthews) + * enhancement: allow users to specify additional Velero/Restic pod annotations on the command line with the pod-annotations flag. (#1626, @tlkamp) + * adds validation for pod volumes hostPath mount on restic server startup (#1616, @prydonius) + * enable support for ppc64le architecture (#1605, @prajyot) + * bug fix: only restore additional items returned from restore item actions if they match the restore's namespace/resource selectors (#1612, @skriss) + * add startTimestamp and completionTimestamp to PodVolumeBackup and PodVolumeRestore status fields (#1609, @prydonius) + * bug fix: respect namespace selector when determining which restore item actions to run (#1607, @skriss) + * ensure correct backup item actions run with namespace selector (#1601, @prydonius) + * allows excluding resources from backups with the `velero.io/exclude-from-backup=true` label (#1588, @prydonius) + * ensures backup item action modifications to an item's namespace/name are saved in the file path in the tarball (#1587, @prydonius) + * Hides `velero server` and `velero restic server` commands from the list of available commands as these are not intended for use by the velero CLI user. (#1561, @prydonius) + * remove dependency on glog, update to klog (#1559, @skriss) + * move issue-template-gen from docs/ to hack/ (#1558, @skriss) + * fix panic when processing DeleteBackupRequest objects without labels (#1556, @prydonius) + * support for multiple AWS profiles (#1548, @pranavgaikwad) + * Add CLI command to list (get) all Velero plugins (#1535, @carlisia) + * Added author as a tag on blog post. Should fix 404 error when trying to follow link as specified in issue #1522. (#1522, @coonsd) + * Allow individual backup storage locations to be read-only (#1517, @skriss) + * Stop returning an error when a restic volume is empty since it is a valid scenario. (#1480, @carlisia) + * add ability to use wildcard in includes/excludes (#1428, @guilhem) diff --git a/changelogs/unreleased/1428-guilhem b/changelogs/unreleased/1428-guilhem deleted file mode 100644 index 148e22276ce..00000000000 --- a/changelogs/unreleased/1428-guilhem +++ /dev/null @@ -1 +0,0 @@ -add ability to use wildcard in includes/excludes diff --git a/changelogs/unreleased/1480-carlisia.md b/changelogs/unreleased/1480-carlisia.md deleted file mode 100644 index f68f781ca6a..00000000000 --- a/changelogs/unreleased/1480-carlisia.md +++ /dev/null @@ -1 +0,0 @@ -Stop returning an error when a restic volume is empty since it is a valid scenario.
\ No newline at end of file diff --git a/changelogs/unreleased/1517-skriss b/changelogs/unreleased/1517-skriss deleted file mode 100644 index 7dce5518dce..00000000000 --- a/changelogs/unreleased/1517-skriss +++ /dev/null @@ -1 +0,0 @@ -Allow individual backup storage locations to be read-only diff --git a/changelogs/unreleased/1522-coonsd b/changelogs/unreleased/1522-coonsd deleted file mode 100644 index f92796ee250..00000000000 --- a/changelogs/unreleased/1522-coonsd +++ /dev/null @@ -1 +0,0 @@ -Added author as a tag on blog post. Should fix 404 error when trying to follow link as specified in issue #1522. \ No newline at end of file diff --git a/changelogs/unreleased/1535-carlisia b/changelogs/unreleased/1535-carlisia deleted file mode 100644 index 77bf2f032ac..00000000000 --- a/changelogs/unreleased/1535-carlisia +++ /dev/null @@ -1 +0,0 @@ -Add CLI command to list (get) all Velero plugins \ No newline at end of file diff --git a/changelogs/unreleased/1548-pranavgaikwad b/changelogs/unreleased/1548-pranavgaikwad deleted file mode 100644 index e0b7b7ba365..00000000000 --- a/changelogs/unreleased/1548-pranavgaikwad +++ /dev/null @@ -1 +0,0 @@ -support for multiple AWS profiles \ No newline at end of file diff --git a/changelogs/unreleased/1556-prydonius b/changelogs/unreleased/1556-prydonius deleted file mode 100644 index 41e4d0ab5fc..00000000000 --- a/changelogs/unreleased/1556-prydonius +++ /dev/null @@ -1 +0,0 @@ -fix panic when processing DeleteBackupRequest objects without labels diff --git a/changelogs/unreleased/1558-skriss b/changelogs/unreleased/1558-skriss deleted file mode 100644 index 8d81d01b301..00000000000 --- a/changelogs/unreleased/1558-skriss +++ /dev/null @@ -1 +0,0 @@ -move issue-template-gen from docs/ to hack/ diff --git a/changelogs/unreleased/1559-skriss b/changelogs/unreleased/1559-skriss deleted file mode 100644 index 09c89766ec9..00000000000 --- a/changelogs/unreleased/1559-skriss +++ /dev/null @@ -1 +0,0 @@ -remove dependency on glog, update to klog diff --git a/changelogs/unreleased/1561-prydonius b/changelogs/unreleased/1561-prydonius deleted file mode 100644 index d43890e4811..00000000000 --- a/changelogs/unreleased/1561-prydonius +++ /dev/null @@ -1 +0,0 @@ -Hides `velero server` and `velero restic server` commands from the list of available commands as these are not intended for use by the velero CLI user. 
diff --git a/changelogs/unreleased/1734-prydonius b/changelogs/unreleased/1734-prydonius new file mode 100644 index 00000000000..8d13fcbca31 --- /dev/null +++ b/changelogs/unreleased/1734-prydonius @@ -0,0 +1 @@ +adds --from-schedule flag to the `velero create backup` command to create a Backup from an existing Schedule diff --git a/changelogs/unreleased/1779-skriss b/changelogs/unreleased/1779-skriss new file mode 100644 index 00000000000..114f1f1b60f --- /dev/null +++ b/changelogs/unreleased/1779-skriss @@ -0,0 +1 @@ +when using `velero restore create --namespace-mappings ...` to create a second copy of a namespace in a cluster, create copies of the PVs used diff --git a/changelogs/unreleased/1781-s12chung b/changelogs/unreleased/1781-s12chung new file mode 100644 index 00000000000..ebb563be218 --- /dev/null +++ b/changelogs/unreleased/1781-s12chung @@ -0,0 +1 @@ +fix error formatting due to interpreting % as printf formatted strings diff --git a/changelogs/unreleased/1794-skriss b/changelogs/unreleased/1794-skriss new file mode 100644 index 00000000000..2afc420892a --- /dev/null +++ b/changelogs/unreleased/1794-skriss @@ -0,0 +1 @@ +remove 'restic check' calls from before/after 'restic prune' since they're redundant diff --git a/changelogs/unreleased/1807-skriss b/changelogs/unreleased/1807-skriss new file mode 100644 index 00000000000..e32d6bbe963 --- /dev/null +++ b/changelogs/unreleased/1807-skriss @@ -0,0 +1 @@ +when backing up PVCs with restic, specify --parent flag to prevent full volume rescans after pod reschedules diff --git a/design/_template.md b/design/_template.md new file mode 100644 index 00000000000..fddd68f3834 --- /dev/null +++ b/design/_template.md @@ -0,0 +1,48 @@ +# Design proposal template (replace with your proposal's title) + +Status: {Draft,Accepted,Declined} + +One to two sentences that describe the goal of this proposal. +The reader should be able to tell by the title, and the opening paragraph, if this document is relevant to them. + +_Note_: The preferred style for design documents is one sentence per line. +*Do not wrap lines*. +This aids in review of the document as changes to a line are not obscured by the reflowing those changes caused and has a side effect of avoiding debate about one or two spaces after a period. + +## Goals + +- A short list of things which will be accomplished by implementing this proposal. +- Two things is ok. +- Three is pushing it. +- More than three goals suggests that the proposal's scope is too large. + +## Non Goals + +- A short list of items which are: +- a. out of scope +- b. follow-on items which are deliberately excluded from this proposal. + +## Background + +One to two paragraphs of exposition to set the context for this proposal. + +## High-Level Design + +One to two paragraphs that describe the high level changes that will be made to implement this proposal. + +## Detailed Design + +A detailed design describing how the changes to the product should be made. + +The names of types, fields, interfaces, and methods should be agreed on here, not debated in code review. +The same applies to changes in CRDs, YAML examples, and so on. + +Ideally the changes should be made in sequence so that the work required to implement this design can be done incrementally, possibly in parallel. + +## Alternatives Considered + +If there are alternative high level or detailed designs that were not pursued they should be called out here with a brief explanation of why they were not pursued.
+ +## Security Considerations + +If this proposal has an impact to the security of the product, its users, or data stored or transmitted via the product, they must be addressed here. diff --git a/design/backup-resource-list.md b/design/backup-resource-list.md new file mode 100644 index 00000000000..309679a4255 --- /dev/null +++ b/design/backup-resource-list.md @@ -0,0 +1,96 @@ +# Expose list of backed up resources in backup details + +Status: Accepted + +To increase the visibility of what a backup might contain, this document proposes storing metadata about backed up resources in object storage and adding a new section to the detailed backup description output to list them. + +## Goals + +- Include a list of backed up resources as metadata in the bucket +- Enable users to get a view of what resources are included in a backup using the Velero CLI + +## Non Goals + +- Expose the full manifests of the backed up resources + +## Background + +As reported in [#396](https://github.com/heptio/velero/issues/396), the information reported in a `velero backup describe --details` command is fairly limited, and does not easily describe what resources a backup contains. +In order to see what a backup might contain, a user would have to download the backup tarball and extract it. +This makes it difficult to keep track of different backups in a cluster. + +## High-Level Design + +After performing a backup, a new file will be created that contains the list of the resources that have been included in the backup. +This file will be persisted in object storage alongside the backup contents and existing metadata. + +A section will be added to the output of `velero backup describe --details` command to view this metadata. + +## Detailed Design + +### Metadata file + +This metadata will be in JSON (or YAML) format so that it can be easily inspected from the bucket outside of Velero tooling, and will contain the API resource and group, namespaces and names of the resources: + +``` +apps/v1/Deployment: +- default/database +- default/wordpress +v1/Service: +- default/database +- default/wordpress +v1/Secret: +- default/database-root-password +- default/database-user-password +v1/ConfigMap: +- default/database +v1/PersistentVolume: +- my-pv +``` + +The filename for this metadata will be `-resource-list.json.gz`. +The top-level key is the string form of the `schema.GroupResource` type that we currently keep track of in the backup controller code path. + +### Changes in Backup controller + +The Backupper currently initialises a map to track the `backedUpItems` (https://github.com/heptio/velero/blob/1594bdc8d0132f548e18ffcc1db8c4cd2b042726/pkg/backup/backup.go#L269), this is passed down through GroupBackupper, ResourceBackupper and ItemBackupper where ItemBackupper records each backed up item. +This property will be moved to the [Backup request struct](https://github.com/heptio/velero/blob/16910a6215cbd8f0bde385dba9879629ebcbcc28/pkg/backup/request.go#L11), allowing the BackupController to access it after a successful backup. + +`backedUpItems` currently uses the `schema.GroupResource` as a key for the resource. +In order to record the API group, version and kind for the resource, this key will be constructed from the object's `schema.GroupVersionKind` in the format `{group}/{version}/{kind}` (e.g. `apps/v1/Deployment`). + +The `backedUpItems` map is kept as a flat structure internally for quick lookup. 
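+
+A minimal sketch of how the flat map might be folded into the grouped form shown above is given below (the `itemKey` struct and helper are illustrative, not the actual Velero types):
+
+```go
+package main
+
+import (
+	"fmt"
+	"sort"
+)
+
+// itemKey is a hypothetical stand-in for the key type of backedUpItems.
+type itemKey struct {
+	groupVersionKind string // e.g. "apps/v1/Deployment"
+	namespace        string
+	name             string
+}
+
+// groupByGVK folds the flat set of backed-up items into the nested
+// structure of the metadata file, keyed by group/version/kind.
+func groupByGVK(backedUpItems map[itemKey]struct{}) map[string][]string {
+	out := map[string][]string{}
+	for key := range backedUpItems {
+		entry := key.name
+		if key.namespace != "" {
+			entry = key.namespace + "/" + key.name
+		}
+		out[key.groupVersionKind] = append(out[key.groupVersionKind], entry)
+	}
+	for gvk := range out {
+		sort.Strings(out[gvk])
+	}
+	return out
+}
+
+func main() {
+	items := map[itemKey]struct{}{
+		{groupVersionKind: "apps/v1/Deployment", namespace: "default", name: "wordpress"}: {},
+		{groupVersionKind: "v1/PersistentVolume", name: "my-pv"}:                          {},
+	}
+	fmt.Println(groupByGVK(items))
+}
+```
+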
+When the backup is ready to upload, `backedUpItems` will be converted to a nested structure representing the metadata file above, grouped by `schema.GroupVersionKind`. +After converting to the right format, it can be passed to the `persistBackup` function to persist the file in object storage. + +### Changes to DownloadRequest CRD and processing + +A new `DownloadTargetKind` "BackupResourceList" will be added to the DownloadRequest CR. + +The `GetDownloadURL` function in the `persistence` package will be updated to handle this new DownloadTargetKind to enable the Velero client to fetch the metadata from the bucket. + +### Changes to `velero backup describe --details` + +This command will need to be updated to fetch the metadata from the bucket using the `Stream` method used in other commands. +The file will be read into memory and displayed in the output of the command. +Depending on the format the metadata is stored in, it may need processing to print in a more human-readable format. +If we choose to store the metadata in YAML, it can likely be directly printed out. + +If the metadata file does not exist, this is an older backup and we cannot display the list of resources that were backed up. + +## Open Questions + +## Alternatives Considered + +### Fetch backup contents archive and walk through it to list contents + +Instead of recording new metadata about what resources have been backed up, we could simply download the backup contents archive and walk through it to list the contents every time `velero backup describe --details` is run. + +The advantage of this approach is that we don't need to change any backup procedures as we already have this content, and we will also be able to list resources for older backups. +Additionally, if we wanted to expose more information about the backed up resources, we can do so without having to update what we store in the metadata. + +The disadvantages are: +- downloading the whole backup archive will be larger than just downloading a smaller file with metadata +- reduces the metadata available in the bucket that users might want to inspect outside of Velero tooling (though this is not an explicit requirement) + +## Security Considerations diff --git a/design/feature-flags.md b/design/feature-flags.md new file mode 100644 index 00000000000..b6382c64ffa --- /dev/null +++ b/design/feature-flags.md @@ -0,0 +1,71 @@ +# Feature Flags + +Status: Accepted + +Some features may take a while to get fully implemented, and we don't necessarily want to have long-lived feature branches. +A simple feature flag implementation allows code to be merged into master, but not used unless a flag is set. + +## Goals + +- Allow unfinished features to be present in Velero releases, but only enabled when the associated flag is set. + +## Non Goals + +- A robust feature flag library. + +## Background + +When considering the [CSI integration work](https://github.com/heptio/velero/pull/1661), the timelines involved presented a problem in balancing a release and longer-running feature work. +A simple implementation of feature flags can help protect unfinished code while allowing the rest of the changes to ship. + +## High-Level Design + +A new command line flag, `--features`, will be added to the root `velero` command. + +`--features` will accept a comma-separated list of features, such as `--features EnableCSI,Replication`. +Each feature listed will correspond to a key in a map in `pkg/features/features.go` defining whether a feature should be enabled.
+ +Any code implementing the feature would then import the map and look up the key's value. + +For the Velero client, a `features` key can be added to the `config.json` file for more convenient client invocations. + +## Detailed Design + +A new `features` package will be introduced with these basic structs: + +```go +type FeatureFlagSet struct { + flags map[string]bool +} + +type Flags interface { + // Enabled reports whether or not the specified flag is found. + Enabled(name string) bool + + // Enable adds the specified flags to the list of enabled flags. + Enable(names ...string) + + // All returns all enabled features + All() []string +} + +// NewFeatureFlagSet initializes and populates a new FeatureFlagSet +func NewFeatureFlagSet(flags ...string) FeatureFlagSet +``` + +When parsing the `--features` flag, the entire `[]string` will be passed to `NewFeatureFlagSet`. +Additional features can be added with the `Enable` function. +Parsed features will be printed as an `Info` level message on server start up. + +No verification of features will be done in order to keep the implementation minimal. + +On the client side, `--features` and the `features` key in `config.json` file will be additive, resulting in the union of both. + +To disable a feature, the server must be stopped and restarted with a modified `--features` list. +Similarly, the client process must be stopped and restarted without features. + +## Alternatives Considered +Omitted + +## Security Considerations +Omitted diff --git a/design/pv-cloning.md b/design/pv-cloning.md new file mode 100644 index 00000000000..9d4828ee51d --- /dev/null +++ b/design/pv-cloning.md @@ -0,0 +1,54 @@ +# Cloning PVs While Remapping Namespaces + +Status: Approved + +Velero supports restoring resources into different namespaces than they were backed up from. +This enables a user to, among other things, clone a namespace within a cluster. +However, if the namespace being cloned uses persistent volume claims, Velero cannot currently create a second copy of the original persistent volume when restoring. +This limitation is documented in detail in [issue #192](https://github.com/heptio/velero/issues/192). +This document proposes a solution that allows new copies of persistent volumes to be created during a namespace clone. + +## Goals + +- Enable persistent volumes to be cloned when using `velero restore create --namespace-mappings ...` to create a second copy of a namespace within a cluster. + +## Non Goals + +- Cloning of persistent volumes in any scenario other than when using `velero restore create --namespace-mappings ...` flag. +- [CSI-based cloning](https://kubernetes.io/docs/concepts/storage/volume-pvc-datasource/). + +## Background + +(Omitted, see introduction) + +## High-Level Design + +During a restore, Velero will detect that it needs to assign a new name to a persistent volume being restored if and only if both of the following conditions are met: +- the persistent volume is claimed by a persistent volume claim in a namespace that's being remapped using `velero restore create --namespace-mappings ...` +- a persistent volume already exists in the cluster with the original name + +If these conditions exist, Velero will give the persistent volume a new arbitrary name before restoring it. +It will also update the `spec.volumeName` of the related persistent volume claim. 
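+
+As a rough illustration of that decision (function and parameter names here are hypothetical; the real code operates on the restore context and API objects):
+
+```go
+package main
+
+import "fmt"
+
+// shouldRenamePV sketches the two preconditions described above.
+func shouldRenamePV(claimNamespace, pvName string, namespaceMappings map[string]string, pvExists func(string) bool) bool {
+	if _, remapped := namespaceMappings[claimNamespace]; !remapped {
+		return false // the claiming namespace is not being remapped
+	}
+	return pvExists(pvName) // rename only if the original name is already taken
+}
+
+func main() {
+	mappings := map[string]string{"wordpress": "wordpress-clone"}
+	exists := func(name string) bool { return name == "pv-001" }
+
+	fmt.Println(shouldRenamePV("wordpress", "pv-001", mappings, exists)) // true: clone the PV under a new name
+	fmt.Println(shouldRenamePV("other-ns", "pv-001", mappings, exists))  // false: restore as-is
+}
+```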
+ +## Detailed Design + +In `pkg/restore/restore.go`, around [line 872](https://github.com/heptio/velero/blob/master/pkg/restore/restore.go#L872), Velero has special-case code for persistent volumes. +This code will be updated to check for the two preconditions described in the previous section. +If the preconditions are met, the object will be given a new name. +The persistent volume will also be annotated with the original name, e.g. `velero.io/original-pv-name=NAME`. +Importantly, the name change will occur **before** [line 890](https://github.com/heptio/velero/blob/master/pkg/restore/restore.go#L890), where Velero checks to see if it should restore the persistent volume. +Additionally, the old and new persistent volume names will be recorded in a new field that will be added to the `context` struct, `renamedPVs map[string]string`. + +In the special-case code for persistent volume claims starting on [line 987](https://github.com/heptio/velero/blob/master/pkg/restore/restore.go#L987), Velero will check to see if the claimed persistent volume has been renamed by looking in `ctx.renamedPVs`. +If so, Velero will update the persistent volume claim's `spec.volumeName` to the new name. + +## Alternatives Considered + +One alternative approach is to add a new CLI flag and API field for restores, e.g. `--clone-pvs`, that a user could provide to indicate they want to create copies of persistent volumes. +This approach would work fine, but it does require the user to be aware of this flag/field and to properly specify it when needed. +It seems like a better UX to detect the typical conditions where this behavior is needed, and to automatically apply it. +Additionally, the design proposed here does not preclude such a flag/field from being added later, if it becomes necessary to cover other use cases. + +## Security Considerations + +N/A diff --git a/design/restic-backup-and-restore-progress.md b/design/restic-backup-and-restore-progress.md new file mode 100644 index 00000000000..f988211b02d --- /dev/null +++ b/design/restic-backup-and-restore-progress.md @@ -0,0 +1,116 @@ +# Progress reporting for restic backups and restores + +Status: Accepted + +During long-running restic backups/restores, there is no visibility into what (if anything) is happening, making it hard to know if the backup/restore is making progress or hung, how long the operation might take, etc. +We should capture progress during restic operations and make it user-visible so that it's easier to reason about. +This document proposes an approach for capturing progress of backup and restore operations and exposing this information to users. + +## Goals + +- Provide basic visibility into restic operations to inform users about their progress. + +## Non Goals + +- Capturing progress for non-restic backups and restores. + +## Background + +(Omitted, see introduction) + +## High-Level Design + +### restic backup progress + +The `restic backup` command provides progress reporting to stdout in JSON format, which includes the completion percentage of the backup. +This progress will be read on some interval and the PodVolumeBackup Custom Resource's (CR) status will be updated with this information. + +### restic restore progress + +The `restic stats` command returns the total size of a backup. +This can be compared with the total size of the volume periodically to calculate the completion percentage of the restore. +The PodVolumeRestore CR's status will be updated with this information.
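+
+In both cases, the user-facing completion percentage can be derived from the two byte counts; a minimal sketch, assuming the `Progress` fields proposed in the detailed design below:
+
+```go
+package main
+
+import "fmt"
+
+// percentDone derives a completion percentage from the proposed
+// TotalBytes/BytesDone progress fields.
+func percentDone(bytesDone, totalBytes int64) float64 {
+	if totalBytes <= 0 {
+		return 0 // total not known yet, e.g. restic is still scanning
+	}
+	return float64(bytesDone) / float64(totalBytes) * 100
+}
+
+func main() {
+	fmt.Printf("%.2f%%\n", percentDone(138936320, 49461329920)) // ~0.28%
+}
+```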
+ +## Detailed Design + +## Changes to PodVolumeBackup and PodVolumeRestore Status type + +A new `Progress` field will be added to PodVolumeBackupStatus and PodVolumeRestoreStatus of type `PodVolumeOperationProgress`: + +``` +type PodVolumeOperationProgress struct { + TotalBytes int64 + BytesDone int64 +} +``` + +### restic backup progress + +restic added support for [streaming JSON output for the `restic backup` command](https://github.com/restic/restic/pull/1944) in 0.9.5. +Our current images ship restic 0.9.4, and so the Dockerfile will be updated to pull the new version: https://github.com/heptio/velero/blob/af4b9373fc73047f843cd4bc3648603d780c8b74/Dockerfile-velero#L21. +With the `--json` flag, `restic backup` outputs single lines of JSON reporting the status of the backup: + +``` +{"message_type":"status","percent_done":0,"total_files":1,"total_bytes":21424504832} +{"message_type":"status","action":"scan_finished","item":"","duration":0.219241873,"data_size":49461329920,"metadata_size":0,"total_files":10} +{"message_type":"status","percent_done":0,"total_files":10,"total_bytes":49461329920,"current_files":["/file3"]} +{"message_type":"status","percent_done":0.0003815984736061056,"total_files":10,"total_bytes":49461329920,"bytes_done":18874368,"current_files":["/file1","/file3"]} +{"message_type":"status","percent_done":0.0011765952936188255,"total_files":10,"total_bytes":49461329920,"bytes_done":58195968,"current_files":["/file1","/file3"]} +{"message_type":"status","percent_done":0.0019503921984312064,"total_files":10,"total_bytes":49461329920,"bytes_done":96468992,"current_files":["/file1","/file3"]} +{"message_type":"status","percent_done":0.0028089887640449437,"total_files":10,"total_bytes":49461329920,"bytes_done":138936320,"current_files":["/file1","/file3"]} +``` + +The [command factory for backup](https://github.com/heptio/velero/blob/af4b9373fc73047f843cd4bc3648603d780c8b74/pkg/restic/command_factory.go#L37) will be updated to include the `--json` flag. +The code to run the `restic backup` command (https://github.com/heptio/velero/blob/af4b9373fc73047f843cd4bc3648603d780c8b74/pkg/controller/pod_volume_backup_controller.go#L241) will be changed to include a Goroutine that reads from the command's stdout stream. +The implementation of this will largely follow [@jmontleon's PoC](https://github.com/fusor/velero/pull/4/files). +The Goroutine will periodically read the stream (every 10 seconds) and get the last printed status line, which will be parsed as JSON. +If `bytes_done` is empty, restic has not finished scanning the volume and hasn't calculated the `total_bytes`. +In this case, we will not update the PodVolumeBackup and instead will wait for the next iteration. +Once we get a non-zero value for `bytes_done`, the `bytes_done` and `total_bytes` properties will be read and the PodVolumeBackup will be patched to update `status.Progress.BytesDone` and `status.Progress.TotalBytes` respectively. + +Once the backup has completed successfully, the PodVolumeBackup will be patched to set `status.Progress.BytesDone = status.Progress.TotalBytes`. +This is done since the main thread may cause early termination of the Goroutine once the operation has finished, preventing a final update to the `BytesDone` property.
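+
+As an illustration only, a minimal sketch of the status-line handling described above (struct and helper names are hypothetical, and the controller wiring is omitted):
+
+```go
+package main
+
+import (
+	"bufio"
+	"encoding/json"
+	"fmt"
+	"io"
+	"strings"
+)
+
+// resticStatusLine mirrors the status fields in the --json output shown above.
+type resticStatusLine struct {
+	MessageType string  `json:"message_type"`
+	PercentDone float64 `json:"percent_done"`
+	TotalBytes  int64   `json:"total_bytes"`
+	BytesDone   int64   `json:"bytes_done"`
+}
+
+// lastStatus reads the available output and keeps the most recent "status" line;
+// the real Goroutine would do this on a ticker (e.g. every 10 seconds).
+func lastStatus(r io.Reader) (resticStatusLine, bool) {
+	var last resticStatusLine
+	var found bool
+	scanner := bufio.NewScanner(r)
+	for scanner.Scan() {
+		var line resticStatusLine
+		if err := json.Unmarshal(scanner.Bytes(), &line); err != nil {
+			continue // skip anything that isn't a JSON status line
+		}
+		if line.MessageType == "status" {
+			last, found = line, true
+		}
+	}
+	return last, found
+}
+
+func main() {
+	stdout := strings.NewReader(`{"message_type":"status","percent_done":0.0028,"total_bytes":49461329920,"bytes_done":138936320}`)
+	if status, ok := lastStatus(stdout); ok && status.BytesDone > 0 {
+		// the controller would patch status.Progress.BytesDone/TotalBytes here
+		fmt.Printf("%d/%d bytes\n", status.BytesDone, status.TotalBytes)
+	}
+}
+```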
+ +### restic restore progress + +The `restic stats --json` command provides information about the size of backups: + +``` +{"total_size":10558111744,"total_file_count":11} +``` + +Before beginning the restore operation, we can use the output of `restic stats` to get the total size of the backup. +The PodVolumeRestore will be patched to set `status.Progress.TotalBytes` to the total size of the backup. + +The code to run the `restic restore` command will be changed to include a Goroutine that periodically (every 10 seconds) gets the current size of the volume. +To get the current size of the volume, we will recursively walk through all files in the volume to accumulate the total size. +The current total size is the number of bytes transferred so far and the PodVolumeRestore will be patched to update `status.Progress.BytesDone`. + +Once the restore has completed successfully, the PodVolumeRestore will be patched to set `status.Progress.BytesDone = status.Progress.TotalBytes`. +This is done since the main thread may cause early termination of the Goroutine once the operation has finished, preventing a final update to the `BytesDone` property. + +### Velero CLI changes + +The output that describes detailed information about [PodVolumeBackups](https://github.com/heptio/velero/blob/559d62a2ec99f7a522924348fc4a173a0699813a/pkg/cmd/util/output/backup_describer.go#L349) and [PodVolumeRestores](https://github.com/heptio/velero/blob/559d62a2ec99f7a522924348fc4a173a0699813a/pkg/cmd/util/output/restore_describer.go#L160) will be updated to calculate and display a completion percentage from `status.Progress.TotalBytes` and `status.Progress.BytesDone` if available. + +## Open Questions + +- Can we assume that the volume we are restoring into will be empty? Can it contain other artefacts? + - Based on discussion in this PR, we are okay making the assumption that the PVC is empty and will proceed with the above proposed approach. + +## Alternatives Considered + +### restic restore progress + +If we cannot assume that the volume we are restoring into will be empty, we can instead use the output from `restic snapshot` to get the list of files in the backup. +This can then be used to calculate the current total size of just those files in the volume, so that we avoid considering any other files unrelated to the backup. +The above proposed approach is simpler than this one, as we don't need to keep track of each file in the backup, but this will be more robust if the volume could contain other files not included in the backup. +It's possible that certain volume types may contain hidden files that could contribute to the total size of the volume, though these should be small enough that the BytesDone calculation will only be slightly inflated. + +Another option is to contribute progress reporting similar to `restic backup` for `restic restore` upstream. +This may take more time, but would give us a more native view on the progress of a restore. +There are several issues about this already in the restic repo (https://github.com/restic/restic/issues/426, https://github.com/restic/restic/issues/1154), and what looks like an abandoned attempt (https://github.com/restic/restic/pull/2003) which we may be able to pick up.
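+
+Either way, the periodic size check amounts to a recursive walk of the mounted volume; a minimal sketch of the simpler whole-volume variant from the detailed design above (the mount path is illustrative):
+
+```go
+package main
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+)
+
+// volumeSize walks the mounted volume and accumulates the size of regular
+// files, which is the BytesDone figure described in the detailed design.
+func volumeSize(root string) (int64, error) {
+	var total int64
+	err := filepath.Walk(root, func(_ string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		if info.Mode().IsRegular() {
+			total += info.Size()
+		}
+		return nil
+	})
+	return total, err
+}
+
+func main() {
+	size, err := volumeSize("/example/volume-mount") // illustrative path
+	if err != nil {
+		fmt.Println("walk failed:", err)
+		return
+	}
+	fmt.Println("bytes so far:", size)
+}
+```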
+ +## Security Considerations + +N/A diff --git a/examples/minio/00-minio-deployment.yaml b/examples/minio/00-minio-deployment.yaml index a761989536a..e736c9148b0 100644 --- a/examples/minio/00-minio-deployment.yaml +++ b/examples/minio/00-minio-deployment.yaml @@ -19,7 +19,7 @@ metadata: name: velero --- -apiVersion: apps/v1beta1 +apiVersion: apps/v1 kind: Deployment metadata: namespace: velero @@ -29,6 +29,9 @@ metadata: spec: strategy: type: Recreate + selector: + matchLabels: + component: minio template: metadata: labels: diff --git a/examples/nginx-app/base.yaml b/examples/nginx-app/base.yaml index cb183a7c268..c1a3717f377 100644 --- a/examples/nginx-app/base.yaml +++ b/examples/nginx-app/base.yaml @@ -21,13 +21,16 @@ metadata: app: nginx --- -apiVersion: apps/v1beta1 +apiVersion: apps/v1 kind: Deployment metadata: name: nginx-deployment namespace: nginx-example spec: replicas: 2 + selector: + matchLabels: + app: nginx template: metadata: labels: diff --git a/examples/nginx-app/with-pv.yaml b/examples/nginx-app/with-pv.yaml index a4a11e7851e..4464cdcf941 100644 --- a/examples/nginx-app/with-pv.yaml +++ b/examples/nginx-app/with-pv.yaml @@ -29,7 +29,7 @@ metadata: labels: app: nginx spec: - storageClassName: + # storageClassName: accessModes: - ReadWriteOnce resources: @@ -37,13 +37,16 @@ spec: storage: 50Mi --- -apiVersion: apps/v1beta1 +apiVersion: apps/v1 kind: Deployment metadata: name: nginx-deployment namespace: nginx-example spec: replicas: 1 + selector: + matchLabels: + app: nginx template: metadata: labels: diff --git a/hack/build-image/Dockerfile b/hack/build-image/Dockerfile index 419a43cfa2a..b69a8c8adc6 100644 --- a/hack/build-image/Dockerfile +++ b/hack/build-image/Dockerfile @@ -17,8 +17,10 @@ FROM golang:1.12 RUN mkdir -p /go/src/k8s.io && \ cd /go/src/k8s.io && \ git config --global advice.detachedHead false && \ - git clone -b kubernetes-1.14.0 https://github.com/kubernetes/code-generator && \ - git clone -b kubernetes-1.14.0 https://github.com/kubernetes/apimachinery && \ + git clone -b kubernetes-1.15.3 https://github.com/kubernetes/code-generator && \ + git clone -b kubernetes-1.15.3 https://github.com/kubernetes/apimachinery && \ + # vendor code-generator go modules to be compatible with pre-1.15 + cd /go/src/k8s.io/code-generator && GO111MODULE=on go mod vendor && \ go get golang.org/x/tools/cmd/goimports && \ cd /go/src/golang.org/x/tools && \ git checkout 40a48ad93fbe707101afb2099b738471f70594ec && \ diff --git a/hack/gen-docs.sh b/hack/gen-docs.sh new file mode 100755 index 00000000000..46509614962 --- /dev/null +++ b/hack/gen-docs.sh @@ -0,0 +1,108 @@ +#!/bin/bash + +# Copyright 2019 the Velero contributors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# gen-docs.sh is used for the "make gen-docs" target. See additional +# documentation in the Makefile. 
+ +set -o errexit +set -o nounset +set -o pipefail + +# don't run if there's already a directory for the target docs version +if [[ -d site/docs/$NEW_DOCS_VERSION ]]; then + echo "ERROR: site/docs/$NEW_DOCS_VERSION already exists" + exit 1 +fi + +# get the alphabetically last item in site/docs to use as PREVIOUS_DOCS_VERSION +# if not explicitly specified by the user +if [[ -z "${PREVIOUS_DOCS_VERSION:-}" ]]; then + echo "PREVIOUS_DOCS_VERSION was not specified, getting the latest version" + PREVIOUS_DOCS_VERSION=$(ls -1 site/docs/ | tail -n 1) +fi + +# make a copy of the previous versioned docs dir +echo "Creating copy of docs directory site/docs/$PREVIOUS_DOCS_VERSION in site/docs/$NEW_DOCS_VERSION" +cp -r site/docs/${PREVIOUS_DOCS_VERSION}/ site/docs/${NEW_DOCS_VERSION}/ + +# 'git add' the previous version's docs as-is so we get a useful diff when we copy the master docs in +echo "Running 'git add' for previous version's doc contents to use as a base for diff" +git add site/docs/${NEW_DOCS_VERSION} + +# now copy the contents of site/docs/master into the same directory so we can get a nice +# git diff of what changed since previous version +echo "Copying site/docs/master/ to site/docs/${NEW_DOCS_VERSION}/" +rm -rf site/docs/${NEW_DOCS_VERSION}/ && cp -r site/docs/master/ site/docs/${NEW_DOCS_VERSION}/ + +# make a copy of the previous versioned ToC +NEW_DOCS_TOC="$(echo ${NEW_DOCS_VERSION} | tr . -)-toc" +PREVIOUS_DOCS_TOC="$(echo ${PREVIOUS_DOCS_VERSION} | tr . -)-toc" + +echo "Creating copy of site/_data/$PREVIOUS_DOCS_TOC.yml at site/_data/$NEW_DOCS_TOC.yml" +cp site/_data/$PREVIOUS_DOCS_TOC.yml site/_data/$NEW_DOCS_TOC.yml + +# 'git add' the previous version's ToC content as-is so we get a useful diff when we copy the master ToC in +echo "Running 'git add' for previous version's ToC to use as a base for diff" +git add site/_data/$NEW_DOCS_TOC.yml + +# now copy the master ToC so we can get a nice git diff of what changed since previous version +echo "Copying site/_data/master-toc.yml to site/_data/$NEW_DOCS_TOC.yml" +rm site/_data/$NEW_DOCS_TOC.yml && cp site/_data/master-toc.yml site/_data/$NEW_DOCS_TOC.yml + +# replace known version-specific links -- the sed syntax is slightly different in OS X and Linux, +# so check which OS we're running on. 
+if [[ $(uname) == "Darwin" ]]; then + echo "[OS X] updating version-specific links" + find site/docs/${NEW_DOCS_VERSION} -type f -name "*.md" | xargs sed -i '' "s|https://velero.io/docs/master|https://velero.io/docs/$NEW_DOCS_VERSION|g" + find site/docs/${NEW_DOCS_VERSION} -type f -name "*.md" | xargs sed -i '' "s|https://github.com/heptio/velero/blob/master|https://github.com/heptio/velero/blob/$NEW_DOCS_VERSION|g" + + echo "[OS X] Updating latest version in _config.yml" + sed -i '' "s/latest: ${PREVIOUS_DOCS_VERSION}/latest: ${NEW_DOCS_VERSION}/" site/_config.yml + + # newlines and lack of indentation are requirements for this sed syntax + # which is doing an append + echo "[OS X] Adding latest version to versions list in _config.yml" + sed -i '' "/- master/a\\ +- ${NEW_DOCS_VERSION} +" site/_config.yml + + echo "[OS X] Adding ToC mapping entry" + sed -i '' "/master: master-toc/a\\ +${NEW_DOCS_VERSION}: ${NEW_DOCS_TOC} +" site/_data/toc-mapping.yml + +else + echo "[Linux] updating version-specific links" + find site/docs/${NEW_DOCS_VERSION} -type f -name "*.md" | xargs sed -i'' "s|https://velero.io/docs/master|https://velero.io/docs/$NEW_DOCS_VERSION|g" + find site/docs/${NEW_DOCS_VERSION} -type f -name "*.md" | xargs sed -i'' "s|https://github.com/heptio/velero/blob/master|https://github.com/heptio/velero/blob/$NEW_DOCS_VERSION|g" + + echo "[Linux] Updating latest version in _config.yml" + sed -i'' "s/latest: ${PREVIOUS_DOCS_VERSION}/latest: ${NEW_DOCS_VERSION}/" site/_config.yml + + echo "[Linux] Adding latest version to versions list in _config.yml" + sed -i'' "/- master/a - ${NEW_DOCS_VERSION}" site/_config.yml + + echo "[Linux] Adding ToC mapping entry" + sed -i'' "/master: master-toc/a ${NEW_DOCS_VERSION}: ${NEW_DOCS_TOC}" site/_data/toc-mapping.yml +fi + +echo "Success! site/docs/$NEW_DOCS_VERSION has been created." +echo "" +echo "The next steps are:" +echo " 1. Consult site/README-JEKYLL.md for further manual steps required to finalize the new versioned docs generation." +echo " 2. Run a 'git diff' to review all changes made to the docs since the previous version." +echo " 3. Make any manual changes/corrections necessary." +echo " 4. Run 'git add' to stage all unstaged changes, then 'git commit'." diff --git a/pkg/apis/velero/v1/download_request.go b/pkg/apis/velero/v1/download_request.go index 0599309268b..b9e6492f78b 100644 --- a/pkg/apis/velero/v1/download_request.go +++ b/pkg/apis/velero/v1/download_request.go @@ -31,6 +31,7 @@ const ( DownloadTargetKindBackupLog DownloadTargetKind = "BackupLog" DownloadTargetKindBackupContents DownloadTargetKind = "BackupContents" DownloadTargetKindBackupVolumeSnapshots DownloadTargetKind = "BackupVolumeSnapshots" + DownloadTargetKindBackupResourceList DownloadTargetKind = "BackupResourceList" DownloadTargetKindRestoreLog DownloadTargetKind = "RestoreLog" DownloadTargetKindRestoreResults DownloadTargetKind = "RestoreResults" ) diff --git a/pkg/apis/velero/v1/labels_annotations.go b/pkg/apis/velero/v1/labels_annotations.go index f0a4d8ec7a7..04fb355c5d6 100644 --- a/pkg/apis/velero/v1/labels_annotations.go +++ b/pkg/apis/velero/v1/labels_annotations.go @@ -35,6 +35,9 @@ const ( // PodUIDLabel is the label key used to identify a pod by uid. PodUIDLabel = "velero.io/pod-uid" + // PVCUIDLabel is the label key used to identify a PVC by uid. + PVCUIDLabel = "velero.io/pvc-uid" + // PodVolumeOperationTimeoutAnnotation is the annotation key used to apply // a backup/restore-specific timeout value for pod volume operations (i.e. 
// restic backups/restores). diff --git a/pkg/apis/velero/v1/pod_volume_backup.go b/pkg/apis/velero/v1/pod_volume_backup.go index 889be1e2cd9..330fda00e6b 100644 --- a/pkg/apis/velero/v1/pod_volume_backup.go +++ b/pkg/apis/velero/v1/pod_volume_backup.go @@ -68,6 +68,18 @@ type PodVolumeBackupStatus struct { // Message is a message about the pod volume backup's status. Message string `json:"message"` + + // StartTimestamp records the time a backup was started. + // Separate from CreationTimestamp, since that value changes + // on restores. + // The server's time is used for StartTimestamps + StartTimestamp metav1.Time `json:"startTimestamp"` + + // CompletionTimestamp records the time a backup was completed. + // Completion time is recorded even on failed backups. + // Completion time is recorded before uploading the backup object. + // The server's time is used for CompletionTimestamps + CompletionTimestamp metav1.Time `json:"completionTimestamp"` } // +genclient diff --git a/pkg/apis/velero/v1/pod_volume_restore.go b/pkg/apis/velero/v1/pod_volume_restore.go index ab4fc3bafaa..98668e002f2 100644 --- a/pkg/apis/velero/v1/pod_volume_restore.go +++ b/pkg/apis/velero/v1/pod_volume_restore.go @@ -57,6 +57,15 @@ type PodVolumeRestoreStatus struct { // Message is a message about the pod volume restore's status. Message string `json:"message"` + + // StartTimestamp records the time a restore was started. + // The server's time is used for StartTimestamps + StartTimestamp metav1.Time `json:"startTimestamp"` + + // CompletionTimestamp records the time a restore was completed. + // Completion time is recorded even on failed restores. + // The server's time is used for CompletionTimestamps + CompletionTimestamp metav1.Time `json:"completionTimestamp"` } // +genclient diff --git a/pkg/apis/velero/v1/zz_generated.deepcopy.go b/pkg/apis/velero/v1/zz_generated.deepcopy.go index ed6bb9e244b..ef51072cc72 100644 --- a/pkg/apis/velero/v1/zz_generated.deepcopy.go +++ b/pkg/apis/velero/v1/zz_generated.deepcopy.go @@ -80,7 +80,7 @@ func (in *BackupHooks) DeepCopy() *BackupHooks { func (in *BackupList) DeepCopyInto(out *BackupList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Backup, len(*in)) @@ -299,7 +299,7 @@ func (in *BackupStorageLocation) DeepCopyObject() runtime.Object { func (in *BackupStorageLocationList) DeepCopyInto(out *BackupStorageLocationList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]BackupStorageLocation, len(*in)) @@ -401,7 +401,7 @@ func (in *DeleteBackupRequest) DeepCopyObject() runtime.Object { func (in *DeleteBackupRequestList) DeepCopyInto(out *DeleteBackupRequestList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]DeleteBackupRequest, len(*in)) @@ -499,7 +499,7 @@ func (in *DownloadRequest) DeepCopyObject() runtime.Object { func (in *DownloadRequestList) DeepCopyInto(out *DownloadRequestList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]DownloadRequest, len(*in)) @@ -638,7 +638,7 @@ func (in *PodVolumeBackup) DeepCopyInto(out *PodVolumeBackup) { out.TypeMeta = 
in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status + in.Status.DeepCopyInto(&out.Status) return } @@ -664,7 +664,7 @@ func (in *PodVolumeBackup) DeepCopyObject() runtime.Object { func (in *PodVolumeBackupList) DeepCopyInto(out *PodVolumeBackupList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PodVolumeBackup, len(*in)) @@ -720,6 +720,8 @@ func (in *PodVolumeBackupSpec) DeepCopy() *PodVolumeBackupSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PodVolumeBackupStatus) DeepCopyInto(out *PodVolumeBackupStatus) { *out = *in + in.StartTimestamp.DeepCopyInto(&out.StartTimestamp) + in.CompletionTimestamp.DeepCopyInto(&out.CompletionTimestamp) return } @@ -739,7 +741,7 @@ func (in *PodVolumeRestore) DeepCopyInto(out *PodVolumeRestore) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) out.Spec = in.Spec - out.Status = in.Status + in.Status.DeepCopyInto(&out.Status) return } @@ -765,7 +767,7 @@ func (in *PodVolumeRestore) DeepCopyObject() runtime.Object { func (in *PodVolumeRestoreList) DeepCopyInto(out *PodVolumeRestoreList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PodVolumeRestore, len(*in)) @@ -814,6 +816,8 @@ func (in *PodVolumeRestoreSpec) DeepCopy() *PodVolumeRestoreSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PodVolumeRestoreStatus) DeepCopyInto(out *PodVolumeRestoreStatus) { *out = *in + in.StartTimestamp.DeepCopyInto(&out.StartTimestamp) + in.CompletionTimestamp.DeepCopyInto(&out.CompletionTimestamp) return } @@ -859,7 +863,7 @@ func (in *ResticRepository) DeepCopyObject() runtime.Object { func (in *ResticRepositoryList) DeepCopyInto(out *ResticRepositoryList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ResticRepository, len(*in)) @@ -954,7 +958,7 @@ func (in *Restore) DeepCopyObject() runtime.Object { func (in *RestoreList) DeepCopyInto(out *RestoreList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Restore, len(*in)) @@ -1094,7 +1098,7 @@ func (in *Schedule) DeepCopyObject() runtime.Object { func (in *ScheduleList) DeepCopyInto(out *ScheduleList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Schedule, len(*in)) @@ -1194,7 +1198,7 @@ func (in *ServerStatusRequest) DeepCopyObject() runtime.Object { func (in *ServerStatusRequestList) DeepCopyInto(out *ServerStatusRequestList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ServerStatusRequest, len(*in)) @@ -1314,7 +1318,7 @@ func (in *VolumeSnapshotLocation) DeepCopyObject() runtime.Object { func (in *VolumeSnapshotLocationList) DeepCopyInto(out *VolumeSnapshotLocationList) { *out = *in out.TypeMeta = 
in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]VolumeSnapshotLocation, len(*in)) diff --git a/pkg/backup/backup.go b/pkg/backup/backup.go index d244f764e13..9f548473cfa 100644 --- a/pkg/backup/backup.go +++ b/pkg/backup/backup.go @@ -60,12 +60,6 @@ type kubernetesBackupper struct { resticTimeout time.Duration } -type itemKey struct { - resource string - namespace string - name string -} - type resolvedAction struct { velero.BackupItemAction @@ -240,6 +234,8 @@ func (kb *kubernetesBackupper) Backup(log logrus.FieldLogger, backupRequest *Req return err } + backupRequest.BackedUpItems = map[itemKey]struct{}{} + podVolumeTimeout := kb.resticTimeout if val := backupRequest.Annotations[api.PodVolumeOperationTimeoutAnnotation]; val != "" { parsed, err := time.ParseDuration(val) @@ -266,7 +262,6 @@ func (kb *kubernetesBackupper) Backup(log logrus.FieldLogger, backupRequest *Req backupRequest, kb.dynamicFactory, kb.discoveryHelper, - make(map[itemKey]struct{}), cohabitatingResources(), kb.podCommandExecutor, tw, diff --git a/pkg/backup/backup_new_test.go b/pkg/backup/backup_new_test.go deleted file mode 100644 index 639622e1521..00000000000 --- a/pkg/backup/backup_new_test.go +++ /dev/null @@ -1,2005 +0,0 @@ -/* -Copyright 2019 the Velero contributors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package backup - -import ( - "archive/tar" - "bytes" - "compress/gzip" - "encoding/json" - "io" - "io/ioutil" - "sort" - "strings" - "testing" - "time" - - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - discoveryfake "k8s.io/client-go/discovery/fake" - dynamicfake "k8s.io/client-go/dynamic/fake" - kubefake "k8s.io/client-go/kubernetes/fake" - - velerov1 "github.com/heptio/velero/pkg/apis/velero/v1" - "github.com/heptio/velero/pkg/client" - "github.com/heptio/velero/pkg/discovery" - "github.com/heptio/velero/pkg/generated/clientset/versioned/fake" - "github.com/heptio/velero/pkg/kuberesource" - "github.com/heptio/velero/pkg/plugin/velero" - "github.com/heptio/velero/pkg/test" - kubeutil "github.com/heptio/velero/pkg/util/kube" - "github.com/heptio/velero/pkg/volume" -) - -// TestBackupResourceFiltering runs backups with different combinations -// of resource filters (included/excluded resources, included/excluded -// namespaces, label selectors, "include cluster resources" flag), and -// verifies that the set of items written to the backup tarball are -// correct. Validation is done by looking at the names of the files in -// the backup tarball; the contents of the files are not checked. 
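The expected-file lists in TestBackupResourceFiltering encode the backup tarball layout described above: namespaced items land under resources/<group-resource>/namespaces/<namespace>/<name>.json, and cluster-scoped items under resources/<group-resource>/cluster/<name>.json. A minimal illustrative sketch of that convention (the tarballPath helper below is hypothetical, not code from this repository):

```go
package main

import "fmt"

// tarballPath shows the layout the expected-file lists encode: namespaced items
// are written to resources/<group-resource>/namespaces/<namespace>/<name>.json,
// cluster-scoped items to resources/<group-resource>/cluster/<name>.json.
func tarballPath(groupResource, namespace, name string) string {
	if namespace != "" {
		return fmt.Sprintf("resources/%s/namespaces/%s/%s.json", groupResource, namespace, name)
	}
	return fmt.Sprintf("resources/%s/cluster/%s.json", groupResource, name)
}

func main() {
	fmt.Println(tarballPath("deployments.apps", "foo", "bar")) // resources/deployments.apps/namespaces/foo/bar.json
	fmt.Println(tarballPath("persistentvolumes", "", "pv-1"))  // resources/persistentvolumes/cluster/pv-1.json
}
```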
-func TestBackupResourceFiltering(t *testing.T) { - tests := []struct { - name string - backup *velerov1.Backup - apiResources []*apiResource - want []string - }{ - { - name: "no filters backs up everything", - backup: defaultBackup().Backup(), - apiResources: []*apiResource{ - pods( - newPod("foo", "bar"), - newPod("zoo", "raz"), - ), - deployments( - newDeployment("foo", "bar"), - newDeployment("zoo", "raz"), - ), - }, - want: []string{ - "resources/pods/namespaces/foo/bar.json", - "resources/pods/namespaces/zoo/raz.json", - "resources/deployments.apps/namespaces/foo/bar.json", - "resources/deployments.apps/namespaces/zoo/raz.json", - }, - }, - { - name: "included resources filter only backs up resources of those types", - backup: defaultBackup(). - IncludedResources("pods"). - Backup(), - apiResources: []*apiResource{ - pods( - newPod("foo", "bar"), - newPod("zoo", "raz"), - ), - deployments( - newDeployment("foo", "bar"), - newDeployment("zoo", "raz"), - ), - }, - want: []string{ - "resources/pods/namespaces/foo/bar.json", - "resources/pods/namespaces/zoo/raz.json", - }, - }, - { - name: "excluded resources filter only backs up resources not of those types", - backup: defaultBackup(). - ExcludedResources("deployments"). - Backup(), - apiResources: []*apiResource{ - pods( - newPod("foo", "bar"), - newPod("zoo", "raz"), - ), - deployments( - newDeployment("foo", "bar"), - newDeployment("zoo", "raz"), - ), - }, - want: []string{ - "resources/pods/namespaces/foo/bar.json", - "resources/pods/namespaces/zoo/raz.json", - }, - }, - { - name: "included namespaces filter only backs up resources in those namespaces", - backup: defaultBackup(). - IncludedNamespaces("foo"). - Backup(), - apiResources: []*apiResource{ - pods( - newPod("foo", "bar"), - newPod("zoo", "raz"), - ), - deployments( - newDeployment("foo", "bar"), - newDeployment("zoo", "raz"), - ), - }, - want: []string{ - "resources/pods/namespaces/foo/bar.json", - "resources/deployments.apps/namespaces/foo/bar.json", - }, - }, - { - name: "excluded namespaces filter only backs up resources not in those namespaces", - backup: defaultBackup(). - ExcludedNamespaces("zoo"). - Backup(), - apiResources: []*apiResource{ - pods( - newPod("foo", "bar"), - newPod("zoo", "raz"), - ), - deployments( - newDeployment("foo", "bar"), - newDeployment("zoo", "raz"), - ), - }, - want: []string{ - "resources/pods/namespaces/foo/bar.json", - "resources/deployments.apps/namespaces/foo/bar.json", - }, - }, - { - name: "IncludeClusterResources=false only backs up namespaced resources", - backup: defaultBackup(). - IncludeClusterResources(false). - Backup(), - apiResources: []*apiResource{ - pods( - newPod("foo", "bar"), - newPod("zoo", "raz"), - ), - deployments( - newDeployment("foo", "bar"), - newDeployment("zoo", "raz"), - ), - pvs( - newPV("bar"), - newPV("baz"), - ), - }, - want: []string{ - "resources/pods/namespaces/foo/bar.json", - "resources/pods/namespaces/zoo/raz.json", - "resources/deployments.apps/namespaces/foo/bar.json", - "resources/deployments.apps/namespaces/zoo/raz.json", - }, - }, - { - name: "label selector only backs up matching resources", - backup: defaultBackup(). - LabelSelector(&metav1.LabelSelector{MatchLabels: map[string]string{"a": "b"}}). 
- Backup(), - apiResources: []*apiResource{ - pods( - withLabel(newPod("foo", "bar"), "a", "b"), - newPod("zoo", "raz"), - ), - deployments( - newDeployment("foo", "bar"), - withLabel(newDeployment("zoo", "raz"), "a", "b"), - ), - pvs( - withLabel(newPV("bar"), "a", "b"), - withLabel(newPV("baz"), "a", "c"), - ), - }, - want: []string{ - "resources/pods/namespaces/foo/bar.json", - "resources/deployments.apps/namespaces/zoo/raz.json", - "resources/persistentvolumes/cluster/bar.json", - }, - }, - { - name: "should include cluster-scoped resources if backing up subset of namespaces and IncludeClusterResources=true", - backup: defaultBackup(). - IncludedNamespaces("ns-1", "ns-2"). - IncludeClusterResources(true). - Backup(), - apiResources: []*apiResource{ - pods( - newPod("ns-1", "pod-1"), - newPod("ns-2", "pod-1"), - newPod("ns-3", "pod-1"), - ), - pvs( - newPV("pv-1"), - newPV("pv-2"), - ), - }, - want: []string{ - "resources/pods/namespaces/ns-1/pod-1.json", - "resources/pods/namespaces/ns-2/pod-1.json", - "resources/persistentvolumes/cluster/pv-1.json", - "resources/persistentvolumes/cluster/pv-2.json", - }, - }, - { - name: "should not include cluster-scoped resource if backing up subset of namespaces and IncludeClusterResources=false", - backup: defaultBackup(). - IncludedNamespaces("ns-1", "ns-2"). - IncludeClusterResources(false). - Backup(), - apiResources: []*apiResource{ - pods( - newPod("ns-1", "pod-1"), - newPod("ns-2", "pod-1"), - newPod("ns-3", "pod-1"), - ), - pvs( - newPV("pv-1"), - newPV("pv-2"), - ), - }, - want: []string{ - "resources/pods/namespaces/ns-1/pod-1.json", - "resources/pods/namespaces/ns-2/pod-1.json", - }, - }, - { - name: "should not include cluster-scoped resource if backing up subset of namespaces and IncludeClusterResources=nil", - backup: defaultBackup(). - IncludedNamespaces("ns-1", "ns-2"). - Backup(), - apiResources: []*apiResource{ - pods( - newPod("ns-1", "pod-1"), - newPod("ns-2", "pod-1"), - newPod("ns-3", "pod-1"), - ), - pvs( - newPV("pv-1"), - newPV("pv-2"), - ), - }, - want: []string{ - "resources/pods/namespaces/ns-1/pod-1.json", - "resources/pods/namespaces/ns-2/pod-1.json", - }, - }, - { - name: "should include cluster-scoped resources if backing up all namespaces and IncludeClusterResources=true", - backup: defaultBackup(). - IncludeClusterResources(true). - Backup(), - apiResources: []*apiResource{ - pods( - newPod("ns-1", "pod-1"), - newPod("ns-2", "pod-1"), - newPod("ns-3", "pod-1"), - ), - pvs( - newPV("pv-1"), - newPV("pv-2"), - ), - }, - want: []string{ - "resources/pods/namespaces/ns-1/pod-1.json", - "resources/pods/namespaces/ns-2/pod-1.json", - "resources/pods/namespaces/ns-3/pod-1.json", - "resources/persistentvolumes/cluster/pv-1.json", - "resources/persistentvolumes/cluster/pv-2.json", - }, - }, - { - name: "should not include cluster-scoped resources if backing up all namespaces and IncludeClusterResources=false", - backup: defaultBackup(). - IncludeClusterResources(false). - Backup(), - apiResources: []*apiResource{ - pods( - newPod("ns-1", "pod-1"), - newPod("ns-2", "pod-1"), - newPod("ns-3", "pod-1"), - ), - pvs( - newPV("pv-1"), - newPV("pv-2"), - ), - }, - want: []string{ - "resources/pods/namespaces/ns-1/pod-1.json", - "resources/pods/namespaces/ns-2/pod-1.json", - "resources/pods/namespaces/ns-3/pod-1.json", - }, - }, - { - name: "should include cluster-scoped resources if backing up all namespaces and IncludeClusterResources=nil", - backup: defaultBackup(). 
- Backup(), - apiResources: []*apiResource{ - pods( - newPod("ns-1", "pod-1"), - newPod("ns-2", "pod-1"), - newPod("ns-3", "pod-1"), - ), - pvs( - newPV("pv-1"), - newPV("pv-2"), - ), - }, - want: []string{ - "resources/pods/namespaces/ns-1/pod-1.json", - "resources/pods/namespaces/ns-2/pod-1.json", - "resources/pods/namespaces/ns-3/pod-1.json", - "resources/persistentvolumes/cluster/pv-1.json", - "resources/persistentvolumes/cluster/pv-2.json", - }, - }, - { - name: "when a wildcard and a specific resource are included, the wildcard takes precedence", - backup: defaultBackup(). - IncludedResources("*", "pods"). - Backup(), - apiResources: []*apiResource{ - pods( - newPod("foo", "bar"), - newPod("zoo", "raz"), - ), - deployments( - newDeployment("foo", "bar"), - newDeployment("zoo", "raz"), - ), - }, - want: []string{ - "resources/pods/namespaces/foo/bar.json", - "resources/pods/namespaces/zoo/raz.json", - "resources/deployments.apps/namespaces/foo/bar.json", - "resources/deployments.apps/namespaces/zoo/raz.json", - }, - }, - { - name: "wildcard excludes are ignored", - backup: defaultBackup(). - ExcludedResources("*"). - Backup(), - apiResources: []*apiResource{ - pods( - newPod("foo", "bar"), - newPod("zoo", "raz"), - ), - deployments( - newDeployment("foo", "bar"), - newDeployment("zoo", "raz"), - ), - }, - want: []string{ - "resources/pods/namespaces/foo/bar.json", - "resources/pods/namespaces/zoo/raz.json", - "resources/deployments.apps/namespaces/foo/bar.json", - "resources/deployments.apps/namespaces/zoo/raz.json", - }, - }, - { - name: "unresolvable included resources are ignored", - backup: defaultBackup(). - IncludedResources("pods", "unresolvable"). - Backup(), - apiResources: []*apiResource{ - pods( - newPod("foo", "bar"), - newPod("zoo", "raz"), - ), - deployments( - newDeployment("foo", "bar"), - newDeployment("zoo", "raz"), - ), - }, - want: []string{ - "resources/pods/namespaces/foo/bar.json", - "resources/pods/namespaces/zoo/raz.json", - }, - }, - { - name: "unresolvable excluded resources are ignored", - backup: defaultBackup(). - ExcludedResources("deployments", "unresolvable"). - Backup(), - apiResources: []*apiResource{ - pods( - newPod("foo", "bar"), - newPod("zoo", "raz"), - ), - deployments( - newDeployment("foo", "bar"), - newDeployment("zoo", "raz"), - ), - }, - want: []string{ - "resources/pods/namespaces/foo/bar.json", - "resources/pods/namespaces/zoo/raz.json", - }, - }, - { - name: "terminating resources are not backed up", - backup: defaultBackup().Backup(), - apiResources: []*apiResource{ - pods( - newPod("ns-1", "pod-1"), - &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "ns-2", Name: "pod-2", DeletionTimestamp: &metav1.Time{Time: time.Now()}}}, - ), - }, - want: []string{ - "resources/pods/namespaces/ns-1/pod-1.json", - }, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - var ( - h = newHarness(t) - req = &Request{Backup: tc.backup} - backupFile = bytes.NewBuffer([]byte{}) - ) - - for _, resource := range tc.apiResources { - h.addItems(t, resource.group, resource.version, resource.name, resource.shortName, resource.namespaced, resource.items...) - } - - h.backupper.Backup(h.log, req, backupFile, nil, nil) - - assertTarballContents(t, backupFile, append(tc.want, "metadata/version")...) - }) - } -} - -// TestBackupResourceCohabitation runs backups for resources that "cohabitate", -// meaning they exist in multiple API groups (e.g. 
deployments.extensions and -// deployments.apps), and verifies that only one copy of each resource is backed -// up, with preference for the non-"extensions" API group. -func TestBackupResourceCohabitation(t *testing.T) { - tests := []struct { - name string - backup *velerov1.Backup - apiResources []*apiResource - want []string - }{ - { - name: "when deployments exist only in extensions, they're backed up", - backup: defaultBackup().Backup(), - apiResources: []*apiResource{ - extensionsDeployments( - newDeployment("foo", "bar"), - newDeployment("zoo", "raz"), - ), - }, - want: []string{ - "resources/deployments.extensions/namespaces/foo/bar.json", - "resources/deployments.extensions/namespaces/zoo/raz.json", - }, - }, - { - name: "when deployments exist in both apps and extensions, only apps/deployments are backed up", - backup: defaultBackup().Backup(), - apiResources: []*apiResource{ - extensionsDeployments( - newDeployment("foo", "bar"), - newDeployment("zoo", "raz"), - ), - deployments( - newDeployment("foo", "bar"), - newDeployment("zoo", "raz"), - ), - }, - want: []string{ - "resources/deployments.apps/namespaces/foo/bar.json", - "resources/deployments.apps/namespaces/zoo/raz.json", - }, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - var ( - h = newHarness(t) - req = &Request{Backup: tc.backup} - backupFile = bytes.NewBuffer([]byte{}) - ) - - for _, resource := range tc.apiResources { - h.addItems(t, resource.group, resource.version, resource.name, resource.shortName, resource.namespaced, resource.items...) - } - - h.backupper.Backup(h.log, req, backupFile, nil, nil) - - assertTarballContents(t, backupFile, append(tc.want, "metadata/version")...) - }) - } -} - -// TestBackupUsesNewCohabitatingResourcesForEachBackup ensures that when two backups are -// run that each include cohabitating resources, one copy of the relevant resources is -// backed up in each backup. Verification is done by looking at the contents of the backup -// tarball. This covers a specific issue that was fixed by https://github.com/heptio/velero/pull/485. -func TestBackupUsesNewCohabitatingResourcesForEachBackup(t *testing.T) { - h := newHarness(t) - - // run and verify backup 1 - backup1 := &Request{ - Backup: defaultBackup().Backup(), - } - backup1File := bytes.NewBuffer([]byte{}) - - h.addItems(t, "apps", "v1", "deployments", "deploys", true, newDeployment("ns-1", "deploy-1")) - h.addItems(t, "extensions", "v1", "deployments", "deploys", true, newDeployment("ns-1", "deploy-1")) - - h.backupper.Backup(h.log, backup1, backup1File, nil, nil) - - assertTarballContents(t, backup1File, "metadata/version", "resources/deployments.apps/namespaces/ns-1/deploy-1.json") - - // run and verify backup 2 - backup2 := &Request{ - Backup: defaultBackup().Backup(), - } - backup2File := bytes.NewBuffer([]byte{}) - - h.backupper.Backup(h.log, backup2, backup2File, nil, nil) - - assertTarballContents(t, backup2File, "metadata/version", "resources/deployments.apps/namespaces/ns-1/deploy-1.json") -} - -// TestBackupResourceOrdering runs backups of the core API group and ensures that items are backed -// up in the expected order (pods, PVCs, PVs, everything else). Verification is done by looking -// at the order of files written to the backup tarball. 
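The ordering check described above reduces to ranking each resource type against the expected priority list and requiring the ranks to be non-decreasing; the removed assertTarballOrdering helper later in this file applies the same idea to the actual tar stream. A condensed, self-contained sketch (inExpectedOrder is a hypothetical helper, not code from this repository):

```go
package main

import "fmt"

// inExpectedOrder reports whether the resource types in seen (tarball order)
// respect the priority given by ordered; resources not listed in ordered must
// come after every resource that is listed.
func inExpectedOrder(seen []string, ordered ...string) bool {
	rank := func(resource string) int {
		for i, r := range ordered {
			if r == resource {
				return i
			}
		}
		return len(ordered) // unlisted resources sort after all listed ones
	}

	lastSeen := 0
	for _, resource := range seen {
		current := rank(resource)
		if current < lastSeen {
			return false
		}
		lastSeen = current
	}
	return true
}

func main() {
	seen := []string{"pods", "persistentvolumeclaims", "persistentvolumes", "secrets"}
	fmt.Println(inExpectedOrder(seen, "pods", "persistentvolumeclaims", "persistentvolumes"))          // true
	fmt.Println(inExpectedOrder([]string{"persistentvolumes", "pods"}, "pods", "persistentvolumes")) // false
}
```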
-func TestBackupResourceOrdering(t *testing.T) { - tests := []struct { - name string - backup *velerov1.Backup - apiResources []*apiResource - }{ - { - name: "core API group: pods come before pvcs, pvcs come before pvs, pvs come before anything else", - backup: defaultBackup(). - SnapshotVolumes(false). - Backup(), - apiResources: []*apiResource{ - pods( - newPod("foo", "bar"), - newPod("zoo", "raz"), - ), - pvcs( - newPVC("foo", "bar"), - newPVC("zoo", "raz"), - ), - pvs( - newPV("bar"), - newPV("baz"), - ), - secrets( - newSecret("foo", "bar"), - newSecret("zoo", "raz"), - ), - }, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - var ( - h = newHarness(t) - req = &Request{Backup: tc.backup} - backupFile = bytes.NewBuffer([]byte{}) - ) - - for _, resource := range tc.apiResources { - h.addItems(t, resource.group, resource.version, resource.name, resource.shortName, resource.namespaced, resource.items...) - } - - h.backupper.Backup(h.log, req, backupFile, nil, nil) - - assertTarballOrdering(t, backupFile, "pods", "persistentvolumeclaims", "persistentvolumes") - }) - } -} - -// recordResourcesAction is a backup item action that can be configured -// to run for specific resources/namespaces and simply records the items -// that it is executed for. -type recordResourcesAction struct { - selector velero.ResourceSelector - ids []string - backups []velerov1.Backup - additionalItems []velero.ResourceIdentifier -} - -func (a *recordResourcesAction) Execute(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, error) { - metadata, err := meta.Accessor(item) - if err != nil { - return item, a.additionalItems, err - } - a.ids = append(a.ids, kubeutil.NamespaceAndName(metadata)) - a.backups = append(a.backups, *backup) - - return item, a.additionalItems, nil -} - -func (a *recordResourcesAction) AppliesTo() (velero.ResourceSelector, error) { - return a.selector, nil -} - -func (a *recordResourcesAction) ForResource(resource string) *recordResourcesAction { - a.selector.IncludedResources = append(a.selector.IncludedResources, resource) - return a -} - -func (a *recordResourcesAction) ForNamespace(namespace string) *recordResourcesAction { - a.selector.IncludedNamespaces = append(a.selector.IncludedNamespaces, namespace) - return a -} - -func (a *recordResourcesAction) ForLabelSelector(selector string) *recordResourcesAction { - a.selector.LabelSelector = selector - return a -} - -func (a *recordResourcesAction) WithAdditionalItems(items []velero.ResourceIdentifier) *recordResourcesAction { - a.additionalItems = items - return a -} - -// TestBackupActionsRunsForCorrectItems runs backups with backup item actions, and -// verifies that each backup item action is run for the correct set of resources based on its -// AppliesTo() resource selector. Verification is done by using the recordResourcesAction struct, -// which records which resources it's executed for. -func TestBackupActionsRunForCorrectItems(t *testing.T) { - tests := []struct { - name string - backup *velerov1.Backup - apiResources []*apiResource - - // actions is a map from a recordResourcesAction (which will record the items it was called for) - // to a slice of expected items, formatted as {namespace}/{name}. - actions map[*recordResourcesAction][]string - }{ - { - name: "single action with no selector runs for all items", - backup: defaultBackup(). 
- Backup(), - apiResources: []*apiResource{ - pods( - newPod("ns-1", "pod-1"), - newPod("ns-2", "pod-2"), - ), - pvs( - newPV("pv-1"), - newPV("pv-2"), - ), - }, - actions: map[*recordResourcesAction][]string{ - new(recordResourcesAction): {"ns-1/pod-1", "ns-2/pod-2", "pv-1", "pv-2"}, - }, - }, - { - name: "single action with a resource selector for namespaced resources runs only for matching resources", - backup: defaultBackup(). - Backup(), - apiResources: []*apiResource{ - pods( - newPod("ns-1", "pod-1"), - newPod("ns-2", "pod-2"), - ), - pvs( - newPV("pv-1"), - newPV("pv-2"), - ), - }, - actions: map[*recordResourcesAction][]string{ - new(recordResourcesAction).ForResource("pods"): {"ns-1/pod-1", "ns-2/pod-2"}, - }, - }, - { - name: "single action with a resource selector for cluster-scoped resources runs only for matching resources", - backup: defaultBackup(). - Backup(), - apiResources: []*apiResource{ - pods( - newPod("ns-1", "pod-1"), - newPod("ns-2", "pod-2"), - ), - pvs( - newPV("pv-1"), - newPV("pv-2"), - ), - }, - actions: map[*recordResourcesAction][]string{ - new(recordResourcesAction).ForResource("persistentvolumes"): {"pv-1", "pv-2"}, - }, - }, - { - // TODO this seems like a bug - name: "single action with a namespace selector runs for resources in that namespace plus cluster-scoped resources", - backup: defaultBackup(). - Backup(), - apiResources: []*apiResource{ - pods( - newPod("ns-1", "pod-1"), - newPod("ns-2", "pod-2"), - ), - pvcs( - newPVC("ns-1", "pvc-1"), - newPVC("ns-2", "pvc-2"), - ), - pvs( - newPV("pv-1"), - newPV("pv-2"), - ), - }, - actions: map[*recordResourcesAction][]string{ - new(recordResourcesAction).ForNamespace("ns-1"): {"ns-1/pod-1", "ns-1/pvc-1", "pv-1", "pv-2"}, - }, - }, - { - name: "single action with a resource and namespace selector runs only for matching resources", - backup: defaultBackup(). - Backup(), - apiResources: []*apiResource{ - pods( - newPod("ns-1", "pod-1"), - newPod("ns-2", "pod-2"), - ), - pvs( - newPV("pv-1"), - newPV("pv-2"), - ), - }, - actions: map[*recordResourcesAction][]string{ - new(recordResourcesAction).ForResource("pods").ForNamespace("ns-1"): {"ns-1/pod-1"}, - }, - }, - { - name: "multiple actions, each with a different resource selector using short name, run for matching resources", - backup: defaultBackup(). - Backup(), - apiResources: []*apiResource{ - pods( - newPod("ns-1", "pod-1"), - newPod("ns-2", "pod-2"), - ), - pvs( - newPV("pv-1"), - newPV("pv-2"), - ), - }, - actions: map[*recordResourcesAction][]string{ - new(recordResourcesAction).ForResource("po"): {"ns-1/pod-1", "ns-2/pod-2"}, - new(recordResourcesAction).ForResource("pv"): {"pv-1", "pv-2"}, - }, - }, - { - name: "actions with selectors that don't match anything don't run for any resources", - backup: defaultBackup(). 
- Backup(), - apiResources: []*apiResource{ - pods( - newPod("ns-1", "pod-1"), - ), - pvcs( - newPVC("ns-2", "pvc-2"), - ), - pvs( - newPV("pv-1"), - newPV("pv-2"), - ), - }, - actions: map[*recordResourcesAction][]string{ - new(recordResourcesAction).ForNamespace("ns-1").ForResource("persistentvolumeclaims"): nil, - new(recordResourcesAction).ForNamespace("ns-2").ForResource("pods"): nil, - }, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - var ( - h = newHarness(t) - req = &Request{Backup: tc.backup} - backupFile = bytes.NewBuffer([]byte{}) - ) - - for _, resource := range tc.apiResources { - h.addItems(t, resource.group, resource.version, resource.name, resource.shortName, resource.namespaced, resource.items...) - } - - actions := []velero.BackupItemAction{} - for action := range tc.actions { - actions = append(actions, action) - } - - err := h.backupper.Backup(h.log, req, backupFile, actions, nil) - assert.NoError(t, err) - - for action, want := range tc.actions { - assert.Equal(t, want, action.ids) - } - }) - } -} - -// TestBackupWithInvalidActions runs backups with backup item actions that are invalid -// in some way (e.g. an invalid label selector returned from AppliesTo(), an error returned -// from AppliesTo()) and verifies that this causes the backupper.Backup(...) method to -// return an error. -func TestBackupWithInvalidActions(t *testing.T) { - // all test cases in this function are expected to cause the method under test - // to return an error, so no expected results need to be set up. - tests := []struct { - name string - backup *velerov1.Backup - apiResources []*apiResource - actions []velero.BackupItemAction - }{ - { - name: "action with invalid label selector results in an error", - backup: defaultBackup(). - Backup(), - apiResources: []*apiResource{ - pods( - newPod("foo", "bar"), - newPod("zoo", "raz"), - ), - pvs( - newPV("bar"), - newPV("baz"), - ), - }, - actions: []velero.BackupItemAction{ - new(recordResourcesAction).ForLabelSelector("=invalid-selector"), - }, - }, - { - name: "action returning an error from AppliesTo results in an error", - backup: defaultBackup(). - Backup(), - apiResources: []*apiResource{ - pods( - newPod("foo", "bar"), - newPod("zoo", "raz"), - ), - pvs( - newPV("bar"), - newPV("baz"), - ), - }, - actions: []velero.BackupItemAction{ - &appliesToErrorAction{}, - }, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - var ( - h = newHarness(t) - req = &Request{Backup: tc.backup} - backupFile = bytes.NewBuffer([]byte{}) - ) - - for _, resource := range tc.apiResources { - h.addItems(t, resource.group, resource.version, resource.name, resource.shortName, resource.namespaced, resource.items...) - } - - assert.Error(t, h.backupper.Backup(h.log, req, backupFile, tc.actions, nil)) - }) - } -} - -// appliesToErrorAction is a backup item action that always returns -// an error when AppliesTo() is called. -type appliesToErrorAction struct{} - -func (a *appliesToErrorAction) AppliesTo() (velero.ResourceSelector, error) { - return velero.ResourceSelector{}, errors.New("error calling AppliesTo") -} - -func (a *appliesToErrorAction) Execute(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, error) { - panic("not implemented") -} - -// TestBackupActionModifications runs backups with backup item actions that make modifications -// to items in their Execute(...) methods and verifies that these modifications are -// persisted to the backup tarball. 
Verification is done by inspecting the file contents -// of the tarball. -func TestBackupActionModifications(t *testing.T) { - // modifyingActionGetter is a helper function that returns a *pluggableAction, whose Execute(...) - // method modifies the item being passed in by calling the 'modify' function on it. - modifyingActionGetter := func(modify func(*unstructured.Unstructured)) *pluggableAction { - return &pluggableAction{ - executeFunc: func(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, error) { - obj, ok := item.(*unstructured.Unstructured) - if !ok { - return nil, nil, errors.Errorf("unexpected type %T", item) - } - - res := obj.DeepCopy() - modify(res) - - return res, nil, nil - }, - } - } - - tests := []struct { - name string - backup *velerov1.Backup - apiResources []*apiResource - actions []velero.BackupItemAction - want map[string]unstructuredObject - }{ - { - name: "action that adds a label to item gets persisted", - backup: defaultBackup().Backup(), - apiResources: []*apiResource{ - pods( - newPod("ns-1", "pod-1"), - ), - }, - actions: []velero.BackupItemAction{ - modifyingActionGetter(func(item *unstructured.Unstructured) { - item.SetLabels(map[string]string{"updated": "true"}) - }), - }, - want: map[string]unstructuredObject{ - "resources/pods/namespaces/ns-1/pod-1.json": toUnstructuredOrFail(t, withLabel(newPod("ns-1", "pod-1"), "updated", "true")), - }, - }, - { - name: "action that removes labels from item gets persisted", - backup: defaultBackup().Backup(), - apiResources: []*apiResource{ - pods( - withLabel(newPod("ns-1", "pod-1"), "should-be-removed", "true"), - ), - }, - actions: []velero.BackupItemAction{ - modifyingActionGetter(func(item *unstructured.Unstructured) { - item.SetLabels(nil) - }), - }, - want: map[string]unstructuredObject{ - "resources/pods/namespaces/ns-1/pod-1.json": toUnstructuredOrFail(t, newPod("ns-1", "pod-1")), - }, - }, - { - name: "action that sets a spec field on item gets persisted", - backup: defaultBackup().Backup(), - apiResources: []*apiResource{ - pods( - newPod("ns-1", "pod-1"), - ), - }, - actions: []velero.BackupItemAction{ - modifyingActionGetter(func(item *unstructured.Unstructured) { - item.Object["spec"].(map[string]interface{})["nodeName"] = "foo" - }), - }, - want: map[string]unstructuredObject{ - "resources/pods/namespaces/ns-1/pod-1.json": toUnstructuredOrFail(t, &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "ns-1", Name: "pod-1"}, Spec: corev1.PodSpec{NodeName: "foo"}}), - }, - }, - { - // TODO this seems like a bug - name: "modifications to name and namespace in an action are persisted in JSON but not in filename", - backup: defaultBackup(). - Backup(), - apiResources: []*apiResource{ - pods( - newPod("ns-1", "pod-1"), - ), - }, - actions: []velero.BackupItemAction{ - modifyingActionGetter(func(item *unstructured.Unstructured) { - item.SetName(item.GetName() + "-updated") - item.SetNamespace(item.GetNamespace() + "-updated") - }), - }, - want: map[string]unstructuredObject{ - "resources/pods/namespaces/ns-1/pod-1.json": toUnstructuredOrFail(t, newPod("ns-1-updated", "pod-1-updated")), - }, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - var ( - h = newHarness(t) - req = &Request{Backup: tc.backup} - backupFile = bytes.NewBuffer([]byte{}) - ) - - for _, resource := range tc.apiResources { - h.addItems(t, resource.group, resource.version, resource.name, resource.shortName, resource.namespaced, resource.items...) 
- } - - err := h.backupper.Backup(h.log, req, backupFile, tc.actions, nil) - assert.NoError(t, err) - - assertTarballFileContents(t, backupFile, tc.want) - }) - } - -} - -// TestBackupActionAdditionalItems runs backups with backup item actions that return -// additional items to be backed up, and verifies that those items are included in the -// backup tarball as appropriate. Verification is done by looking at the files that exist -// in the backup tarball. -func TestBackupActionAdditionalItems(t *testing.T) { - tests := []struct { - name string - backup *velerov1.Backup - apiResources []*apiResource - actions []velero.BackupItemAction - want []string - }{ - { - name: "additional items that are already being backed up are not backed up twice", - backup: defaultBackup().Backup(), - apiResources: []*apiResource{ - pods( - newPod("ns-1", "pod-1"), - newPod("ns-2", "pod-2"), - newPod("ns-3", "pod-3"), - ), - }, - actions: []velero.BackupItemAction{ - &pluggableAction{ - selector: velero.ResourceSelector{IncludedNamespaces: []string{"ns-1"}}, - executeFunc: func(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, error) { - additionalItems := []velero.ResourceIdentifier{ - {GroupResource: kuberesource.Pods, Namespace: "ns-2", Name: "pod-2"}, - {GroupResource: kuberesource.Pods, Namespace: "ns-3", Name: "pod-3"}, - } - - return item, additionalItems, nil - }, - }, - }, - want: []string{ - "resources/pods/namespaces/ns-1/pod-1.json", - "resources/pods/namespaces/ns-2/pod-2.json", - "resources/pods/namespaces/ns-3/pod-3.json", - }, - }, - { - name: "when using a backup namespace filter, additional items that are in a non-included namespace are not backed up", - backup: defaultBackup().IncludedNamespaces("ns-1").Backup(), - apiResources: []*apiResource{ - pods( - newPod("ns-1", "pod-1"), - newPod("ns-2", "pod-2"), - newPod("ns-3", "pod-3"), - ), - }, - actions: []velero.BackupItemAction{ - &pluggableAction{ - executeFunc: func(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, error) { - additionalItems := []velero.ResourceIdentifier{ - {GroupResource: kuberesource.Pods, Namespace: "ns-2", Name: "pod-2"}, - {GroupResource: kuberesource.Pods, Namespace: "ns-3", Name: "pod-3"}, - } - - return item, additionalItems, nil - }, - }, - }, - want: []string{ - "resources/pods/namespaces/ns-1/pod-1.json", - }, - }, - { - name: "when using a backup namespace filter, additional items that are cluster-scoped are backed up", - backup: defaultBackup().IncludedNamespaces("ns-1").Backup(), - apiResources: []*apiResource{ - pods( - newPod("ns-1", "pod-1"), - newPod("ns-2", "pod-2"), - ), - pvs( - newPV("pv-1"), - newPV("pv-2"), - ), - }, - actions: []velero.BackupItemAction{ - &pluggableAction{ - executeFunc: func(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, error) { - additionalItems := []velero.ResourceIdentifier{ - {GroupResource: kuberesource.PersistentVolumes, Name: "pv-1"}, - {GroupResource: kuberesource.PersistentVolumes, Name: "pv-2"}, - } - - return item, additionalItems, nil - }, - }, - }, - want: []string{ - "resources/pods/namespaces/ns-1/pod-1.json", - "resources/persistentvolumes/cluster/pv-1.json", - "resources/persistentvolumes/cluster/pv-2.json", - }, - }, - { - name: "when using a backup resource filter, additional items that are non-included resources are not backed up", - backup: defaultBackup().IncludedResources("pods").Backup(), - 
apiResources: []*apiResource{ - pods( - newPod("ns-1", "pod-1"), - ), - pvs( - newPV("pv-1"), - newPV("pv-2"), - ), - }, - actions: []velero.BackupItemAction{ - &pluggableAction{ - executeFunc: func(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, error) { - additionalItems := []velero.ResourceIdentifier{ - {GroupResource: kuberesource.PersistentVolumes, Name: "pv-1"}, - {GroupResource: kuberesource.PersistentVolumes, Name: "pv-2"}, - } - - return item, additionalItems, nil - }, - }, - }, - want: []string{ - "resources/pods/namespaces/ns-1/pod-1.json", - }, - }, - { - name: "when IncludeClusterResources=false, additional items that are cluster-scoped are not backed up", - backup: defaultBackup().IncludeClusterResources(false).Backup(), - apiResources: []*apiResource{ - pods( - newPod("ns-1", "pod-1"), - newPod("ns-2", "pod-2"), - ), - pvs( - newPV("pv-1"), - newPV("pv-2"), - ), - }, - actions: []velero.BackupItemAction{ - &pluggableAction{ - executeFunc: func(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, error) { - additionalItems := []velero.ResourceIdentifier{ - {GroupResource: kuberesource.PersistentVolumes, Name: "pv-1"}, - {GroupResource: kuberesource.PersistentVolumes, Name: "pv-2"}, - } - - return item, additionalItems, nil - }, - }, - }, - want: []string{ - "resources/pods/namespaces/ns-1/pod-1.json", - "resources/pods/namespaces/ns-2/pod-2.json", - }, - }, - { - name: "if there's an error backing up additional items, the item the action was run for isn't backed up", - backup: defaultBackup().Backup(), - apiResources: []*apiResource{ - pods( - newPod("ns-1", "pod-1"), - newPod("ns-2", "pod-2"), - newPod("ns-3", "pod-3"), - ), - }, - actions: []velero.BackupItemAction{ - &pluggableAction{ - selector: velero.ResourceSelector{IncludedNamespaces: []string{"ns-1"}}, - executeFunc: func(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, error) { - additionalItems := []velero.ResourceIdentifier{ - {GroupResource: kuberesource.Pods, Namespace: "ns-4", Name: "pod-4"}, - {GroupResource: kuberesource.Pods, Namespace: "ns-5", Name: "pod-5"}, - } - - return item, additionalItems, nil - }, - }, - }, - want: []string{ - "resources/pods/namespaces/ns-2/pod-2.json", - "resources/pods/namespaces/ns-3/pod-3.json", - }, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - var ( - h = newHarness(t) - req = &Request{Backup: tc.backup} - backupFile = bytes.NewBuffer([]byte{}) - ) - - for _, resource := range tc.apiResources { - h.addItems(t, resource.group, resource.version, resource.name, resource.shortName, resource.namespaced, resource.items...) - } - - err := h.backupper.Backup(h.log, req, backupFile, tc.actions, nil) - assert.NoError(t, err) - - assertTarballContents(t, backupFile, append(tc.want, "metadata/version")...) - }) - } -} - -// volumeSnapshotterGetter is a simple implementation of the VolumeSnapshotterGetter -// interface that returns velero.VolumeSnapshotters from a map if they exist. 
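A minimal sketch of the lookup pattern this getter implements, with a plain string standing in for the real velero.VolumeSnapshotter value (fakeGetter and its method body are an illustrative simplification, not code from this repository):

```go
package main

import (
	"errors"
	"fmt"
)

// fakeGetter is a stand-in for the getter described above: a plain map from
// provider name to a registered snapshotter. The value type here is a string
// for brevity; the real getter returns a velero.VolumeSnapshotter.
type fakeGetter map[string]string

func (g fakeGetter) GetVolumeSnapshotter(name string) (string, error) {
	s, ok := g[name]
	if !ok {
		return "", errors.New("volume snapshotter not found")
	}
	return s, nil
}

func main() {
	getter := fakeGetter{"default": "fake-snapshotter"}

	// A snapshot location whose provider is registered resolves to a snapshotter.
	fmt.Println(getter.GetVolumeSnapshotter("default"))

	// An unregistered provider returns an error; the snapshot test cases further
	// below expect that case to produce no snapshots rather than a failed backup.
	fmt.Println(getter.GetVolumeSnapshotter("another"))
}
```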
-type volumeSnapshotterGetter map[string]velero.VolumeSnapshotter - -func (vsg volumeSnapshotterGetter) GetVolumeSnapshotter(name string) (velero.VolumeSnapshotter, error) { - snapshotter, ok := vsg[name] - if !ok { - return nil, errors.New("volume snapshotter not found") - } - - return snapshotter, nil -} - -func int64Ptr(val int) *int64 { - i := int64(val) - return &i -} - -type volumeIdentifier struct { - volumeID string - volumeAZ string -} - -type volumeInfo struct { - volumeType string - iops *int64 - snapshotErr bool -} - -// fakeVolumeSnapshotter is a test fake for the velero.VolumeSnapshotter interface. -type fakeVolumeSnapshotter struct { - // PVVolumeNames is a map from PV name to volume ID, used as the basis - // for the GetVolumeID method. - PVVolumeNames map[string]string - - // Volumes is a map from volume identifier (volume ID + AZ) to a struct - // of volume info, used for the GetVolumeInfo and CreateSnapshot methods. - Volumes map[volumeIdentifier]*volumeInfo -} - -// WithVolume is a test helper for registering persistent volumes that the -// fakeVolumeSnapshotter should handle. -func (vs *fakeVolumeSnapshotter) WithVolume(pvName, id, az, volumeType string, iops int, snapshotErr bool) *fakeVolumeSnapshotter { - if vs.PVVolumeNames == nil { - vs.PVVolumeNames = make(map[string]string) - } - vs.PVVolumeNames[pvName] = id - - if vs.Volumes == nil { - vs.Volumes = make(map[volumeIdentifier]*volumeInfo) - } - - identifier := volumeIdentifier{ - volumeID: id, - volumeAZ: az, - } - - vs.Volumes[identifier] = &volumeInfo{ - volumeType: volumeType, - iops: int64Ptr(iops), - snapshotErr: snapshotErr, - } - - return vs -} - -// Init is a no-op. -func (*fakeVolumeSnapshotter) Init(config map[string]string) error { - return nil -} - -// GetVolumeID looks up the PV name in the PVVolumeNames map and returns the result -// if found, or an error otherwise. -func (vs *fakeVolumeSnapshotter) GetVolumeID(pv runtime.Unstructured) (string, error) { - obj := pv.(*unstructured.Unstructured) - - volumeID, ok := vs.PVVolumeNames[obj.GetName()] - if !ok { - return "", errors.New("unsupported volume type") - } - - return volumeID, nil -} - -// CreateSnapshot looks up the volume in the Volume map. If it's not found, an error is -// returned; if snapshotErr is true on the result, an error is returned; otherwise, -// a snapshotID of "-snapshot" is returned. -func (vs *fakeVolumeSnapshotter) CreateSnapshot(volumeID, volumeAZ string, tags map[string]string) (snapshotID string, err error) { - vi, ok := vs.Volumes[volumeIdentifier{volumeID: volumeID, volumeAZ: volumeAZ}] - if !ok { - return "", errors.New("volume not found") - } - - if vi.snapshotErr { - return "", errors.New("error calling CreateSnapshot") - } - - return volumeID + "-snapshot", nil -} - -// GetVolumeInfo returns volume info if it exists in the Volumes map -// for the specified volume ID and AZ, or an error otherwise. -func (vs *fakeVolumeSnapshotter) GetVolumeInfo(volumeID, volumeAZ string) (string, *int64, error) { - vi, ok := vs.Volumes[volumeIdentifier{volumeID: volumeID, volumeAZ: volumeAZ}] - if !ok { - return "", nil, errors.New("volume not found") - } - - return vi.volumeType, vi.iops, nil -} - -// CreateVolumeFromSnapshot panics because it's not expected to be used for backups. 
-func (*fakeVolumeSnapshotter) CreateVolumeFromSnapshot(snapshotID, volumeType, volumeAZ string, iops *int64) (volumeID string, err error) { - panic("CreateVolumeFromSnapshot should not be used for backups") -} - -// SetVolumeID panics because it's not expected to be used for backups. -func (*fakeVolumeSnapshotter) SetVolumeID(pv runtime.Unstructured, volumeID string) (runtime.Unstructured, error) { - panic("SetVolumeID should not be used for backups") -} - -// DeleteSnapshot panics because it's not expected to be used for backups. -func (*fakeVolumeSnapshotter) DeleteSnapshot(snapshotID string) error { - panic("DeleteSnapshot should not be used for backups") -} - -// TestBackupWithSnapshots runs backups with volume snapshot locations and volume snapshotters -// configured and verifies that snapshots are created as appropriate. Verification is done by -// looking at the backup request's VolumeSnapshots field. This test uses the fakeVolumeSnapshotter -// struct in place of real volume snapshotters. -func TestBackupWithSnapshots(t *testing.T) { - tests := []struct { - name string - req *Request - vsls []*velerov1.VolumeSnapshotLocation - apiResources []*apiResource - snapshotterGetter volumeSnapshotterGetter - want []*volume.Snapshot - }{ - { - name: "persistent volume with no zone annotation creates a snapshot", - req: &Request{ - Backup: defaultBackup().Backup(), - SnapshotLocations: []*velerov1.VolumeSnapshotLocation{ - newSnapshotLocation("velero", "default", "default"), - }, - }, - apiResources: []*apiResource{ - pvs( - newPV("pv-1"), - ), - }, - snapshotterGetter: map[string]velero.VolumeSnapshotter{ - "default": new(fakeVolumeSnapshotter).WithVolume("pv-1", "vol-1", "", "type-1", 100, false), - }, - want: []*volume.Snapshot{ - { - Spec: volume.SnapshotSpec{ - BackupName: "backup-1", - Location: "default", - PersistentVolumeName: "pv-1", - ProviderVolumeID: "vol-1", - VolumeType: "type-1", - VolumeIOPS: int64Ptr(100), - }, - Status: volume.SnapshotStatus{ - Phase: volume.SnapshotPhaseCompleted, - ProviderSnapshotID: "vol-1-snapshot", - }, - }, - }, - }, - { - name: "persistent volume with zone annotation creates a snapshot", - req: &Request{ - Backup: defaultBackup().Backup(), - SnapshotLocations: []*velerov1.VolumeSnapshotLocation{ - newSnapshotLocation("velero", "default", "default"), - }, - }, - apiResources: []*apiResource{ - pvs( - withLabel(newPV("pv-1"), "failure-domain.beta.kubernetes.io/zone", "zone-1"), - ), - }, - snapshotterGetter: map[string]velero.VolumeSnapshotter{ - "default": new(fakeVolumeSnapshotter).WithVolume("pv-1", "vol-1", "zone-1", "type-1", 100, false), - }, - want: []*volume.Snapshot{ - { - Spec: volume.SnapshotSpec{ - BackupName: "backup-1", - Location: "default", - PersistentVolumeName: "pv-1", - ProviderVolumeID: "vol-1", - VolumeAZ: "zone-1", - VolumeType: "type-1", - VolumeIOPS: int64Ptr(100), - }, - Status: volume.SnapshotStatus{ - Phase: volume.SnapshotPhaseCompleted, - ProviderSnapshotID: "vol-1-snapshot", - }, - }, - }, - }, - { - name: "error returned from CreateSnapshot results in a failed snapshot", - req: &Request{ - Backup: defaultBackup().Backup(), - SnapshotLocations: []*velerov1.VolumeSnapshotLocation{ - newSnapshotLocation("velero", "default", "default"), - }, - }, - apiResources: []*apiResource{ - pvs( - newPV("pv-1"), - ), - }, - snapshotterGetter: map[string]velero.VolumeSnapshotter{ - "default": new(fakeVolumeSnapshotter).WithVolume("pv-1", "vol-1", "", "type-1", 100, true), - }, - want: []*volume.Snapshot{ - { - Spec: 
volume.SnapshotSpec{ - BackupName: "backup-1", - Location: "default", - PersistentVolumeName: "pv-1", - ProviderVolumeID: "vol-1", - VolumeType: "type-1", - VolumeIOPS: int64Ptr(100), - }, - Status: volume.SnapshotStatus{ - Phase: volume.SnapshotPhaseFailed, - }, - }, - }, - }, - { - name: "backup with SnapshotVolumes=false does not create any snapshots", - req: &Request{ - Backup: defaultBackup().SnapshotVolumes(false).Backup(), - SnapshotLocations: []*velerov1.VolumeSnapshotLocation{ - newSnapshotLocation("velero", "default", "default"), - }, - }, - apiResources: []*apiResource{ - pvs( - newPV("pv-1"), - ), - }, - snapshotterGetter: map[string]velero.VolumeSnapshotter{ - "default": new(fakeVolumeSnapshotter).WithVolume("pv-1", "vol-1", "", "type-1", 100, false), - }, - want: nil, - }, - { - name: "backup with no volume snapshot locations does not create any snapshots", - req: &Request{ - Backup: defaultBackup().Backup(), - }, - apiResources: []*apiResource{ - pvs( - newPV("pv-1"), - ), - }, - snapshotterGetter: map[string]velero.VolumeSnapshotter{ - "default": new(fakeVolumeSnapshotter).WithVolume("pv-1", "vol-1", "", "type-1", 100, false), - }, - want: nil, - }, - { - name: "backup with no volume snapshotters does not create any snapshots", - req: &Request{ - Backup: defaultBackup().Backup(), - SnapshotLocations: []*velerov1.VolumeSnapshotLocation{ - newSnapshotLocation("velero", "default", "default"), - }, - }, - apiResources: []*apiResource{ - pvs( - newPV("pv-1"), - ), - }, - snapshotterGetter: map[string]velero.VolumeSnapshotter{}, - want: nil, - }, - { - name: "unsupported persistent volume type does not create any snapshots", - req: &Request{ - Backup: defaultBackup().Backup(), - SnapshotLocations: []*velerov1.VolumeSnapshotLocation{ - newSnapshotLocation("velero", "default", "default"), - }, - }, - apiResources: []*apiResource{ - pvs( - newPV("pv-1"), - ), - }, - snapshotterGetter: map[string]velero.VolumeSnapshotter{ - "default": new(fakeVolumeSnapshotter), - }, - want: nil, - }, - { - name: "when there are multiple volumes, snapshot locations, and snapshotters, volumes are matched to the right snapshotters", - req: &Request{ - Backup: defaultBackup().Backup(), - SnapshotLocations: []*velerov1.VolumeSnapshotLocation{ - newSnapshotLocation("velero", "default", "default"), - newSnapshotLocation("velero", "another", "another"), - }, - }, - apiResources: []*apiResource{ - pvs( - newPV("pv-1"), - newPV("pv-2"), - ), - }, - snapshotterGetter: map[string]velero.VolumeSnapshotter{ - "default": new(fakeVolumeSnapshotter).WithVolume("pv-1", "vol-1", "", "type-1", 100, false), - "another": new(fakeVolumeSnapshotter).WithVolume("pv-2", "vol-2", "", "type-2", 100, false), - }, - want: []*volume.Snapshot{ - { - Spec: volume.SnapshotSpec{ - BackupName: "backup-1", - Location: "default", - PersistentVolumeName: "pv-1", - ProviderVolumeID: "vol-1", - VolumeType: "type-1", - VolumeIOPS: int64Ptr(100), - }, - Status: volume.SnapshotStatus{ - Phase: volume.SnapshotPhaseCompleted, - ProviderSnapshotID: "vol-1-snapshot", - }, - }, - { - Spec: volume.SnapshotSpec{ - BackupName: "backup-1", - Location: "another", - PersistentVolumeName: "pv-2", - ProviderVolumeID: "vol-2", - VolumeType: "type-2", - VolumeIOPS: int64Ptr(100), - }, - Status: volume.SnapshotStatus{ - Phase: volume.SnapshotPhaseCompleted, - ProviderSnapshotID: "vol-2-snapshot", - }, - }, - }, - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - var ( - h = newHarness(t) - backupFile = bytes.NewBuffer([]byte{}) - 
) - - for _, resource := range tc.apiResources { - h.addItems(t, resource.group, resource.version, resource.name, resource.shortName, resource.namespaced, resource.items...) - } - - err := h.backupper.Backup(h.log, tc.req, backupFile, nil, tc.snapshotterGetter) - assert.NoError(t, err) - - assert.Equal(t, tc.want, tc.req.VolumeSnapshots) - }) - } -} - -// pluggableAction is a backup item action that can be plugged with an Execute -// function body at runtime. -type pluggableAction struct { - selector velero.ResourceSelector - executeFunc func(runtime.Unstructured, *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, error) -} - -func (a *pluggableAction) Execute(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, error) { - if a.executeFunc == nil { - return item, nil, nil - } - - return a.executeFunc(item, backup) -} - -func (a *pluggableAction) AppliesTo() (velero.ResourceSelector, error) { - return a.selector, nil -} - -type apiResource struct { - group string - version string - name string - shortName string - namespaced bool - items []metav1.Object -} - -func pods(items ...metav1.Object) *apiResource { - return &apiResource{ - group: "", - version: "v1", - name: "pods", - shortName: "po", - namespaced: true, - items: items, - } -} - -func pvcs(items ...metav1.Object) *apiResource { - return &apiResource{ - group: "", - version: "v1", - name: "persistentvolumeclaims", - shortName: "pvc", - namespaced: true, - items: items, - } -} - -func secrets(items ...metav1.Object) *apiResource { - return &apiResource{ - group: "", - version: "v1", - name: "secrets", - shortName: "secrets", - namespaced: true, - items: items, - } -} - -func deployments(items ...metav1.Object) *apiResource { - return &apiResource{ - group: "apps", - version: "v1", - name: "deployments", - shortName: "deploy", - namespaced: true, - items: items, - } -} - -func extensionsDeployments(items ...metav1.Object) *apiResource { - return &apiResource{ - group: "extensions", - version: "v1", - name: "deployments", - shortName: "deploy", - namespaced: true, - items: items, - } -} - -func pvs(items ...metav1.Object) *apiResource { - return &apiResource{ - group: "", - version: "v1", - name: "persistentvolumes", - shortName: "pv", - namespaced: false, - items: items, - } -} - -type harness struct { - veleroClient *fake.Clientset - kubeClient *kubefake.Clientset - dynamicClient *dynamicfake.FakeDynamicClient - discoveryClient *test.DiscoveryClient - backupper *kubernetesBackupper - log logrus.FieldLogger -} - -func (h *harness) addItems(t *testing.T, group, version, resource, shortName string, namespaced bool, items ...metav1.Object) { - t.Helper() - - h.discoveryClient.WithResource(group, version, resource, namespaced, shortName) - require.NoError(t, h.backupper.discoveryHelper.Refresh()) - - gvr := schema.GroupVersionResource{ - Group: group, - Version: version, - Resource: resource, - } - - for _, item := range items { - obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(item) - require.NoError(t, err) - - unstructuredObj := &unstructured.Unstructured{Object: obj} - - if namespaced { - _, err = h.dynamicClient.Resource(gvr).Namespace(item.GetNamespace()).Create(unstructuredObj, metav1.CreateOptions{}) - } else { - _, err = h.dynamicClient.Resource(gvr).Create(unstructuredObj, metav1.CreateOptions{}) - } - require.NoError(t, err) - } -} - -func newHarness(t *testing.T) *harness { - t.Helper() - - // API server fakes - var ( - veleroClient = 
fake.NewSimpleClientset() - kubeClient = kubefake.NewSimpleClientset() - dynamicClient = dynamicfake.NewSimpleDynamicClient(runtime.NewScheme()) - discoveryClient = &test.DiscoveryClient{FakeDiscovery: kubeClient.Discovery().(*discoveryfake.FakeDiscovery)} - ) - - log := logrus.StandardLogger() - - discoveryHelper, err := discovery.NewHelper(discoveryClient, log) - require.NoError(t, err) - - return &harness{ - veleroClient: veleroClient, - kubeClient: kubeClient, - dynamicClient: dynamicClient, - discoveryClient: discoveryClient, - backupper: &kubernetesBackupper{ - dynamicFactory: client.NewDynamicFactory(dynamicClient), - discoveryHelper: discoveryHelper, - groupBackupperFactory: new(defaultGroupBackupperFactory), - - // unsupported - podCommandExecutor: nil, - resticBackupperFactory: nil, - resticTimeout: 0, - }, - log: log, - } -} - -func withLabel(obj metav1.Object, key, val string) metav1.Object { - labels := obj.GetLabels() - if labels == nil { - labels = make(map[string]string) - } - labels[key] = val - obj.SetLabels(labels) - - return obj -} - -func newPod(ns, name string) *corev1.Pod { - return &corev1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: ns, - Name: name, - }, - } -} - -func newPVC(ns, name string) *corev1.PersistentVolumeClaim { - return &corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: ns, - Name: name, - }, - } -} - -func newSecret(ns, name string) *corev1.Secret { - return &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: ns, - Name: name, - }, - } -} - -func newDeployment(ns, name string) *appsv1.Deployment { - return &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: ns, - Name: name, - }, - } -} - -func newPV(name string) *corev1.PersistentVolume { - return &corev1.PersistentVolume{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - } -} - -func newSnapshotLocation(ns, name, provider string) *velerov1.VolumeSnapshotLocation { - return &velerov1.VolumeSnapshotLocation{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: ns, - Name: name, - }, - Spec: velerov1.VolumeSnapshotLocationSpec{ - Provider: provider, - }, - } -} - -func defaultBackup() *Builder { - return NewNamedBuilder(velerov1.DefaultNamespace, "backup-1") -} - -func toUnstructuredOrFail(t *testing.T, obj interface{}) map[string]interface{} { - t.Helper() - - res, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) - require.NoError(t, err) - - return res -} - -// assertTarballContents verifies that the gzipped tarball stored in the provided -// backupFile contains exactly the file names specified. -func assertTarballContents(t *testing.T, backupFile io.Reader, items ...string) { - t.Helper() - - gzr, err := gzip.NewReader(backupFile) - require.NoError(t, err) - - r := tar.NewReader(gzr) - - var files []string - for { - hdr, err := r.Next() - if err == io.EOF { - break - } - require.NoError(t, err) - - files = append(files, hdr.Name) - } - - sort.Strings(files) - sort.Strings(items) - assert.Equal(t, items, files) -} - -// unstructuredObject is a type alias to improve readability. -type unstructuredObject map[string]interface{} - -// assertTarballFileContents verifies that the gzipped tarball stored in the provided -// backupFile contains the files specified as keys in 'want', and for each of those -// files verifies that the content of the file is JSON and is equivalent to the JSON -// content stored as values in 'want'. 
-func assertTarballFileContents(t *testing.T, backupFile io.Reader, want map[string]unstructuredObject) { - t.Helper() - - gzr, err := gzip.NewReader(backupFile) - require.NoError(t, err) - - r := tar.NewReader(gzr) - items := make(map[string][]byte) - - for { - hdr, err := r.Next() - if err == io.EOF { - break - } - require.NoError(t, err) - - bytes, err := ioutil.ReadAll(r) - require.NoError(t, err) - - items[hdr.Name] = bytes - } - - for name, wantItem := range want { - gotData, ok := items[name] - assert.True(t, ok, "did not find item %s in tarball", name) - if !ok { - continue - } - - // json-unmarshal the data from the tarball - var got unstructuredObject - err := json.Unmarshal(gotData, &got) - assert.NoError(t, err) - if err != nil { - continue - } - - assert.Equal(t, wantItem, got) - } -} - -// assertTarballOrdering ensures that resources were written to the tarball in the expected -// order. Any resources *not* in orderedResources are required to come *after* all resources -// in orderedResources, in any order. -func assertTarballOrdering(t *testing.T, backupFile io.Reader, orderedResources ...string) { - t.Helper() - - gzr, err := gzip.NewReader(backupFile) - require.NoError(t, err) - - r := tar.NewReader(gzr) - - // lastSeen tracks the index in 'orderedResources' of the last resource type - // we saw in the tarball. Once we've seen a resource in 'orderedResources', - // we should never see another instance of a prior resource. - lastSeen := 0 - - for { - hdr, err := r.Next() - if err == io.EOF { - break - } - require.NoError(t, err) - - // ignore files like metadata/version - if !strings.HasPrefix(hdr.Name, "resources/") { - continue - } - - // get the resource name - parts := strings.Split(hdr.Name, "/") - require.True(t, len(parts) >= 2) - resourceName := parts[1] - - // Find the index in 'orderedResources' of the resource type for - // the current tar item, if it exists. This index ('current') *must* - // be greater than or equal to 'lastSeen', which was the last resource - // we saw, since otherwise the current resource would be out of order. By - // initializing current to len(ordered), we're saying that if the resource - // is not explicitly in orederedResources, then it must come *after* - // all orderedResources. - current := len(orderedResources) - for i, item := range orderedResources { - if item == resourceName { - current = i - break - } - } - - // the index of the current resource must be the same as or greater than the index of - // the last resource we saw for the backed-up order to be correct. - assert.True(t, current >= lastSeen, "%s was backed up out of order", resourceName) - lastSeen = current - } -} diff --git a/pkg/backup/backup_pv_action_test.go b/pkg/backup/backup_pv_action_test.go index ef492e97ba0..f81873d2b70 100644 --- a/pkg/backup/backup_pv_action_test.go +++ b/pkg/backup/backup_pv_action_test.go @@ -27,7 +27,7 @@ import ( v1 "github.com/heptio/velero/pkg/apis/velero/v1" "github.com/heptio/velero/pkg/kuberesource" "github.com/heptio/velero/pkg/plugin/velero" - velerotest "github.com/heptio/velero/pkg/util/test" + velerotest "github.com/heptio/velero/pkg/test" ) func TestBackupPVAction(t *testing.T) { diff --git a/pkg/backup/backup_test.go b/pkg/backup/backup_test.go index f5964d53148..35b89bd3672 100644 --- a/pkg/backup/backup_test.go +++ b/pkg/backup/backup_test.go @@ -1,5 +1,5 @@ /* -Copyright 2017, 2019 the Velero contributors. +Copyright 2019 the Velero contributors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,447 +17,2386 @@ limitations under the License. package backup import ( + "archive/tar" "bytes" + "compress/gzip" + "context" + "encoding/json" + "io" + "io/ioutil" + "sort" + "strings" "testing" + "time" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - v1 "github.com/heptio/velero/pkg/apis/velero/v1" + velerov1 "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/builder" "github.com/heptio/velero/pkg/client" "github.com/heptio/velero/pkg/discovery" + "github.com/heptio/velero/pkg/kuberesource" "github.com/heptio/velero/pkg/plugin/velero" - "github.com/heptio/velero/pkg/podexec" "github.com/heptio/velero/pkg/restic" - "github.com/heptio/velero/pkg/util/collections" - "github.com/heptio/velero/pkg/util/logging" - velerotest "github.com/heptio/velero/pkg/util/test" + "github.com/heptio/velero/pkg/test" + testutil "github.com/heptio/velero/pkg/test" + kubeutil "github.com/heptio/velero/pkg/util/kube" + "github.com/heptio/velero/pkg/volume" ) -var ( - v1Group = &metav1.APIResourceList{ - GroupVersion: "v1", - APIResources: []metav1.APIResource{configMapsResource, podsResource, namespacesResource}, +func TestBackedUpItemsMatchesTarballContents(t *testing.T) { + // TODO: figure out if this can be replaced with the restmapper + // (https://github.com/kubernetes/apimachinery/blob/035e418f1ad9b6da47c4e01906a0cfe32f4ee2e7/pkg/api/meta/restmapper.go) + gvkToResource := map[string]string{ + "v1/Pod": "pods", + "apps/v1/Deployment": "deployments.apps", + "v1/PersistentVolume": "persistentvolumes", } - configMapsResource = metav1.APIResource{ - Name: "configmaps", - SingularName: "configmap", - Namespaced: true, - Kind: "ConfigMap", - Verbs: metav1.Verbs([]string{"create", "update", "get", "list", "watch", "delete"}), - ShortNames: []string{"cm"}, - Categories: []string{"all"}, + h := newHarness(t) + req := &Request{Backup: defaultBackup().Result()} + backupFile := bytes.NewBuffer([]byte{}) + + apiResources := []*test.APIResource{ + test.Pods( + builder.ForPod("foo", "bar").Result(), + builder.ForPod("zoo", "raz").Result(), + ), + test.Deployments( + builder.ForDeployment("foo", "bar").Result(), + builder.ForDeployment("zoo", "raz").Result(), + ), + test.PVs( + builder.ForPersistentVolume("bar").Result(), + builder.ForPersistentVolume("baz").Result(), + ), + } + for _, resource := range apiResources { + h.addItems(t, resource) + } + + h.backupper.Backup(h.log, req, backupFile, nil, nil) + + // go through BackedUpItems after the backup to assemble the list of files we + // expect to see in the tarball and compare to see if they match + var expectedFiles []string + for item := range req.BackedUpItems { + file := "resources/" + gvkToResource[item.resource] + if item.namespace != "" { + file = file + "/namespaces/" + item.namespace + } else { + file = file + "/cluster" + } + file = file + "/" + item.name + ".json" + expectedFiles = append(expectedFiles, file) + } + + assertTarballContents(t, backupFile, append(expectedFiles, "metadata/version")...) 
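// Note (descriptive only, based on the code above): the keys of gvkToResource are "<group>/<version>/<Kind>"
// strings and the values are the group-qualified resource names used in tarball paths (for example
// "apps/v1/Deployment" maps to "deployments.apps", while core-group kinds such as "v1/Pod" map to plain
// "pods"); whether a path gets a "namespaces/<namespace>" or a "cluster" segment is decided by whether the
// backed-up item has a namespace, and every tarball also carries a "metadata/version" entry.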
+} + +// TestBackupResourceFiltering runs backups with different combinations +// of resource filters (included/excluded resources, included/excluded +// namespaces, label selectors, "include cluster resources" flag), and +// verifies that the set of items written to the backup tarball are +// correct. Validation is done by looking at the names of the files in +// the backup tarball; the contents of the files are not checked. +func TestBackupResourceFiltering(t *testing.T) { + tests := []struct { + name string + backup *velerov1.Backup + apiResources []*test.APIResource + want []string + }{ + { + name: "no filters backs up everything", + backup: defaultBackup().Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("foo", "bar").Result(), + builder.ForPod("zoo", "raz").Result(), + ), + test.Deployments( + builder.ForDeployment("foo", "bar").Result(), + builder.ForDeployment("zoo", "raz").Result(), + ), + }, + want: []string{ + "resources/pods/namespaces/foo/bar.json", + "resources/pods/namespaces/zoo/raz.json", + "resources/deployments.apps/namespaces/foo/bar.json", + "resources/deployments.apps/namespaces/zoo/raz.json", + }, + }, + { + name: "included resources filter only backs up resources of those types", + backup: defaultBackup(). + IncludedResources("pods"). + Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("foo", "bar").Result(), + builder.ForPod("zoo", "raz").Result(), + ), + test.Deployments( + builder.ForDeployment("foo", "bar").Result(), + builder.ForDeployment("zoo", "raz").Result(), + ), + }, + want: []string{ + "resources/pods/namespaces/foo/bar.json", + "resources/pods/namespaces/zoo/raz.json", + }, + }, + { + name: "excluded resources filter only backs up resources not of those types", + backup: defaultBackup(). + ExcludedResources("deployments"). + Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("foo", "bar").Result(), + builder.ForPod("zoo", "raz").Result(), + ), + test.Deployments( + builder.ForDeployment("foo", "bar").Result(), + builder.ForDeployment("zoo", "raz").Result(), + ), + }, + want: []string{ + "resources/pods/namespaces/foo/bar.json", + "resources/pods/namespaces/zoo/raz.json", + }, + }, + { + name: "included namespaces filter only backs up resources in those namespaces", + backup: defaultBackup(). + IncludedNamespaces("foo"). + Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("foo", "bar").Result(), + builder.ForPod("zoo", "raz").Result(), + ), + test.Deployments( + builder.ForDeployment("foo", "bar").Result(), + builder.ForDeployment("zoo", "raz").Result(), + ), + }, + want: []string{ + "resources/pods/namespaces/foo/bar.json", + "resources/deployments.apps/namespaces/foo/bar.json", + }, + }, + { + name: "excluded namespaces filter only backs up resources not in those namespaces", + backup: defaultBackup(). + ExcludedNamespaces("zoo"). + Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("foo", "bar").Result(), + builder.ForPod("zoo", "raz").Result(), + ), + test.Deployments( + builder.ForDeployment("foo", "bar").Result(), + builder.ForDeployment("zoo", "raz").Result(), + ), + }, + want: []string{ + "resources/pods/namespaces/foo/bar.json", + "resources/deployments.apps/namespaces/foo/bar.json", + }, + }, + { + name: "IncludeClusterResources=false only backs up namespaced resources", + backup: defaultBackup(). + IncludeClusterResources(false). 
+ Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("foo", "bar").Result(), + builder.ForPod("zoo", "raz").Result(), + ), + test.Deployments( + builder.ForDeployment("foo", "bar").Result(), + builder.ForDeployment("zoo", "raz").Result(), + ), + test.PVs( + builder.ForPersistentVolume("bar").Result(), + builder.ForPersistentVolume("baz").Result(), + ), + }, + want: []string{ + "resources/pods/namespaces/foo/bar.json", + "resources/pods/namespaces/zoo/raz.json", + "resources/deployments.apps/namespaces/foo/bar.json", + "resources/deployments.apps/namespaces/zoo/raz.json", + }, + }, + { + name: "label selector only backs up matching resources", + backup: defaultBackup(). + LabelSelector(&metav1.LabelSelector{MatchLabels: map[string]string{"a": "b"}}). + Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("foo", "bar").ObjectMeta(builder.WithLabels("a", "b")).Result(), + builder.ForPod("zoo", "raz").Result(), + ), + test.Deployments( + builder.ForDeployment("foo", "bar").Result(), + builder.ForDeployment("zoo", "raz").ObjectMeta(builder.WithLabels("a", "b")).Result(), + ), + test.PVs( + builder.ForPersistentVolume("bar").ObjectMeta(builder.WithLabels("a", "b")).Result(), + builder.ForPersistentVolume("baz").ObjectMeta(builder.WithLabels("a", "c")).Result(), + ), + }, + want: []string{ + "resources/pods/namespaces/foo/bar.json", + "resources/deployments.apps/namespaces/zoo/raz.json", + "resources/persistentvolumes/cluster/bar.json", + }, + }, + { + name: "resources with velero.io/exclude-from-backup=true label are not included", + backup: defaultBackup(). + Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("foo", "bar").ObjectMeta(builder.WithLabels("velero.io/exclude-from-backup", "true")).Result(), + builder.ForPod("zoo", "raz").Result(), + ), + test.Deployments( + builder.ForDeployment("foo", "bar").Result(), + builder.ForDeployment("zoo", "raz").ObjectMeta(builder.WithLabels("velero.io/exclude-from-backup", "true")).Result(), + ), + test.PVs( + builder.ForPersistentVolume("bar").ObjectMeta(builder.WithLabels("a", "b")).Result(), + builder.ForPersistentVolume("baz").ObjectMeta(builder.WithLabels("velero.io/exclude-from-backup", "true")).Result(), + ), + }, + want: []string{ + "resources/pods/namespaces/zoo/raz.json", + "resources/deployments.apps/namespaces/foo/bar.json", + "resources/persistentvolumes/cluster/bar.json", + }, + }, + { + name: "resources with velero.io/exclude-from-backup=true label are not included even if matching label selector", + backup: defaultBackup(). + LabelSelector(&metav1.LabelSelector{MatchLabels: map[string]string{"a": "b"}}). 
+ Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("foo", "bar").ObjectMeta(builder.WithLabels("velero.io/exclude-from-backup", "true", "a", "b")).Result(), + builder.ForPod("zoo", "raz").ObjectMeta(builder.WithLabels("a", "b")).Result(), + ), + test.Deployments( + builder.ForDeployment("foo", "bar").Result(), + builder.ForDeployment("zoo", "raz").ObjectMeta(builder.WithLabels("velero.io/exclude-from-backup", "true", "a", "b")).Result(), + ), + test.PVs( + builder.ForPersistentVolume("bar").ObjectMeta(builder.WithLabels("a", "b")).Result(), + builder.ForPersistentVolume("baz").ObjectMeta(builder.WithLabels("a", "b", "velero.io/exclude-from-backup", "true")).Result(), + ), + }, + want: []string{ + "resources/pods/namespaces/zoo/raz.json", + "resources/persistentvolumes/cluster/bar.json", + }, + }, + { + name: "resources with velero.io/exclude-from-backup label specified but not 'true' are included", + backup: defaultBackup(). + Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("foo", "bar").ObjectMeta(builder.WithLabels("velero.io/exclude-from-backup", "false")).Result(), + builder.ForPod("zoo", "raz").Result(), + ), + test.Deployments( + builder.ForDeployment("foo", "bar").Result(), + builder.ForDeployment("zoo", "raz").ObjectMeta(builder.WithLabels("velero.io/exclude-from-backup", "1")).Result(), + ), + test.PVs( + builder.ForPersistentVolume("bar").ObjectMeta(builder.WithLabels("a", "b")).Result(), + builder.ForPersistentVolume("baz").ObjectMeta(builder.WithLabels("velero.io/exclude-from-backup", "")).Result(), + ), + }, + want: []string{ + "resources/pods/namespaces/foo/bar.json", + "resources/pods/namespaces/zoo/raz.json", + "resources/deployments.apps/namespaces/foo/bar.json", + "resources/deployments.apps/namespaces/zoo/raz.json", + "resources/persistentvolumes/cluster/bar.json", + "resources/persistentvolumes/cluster/baz.json", + }, + }, + { + name: "should include cluster-scoped resources if backing up subset of namespaces and IncludeClusterResources=true", + backup: defaultBackup(). + IncludedNamespaces("ns-1", "ns-2"). + IncludeClusterResources(true). + Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-1").Result(), + builder.ForPod("ns-3", "pod-1").Result(), + ), + test.PVs( + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ), + }, + want: []string{ + "resources/pods/namespaces/ns-1/pod-1.json", + "resources/pods/namespaces/ns-2/pod-1.json", + "resources/persistentvolumes/cluster/pv-1.json", + "resources/persistentvolumes/cluster/pv-2.json", + }, + }, + { + name: "should not include cluster-scoped resource if backing up subset of namespaces and IncludeClusterResources=false", + backup: defaultBackup(). + IncludedNamespaces("ns-1", "ns-2"). + IncludeClusterResources(false). + Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-1").Result(), + builder.ForPod("ns-3", "pod-1").Result(), + ), + test.PVs( + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ), + }, + want: []string{ + "resources/pods/namespaces/ns-1/pod-1.json", + "resources/pods/namespaces/ns-2/pod-1.json", + }, + }, + { + name: "should not include cluster-scoped resource if backing up subset of namespaces and IncludeClusterResources=nil", + backup: defaultBackup(). + IncludedNamespaces("ns-1", "ns-2"). 
+ Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-1").Result(), + builder.ForPod("ns-3", "pod-1").Result(), + ), + test.PVs( + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ), + }, + want: []string{ + "resources/pods/namespaces/ns-1/pod-1.json", + "resources/pods/namespaces/ns-2/pod-1.json", + }, + }, + { + name: "should include cluster-scoped resources if backing up all namespaces and IncludeClusterResources=true", + backup: defaultBackup(). + IncludeClusterResources(true). + Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-1").Result(), + builder.ForPod("ns-3", "pod-1").Result(), + ), + test.PVs( + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ), + }, + want: []string{ + "resources/pods/namespaces/ns-1/pod-1.json", + "resources/pods/namespaces/ns-2/pod-1.json", + "resources/pods/namespaces/ns-3/pod-1.json", + "resources/persistentvolumes/cluster/pv-1.json", + "resources/persistentvolumes/cluster/pv-2.json", + }, + }, + { + name: "should not include cluster-scoped resources if backing up all namespaces and IncludeClusterResources=false", + backup: defaultBackup(). + IncludeClusterResources(false). + Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-1").Result(), + builder.ForPod("ns-3", "pod-1").Result(), + ), + test.PVs( + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ), + }, + want: []string{ + "resources/pods/namespaces/ns-1/pod-1.json", + "resources/pods/namespaces/ns-2/pod-1.json", + "resources/pods/namespaces/ns-3/pod-1.json", + }, + }, + { + name: "should include cluster-scoped resources if backing up all namespaces and IncludeClusterResources=nil", + backup: defaultBackup(). + Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-1").Result(), + builder.ForPod("ns-3", "pod-1").Result(), + ), + test.PVs( + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ), + }, + want: []string{ + "resources/pods/namespaces/ns-1/pod-1.json", + "resources/pods/namespaces/ns-2/pod-1.json", + "resources/pods/namespaces/ns-3/pod-1.json", + "resources/persistentvolumes/cluster/pv-1.json", + "resources/persistentvolumes/cluster/pv-2.json", + }, + }, + { + name: "when a wildcard and a specific resource are included, the wildcard takes precedence", + backup: defaultBackup(). + IncludedResources("*", "pods"). + Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("foo", "bar").Result(), + builder.ForPod("zoo", "raz").Result(), + ), + test.Deployments( + builder.ForDeployment("foo", "bar").Result(), + builder.ForDeployment("zoo", "raz").Result(), + ), + }, + want: []string{ + "resources/pods/namespaces/foo/bar.json", + "resources/pods/namespaces/zoo/raz.json", + "resources/deployments.apps/namespaces/foo/bar.json", + "resources/deployments.apps/namespaces/zoo/raz.json", + }, + }, + { + name: "wildcard excludes are ignored", + backup: defaultBackup(). + ExcludedResources("*"). 
+ Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("foo", "bar").Result(), + builder.ForPod("zoo", "raz").Result(), + ), + test.Deployments( + builder.ForDeployment("foo", "bar").Result(), + builder.ForDeployment("zoo", "raz").Result(), + ), + }, + want: []string{ + "resources/pods/namespaces/foo/bar.json", + "resources/pods/namespaces/zoo/raz.json", + "resources/deployments.apps/namespaces/foo/bar.json", + "resources/deployments.apps/namespaces/zoo/raz.json", + }, + }, + { + name: "unresolvable included resources are ignored", + backup: defaultBackup(). + IncludedResources("pods", "unresolvable"). + Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("foo", "bar").Result(), + builder.ForPod("zoo", "raz").Result(), + ), + test.Deployments( + builder.ForDeployment("foo", "bar").Result(), + builder.ForDeployment("zoo", "raz").Result(), + ), + }, + want: []string{ + "resources/pods/namespaces/foo/bar.json", + "resources/pods/namespaces/zoo/raz.json", + }, + }, + { + name: "unresolvable excluded resources are ignored", + backup: defaultBackup(). + ExcludedResources("deployments", "unresolvable"). + Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("foo", "bar").Result(), + builder.ForPod("zoo", "raz").Result(), + ), + test.Deployments( + builder.ForDeployment("foo", "bar").Result(), + builder.ForDeployment("zoo", "raz").Result(), + ), + }, + want: []string{ + "resources/pods/namespaces/foo/bar.json", + "resources/pods/namespaces/zoo/raz.json", + }, + }, + { + name: "terminating resources are not backed up", + backup: defaultBackup().Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").ObjectMeta(builder.WithDeletionTimestamp(time.Now())).Result(), + ), + }, + want: []string{ + "resources/pods/namespaces/ns-1/pod-1.json", + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + var ( + h = newHarness(t) + req = &Request{Backup: tc.backup} + backupFile = bytes.NewBuffer([]byte{}) + ) + + for _, resource := range tc.apiResources { + h.addItems(t, resource) + } + + h.backupper.Backup(h.log, req, backupFile, nil, nil) + + assertTarballContents(t, backupFile, append(tc.want, "metadata/version")...) + }) + } +} + +// TestBackupResourceCohabitation runs backups for resources that "cohabitate", +// meaning they exist in multiple API groups (e.g. deployments.extensions and +// deployments.apps), and verifies that only one copy of each resource is backed +// up, with preference for the non-"extensions" API group. 
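// A minimal sketch of the de-duplication idea the test below exercises -- not Velero's actual
// implementation; claimCohabitating and the claimed map are illustrative names, not symbols from this
// package. Each cohabitating kind is claimed by a single API group per backup, so when deployments are
// served by both groups only one copy reaches the tarball; the cases below expect the apps copy to win.
func claimCohabitating(claimed map[string]string, kind, group string) bool {
	owner, ok := claimed[kind]
	if !ok {
		claimed[kind] = group // first group to claim the kind backs it up
		return true
	}
	return owner == group // other groups serving the same kind are skipped
}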
+func TestBackupResourceCohabitation(t *testing.T) { + tests := []struct { + name string + backup *velerov1.Backup + apiResources []*test.APIResource + want []string + }{ + { + name: "when deployments exist only in extensions, they're backed up", + backup: defaultBackup().Result(), + apiResources: []*test.APIResource{ + test.ExtensionsDeployments( + builder.ForDeployment("foo", "bar").Result(), + builder.ForDeployment("zoo", "raz").Result(), + ), + }, + want: []string{ + "resources/deployments.extensions/namespaces/foo/bar.json", + "resources/deployments.extensions/namespaces/zoo/raz.json", + }, + }, + { + name: "when deployments exist in both apps and extensions, only apps/deployments are backed up", + backup: defaultBackup().Result(), + apiResources: []*test.APIResource{ + test.ExtensionsDeployments( + builder.ForDeployment("foo", "bar").Result(), + builder.ForDeployment("zoo", "raz").Result(), + ), + test.Deployments( + builder.ForDeployment("foo", "bar").Result(), + builder.ForDeployment("zoo", "raz").Result(), + ), + }, + want: []string{ + "resources/deployments.apps/namespaces/foo/bar.json", + "resources/deployments.apps/namespaces/zoo/raz.json", + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + var ( + h = newHarness(t) + req = &Request{Backup: tc.backup} + backupFile = bytes.NewBuffer([]byte{}) + ) + + for _, resource := range tc.apiResources { + h.addItems(t, resource) + } + + h.backupper.Backup(h.log, req, backupFile, nil, nil) + + assertTarballContents(t, backupFile, append(tc.want, "metadata/version")...) + }) + } +} + +// TestBackupUsesNewCohabitatingResourcesForEachBackup ensures that when two backups are +// run that each include cohabitating resources, one copy of the relevant resources is +// backed up in each backup. Verification is done by looking at the contents of the backup +// tarball. This covers a specific issue that was fixed by https://github.com/heptio/velero/pull/485. +func TestBackupUsesNewCohabitatingResourcesForEachBackup(t *testing.T) { + h := newHarness(t) + + // run and verify backup 1 + backup1 := &Request{ + Backup: defaultBackup().Result(), + } + backup1File := bytes.NewBuffer([]byte{}) + + h.addItems(t, test.Deployments(builder.ForDeployment("ns-1", "deploy-1").Result())) + h.addItems(t, test.ExtensionsDeployments(builder.ForDeployment("ns-1", "deploy-1").Result())) + + h.backupper.Backup(h.log, backup1, backup1File, nil, nil) + + assertTarballContents(t, backup1File, "metadata/version", "resources/deployments.apps/namespaces/ns-1/deploy-1.json") + + // run and verify backup 2 + backup2 := &Request{ + Backup: defaultBackup().Result(), + } + backup2File := bytes.NewBuffer([]byte{}) + + h.backupper.Backup(h.log, backup2, backup2File, nil, nil) + + assertTarballContents(t, backup2File, "metadata/version", "resources/deployments.apps/namespaces/ns-1/deploy-1.json") +} + +// TestBackupResourceOrdering runs backups of the core API group and ensures that items are backed +// up in the expected order (pods, PVCs, PVs, everything else). Verification is done by looking +// at the order of files written to the backup tarball. +func TestBackupResourceOrdering(t *testing.T) { + tests := []struct { + name string + backup *velerov1.Backup + apiResources []*test.APIResource + }{ + { + name: "core API group: pods come before pvcs, pvcs come before pvs, pvs come before anything else", + backup: defaultBackup(). + SnapshotVolumes(false). 
+ Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("foo", "bar").Result(), + builder.ForPod("zoo", "raz").Result(), + ), + test.PVCs( + builder.ForPersistentVolumeClaim("foo", "bar").Result(), + builder.ForPersistentVolumeClaim("zoo", "raz").Result(), + ), + test.PVs( + builder.ForPersistentVolume("bar").Result(), + builder.ForPersistentVolume("baz").Result(), + ), + test.Secrets( + builder.ForSecret("foo", "bar").Result(), + builder.ForSecret("zoo", "raz").Result(), + ), + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + var ( + h = newHarness(t) + req = &Request{Backup: tc.backup} + backupFile = bytes.NewBuffer([]byte{}) + ) + + for _, resource := range tc.apiResources { + h.addItems(t, resource) + } + + h.backupper.Backup(h.log, req, backupFile, nil, nil) + + assertTarballOrdering(t, backupFile, "pods", "persistentvolumeclaims", "persistentvolumes") + }) + } +} + +// recordResourcesAction is a backup item action that can be configured +// to run for specific resources/namespaces and simply records the items +// that it is executed for. +type recordResourcesAction struct { + selector velero.ResourceSelector + ids []string + backups []velerov1.Backup + additionalItems []velero.ResourceIdentifier +} + +func (a *recordResourcesAction) Execute(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, error) { + metadata, err := meta.Accessor(item) + if err != nil { + return item, a.additionalItems, err + } + a.ids = append(a.ids, kubeutil.NamespaceAndName(metadata)) + a.backups = append(a.backups, *backup) + + return item, a.additionalItems, nil +} + +func (a *recordResourcesAction) AppliesTo() (velero.ResourceSelector, error) { + return a.selector, nil +} + +func (a *recordResourcesAction) ForResource(resource string) *recordResourcesAction { + a.selector.IncludedResources = append(a.selector.IncludedResources, resource) + return a +} + +func (a *recordResourcesAction) ForNamespace(namespace string) *recordResourcesAction { + a.selector.IncludedNamespaces = append(a.selector.IncludedNamespaces, namespace) + return a +} + +func (a *recordResourcesAction) ForLabelSelector(selector string) *recordResourcesAction { + a.selector.LabelSelector = selector + return a +} + +func (a *recordResourcesAction) WithAdditionalItems(items []velero.ResourceIdentifier) *recordResourcesAction { + a.additionalItems = items + return a +} + +// TestBackupActionsRunsForCorrectItems runs backups with backup item actions, and +// verifies that each backup item action is run for the correct set of resources based on its +// AppliesTo() resource selector. Verification is done by using the recordResourcesAction struct, +// which records which resources it's executed for. +func TestBackupActionsRunForCorrectItems(t *testing.T) { + tests := []struct { + name string + backup *velerov1.Backup + apiResources []*test.APIResource + + // actions is a map from a recordResourcesAction (which will record the items it was called for) + // to a slice of expected items, formatted as {namespace}/{name}. + actions map[*recordResourcesAction][]string + }{ + { + name: "single action with no selector runs for all items", + backup: defaultBackup(). 
+ Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + ), + test.PVs( + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ), + }, + actions: map[*recordResourcesAction][]string{ + new(recordResourcesAction): {"ns-1/pod-1", "ns-2/pod-2", "pv-1", "pv-2"}, + }, + }, + { + name: "single action with a resource selector for namespaced resources runs only for matching resources", + backup: defaultBackup(). + Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + ), + test.PVs( + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ), + }, + actions: map[*recordResourcesAction][]string{ + new(recordResourcesAction).ForResource("pods"): {"ns-1/pod-1", "ns-2/pod-2"}, + }, + }, + { + name: "single action with a resource selector for cluster-scoped resources runs only for matching resources", + backup: defaultBackup(). + Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + ), + test.PVs( + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ), + }, + actions: map[*recordResourcesAction][]string{ + new(recordResourcesAction).ForResource("persistentvolumes"): {"pv-1", "pv-2"}, + }, + }, + { + name: "single action with a namespace selector runs only for resources in that namespace", + backup: defaultBackup(). + Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + ), + test.PVCs( + builder.ForPersistentVolumeClaim("ns-1", "pvc-1").Result(), + builder.ForPersistentVolumeClaim("ns-2", "pvc-2").Result(), + ), + test.PVs( + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ), + test.Namespaces( + builder.ForNamespace("ns-1").Result(), + builder.ForNamespace("ns-2").Result(), + ), + }, + actions: map[*recordResourcesAction][]string{ + new(recordResourcesAction).ForNamespace("ns-1"): {"ns-1/pod-1", "ns-1/pvc-1"}, + }, + }, + { + name: "single action with a resource and namespace selector runs only for matching resources", + backup: defaultBackup(). + Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + ), + test.PVs( + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ), + }, + actions: map[*recordResourcesAction][]string{ + new(recordResourcesAction).ForResource("pods").ForNamespace("ns-1"): {"ns-1/pod-1"}, + }, + }, + { + name: "multiple actions, each with a different resource selector using short name, run for matching resources", + backup: defaultBackup(). 
+ Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + ), + test.PVs( + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ), + }, + actions: map[*recordResourcesAction][]string{ + new(recordResourcesAction).ForResource("po"): {"ns-1/pod-1", "ns-2/pod-2"}, + new(recordResourcesAction).ForResource("pv"): {"pv-1", "pv-2"}, + }, + }, + { + name: "actions with selectors that don't match anything don't run for any resources", + backup: defaultBackup(). + Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + ), + test.PVCs( + builder.ForPersistentVolumeClaim("ns-2", "pvc-2").Result(), + ), + test.PVs( + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ), + }, + actions: map[*recordResourcesAction][]string{ + new(recordResourcesAction).ForNamespace("ns-1").ForResource("persistentvolumeclaims"): nil, + new(recordResourcesAction).ForNamespace("ns-2").ForResource("pods"): nil, + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + var ( + h = newHarness(t) + req = &Request{Backup: tc.backup} + backupFile = bytes.NewBuffer([]byte{}) + ) + + for _, resource := range tc.apiResources { + h.addItems(t, resource) + } + + actions := []velero.BackupItemAction{} + for action := range tc.actions { + actions = append(actions, action) + } + + err := h.backupper.Backup(h.log, req, backupFile, actions, nil) + assert.NoError(t, err) + + for action, want := range tc.actions { + assert.Equal(t, want, action.ids) + } + }) + } +} + +// TestBackupWithInvalidActions runs backups with backup item actions that are invalid +// in some way (e.g. an invalid label selector returned from AppliesTo(), an error returned +// from AppliesTo()) and verifies that this causes the backupper.Backup(...) method to +// return an error. +func TestBackupWithInvalidActions(t *testing.T) { + // all test cases in this function are expected to cause the method under test + // to return an error, so no expected results need to be set up. + tests := []struct { + name string + backup *velerov1.Backup + apiResources []*test.APIResource + actions []velero.BackupItemAction + }{ + { + name: "action with invalid label selector results in an error", + backup: defaultBackup(). + Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("foo", "bar").Result(), + builder.ForPod("zoo", "raz").Result(), + ), + test.PVs( + builder.ForPersistentVolume("bar").Result(), + builder.ForPersistentVolume("baz").Result(), + ), + }, + actions: []velero.BackupItemAction{ + new(recordResourcesAction).ForLabelSelector("=invalid-selector"), + }, + }, + { + name: "action returning an error from AppliesTo results in an error", + backup: defaultBackup(). 
+ Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("foo", "bar").Result(), + builder.ForPod("zoo", "raz").Result(), + ), + test.PVs( + builder.ForPersistentVolume("bar").Result(), + builder.ForPersistentVolume("baz").Result(), + ), + }, + actions: []velero.BackupItemAction{ + &appliesToErrorAction{}, + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + var ( + h = newHarness(t) + req = &Request{Backup: tc.backup} + backupFile = bytes.NewBuffer([]byte{}) + ) + + for _, resource := range tc.apiResources { + h.addItems(t, resource) + } + + assert.Error(t, h.backupper.Backup(h.log, req, backupFile, tc.actions, nil)) + }) + } +} + +// appliesToErrorAction is a backup item action that always returns +// an error when AppliesTo() is called. +type appliesToErrorAction struct{} + +func (a *appliesToErrorAction) AppliesTo() (velero.ResourceSelector, error) { + return velero.ResourceSelector{}, errors.New("error calling AppliesTo") +} + +func (a *appliesToErrorAction) Execute(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, error) { + panic("not implemented") +} + +// TestBackupActionModifications runs backups with backup item actions that make modifications +// to items in their Execute(...) methods and verifies that these modifications are +// persisted to the backup tarball. Verification is done by inspecting the file contents +// of the tarball. +func TestBackupActionModifications(t *testing.T) { + // modifyingActionGetter is a helper function that returns a *pluggableAction, whose Execute(...) + // method modifies the item being passed in by calling the 'modify' function on it. + modifyingActionGetter := func(modify func(*unstructured.Unstructured)) *pluggableAction { + return &pluggableAction{ + executeFunc: func(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, error) { + obj, ok := item.(*unstructured.Unstructured) + if !ok { + return nil, nil, errors.Errorf("unexpected type %T", item) + } + + res := obj.DeepCopy() + modify(res) + + return res, nil, nil + }, + } + } + + tests := []struct { + name string + backup *velerov1.Backup + apiResources []*test.APIResource + actions []velero.BackupItemAction + want map[string]unstructuredObject + }{ + { + name: "action that adds a label to item gets persisted", + backup: defaultBackup().Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + ), + }, + actions: []velero.BackupItemAction{ + modifyingActionGetter(func(item *unstructured.Unstructured) { + item.SetLabels(map[string]string{"updated": "true"}) + }), + }, + want: map[string]unstructuredObject{ + "resources/pods/namespaces/ns-1/pod-1.json": toUnstructuredOrFail(t, builder.ForPod("ns-1", "pod-1").ObjectMeta(builder.WithLabels("updated", "true")).Result()), + }, + }, + { + name: "action that removes labels from item gets persisted", + backup: defaultBackup().Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").ObjectMeta(builder.WithLabels("should-be-removed", "true")).Result(), + ), + }, + actions: []velero.BackupItemAction{ + modifyingActionGetter(func(item *unstructured.Unstructured) { + item.SetLabels(nil) + }), + }, + want: map[string]unstructuredObject{ + "resources/pods/namespaces/ns-1/pod-1.json": toUnstructuredOrFail(t, builder.ForPod("ns-1", "pod-1").Result()), + }, + }, + { + name: "action that sets a spec field on item gets 
persisted", + backup: defaultBackup().Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + ), + }, + actions: []velero.BackupItemAction{ + modifyingActionGetter(func(item *unstructured.Unstructured) { + item.Object["spec"].(map[string]interface{})["nodeName"] = "foo" + }), + }, + want: map[string]unstructuredObject{ + "resources/pods/namespaces/ns-1/pod-1.json": toUnstructuredOrFail(t, builder.ForPod("ns-1", "pod-1").NodeName("foo").Result()), + }, + }, + { + name: "modifications to name and namespace in an action are persisted in JSON and in filename", + backup: defaultBackup(). + Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + ), + }, + actions: []velero.BackupItemAction{ + modifyingActionGetter(func(item *unstructured.Unstructured) { + item.SetName(item.GetName() + "-updated") + item.SetNamespace(item.GetNamespace() + "-updated") + }), + }, + want: map[string]unstructuredObject{ + "resources/pods/namespaces/ns-1-updated/pod-1-updated.json": toUnstructuredOrFail(t, builder.ForPod("ns-1-updated", "pod-1-updated").Result()), + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + var ( + h = newHarness(t) + req = &Request{Backup: tc.backup} + backupFile = bytes.NewBuffer([]byte{}) + ) + + for _, resource := range tc.apiResources { + h.addItems(t, resource) + } + + err := h.backupper.Backup(h.log, req, backupFile, tc.actions, nil) + assert.NoError(t, err) + + assertTarballFileContents(t, backupFile, tc.want) + }) + } +} + +// TestBackupActionAdditionalItems runs backups with backup item actions that return +// additional items to be backed up, and verifies that those items are included in the +// backup tarball as appropriate. Verification is done by looking at the files that exist +// in the backup tarball. 
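// Helper sketch (illustrative only; expectedTarballPath is not a symbol from this package) showing how
// the expected entries in the cases below are formed: namespaced items live under a
// "namespaces/<namespace>" segment, cluster-scoped items under "cluster".
func expectedTarballPath(groupResource, namespace, name string) string {
	if namespace == "" {
		return "resources/" + groupResource + "/cluster/" + name + ".json"
	}
	return "resources/" + groupResource + "/namespaces/" + namespace + "/" + name + ".json"
}

// For example, expectedTarballPath("persistentvolumes", "", "pv-1") is
// "resources/persistentvolumes/cluster/pv-1.json", and expectedTarballPath("pods", "ns-1", "pod-1") is
// "resources/pods/namespaces/ns-1/pod-1.json", matching the want lists in this file.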
+func TestBackupActionAdditionalItems(t *testing.T) { + tests := []struct { + name string + backup *velerov1.Backup + apiResources []*test.APIResource + actions []velero.BackupItemAction + want []string + }{ + { + name: "additional items that are already being backed up are not backed up twice", + backup: defaultBackup().Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + builder.ForPod("ns-3", "pod-3").Result(), + ), + }, + actions: []velero.BackupItemAction{ + &pluggableAction{ + selector: velero.ResourceSelector{IncludedNamespaces: []string{"ns-1"}}, + executeFunc: func(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, error) { + additionalItems := []velero.ResourceIdentifier{ + {GroupResource: kuberesource.Pods, Namespace: "ns-2", Name: "pod-2"}, + {GroupResource: kuberesource.Pods, Namespace: "ns-3", Name: "pod-3"}, + } + + return item, additionalItems, nil + }, + }, + }, + want: []string{ + "resources/pods/namespaces/ns-1/pod-1.json", + "resources/pods/namespaces/ns-2/pod-2.json", + "resources/pods/namespaces/ns-3/pod-3.json", + }, + }, + { + name: "when using a backup namespace filter, additional items that are in a non-included namespace are not backed up", + backup: defaultBackup().IncludedNamespaces("ns-1").Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + builder.ForPod("ns-3", "pod-3").Result(), + ), + }, + actions: []velero.BackupItemAction{ + &pluggableAction{ + executeFunc: func(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, error) { + additionalItems := []velero.ResourceIdentifier{ + {GroupResource: kuberesource.Pods, Namespace: "ns-2", Name: "pod-2"}, + {GroupResource: kuberesource.Pods, Namespace: "ns-3", Name: "pod-3"}, + } + + return item, additionalItems, nil + }, + }, + }, + want: []string{ + "resources/pods/namespaces/ns-1/pod-1.json", + }, + }, + { + name: "when using a backup namespace filter, additional items that are cluster-scoped are backed up", + backup: defaultBackup().IncludedNamespaces("ns-1").Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + ), + test.PVs( + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ), + }, + actions: []velero.BackupItemAction{ + &pluggableAction{ + executeFunc: func(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, error) { + additionalItems := []velero.ResourceIdentifier{ + {GroupResource: kuberesource.PersistentVolumes, Name: "pv-1"}, + {GroupResource: kuberesource.PersistentVolumes, Name: "pv-2"}, + } + + return item, additionalItems, nil + }, + }, + }, + want: []string{ + "resources/pods/namespaces/ns-1/pod-1.json", + "resources/persistentvolumes/cluster/pv-1.json", + "resources/persistentvolumes/cluster/pv-2.json", + }, + }, + { + name: "when using a backup resource filter, additional items that are non-included resources are not backed up", + backup: defaultBackup().IncludedResources("pods").Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + ), + test.PVs( + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ), + }, + actions: 
[]velero.BackupItemAction{ + &pluggableAction{ + executeFunc: func(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, error) { + additionalItems := []velero.ResourceIdentifier{ + {GroupResource: kuberesource.PersistentVolumes, Name: "pv-1"}, + {GroupResource: kuberesource.PersistentVolumes, Name: "pv-2"}, + } + + return item, additionalItems, nil + }, + }, + }, + want: []string{ + "resources/pods/namespaces/ns-1/pod-1.json", + }, + }, + { + name: "when IncludeClusterResources=false, additional items that are cluster-scoped are not backed up", + backup: defaultBackup().IncludeClusterResources(false).Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + ), + test.PVs( + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ), + }, + actions: []velero.BackupItemAction{ + &pluggableAction{ + executeFunc: func(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, error) { + additionalItems := []velero.ResourceIdentifier{ + {GroupResource: kuberesource.PersistentVolumes, Name: "pv-1"}, + {GroupResource: kuberesource.PersistentVolumes, Name: "pv-2"}, + } + + return item, additionalItems, nil + }, + }, + }, + want: []string{ + "resources/pods/namespaces/ns-1/pod-1.json", + "resources/pods/namespaces/ns-2/pod-2.json", + }, + }, + { + name: "if there's an error backing up additional items, the item the action was run for isn't backed up", + backup: defaultBackup().Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + builder.ForPod("ns-3", "pod-3").Result(), + ), + }, + actions: []velero.BackupItemAction{ + &pluggableAction{ + selector: velero.ResourceSelector{IncludedNamespaces: []string{"ns-1"}}, + executeFunc: func(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, error) { + additionalItems := []velero.ResourceIdentifier{ + {GroupResource: kuberesource.Pods, Namespace: "ns-4", Name: "pod-4"}, + {GroupResource: kuberesource.Pods, Namespace: "ns-5", Name: "pod-5"}, + } + + return item, additionalItems, nil + }, + }, + }, + want: []string{ + "resources/pods/namespaces/ns-2/pod-2.json", + "resources/pods/namespaces/ns-3/pod-3.json", + }, + }, } - podsResource = metav1.APIResource{ - Name: "pods", - SingularName: "pod", - Namespaced: true, - Kind: "Pod", - Verbs: metav1.Verbs([]string{"create", "update", "get", "list", "watch", "delete"}), - ShortNames: []string{"po"}, - Categories: []string{"all"}, + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + var ( + h = newHarness(t) + req = &Request{Backup: tc.backup} + backupFile = bytes.NewBuffer([]byte{}) + ) + + for _, resource := range tc.apiResources { + h.addItems(t, resource) + } + + err := h.backupper.Backup(h.log, req, backupFile, tc.actions, nil) + assert.NoError(t, err) + + assertTarballContents(t, backupFile, append(tc.want, "metadata/version")...) + }) + } +} + +// volumeSnapshotterGetter is a simple implementation of the VolumeSnapshotterGetter +// interface that returns velero.VolumeSnapshotters from a map if they exist. 
+type volumeSnapshotterGetter map[string]velero.VolumeSnapshotter + +func (vsg volumeSnapshotterGetter) GetVolumeSnapshotter(name string) (velero.VolumeSnapshotter, error) { + snapshotter, ok := vsg[name] + if !ok { + return nil, errors.New("volume snapshotter not found") + } + + return snapshotter, nil +} + +func int64Ptr(val int) *int64 { + i := int64(val) + return &i +} + +type volumeIdentifier struct { + volumeID string + volumeAZ string +} + +type volumeInfo struct { + volumeType string + iops *int64 + snapshotErr bool +} + +// fakeVolumeSnapshotter is a test fake for the velero.VolumeSnapshotter interface. +type fakeVolumeSnapshotter struct { + // PVVolumeNames is a map from PV name to volume ID, used as the basis + // for the GetVolumeID method. + PVVolumeNames map[string]string + + // Volumes is a map from volume identifier (volume ID + AZ) to a struct + // of volume info, used for the GetVolumeInfo and CreateSnapshot methods. + Volumes map[volumeIdentifier]*volumeInfo +} + +// WithVolume is a test helper for registering persistent volumes that the +// fakeVolumeSnapshotter should handle. +func (vs *fakeVolumeSnapshotter) WithVolume(pvName, id, az, volumeType string, iops int, snapshotErr bool) *fakeVolumeSnapshotter { + if vs.PVVolumeNames == nil { + vs.PVVolumeNames = make(map[string]string) + } + vs.PVVolumeNames[pvName] = id + + if vs.Volumes == nil { + vs.Volumes = make(map[volumeIdentifier]*volumeInfo) + } + + identifier := volumeIdentifier{ + volumeID: id, + volumeAZ: az, } - rbacGroup = &metav1.APIResourceList{ - GroupVersion: "rbac.authorization.k8s.io/v1beta1", - APIResources: []metav1.APIResource{rolesResource}, + vs.Volumes[identifier] = &volumeInfo{ + volumeType: volumeType, + iops: int64Ptr(iops), + snapshotErr: snapshotErr, } - rolesResource = metav1.APIResource{ - Name: "roles", - SingularName: "role", - Namespaced: true, - Kind: "Role", - Verbs: metav1.Verbs([]string{"create", "update", "get", "list", "watch", "delete"}), + return vs +} + +// Init is a no-op. +func (*fakeVolumeSnapshotter) Init(config map[string]string) error { + return nil +} + +// GetVolumeID looks up the PV name in the PVVolumeNames map and returns the result +// if found, or an error otherwise. +func (vs *fakeVolumeSnapshotter) GetVolumeID(pv runtime.Unstructured) (string, error) { + obj := pv.(*unstructured.Unstructured) + + volumeID, ok := vs.PVVolumeNames[obj.GetName()] + if !ok { + return "", errors.New("unsupported volume type") } - namespacesResource = metav1.APIResource{ - Name: "namespaces", - SingularName: "namespace", - Namespaced: false, - Kind: "Namespace", - Verbs: metav1.Verbs([]string{"create", "update", "get", "list", "watch", "delete"}), + return volumeID, nil +} + +// CreateSnapshot looks up the volume in the Volume map. If it's not found, an error is +// returned; if snapshotErr is true on the result, an error is returned; otherwise, +// a snapshotID of "<volumeID>-snapshot" is returned. 
+func (vs *fakeVolumeSnapshotter) CreateSnapshot(volumeID, volumeAZ string, tags map[string]string) (snapshotID string, err error) { + vi, ok := vs.Volumes[volumeIdentifier{volumeID: volumeID, volumeAZ: volumeAZ}] + if !ok { + return "", errors.New("volume not found") } - certificatesGroup = &metav1.APIResourceList{ - GroupVersion: "certificates.k8s.io/v1beta1", - APIResources: []metav1.APIResource{certificateSigningRequestsResource}, + if vi.snapshotErr { + return "", errors.New("error calling CreateSnapshot") } - certificateSigningRequestsResource = metav1.APIResource{ - Name: "certificatesigningrequests", - SingularName: "certificatesigningrequest", - Namespaced: false, - Kind: "CertificateSigningRequest", - Verbs: metav1.Verbs([]string{"create", "update", "get", "list", "watch", "delete"}), - ShortNames: []string{"csr"}, + return volumeID + "-snapshot", nil +} + +// GetVolumeInfo returns volume info if it exists in the Volumes map +// for the specified volume ID and AZ, or an error otherwise. +func (vs *fakeVolumeSnapshotter) GetVolumeInfo(volumeID, volumeAZ string) (string, *int64, error) { + vi, ok := vs.Volumes[volumeIdentifier{volumeID: volumeID, volumeAZ: volumeAZ}] + if !ok { + return "", nil, errors.New("volume not found") + } + + return vi.volumeType, vi.iops, nil +} + +// CreateVolumeFromSnapshot panics because it's not expected to be used for backups. +func (*fakeVolumeSnapshotter) CreateVolumeFromSnapshot(snapshotID, volumeType, volumeAZ string, iops *int64) (volumeID string, err error) { + panic("CreateVolumeFromSnapshot should not be used for backups") +} + +// SetVolumeID panics because it's not expected to be used for backups. +func (*fakeVolumeSnapshotter) SetVolumeID(pv runtime.Unstructured, volumeID string) (runtime.Unstructured, error) { + panic("SetVolumeID should not be used for backups") +} + +// DeleteSnapshot panics because it's not expected to be used for backups. +func (*fakeVolumeSnapshotter) DeleteSnapshot(snapshotID string) error { + panic("DeleteSnapshot should not be used for backups") +} + +// TestBackupWithSnapshots runs backups with volume snapshot locations and volume snapshotters +// configured and verifies that snapshots are created as appropriate. Verification is done by +// looking at the backup request's VolumeSnapshots field. This test uses the fakeVolumeSnapshotter +// struct in place of real volume snapshotters. 
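// Wiring sketch for the cases below (copied from those cases, not new behavior; exampleSnapshotterGetter
// is an illustrative name the tests do not use): one fake per volume snapshot location provider. As the
// expected results suggest, a PV registered this way resolves to provider volume "vol-1", snapshots
// successfully as "vol-1-snapshot", and reports volume type "type-1" with 100 IOPS.
var exampleSnapshotterGetter = volumeSnapshotterGetter{
	"default": new(fakeVolumeSnapshotter).WithVolume("pv-1", "vol-1", "", "type-1", 100, false),
}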
+func TestBackupWithSnapshots(t *testing.T) { + tests := []struct { + name string + req *Request + vsls []*velerov1.VolumeSnapshotLocation + apiResources []*test.APIResource + snapshotterGetter volumeSnapshotterGetter + want []*volume.Snapshot + }{ + { + name: "persistent volume with no zone annotation creates a snapshot", + req: &Request{ + Backup: defaultBackup().Result(), + SnapshotLocations: []*velerov1.VolumeSnapshotLocation{ + newSnapshotLocation("velero", "default", "default"), + }, + }, + apiResources: []*test.APIResource{ + test.PVs( + builder.ForPersistentVolume("pv-1").Result(), + ), + }, + snapshotterGetter: map[string]velero.VolumeSnapshotter{ + "default": new(fakeVolumeSnapshotter).WithVolume("pv-1", "vol-1", "", "type-1", 100, false), + }, + want: []*volume.Snapshot{ + { + Spec: volume.SnapshotSpec{ + BackupName: "backup-1", + Location: "default", + PersistentVolumeName: "pv-1", + ProviderVolumeID: "vol-1", + VolumeType: "type-1", + VolumeIOPS: int64Ptr(100), + }, + Status: volume.SnapshotStatus{ + Phase: volume.SnapshotPhaseCompleted, + ProviderSnapshotID: "vol-1-snapshot", + }, + }, + }, + }, + { + name: "persistent volume with zone annotation creates a snapshot", + req: &Request{ + Backup: defaultBackup().Result(), + SnapshotLocations: []*velerov1.VolumeSnapshotLocation{ + newSnapshotLocation("velero", "default", "default"), + }, + }, + apiResources: []*test.APIResource{ + test.PVs( + builder.ForPersistentVolume("pv-1").ObjectMeta(builder.WithLabels("failure-domain.beta.kubernetes.io/zone", "zone-1")).Result(), + ), + }, + snapshotterGetter: map[string]velero.VolumeSnapshotter{ + "default": new(fakeVolumeSnapshotter).WithVolume("pv-1", "vol-1", "zone-1", "type-1", 100, false), + }, + want: []*volume.Snapshot{ + { + Spec: volume.SnapshotSpec{ + BackupName: "backup-1", + Location: "default", + PersistentVolumeName: "pv-1", + ProviderVolumeID: "vol-1", + VolumeAZ: "zone-1", + VolumeType: "type-1", + VolumeIOPS: int64Ptr(100), + }, + Status: volume.SnapshotStatus{ + Phase: volume.SnapshotPhaseCompleted, + ProviderSnapshotID: "vol-1-snapshot", + }, + }, + }, + }, + { + name: "error returned from CreateSnapshot results in a failed snapshot", + req: &Request{ + Backup: defaultBackup().Result(), + SnapshotLocations: []*velerov1.VolumeSnapshotLocation{ + newSnapshotLocation("velero", "default", "default"), + }, + }, + apiResources: []*test.APIResource{ + test.PVs( + builder.ForPersistentVolume("pv-1").Result(), + ), + }, + snapshotterGetter: map[string]velero.VolumeSnapshotter{ + "default": new(fakeVolumeSnapshotter).WithVolume("pv-1", "vol-1", "", "type-1", 100, true), + }, + want: []*volume.Snapshot{ + { + Spec: volume.SnapshotSpec{ + BackupName: "backup-1", + Location: "default", + PersistentVolumeName: "pv-1", + ProviderVolumeID: "vol-1", + VolumeType: "type-1", + VolumeIOPS: int64Ptr(100), + }, + Status: volume.SnapshotStatus{ + Phase: volume.SnapshotPhaseFailed, + }, + }, + }, + }, + { + name: "backup with SnapshotVolumes=false does not create any snapshots", + req: &Request{ + Backup: defaultBackup().SnapshotVolumes(false).Result(), + SnapshotLocations: []*velerov1.VolumeSnapshotLocation{ + newSnapshotLocation("velero", "default", "default"), + }, + }, + apiResources: []*test.APIResource{ + test.PVs( + builder.ForPersistentVolume("pv-1").Result(), + ), + }, + snapshotterGetter: map[string]velero.VolumeSnapshotter{ + "default": new(fakeVolumeSnapshotter).WithVolume("pv-1", "vol-1", "", "type-1", 100, false), + }, + want: nil, + }, + { + name: "backup with no volume snapshot 
locations does not create any snapshots", + req: &Request{ + Backup: defaultBackup().Result(), + }, + apiResources: []*test.APIResource{ + test.PVs( + builder.ForPersistentVolume("pv-1").Result(), + ), + }, + snapshotterGetter: map[string]velero.VolumeSnapshotter{ + "default": new(fakeVolumeSnapshotter).WithVolume("pv-1", "vol-1", "", "type-1", 100, false), + }, + want: nil, + }, + { + name: "backup with no volume snapshotters does not create any snapshots", + req: &Request{ + Backup: defaultBackup().Result(), + SnapshotLocations: []*velerov1.VolumeSnapshotLocation{ + newSnapshotLocation("velero", "default", "default"), + }, + }, + apiResources: []*test.APIResource{ + test.PVs( + builder.ForPersistentVolume("pv-1").Result(), + ), + }, + snapshotterGetter: map[string]velero.VolumeSnapshotter{}, + want: nil, + }, + { + name: "unsupported persistent volume type does not create any snapshots", + req: &Request{ + Backup: defaultBackup().Result(), + SnapshotLocations: []*velerov1.VolumeSnapshotLocation{ + newSnapshotLocation("velero", "default", "default"), + }, + }, + apiResources: []*test.APIResource{ + test.PVs( + builder.ForPersistentVolume("pv-1").Result(), + ), + }, + snapshotterGetter: map[string]velero.VolumeSnapshotter{ + "default": new(fakeVolumeSnapshotter), + }, + want: nil, + }, + { + name: "when there are multiple volumes, snapshot locations, and snapshotters, volumes are matched to the right snapshotters", + req: &Request{ + Backup: defaultBackup().Result(), + SnapshotLocations: []*velerov1.VolumeSnapshotLocation{ + newSnapshotLocation("velero", "default", "default"), + newSnapshotLocation("velero", "another", "another"), + }, + }, + apiResources: []*test.APIResource{ + test.PVs( + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ), + }, + snapshotterGetter: map[string]velero.VolumeSnapshotter{ + "default": new(fakeVolumeSnapshotter).WithVolume("pv-1", "vol-1", "", "type-1", 100, false), + "another": new(fakeVolumeSnapshotter).WithVolume("pv-2", "vol-2", "", "type-2", 100, false), + }, + want: []*volume.Snapshot{ + { + Spec: volume.SnapshotSpec{ + BackupName: "backup-1", + Location: "default", + PersistentVolumeName: "pv-1", + ProviderVolumeID: "vol-1", + VolumeType: "type-1", + VolumeIOPS: int64Ptr(100), + }, + Status: volume.SnapshotStatus{ + Phase: volume.SnapshotPhaseCompleted, + ProviderSnapshotID: "vol-1-snapshot", + }, + }, + { + Spec: volume.SnapshotSpec{ + BackupName: "backup-1", + Location: "another", + PersistentVolumeName: "pv-2", + ProviderVolumeID: "vol-2", + VolumeType: "type-2", + VolumeIOPS: int64Ptr(100), + }, + Status: volume.SnapshotStatus{ + Phase: volume.SnapshotPhaseCompleted, + ProviderSnapshotID: "vol-2-snapshot", + }, + }, + }, + }, } - extensionsGroup = &metav1.APIResourceList{ - GroupVersion: "extensions/v1beta1", - APIResources: []metav1.APIResource{deploymentsResource, networkPoliciesResource}, - } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + var ( + h = newHarness(t) + backupFile = bytes.NewBuffer([]byte{}) + ) - extensionsGroupVersion = schema.GroupVersion{ - Group: "extensions", - Version: "v1beta1", - } + for _, resource := range tc.apiResources { + h.addItems(t, resource) + } - appsGroup = &metav1.APIResourceList{ - GroupVersion: "apps/v1beta1", - APIResources: []metav1.APIResource{deploymentsResource}, - } + err := h.backupper.Backup(h.log, tc.req, backupFile, nil, tc.snapshotterGetter) + assert.NoError(t, err) - appsGroupVersion = schema.GroupVersion{ - Group: "apps", - 
Version: "v1beta1", + assert.Equal(t, tc.want, tc.req.VolumeSnapshots) + }) } +} - deploymentsResource = metav1.APIResource{ - Name: "deployments", - SingularName: "deployment", - Namespaced: true, - Kind: "Deployment", - Verbs: metav1.Verbs([]string{"create", "update", "get", "list", "watch", "delete"}), - ShortNames: []string{"deploy"}, - Categories: []string{"all"}, +// TestBackupWithInvalidHooks runs backups with invalid hook specifications and verifies +// that an error is returned. +func TestBackupWithInvalidHooks(t *testing.T) { + tests := []struct { + name string + backup *velerov1.Backup + apiResources []*test.APIResource + want error + }{ + { + name: "hook with invalid label selector causes backup to fail", + backup: defaultBackup(). + Hooks(velerov1.BackupHooks{ + Resources: []velerov1.BackupResourceHookSpec{ + { + Name: "hook-with-invalid-label-selector", + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "foo", + Operator: metav1.LabelSelectorOperator("nonexistent-operator"), + Values: []string{"bar"}, + }, + }, + }, + }, + }, + }). + Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("foo", "bar").Result(), + ), + }, + want: errors.New("\"nonexistent-operator\" is not a valid pod selector operator"), + }, } - networkingGroup = &metav1.APIResourceList{ - GroupVersion: "networking.k8s.io/v1", - APIResources: []metav1.APIResource{networkPoliciesResource}, - } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + var ( + h = newHarness(t) + req = &Request{Backup: tc.backup} + backupFile = bytes.NewBuffer([]byte{}) + ) - networkingGroupVersion = schema.GroupVersion{ - Group: "networking.k8s.io", - Version: "v1", - } + for _, resource := range tc.apiResources { + h.addItems(t, resource) + } - networkPoliciesResource = metav1.APIResource{ - Name: "networkpolicies", - SingularName: "networkpolicy", - Namespaced: true, - Kind: "Deployment", - Verbs: metav1.Verbs([]string{"create", "update", "get", "list", "watch", "delete"}), + assert.EqualError(t, h.backupper.Backup(h.log, req, backupFile, nil, nil), tc.want.Error()) + }) } -) +} -func parseLabelSelectorOrDie(s string) labels.Selector { - ret, err := labels.Parse(s) - if err != nil { - panic(err) +// TestBackupWithHooks runs backups with valid hook specifications and verifies that the +// hooks are run. It uses a MockPodCommandExecutor since hooks can't actually be executed +// in running pods during the unit test. Verification is done by asserting expected method +// calls on the mock object. 
+func TestBackupWithHooks(t *testing.T) { + type expectedCall struct { + podNamespace string + podName string + hookName string + hook *velerov1.ExecHook + err error } - return ret -} -func TestBackup(t *testing.T) { tests := []struct { - name string - backup *v1.Backup - actions []velero.BackupItemAction - expectedNamespaces *collections.IncludesExcludes - expectedResources *collections.IncludesExcludes - expectedHooks []resourceHook - backupGroupErrors map[*metav1.APIResourceList]error - expectedError error + name string + backup *velerov1.Backup + apiResources []*test.APIResource + wantExecutePodCommandCalls []*expectedCall + wantBackedUp []string }{ { - name: "error resolving hooks returns an error", - backup: &v1.Backup{ - Spec: v1.BackupSpec{ - // cm - shortcut in legacy api group - // csr - shortcut in certificates.k8s.io api group - // roles - fully qualified in rbac.authorization.k8s.io api group - IncludedResources: []string{"cm", "csr", "roles"}, - IncludedNamespaces: []string{"a", "b"}, - ExcludedNamespaces: []string{"c", "d"}, - Hooks: v1.BackupHooks{ - Resources: []v1.BackupResourceHookSpec{ - { - Name: "hook-with-invalid-label-selector", - LabelSelector: &metav1.LabelSelector{ - MatchExpressions: []metav1.LabelSelectorRequirement{ - { - Key: "foo", - Operator: metav1.LabelSelectorOperator("nonexistent-operator"), - Values: []string{"bar"}, - }, + name: "pre hook with no resource filters runs for all pods", + backup: defaultBackup(). + Hooks(velerov1.BackupHooks{ + Resources: []velerov1.BackupResourceHookSpec{ + { + Name: "hook-1", + PreHooks: []velerov1.BackupResourceHook{ + { + Exec: &velerov1.ExecHook{ + Command: []string{"ls", "/tmp"}, }, }, }, }, }, + }). + Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + ), + }, + wantExecutePodCommandCalls: []*expectedCall{ + { + podNamespace: "ns-1", + podName: "pod-1", + hookName: "hook-1", + hook: &velerov1.ExecHook{ + Command: []string{"ls", "/tmp"}, + }, + err: nil, + }, + { + podNamespace: "ns-2", + podName: "pod-2", + hookName: "hook-1", + hook: &velerov1.ExecHook{ + Command: []string{"ls", "/tmp"}, + }, + err: nil, }, }, - expectedNamespaces: collections.NewIncludesExcludes().Includes("a", "b").Excludes("c", "d"), - expectedResources: collections.NewIncludesExcludes().Includes("configmaps", "certificatesigningrequests.certificates.k8s.io", "roles.rbac.authorization.k8s.io"), - expectedHooks: []resourceHook{}, - expectedError: errors.New("\"nonexistent-operator\" is not a valid pod selector operator"), - }, - { - name: "backupGroup errors", - backup: &v1.Backup{}, - expectedNamespaces: collections.NewIncludesExcludes(), - expectedResources: collections.NewIncludesExcludes(), - expectedHooks: []resourceHook{}, - backupGroupErrors: map[*metav1.APIResourceList]error{ - v1Group: errors.New("v1 error"), - certificatesGroup: nil, - rbacGroup: errors.New("rbac error"), - }, - expectedError: nil, - }, - { - name: "hooks", - backup: &v1.Backup{ - Spec: v1.BackupSpec{ - Hooks: v1.BackupHooks{ - Resources: []v1.BackupResourceHookSpec{ - { - Name: "hook1", - IncludedNamespaces: []string{"a"}, - ExcludedNamespaces: []string{"b"}, - IncludedResources: []string{"cm"}, - ExcludedResources: []string{"roles"}, - LabelSelector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"1": "2"}, - }, - PreHooks: []v1.BackupResourceHook{ - { - Exec: &v1.ExecHook{ - Command: []string{"ls", "/tmp"}, - }, + wantBackedUp: []string{ + 
"resources/pods/namespaces/ns-1/pod-1.json", + "resources/pods/namespaces/ns-2/pod-2.json", + }, + }, + { + name: "post hook with no resource filters runs for all pods", + backup: defaultBackup(). + Hooks(velerov1.BackupHooks{ + Resources: []velerov1.BackupResourceHookSpec{ + { + Name: "hook-1", + PostHooks: []velerov1.BackupResourceHook{ + { + Exec: &velerov1.ExecHook{ + Command: []string{"ls", "/tmp"}, }, }, }, }, }, - }, + }). + Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + ), }, - expectedNamespaces: collections.NewIncludesExcludes(), - expectedResources: collections.NewIncludesExcludes(), - expectedHooks: []resourceHook{ + wantExecutePodCommandCalls: []*expectedCall{ { - name: "hook1", - namespaces: collections.NewIncludesExcludes().Includes("a").Excludes("b"), - resources: collections.NewIncludesExcludes().Includes("configmaps").Excludes("roles.rbac.authorization.k8s.io"), - labelSelector: parseLabelSelectorOrDie("1=2"), - pre: []v1.BackupResourceHook{ + podNamespace: "ns-1", + podName: "pod-1", + hookName: "hook-1", + hook: &velerov1.ExecHook{ + Command: []string{"ls", "/tmp"}, + }, + err: nil, + }, + { + podNamespace: "ns-2", + podName: "pod-2", + hookName: "hook-1", + hook: &velerov1.ExecHook{ + Command: []string{"ls", "/tmp"}, + }, + err: nil, + }, + }, + wantBackedUp: []string{ + "resources/pods/namespaces/ns-1/pod-1.json", + "resources/pods/namespaces/ns-2/pod-2.json", + }, + }, + { + name: "pre and post hooks run for a pod", + backup: defaultBackup(). + Hooks(velerov1.BackupHooks{ + Resources: []velerov1.BackupResourceHookSpec{ { - Exec: &v1.ExecHook{ - Command: []string{"ls", "/tmp"}, + Name: "hook-1", + PreHooks: []velerov1.BackupResourceHook{ + { + Exec: &velerov1.ExecHook{ + Command: []string{"pre"}, + }, + }, + }, + PostHooks: []velerov1.BackupResourceHook{ + { + Exec: &velerov1.ExecHook{ + Command: []string{"post"}, + }, + }, }, }, }, + }). + Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + ), + }, + wantExecutePodCommandCalls: []*expectedCall{ + { + podNamespace: "ns-1", + podName: "pod-1", + hookName: "hook-1", + hook: &velerov1.ExecHook{ + Command: []string{"pre"}, + }, + err: nil, + }, + { + podNamespace: "ns-1", + podName: "pod-1", + hookName: "hook-1", + hook: &velerov1.ExecHook{ + Command: []string{"post"}, + }, + err: nil, }, }, - backupGroupErrors: map[*metav1.APIResourceList]error{ - v1Group: nil, - certificatesGroup: nil, - rbacGroup: nil, + wantBackedUp: []string{ + "resources/pods/namespaces/ns-1/pod-1.json", }, }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - req := &Request{ - Backup: test.backup, - } - - discoveryHelper := &velerotest.FakeDiscoveryHelper{ - Mapper: &velerotest.FakeMapper{ - Resources: map[schema.GroupVersionResource]schema.GroupVersionResource{ - {Resource: "cm"}: {Group: "", Version: "v1", Resource: "configmaps"}, - {Resource: "csr"}: {Group: "certificates.k8s.io", Version: "v1beta1", Resource: "certificatesigningrequests"}, - {Resource: "roles"}: {Group: "rbac.authorization.k8s.io", Version: "v1beta1", Resource: "roles"}, + { + name: "item is not backed up if hook returns an error when OnError=Fail", + backup: defaultBackup(). 
+ Hooks(velerov1.BackupHooks{ + Resources: []velerov1.BackupResourceHookSpec{ + { + Name: "hook-1", + PreHooks: []velerov1.BackupResourceHook{ + { + Exec: &velerov1.ExecHook{ + Command: []string{"ls", "/tmp"}, + OnError: velerov1.HookErrorModeFail, + }, + }, + }, + }, + }, + }). + Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + ), + }, + wantExecutePodCommandCalls: []*expectedCall{ + { + podNamespace: "ns-1", + podName: "pod-1", + hookName: "hook-1", + hook: &velerov1.ExecHook{ + Command: []string{"ls", "/tmp"}, + OnError: velerov1.HookErrorModeFail, }, + err: errors.New("exec hook error"), }, - ResourceList: []*metav1.APIResourceList{ - v1Group, - certificatesGroup, - rbacGroup, + { + podNamespace: "ns-2", + podName: "pod-2", + hookName: "hook-1", + hook: &velerov1.ExecHook{ + Command: []string{"ls", "/tmp"}, + OnError: velerov1.HookErrorModeFail, + }, + err: nil, }, - } + }, + wantBackedUp: []string{ + "resources/pods/namespaces/ns-2/pod-2.json", + }, + }, + } - dynamicFactory := new(velerotest.FakeDynamicFactory) + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + var ( + h = newHarness(t) + req = &Request{Backup: tc.backup} + backupFile = bytes.NewBuffer([]byte{}) + podCommandExecutor = new(testutil.MockPodCommandExecutor) + ) - podCommandExecutor := &velerotest.MockPodCommandExecutor{} + h.backupper.podCommandExecutor = podCommandExecutor defer podCommandExecutor.AssertExpectations(t) - groupBackupperFactory := &mockGroupBackupperFactory{} - defer groupBackupperFactory.AssertExpectations(t) - - groupBackupper := &mockGroupBackupper{} - defer groupBackupper.AssertExpectations(t) - - groupBackupperFactory.On("newGroupBackupper", - mock.Anything, // log - req, - dynamicFactory, - discoveryHelper, - map[itemKey]struct{}{}, // backedUpItems - cohabitatingResources(), - podCommandExecutor, - mock.Anything, // tarWriter - mock.Anything, // restic backupper - mock.Anything, // pvc snapshot tracker - mock.Anything, // volume snapshotter getter - ).Maybe().Return(groupBackupper) - - for group, err := range test.backupGroupErrors { - groupBackupper.On("backupGroup", group).Return(err) + for _, expect := range tc.wantExecutePodCommandCalls { + podCommandExecutor.On("ExecutePodCommand", + mock.Anything, + mock.Anything, + expect.podNamespace, + expect.podName, + expect.hookName, + expect.hook, + ).Return(expect.err) } - kb := &kubernetesBackupper{ - discoveryHelper: discoveryHelper, - dynamicFactory: dynamicFactory, - podCommandExecutor: podCommandExecutor, - groupBackupperFactory: groupBackupperFactory, + for _, resource := range tc.apiResources { + h.addItems(t, resource) } - err := kb.Backup(logging.DefaultLogger(logrus.DebugLevel), req, new(bytes.Buffer), test.actions, nil) - - assert.Equal(t, test.expectedNamespaces, req.NamespaceIncludesExcludes) - assert.Equal(t, test.expectedResources, req.ResourceIncludesExcludes) - assert.Equal(t, test.expectedHooks, req.ResourceHooks) - - if test.expectedError != nil { - assert.EqualError(t, err, test.expectedError.Error()) - return - } - assert.NoError(t, err) + require.NoError(t, h.backupper.Backup(h.log, req, backupFile, nil, nil)) + assertTarballContents(t, backupFile, append(tc.wantBackedUp, "metadata/version")...) 
}) } } -type mockGroupBackupperFactory struct { - mock.Mock +type fakeResticBackupperFactory struct { + podVolumeBackups []*velerov1.PodVolumeBackup } -func (f *mockGroupBackupperFactory) newGroupBackupper( - log logrus.FieldLogger, - backup *Request, - dynamicFactory client.DynamicFactory, - discoveryHelper discovery.Helper, - backedUpItems map[itemKey]struct{}, - cohabitatingResources map[string]*cohabitatingResource, - podCommandExecutor podexec.PodCommandExecutor, - tarWriter tarWriter, - resticBackupper restic.Backupper, - resticSnapshotTracker *pvcSnapshotTracker, - volumeSnapshotterGetter VolumeSnapshotterGetter, -) groupBackupper { - args := f.Called( - log, - backup, - dynamicFactory, - discoveryHelper, - backedUpItems, - cohabitatingResources, - podCommandExecutor, - tarWriter, - resticBackupper, - resticSnapshotTracker, - volumeSnapshotterGetter, - ) - return args.Get(0).(groupBackupper) +func (f *fakeResticBackupperFactory) NewBackupper(context.Context, *velerov1.Backup) (restic.Backupper, error) { + return &fakeResticBackupper{ + podVolumeBackups: f.podVolumeBackups, + }, nil } -type mockGroupBackupper struct { - mock.Mock +type fakeResticBackupper struct { + podVolumeBackups []*velerov1.PodVolumeBackup } -func (gb *mockGroupBackupper) backupGroup(group *metav1.APIResourceList) error { - args := gb.Called(group) - return args.Error(0) +func (b *fakeResticBackupper) BackupPodVolumes(backup *velerov1.Backup, pod *corev1.Pod, _ logrus.FieldLogger) ([]*velerov1.PodVolumeBackup, []error) { + return b.podVolumeBackups, nil } -func toRuntimeObject(t *testing.T, data string) runtime.Object { - o, _, err := unstructured.UnstructuredJSONScheme.Decode([]byte(data), nil, nil) - require.NoError(t, err) - return o -} - -func TestGetResourceHook(t *testing.T) { +// TestBackupWithRestic runs backups of pods that are annotated for restic backup, +// and ensures that the restic backupper is called, that the returned PodVolumeBackups +// are added to the Request object, and that when PVCs are backed up with restic, the +// claimed PVs are not also snapshotted using a VolumeSnapshotter. 
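+//
+// For reference, the restic opt-in annotation exercised below is built with the shared
+// builder helpers, roughly as in this sketch (the same calls appear in the test cases):
+//
+//	builder.ForPod("ns-1", "pod-1").
+//		ObjectMeta(builder.WithAnnotations("backup.velero.io/backup-volumes", "vol-1,vol-2")).
+//		Result()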
+func TestBackupWithRestic(t *testing.T) { tests := []struct { - name string - hookSpec v1.BackupResourceHookSpec - expected resourceHook + name string + backup *velerov1.Backup + apiResources []*test.APIResource + vsl *velerov1.VolumeSnapshotLocation + snapshotterGetter volumeSnapshotterGetter + want []*velerov1.PodVolumeBackup }{ { - name: "Full test", - hookSpec: v1.BackupResourceHookSpec{ - Name: "spec1", - IncludedNamespaces: []string{"ns1", "ns2"}, - ExcludedNamespaces: []string{"ns3", "ns4"}, - IncludedResources: []string{"foo", "fie"}, - ExcludedResources: []string{"bar", "baz"}, - PreHooks: []v1.BackupResourceHook{ - { - Exec: &v1.ExecHook{ - Container: "a", - Command: []string{"b"}, - }, - }, - }, - PostHooks: []v1.BackupResourceHook{ - { - Exec: &v1.ExecHook{ - Container: "c", - Command: []string{"d"}, - }, - }, - }, + name: "a pod annotated for restic backup should result in pod volume backups being returned", + backup: defaultBackup().Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").ObjectMeta(builder.WithAnnotations("backup.velero.io/backup-volumes", "foo")).Result(), + ), }, - expected: resourceHook{ - name: "spec1", - namespaces: collections.NewIncludesExcludes().Includes("ns1", "ns2").Excludes("ns3", "ns4"), - resources: collections.NewIncludesExcludes().Includes("foodies.somegroup", "fields.somegroup").Excludes("barnacles.anothergroup", "bazaars.anothergroup"), - pre: []v1.BackupResourceHook{ - { - Exec: &v1.ExecHook{ - Container: "a", - Command: []string{"b"}, - }, - }, - }, - post: []v1.BackupResourceHook{ - { - Exec: &v1.ExecHook{ - Container: "c", - Command: []string{"d"}, - }, - }, - }, + want: []*velerov1.PodVolumeBackup{ + builder.ForPodVolumeBackup("velero", "pvb-1").Result(), + }, + }, + { + name: "when PVC pod volumes are backed up using restic, their claimed PVs are not also snapshotted", + backup: defaultBackup().Result(), + apiResources: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1"). + Volumes( + builder.ForVolume("vol-1").PersistentVolumeClaimSource("pvc-1").Result(), + builder.ForVolume("vol-2").PersistentVolumeClaimSource("pvc-2").Result(), + ). + ObjectMeta( + builder.WithAnnotations("backup.velero.io/backup-volumes", "vol-1,vol-2"), + ). + Result(), + ), + test.PVCs( + builder.ForPersistentVolumeClaim("ns-1", "pvc-1").VolumeName("pv-1").Result(), + builder.ForPersistentVolumeClaim("ns-1", "pvc-2").VolumeName("pv-2").Result(), + ), + test.PVs( + + builder.ForPersistentVolume("pv-1").ClaimRef("ns-1", "pvc-1").Result(), + builder.ForPersistentVolume("pv-2").ClaimRef("ns-1", "pvc-2").Result(), + ), + }, + vsl: newSnapshotLocation("velero", "default", "default"), + snapshotterGetter: map[string]velero.VolumeSnapshotter{ + "default": new(fakeVolumeSnapshotter). + WithVolume("pv-1", "vol-1", "", "type-1", 100, false). 
+ WithVolume("pv-2", "vol-2", "", "type-1", 100, false), + }, + want: []*velerov1.PodVolumeBackup{ + builder.ForPodVolumeBackup("velero", "pvb-1").Result(), + builder.ForPodVolumeBackup("velero", "pvb-2").Result(), }, }, } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - resources := map[schema.GroupVersionResource]schema.GroupVersionResource{ - {Resource: "foo"}: {Group: "somegroup", Resource: "foodies"}, - {Resource: "fie"}: {Group: "somegroup", Resource: "fields"}, - {Resource: "bar"}: {Group: "anothergroup", Resource: "barnacles"}, - {Resource: "baz"}: {Group: "anothergroup", Resource: "bazaars"}, + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + var ( + h = newHarness(t) + req = &Request{Backup: tc.backup, SnapshotLocations: []*velerov1.VolumeSnapshotLocation{tc.vsl}} + backupFile = bytes.NewBuffer([]byte{}) + ) + + h.backupper.resticBackupperFactory = &fakeResticBackupperFactory{ + podVolumeBackups: tc.want, + } + + for _, resource := range tc.apiResources { + h.addItems(t, resource) } - discoveryHelper := velerotest.NewFakeDiscoveryHelper(false, resources) - actual, err := getResourceHook(test.hookSpec, discoveryHelper) - require.NoError(t, err) - assert.Equal(t, test.expected, actual) + require.NoError(t, h.backupper.Backup(h.log, req, backupFile, nil, tc.snapshotterGetter)) + + assert.Equal(t, tc.want, req.PodVolumeBackups) + + // this assumes that we don't have any test cases where some PVs should be snapshotted using a VolumeSnapshotter + assert.Nil(t, req.VolumeSnapshots) }) } } + +// pluggableAction is a backup item action that can be plugged with an Execute +// function body at runtime. +type pluggableAction struct { + selector velero.ResourceSelector + executeFunc func(runtime.Unstructured, *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, error) +} + +func (a *pluggableAction) Execute(item runtime.Unstructured, backup *velerov1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, error) { + if a.executeFunc == nil { + return item, nil, nil + } + + return a.executeFunc(item, backup) +} + +func (a *pluggableAction) AppliesTo() (velero.ResourceSelector, error) { + return a.selector, nil +} + +type harness struct { + *test.APIServer + backupper *kubernetesBackupper + log logrus.FieldLogger +} + +func (h *harness) addItems(t *testing.T, resource *test.APIResource) { + t.Helper() + + h.DiscoveryClient.WithAPIResource(resource) + require.NoError(t, h.backupper.discoveryHelper.Refresh()) + + for _, item := range resource.Items { + obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(item) + require.NoError(t, err) + + unstructuredObj := &unstructured.Unstructured{Object: obj} + + if resource.Namespaced { + _, err = h.DynamicClient.Resource(resource.GVR()).Namespace(item.GetNamespace()).Create(unstructuredObj, metav1.CreateOptions{}) + } else { + _, err = h.DynamicClient.Resource(resource.GVR()).Create(unstructuredObj, metav1.CreateOptions{}) + } + require.NoError(t, err) + } +} + +func newHarness(t *testing.T) *harness { + t.Helper() + + apiServer := test.NewAPIServer(t) + log := logrus.StandardLogger() + + discoveryHelper, err := discovery.NewHelper(apiServer.DiscoveryClient, log) + require.NoError(t, err) + + return &harness{ + APIServer: apiServer, + backupper: &kubernetesBackupper{ + dynamicFactory: client.NewDynamicFactory(apiServer.DynamicClient), + discoveryHelper: discoveryHelper, + groupBackupperFactory: new(defaultGroupBackupperFactory), + + // unsupported + podCommandExecutor: nil, + 
resticBackupperFactory: nil, + resticTimeout: 0, + }, + log: log, + } +} + +func newSnapshotLocation(ns, name, provider string) *velerov1.VolumeSnapshotLocation { + return &velerov1.VolumeSnapshotLocation{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns, + Name: name, + }, + Spec: velerov1.VolumeSnapshotLocationSpec{ + Provider: provider, + }, + } +} + +func defaultBackup() *builder.BackupBuilder { + return builder.ForBackup(velerov1.DefaultNamespace, "backup-1") +} + +func toUnstructuredOrFail(t *testing.T, obj interface{}) map[string]interface{} { + t.Helper() + + res, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj) + require.NoError(t, err) + + return res +} + +// assertTarballContents verifies that the gzipped tarball stored in the provided +// backupFile contains exactly the file names specified. +func assertTarballContents(t *testing.T, backupFile io.Reader, items ...string) { + t.Helper() + + gzr, err := gzip.NewReader(backupFile) + require.NoError(t, err) + + r := tar.NewReader(gzr) + + var files []string + for { + hdr, err := r.Next() + if err == io.EOF { + break + } + require.NoError(t, err) + + files = append(files, hdr.Name) + } + + sort.Strings(files) + sort.Strings(items) + assert.Equal(t, items, files) +} + +// unstructuredObject is a type alias to improve readability. +type unstructuredObject map[string]interface{} + +// assertTarballFileContents verifies that the gzipped tarball stored in the provided +// backupFile contains the files specified as keys in 'want', and for each of those +// files verifies that the content of the file is JSON and is equivalent to the JSON +// content stored as values in 'want'. +func assertTarballFileContents(t *testing.T, backupFile io.Reader, want map[string]unstructuredObject) { + t.Helper() + + gzr, err := gzip.NewReader(backupFile) + require.NoError(t, err) + + r := tar.NewReader(gzr) + items := make(map[string][]byte) + + for { + hdr, err := r.Next() + if err == io.EOF { + break + } + require.NoError(t, err) + + bytes, err := ioutil.ReadAll(r) + require.NoError(t, err) + + items[hdr.Name] = bytes + } + + for name, wantItem := range want { + gotData, ok := items[name] + assert.True(t, ok, "did not find item %s in tarball", name) + if !ok { + continue + } + + // json-unmarshal the data from the tarball + var got unstructuredObject + err := json.Unmarshal(gotData, &got) + assert.NoError(t, err) + if err != nil { + continue + } + + assert.Equal(t, wantItem, got) + } +} + +// assertTarballOrdering ensures that resources were written to the tarball in the expected +// order. Any resources *not* in orderedResources are required to come *after* all resources +// in orderedResources, in any order. +func assertTarballOrdering(t *testing.T, backupFile io.Reader, orderedResources ...string) { + t.Helper() + + gzr, err := gzip.NewReader(backupFile) + require.NoError(t, err) + + r := tar.NewReader(gzr) + + // lastSeen tracks the index in 'orderedResources' of the last resource type + // we saw in the tarball. Once we've seen a resource in 'orderedResources', + // we should never see another instance of a prior resource. 
+	lastSeen := 0
+
+	for {
+		hdr, err := r.Next()
+		if err == io.EOF {
+			break
+		}
+		require.NoError(t, err)
+
+		// ignore files like metadata/version
+		if !strings.HasPrefix(hdr.Name, "resources/") {
+			continue
+		}
+
+		// get the resource name
+		parts := strings.Split(hdr.Name, "/")
+		require.True(t, len(parts) >= 2)
+		resourceName := parts[1]
+
+		// Find the index in 'orderedResources' of the resource type for
+		// the current tar item, if it exists. This index ('current') *must*
+		// be greater than or equal to 'lastSeen', which was the last resource
+		// we saw, since otherwise the current resource would be out of order. By
+		// initializing current to len(orderedResources), we're saying that if the resource
+		// is not explicitly in orderedResources, then it must come *after*
+		// all orderedResources.
+		current := len(orderedResources)
+		for i, item := range orderedResources {
+			if item == resourceName {
+				current = i
+				break
+			}
+		}
+
+		// the index of the current resource must be the same as or greater than the index of
+		// the last resource we saw for the backed-up order to be correct.
+		assert.True(t, current >= lastSeen, "%s was backed up out of order", resourceName)
+		lastSeen = current
+	}
+}
diff --git a/pkg/backup/builder.go b/pkg/backup/builder.go
deleted file mode 100644
index 9763b1f3c20..00000000000
--- a/pkg/backup/builder.go
+++ /dev/null
@@ -1,109 +0,0 @@
-/*
-Copyright 2019 the Velero contributors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package backup
-
-import (
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-	velerov1api "github.com/heptio/velero/pkg/apis/velero/v1"
-)
-
-// Builder is a helper for concisely constructing Backup API objects.
-type Builder struct {
-	backup velerov1api.Backup
-}
-
-// NewBuilder returns a Builder for a Backup with no namespace/name.
-func NewBuilder() *Builder {
-	return NewNamedBuilder("", "")
-}
-
-// NewNamedBuilder returns a Builder for a Backup with the specified namespace
-// and name.
-func NewNamedBuilder(namespace, name string) *Builder {
-	return &Builder{
-		backup: velerov1api.Backup{
-			TypeMeta: metav1.TypeMeta{
-				APIVersion: velerov1api.SchemeGroupVersion.String(),
-				Kind: "Backup",
-			},
-			ObjectMeta: metav1.ObjectMeta{
-				Namespace: namespace,
-				Name: name,
-			},
-		},
-	}
-}
-
-// Backup returns the built Backup API object.
-func (b *Builder) Backup() *velerov1api.Backup {
-	return &b.backup
-}
-
-// Namespace sets the Backup's namespace.
-func (b *Builder) Namespace(namespace string) *Builder {
-	b.backup.Namespace = namespace
-	return b
-}
-
-// Name sets the Backup's name.
-func (b *Builder) Name(name string) *Builder {
-	b.backup.Name = name
-	return b
-}
-
-// IncludedNamespaces sets the Backup's included namespaces.
-func (b *Builder) IncludedNamespaces(namespaces ...string) *Builder {
-	b.backup.Spec.IncludedNamespaces = namespaces
-	return b
-}
-
-// ExcludedNamespaces sets the Backup's excluded namespaces.
-func (b *Builder) ExcludedNamespaces(namespaces ...string) *Builder { - b.backup.Spec.ExcludedNamespaces = namespaces - return b -} - -// IncludedResources sets the Backup's included resources. -func (b *Builder) IncludedResources(resources ...string) *Builder { - b.backup.Spec.IncludedResources = resources - return b -} - -// ExcludedResources sets the Backup's excluded resources. -func (b *Builder) ExcludedResources(resources ...string) *Builder { - b.backup.Spec.ExcludedResources = resources - return b -} - -// IncludeClusterResources sets the Backup's "include cluster resources" flag. -func (b *Builder) IncludeClusterResources(val bool) *Builder { - b.backup.Spec.IncludeClusterResources = &val - return b -} - -// LabelSelector sets the Backup's label selector. -func (b *Builder) LabelSelector(selector *metav1.LabelSelector) *Builder { - b.backup.Spec.LabelSelector = selector - return b -} - -// SnapshotVolumes sets the Backup's "snapshot volumes" flag. -func (b *Builder) SnapshotVolumes(val bool) *Builder { - b.backup.Spec.SnapshotVolumes = &val - return b -} diff --git a/pkg/backup/group_backupper.go b/pkg/backup/group_backupper.go index 237f96e6403..cee6ce8b302 100644 --- a/pkg/backup/group_backupper.go +++ b/pkg/backup/group_backupper.go @@ -37,7 +37,6 @@ type groupBackupperFactory interface { backupRequest *Request, dynamicFactory client.DynamicFactory, discoveryHelper discovery.Helper, - backedUpItems map[itemKey]struct{}, cohabitatingResources map[string]*cohabitatingResource, podCommandExecutor podexec.PodCommandExecutor, tarWriter tarWriter, @@ -54,7 +53,6 @@ func (f *defaultGroupBackupperFactory) newGroupBackupper( backupRequest *Request, dynamicFactory client.DynamicFactory, discoveryHelper discovery.Helper, - backedUpItems map[itemKey]struct{}, cohabitatingResources map[string]*cohabitatingResource, podCommandExecutor podexec.PodCommandExecutor, tarWriter tarWriter, @@ -67,7 +65,6 @@ func (f *defaultGroupBackupperFactory) newGroupBackupper( backupRequest: backupRequest, dynamicFactory: dynamicFactory, discoveryHelper: discoveryHelper, - backedUpItems: backedUpItems, cohabitatingResources: cohabitatingResources, podCommandExecutor: podCommandExecutor, tarWriter: tarWriter, @@ -88,7 +85,6 @@ type defaultGroupBackupper struct { backupRequest *Request dynamicFactory client.DynamicFactory discoveryHelper discovery.Helper - backedUpItems map[itemKey]struct{} cohabitatingResources map[string]*cohabitatingResource podCommandExecutor podexec.PodCommandExecutor tarWriter tarWriter @@ -120,7 +116,6 @@ func (gb *defaultGroupBackupper) backupGroup(group *metav1.APIResourceList) erro gb.backupRequest, gb.dynamicFactory, gb.discoveryHelper, - gb.backedUpItems, gb.cohabitatingResources, gb.podCommandExecutor, gb.tarWriter, diff --git a/pkg/backup/item_backupper.go b/pkg/backup/item_backupper.go index 6b4b8369812..2532ad5f03d 100644 --- a/pkg/backup/item_backupper.go +++ b/pkg/backup/item_backupper.go @@ -19,6 +19,7 @@ package backup import ( "archive/tar" "encoding/json" + "fmt" "path/filepath" "time" @@ -33,6 +34,7 @@ import ( kubeerrs "k8s.io/apimachinery/pkg/util/errors" api "github.com/heptio/velero/pkg/apis/velero/v1" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" "github.com/heptio/velero/pkg/client" "github.com/heptio/velero/pkg/discovery" "github.com/heptio/velero/pkg/kuberesource" @@ -45,7 +47,6 @@ import ( type itemBackupperFactory interface { newItemBackupper( backup *Request, - backedUpItems map[itemKey]struct{}, podCommandExecutor podexec.PodCommandExecutor, 
tarWriter tarWriter, dynamicFactory client.DynamicFactory, @@ -60,7 +61,6 @@ type defaultItemBackupperFactory struct{} func (f *defaultItemBackupperFactory) newItemBackupper( backupRequest *Request, - backedUpItems map[itemKey]struct{}, podCommandExecutor podexec.PodCommandExecutor, tarWriter tarWriter, dynamicFactory client.DynamicFactory, @@ -71,7 +71,6 @@ func (f *defaultItemBackupperFactory) newItemBackupper( ) ItemBackupper { ib := &defaultItemBackupper{ backupRequest: backupRequest, - backedUpItems: backedUpItems, tarWriter: tarWriter, dynamicFactory: dynamicFactory, discoveryHelper: discoveryHelper, @@ -96,7 +95,6 @@ type ItemBackupper interface { type defaultItemBackupper struct { backupRequest *Request - backedUpItems map[itemKey]struct{} tarWriter tarWriter dynamicFactory client.DynamicFactory discoveryHelper discovery.Helper @@ -148,17 +146,18 @@ func (ib *defaultItemBackupper) backupItem(logger logrus.FieldLogger, obj runtim log.Info("Skipping item because it's being deleted.") return nil } + key := itemKey{ - resource: groupResource.String(), + resource: resourceKey(obj), namespace: namespace, name: name, } - if _, exists := ib.backedUpItems[key]; exists { + if _, exists := ib.backupRequest.BackedUpItems[key]; exists { log.Info("Skipping item because it's already been backed up.") return nil } - ib.backedUpItems[key] = struct{}{} + ib.backupRequest.BackedUpItems[key] = struct{}{} log.Info("Backing up item") @@ -206,6 +205,9 @@ func (ib *defaultItemBackupper) backupItem(logger logrus.FieldLogger, obj runtim if metadata, err = meta.Accessor(obj); err != nil { return errors.WithStack(err) } + // update name and namespace in case they were modified in an action + name = metadata.GetName() + namespace = metadata.GetNamespace() if groupResource == kuberesource.PersistentVolumes { if err := ib.takePVSnapshot(obj, log); err != nil { @@ -214,15 +216,11 @@ func (ib *defaultItemBackupper) backupItem(logger logrus.FieldLogger, obj runtim } if groupResource == kuberesource.Pods && pod != nil { - // this function will return partial results, so process volumeSnapshots + // this function will return partial results, so process podVolumeBackups // even if there are errors. - volumeSnapshots, errs := ib.backupPodVolumes(log, pod, resticVolumesToBackup) - - // annotate the pod with the successful volume snapshots - for volume, snapshot := range volumeSnapshots { - restic.SetPodSnapshotAnnotation(metadata, volume, snapshot) - } + podVolumeBackups, errs := ib.backupPodVolumes(log, pod, resticVolumesToBackup) + ib.backupRequest.PodVolumeBackups = append(ib.backupRequest.PodVolumeBackups, podVolumeBackups...) backupErrs = append(backupErrs, errs...) } @@ -266,9 +264,9 @@ func (ib *defaultItemBackupper) backupItem(logger logrus.FieldLogger, obj runtim return nil } -// backupPodVolumes triggers restic backups of the specified pod volumes, and returns a map of volume name -> snapshot ID +// backupPodVolumes triggers restic backups of the specified pod volumes, and returns a list of PodVolumeBackups // for volumes that were successfully backed up, and a slice of any errors that were encountered. 
-func (ib *defaultItemBackupper) backupPodVolumes(log logrus.FieldLogger, pod *corev1api.Pod, volumes []string) (map[string]string, []error) { +func (ib *defaultItemBackupper) backupPodVolumes(log logrus.FieldLogger, pod *corev1api.Pod, volumes []string) ([]*velerov1api.PodVolumeBackup, []error) { if len(volumes) == 0 { return nil, nil } @@ -299,6 +297,11 @@ func (ib *defaultItemBackupper) executeActions( continue } + if namespace == "" && !action.namespaceIncludesExcludes.IncludeEverything() { + log.Debug("Skipping action because resource is cluster-scoped and action only applies to specific namespaces") + continue + } + if !action.selector.Matches(labels.Set(metadata.GetLabels())) { log.Debug("Skipping action because label selector does not match") continue @@ -433,10 +436,12 @@ func (ib *defaultItemBackupper) takePVSnapshot(obj runtime.Unstructured, log log log = log.WithField("volumeID", volumeID) - tags := map[string]string{ - "velero.io/backup": ib.backupRequest.Name, - "velero.io/pv": pv.Name, + tags := ib.backupRequest.GetLabels() + if tags == nil { + tags = map[string]string{} } + tags["velero.io/backup"] = ib.backupRequest.Name + tags["velero.io/pv"] = pv.Name log.Info("Getting volume information") volumeType, iops, err := volumeSnapshotter.GetVolumeInfo(volumeID, pvFailureDomainZone) @@ -479,3 +484,10 @@ func volumeSnapshot(backup *api.Backup, volumeName, volumeID, volumeType, az, lo }, } } + +// resourceKey returns a string representing the object's GroupVersionKind (e.g. +// apps/v1/Deployment). +func resourceKey(obj runtime.Unstructured) string { + gvk := obj.GetObjectKind().GroupVersionKind() + return fmt.Sprintf("%s/%s", gvk.GroupVersion().String(), gvk.Kind) +} diff --git a/pkg/backup/item_backupper_test.go b/pkg/backup/item_backupper_test.go index 5900a5890c3..7200e27d555 100644 --- a/pkg/backup/item_backupper_test.go +++ b/pkg/backup/item_backupper_test.go @@ -1,5 +1,5 @@ /* -Copyright 2017, 2019 the Velero contributors. +Copyright 2019 the Velero contributors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,342 +17,31 @@ limitations under the License. 
package backup import ( - "archive/tar" - "encoding/json" - "reflect" "testing" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/sets" - v1 "github.com/heptio/velero/pkg/apis/velero/v1" - "github.com/heptio/velero/pkg/plugin/velero" - resticmocks "github.com/heptio/velero/pkg/restic/mocks" - "github.com/heptio/velero/pkg/util/collections" - velerotest "github.com/heptio/velero/pkg/util/test" + "github.com/heptio/velero/pkg/builder" ) -func TestBackupItemNoSkips(t *testing.T) { +func Test_resourceKey(t *testing.T) { tests := []struct { - name string - item string - namespaceIncludesExcludes *collections.IncludesExcludes - expectError bool - expectExcluded bool - expectedTarHeaderName string - tarWriteError bool - tarHeaderWriteError bool - groupResource string - snapshottableVolumes map[string]velerotest.VolumeBackupInfo - snapshotError error - trackedPVCs sets.String - expectedTrackedPVCs sets.String + resource metav1.Object + want string }{ - { - name: "tar header write error", - item: `{"metadata":{"name":"bar"},"spec":{"color":"green"},"status":{"foo":"bar"}}`, - expectError: true, - tarHeaderWriteError: true, - }, - { - name: "tar write error", - item: `{"metadata":{"name":"bar"},"spec":{"color":"green"},"status":{"foo":"bar"}}`, - expectError: true, - tarWriteError: true, - }, - { - name: "takePVSnapshot is not invoked for PVs when their claim is tracked in the restic PVC tracker", - namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), - item: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv", "labels": {"failure-domain.beta.kubernetes.io/zone": "us-east-1c"}}, "spec": {"claimRef": {"namespace": "pvc-ns", "name": "pvc"}, "awsElasticBlockStore": {"volumeID": "aws://us-east-1c/vol-abc123"}}}`, - expectError: false, - expectExcluded: false, - expectedTarHeaderName: "resources/persistentvolumes/cluster/mypv.json", - groupResource: "persistentvolumes", - // empty snapshottableVolumes causes a volumeSnapshotter to be created, but no - // snapshots are expected to be taken. 
- snapshottableVolumes: map[string]velerotest.VolumeBackupInfo{}, - trackedPVCs: sets.NewString(key("pvc-ns", "pvc"), key("another-pvc-ns", "another-pvc")), - }, - { - name: "pod's restic PVC volume backups (only) are tracked", - item: `{"apiVersion": "v1", "kind": "Pod", "spec": {"volumes": [{"name": "volume-1", "persistentVolumeClaim": {"claimName": "bar"}},{"name": "volume-2", "persistentVolumeClaim": {"claimName": "baz"}},{"name": "volume-1", "emptyDir": {}}]}, "metadata":{"namespace":"foo","name":"bar", "annotations": {"backup.velero.io/backup-volumes": "volume-1,volume-2"}}}`, - namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), - groupResource: "pods", - expectError: false, - expectExcluded: false, - expectedTarHeaderName: "resources/pods/namespaces/foo/bar.json", - expectedTrackedPVCs: sets.NewString(key("foo", "bar"), key("foo", "baz")), - }, + {resource: builder.ForPod("default", "test").Result(), want: "v1/Pod"}, + {resource: builder.ForDeployment("default", "test").Result(), want: "apps/v1/Deployment"}, + {resource: builder.ForPersistentVolume("test").Result(), want: "v1/PersistentVolume"}, + {resource: builder.ForRole("default", "test").Result(), want: "rbac.authorization.k8s.io/v1/Role"}, } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - var ( - backup = new(Request) - groupResource = schema.ParseGroupResource("resource.group") - backedUpItems = make(map[itemKey]struct{}) - w = &fakeTarWriter{} - ) - - backup.Backup = new(v1.Backup) - backup.NamespaceIncludesExcludes = collections.NewIncludesExcludes() - backup.ResourceIncludesExcludes = collections.NewIncludesExcludes() - backup.SnapshotLocations = []*v1.VolumeSnapshotLocation{ - newSnapshotLocation("velero", "default", "default"), - } - - if test.groupResource != "" { - groupResource = schema.ParseGroupResource(test.groupResource) - } - - item, err := velerotest.GetAsMap(test.item) - if err != nil { - t.Fatal(err) - } - - namespaces := test.namespaceIncludesExcludes - if namespaces == nil { - namespaces = collections.NewIncludesExcludes() - } - - if test.tarHeaderWriteError { - w.writeHeaderError = errors.New("error") - } - if test.tarWriteError { - w.writeError = errors.New("error") - } - - podCommandExecutor := &velerotest.MockPodCommandExecutor{} - defer podCommandExecutor.AssertExpectations(t) - - dynamicFactory := &velerotest.FakeDynamicFactory{} - defer dynamicFactory.AssertExpectations(t) - - discoveryHelper := velerotest.NewFakeDiscoveryHelper(true, nil) - - volumeSnapshotterGetter := make(volumeSnapshotterGetter) - - b := (&defaultItemBackupperFactory{}).newItemBackupper( - backup, - backedUpItems, - podCommandExecutor, - w, - dynamicFactory, - discoveryHelper, - nil, // restic backupper - newPVCSnapshotTracker(), - volumeSnapshotterGetter, - ).(*defaultItemBackupper) - - var volumeSnapshotter *velerotest.FakeVolumeSnapshotter - if test.snapshottableVolumes != nil { - volumeSnapshotter = &velerotest.FakeVolumeSnapshotter{ - SnapshottableVolumes: test.snapshottableVolumes, - VolumeID: "vol-abc123", - Error: test.snapshotError, - } - - volumeSnapshotterGetter["default"] = volumeSnapshotter - } - - if test.trackedPVCs != nil { - b.resticSnapshotTracker.pvcs = test.trackedPVCs - } - - // make sure the podCommandExecutor was set correctly in the real hook handler - assert.Equal(t, podCommandExecutor, b.itemHookHandler.(*defaultItemHookHandler).podCommandExecutor) - - itemHookHandler := &mockItemHookHandler{} - defer itemHookHandler.AssertExpectations(t) - 
b.itemHookHandler = itemHookHandler - - obj := &unstructured.Unstructured{Object: item} - itemHookHandler.On("handleHooks", mock.Anything, groupResource, obj, backup.ResourceHooks, hookPhasePre).Return(nil) - itemHookHandler.On("handleHooks", mock.Anything, groupResource, obj, backup.ResourceHooks, hookPhasePost).Return(nil) - - err = b.backupItem(velerotest.NewLogger(), obj, groupResource) - gotError := err != nil - if e, a := test.expectError, gotError; e != a { - t.Fatalf("error: expected %t, got %t: %v", e, a, err) - } - if test.expectError { - return - } - - if test.expectExcluded { - if len(w.headers) > 0 { - t.Errorf("unexpected header write") - } - if len(w.data) > 0 { - t.Errorf("unexpected data write") - } - return - } - - // Convert to JSON for comparing number of bytes to the tar header - itemJSON, err := json.Marshal(&item) - if err != nil { - t.Fatal(err) - } - require.Equal(t, 1, len(w.headers), "headers") - assert.Equal(t, test.expectedTarHeaderName, w.headers[0].Name, "header.name") - assert.Equal(t, int64(len(itemJSON)), w.headers[0].Size, "header.size") - assert.Equal(t, byte(tar.TypeReg), w.headers[0].Typeflag, "header.typeflag") - assert.Equal(t, int64(0755), w.headers[0].Mode, "header.mode") - assert.False(t, w.headers[0].ModTime.IsZero(), "header.modTime set") - assert.Equal(t, 1, len(w.data), "# of data") - - actual, err := velerotest.GetAsMap(string(w.data[0])) - if err != nil { - t.Fatal(err) - } - if e, a := item, actual; !reflect.DeepEqual(e, a) { - t.Errorf("data: expected %s, got %s", e, a) - } - - if test.snapshottableVolumes != nil { - require.Equal(t, len(test.snapshottableVolumes), len(volumeSnapshotter.SnapshotsTaken)) - } - - if len(test.snapshottableVolumes) > 0 { - require.Len(t, backup.VolumeSnapshots, 1) - snapshot := backup.VolumeSnapshots[0] - - assert.Equal(t, test.snapshottableVolumes["vol-abc123"].SnapshotID, snapshot.Status.ProviderSnapshotID) - assert.Equal(t, test.snapshottableVolumes["vol-abc123"].Type, snapshot.Spec.VolumeType) - assert.Equal(t, test.snapshottableVolumes["vol-abc123"].Iops, snapshot.Spec.VolumeIOPS) - assert.Equal(t, test.snapshottableVolumes["vol-abc123"].AvailabilityZone, snapshot.Spec.VolumeAZ) - } - - if test.expectedTrackedPVCs != nil { - require.Equal(t, len(test.expectedTrackedPVCs), len(b.resticSnapshotTracker.pvcs)) - - for key := range test.expectedTrackedPVCs { - assert.True(t, b.resticSnapshotTracker.pvcs.Has(key)) - } - } + for _, tt := range tests { + t.Run(tt.want, func(t *testing.T) { + content, _ := runtime.DefaultUnstructuredConverter.ToUnstructured(tt.resource) + unstructured := &unstructured.Unstructured{Object: content} + assert.Equal(t, tt.want, resourceKey(unstructured)) }) } } - -type addAnnotationAction struct{} - -func (a *addAnnotationAction) Execute(item runtime.Unstructured, backup *v1.Backup) (runtime.Unstructured, []velero.ResourceIdentifier, error) { - // since item actions run out-of-proc, do a deep-copy here to simulate passing data - // across a process boundary. 
- copy := item.(*unstructured.Unstructured).DeepCopy() - - metadata, err := meta.Accessor(copy) - if err != nil { - return copy, nil, nil - } - - annotations := metadata.GetAnnotations() - if annotations == nil { - annotations = make(map[string]string) - } - annotations["foo"] = "bar" - metadata.SetAnnotations(annotations) - - return copy, nil, nil -} - -func (a *addAnnotationAction) AppliesTo() (velero.ResourceSelector, error) { - panic("not implemented") -} - -func TestResticAnnotationsPersist(t *testing.T) { - var ( - w = &fakeTarWriter{} - obj = &unstructured.Unstructured{ - Object: map[string]interface{}{ - "metadata": map[string]interface{}{ - "namespace": "myns", - "name": "bar", - "annotations": map[string]interface{}{ - "backup.velero.io/backup-volumes": "volume-1,volume-2", - }, - }, - }, - } - req = &Request{ - NamespaceIncludesExcludes: collections.NewIncludesExcludes(), - ResourceIncludesExcludes: collections.NewIncludesExcludes(), - ResolvedActions: []resolvedAction{ - { - BackupItemAction: &addAnnotationAction{}, - namespaceIncludesExcludes: collections.NewIncludesExcludes(), - resourceIncludesExcludes: collections.NewIncludesExcludes(), - selector: labels.Everything(), - }, - }, - } - resticBackupper = &resticmocks.Backupper{} - b = (&defaultItemBackupperFactory{}).newItemBackupper( - req, - make(map[itemKey]struct{}), - nil, - w, - &velerotest.FakeDynamicFactory{}, - velerotest.NewFakeDiscoveryHelper(true, nil), - resticBackupper, - newPVCSnapshotTracker(), - nil, - ).(*defaultItemBackupper) - ) - - resticBackupper. - On("BackupPodVolumes", mock.Anything, mock.Anything, mock.Anything). - Return(map[string]string{"volume-1": "snapshot-1", "volume-2": "snapshot-2"}, nil) - - // our expected backed-up object is the passed-in object, plus the annotation - // that the backup item action adds, plus the annotations that the restic - // backupper adds - expected := obj.DeepCopy() - annotations := expected.GetAnnotations() - if annotations == nil { - annotations = make(map[string]string) - } - annotations["foo"] = "bar" - annotations["snapshot.velero.io/volume-1"] = "snapshot-1" - annotations["snapshot.velero.io/volume-2"] = "snapshot-2" - expected.SetAnnotations(annotations) - - // method under test - require.NoError(t, b.backupItem(velerotest.NewLogger(), obj, schema.ParseGroupResource("pods"))) - - // get the actual backed-up item - require.Len(t, w.data, 1) - actual, err := velerotest.GetAsMap(string(w.data[0])) - require.NoError(t, err) - - assert.EqualValues(t, expected.Object, actual) -} - -type fakeTarWriter struct { - closeCalled bool - headers []*tar.Header - data [][]byte - writeHeaderError error - writeError error -} - -func (w *fakeTarWriter) Close() error { return nil } - -func (w *fakeTarWriter) Write(data []byte) (int, error) { - w.data = append(w.data, data) - return 0, w.writeError -} - -func (w *fakeTarWriter) WriteHeader(header *tar.Header) error { - w.headers = append(w.headers, header) - return w.writeHeaderError -} diff --git a/pkg/backup/item_hook_handler_test.go b/pkg/backup/item_hook_handler_test.go index e5180f68e92..9aa787e378c 100644 --- a/pkg/backup/item_hook_handler_test.go +++ b/pkg/backup/item_hook_handler_test.go @@ -32,8 +32,8 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" v1 "github.com/heptio/velero/pkg/apis/velero/v1" + velerotest "github.com/heptio/velero/pkg/test" "github.com/heptio/velero/pkg/util/collections" - velerotest "github.com/heptio/velero/pkg/util/test" ) type mockItemHookHandler struct { @@ -694,3 +694,11 @@ func 
TestResourceHookApplicableTo(t *testing.T) { }) } } + +func parseLabelSelectorOrDie(s string) labels.Selector { + ret, err := labels.Parse(s) + if err != nil { + panic(err) + } + return ret +} diff --git a/pkg/backup/pod_action_test.go b/pkg/backup/pod_action_test.go index c933aeb8c0b..59f1c6404a0 100644 --- a/pkg/backup/pod_action_test.go +++ b/pkg/backup/pod_action_test.go @@ -25,7 +25,7 @@ import ( "github.com/heptio/velero/pkg/kuberesource" "github.com/heptio/velero/pkg/plugin/velero" - velerotest "github.com/heptio/velero/pkg/util/test" + velerotest "github.com/heptio/velero/pkg/test" ) func TestPodActionAppliesTo(t *testing.T) { diff --git a/pkg/backup/request.go b/pkg/backup/request.go index d405ea60698..a44c8f00ed8 100644 --- a/pkg/backup/request.go +++ b/pkg/backup/request.go @@ -1,11 +1,36 @@ +/* +Copyright 2019 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package backup import ( + "fmt" + "sort" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" "github.com/heptio/velero/pkg/util/collections" "github.com/heptio/velero/pkg/volume" ) +type itemKey struct { + resource string + namespace string + name string +} + // Request is a request for a backup, with all references to other objects // materialized (e.g. backup/snapshot locations, includes/excludes, etc.) type Request struct { @@ -18,5 +43,27 @@ type Request struct { ResourceHooks []resourceHook ResolvedActions []resolvedAction - VolumeSnapshots []*volume.Snapshot + VolumeSnapshots []*volume.Snapshot + PodVolumeBackups []*velerov1api.PodVolumeBackup + BackedUpItems map[itemKey]struct{} +} + +// BackupResourceList returns the list of backed up resources grouped by the API +// Version and Kind +func (r *Request) BackupResourceList() map[string][]string { + resources := map[string][]string{} + for i := range r.BackedUpItems { + entry := i.name + if i.namespace != "" { + entry = fmt.Sprintf("%s/%s", i.namespace, i.name) + } + resources[i.resource] = append(resources[i.resource], entry) + } + + // sort namespace/name entries for each GVK + for _, v := range resources { + sort.Strings(v) + } + + return resources } diff --git a/pkg/backup/request_test.go b/pkg/backup/request_test.go new file mode 100644 index 00000000000..9b04f0b533f --- /dev/null +++ b/pkg/backup/request_test.go @@ -0,0 +1,82 @@ +/* +Copyright 2019 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package backup + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestRequest_BackupResourceList(t *testing.T) { + items := []itemKey{ + { + resource: "apps/v1/Deployment", + name: "my-deploy", + namespace: "default", + }, + { + resource: "v1/Pod", + name: "pod1", + namespace: "ns1", + }, + { + resource: "v1/Pod", + name: "pod2", + namespace: "ns2", + }, + { + resource: "v1/PersistentVolume", + name: "my-pv", + }, + } + backedUpItems := map[itemKey]struct{}{} + for _, it := range items { + backedUpItems[it] = struct{}{} + } + + req := Request{BackedUpItems: backedUpItems} + assert.Equal(t, map[string][]string{ + "apps/v1/Deployment": {"default/my-deploy"}, + "v1/Pod": {"ns1/pod1", "ns2/pod2"}, + "v1/PersistentVolume": {"my-pv"}, + }, req.BackupResourceList()) +} + +func TestRequest_BackupResourceListEntriesSorted(t *testing.T) { + items := []itemKey{ + { + resource: "v1/Pod", + name: "pod2", + namespace: "ns2", + }, + { + resource: "v1/Pod", + name: "pod1", + namespace: "ns1", + }, + } + backedUpItems := map[itemKey]struct{}{} + for _, it := range items { + backedUpItems[it] = struct{}{} + } + + req := Request{BackedUpItems: backedUpItems} + assert.Equal(t, map[string][]string{ + "v1/Pod": {"ns1/pod1", "ns2/pod2"}, + }, req.BackupResourceList()) +} diff --git a/pkg/backup/resource_backupper.go b/pkg/backup/resource_backupper.go index 86d1ca8a496..771b4494f8d 100644 --- a/pkg/backup/resource_backupper.go +++ b/pkg/backup/resource_backupper.go @@ -40,7 +40,6 @@ type resourceBackupperFactory interface { backupRequest *Request, dynamicFactory client.DynamicFactory, discoveryHelper discovery.Helper, - backedUpItems map[itemKey]struct{}, cohabitatingResources map[string]*cohabitatingResource, podCommandExecutor podexec.PodCommandExecutor, tarWriter tarWriter, @@ -57,7 +56,6 @@ func (f *defaultResourceBackupperFactory) newResourceBackupper( backupRequest *Request, dynamicFactory client.DynamicFactory, discoveryHelper discovery.Helper, - backedUpItems map[itemKey]struct{}, cohabitatingResources map[string]*cohabitatingResource, podCommandExecutor podexec.PodCommandExecutor, tarWriter tarWriter, @@ -70,7 +68,6 @@ func (f *defaultResourceBackupperFactory) newResourceBackupper( backupRequest: backupRequest, dynamicFactory: dynamicFactory, discoveryHelper: discoveryHelper, - backedUpItems: backedUpItems, cohabitatingResources: cohabitatingResources, podCommandExecutor: podCommandExecutor, tarWriter: tarWriter, @@ -91,7 +88,6 @@ type defaultResourceBackupper struct { backupRequest *Request dynamicFactory client.DynamicFactory discoveryHelper discovery.Helper - backedUpItems map[itemKey]struct{} cohabitatingResources map[string]*cohabitatingResource podCommandExecutor podexec.PodCommandExecutor tarWriter tarWriter @@ -156,7 +152,6 @@ func (rb *defaultResourceBackupper) backupResource(group *metav1.APIResourceList itemBackupper := rb.itemBackupperFactory.newItemBackupper( rb.backupRequest, - rb.backedUpItems, rb.podCommandExecutor, rb.tarWriter, rb.dynamicFactory, @@ -221,9 +216,9 @@ func (rb *defaultResourceBackupper) backupResource(group *metav1.APIResourceList continue } - var labelSelector string + labelSelector := "velero.io/exclude-from-backup!=true" if selector := rb.backupRequest.Spec.LabelSelector; selector != nil { - labelSelector = metav1.FormatLabelSelector(selector) + labelSelector = labelSelector + "," + metav1.FormatLabelSelector(selector) } log.Info("Listing items") diff --git a/pkg/backup/service_account_action_test.go 
b/pkg/backup/service_account_action_test.go index 152fee3090a..44f723aca08 100644 --- a/pkg/backup/service_account_action_test.go +++ b/pkg/backup/service_account_action_test.go @@ -30,7 +30,7 @@ import ( "github.com/heptio/velero/pkg/kuberesource" "github.com/heptio/velero/pkg/plugin/velero" - velerotest "github.com/heptio/velero/pkg/util/test" + velerotest "github.com/heptio/velero/pkg/test" ) func newV1ClusterRoleBindingList(rbacCRBList []rbac.ClusterRoleBinding) []ClusterRoleBinding { diff --git a/pkg/builder/backup_builder.go b/pkg/builder/backup_builder.go new file mode 100644 index 00000000000..4f3a4f27dc6 --- /dev/null +++ b/pkg/builder/backup_builder.go @@ -0,0 +1,171 @@ +/* +Copyright 2019 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" +) + +/* + +Example usage: + +var backup = builder.ForBackup("velero", "backup-1"). + ObjectMeta( + builder.WithLabels("foo", "bar"), + builder.WithClusterName("cluster-1"), + ). + SnapshotVolumes(true). + Result() + +*/ + +// BackupBuilder builds Backup objects. +type BackupBuilder struct { + object *velerov1api.Backup +} + +// ForBackup is the constructor for a BackupBuilder. +func ForBackup(ns, name string) *BackupBuilder { + return &BackupBuilder{ + object: &velerov1api.Backup{ + TypeMeta: metav1.TypeMeta{ + APIVersion: velerov1api.SchemeGroupVersion.String(), + Kind: "Backup", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns, + Name: name, + }, + }, + } +} + +// Result returns the built Backup. +func (b *BackupBuilder) Result() *velerov1api.Backup { + return b.object +} + +// ObjectMeta applies functional options to the Backup's ObjectMeta. +func (b *BackupBuilder) ObjectMeta(opts ...ObjectMetaOpt) *BackupBuilder { + for _, opt := range opts { + opt(b.object) + } + + return b +} + +// FromSchedule sets the Backup's spec and labels from the Schedule template +func (b *BackupBuilder) FromSchedule(schedule *velerov1api.Schedule) *BackupBuilder { + labels := schedule.Labels + if labels == nil { + labels = make(map[string]string) + } + labels[velerov1api.ScheduleNameLabel] = schedule.Name + + b.object.Spec = schedule.Spec.Template + b.ObjectMeta(WithLabelsMap(labels)) + return b +} + +// IncludedNamespaces sets the Backup's included namespaces. +func (b *BackupBuilder) IncludedNamespaces(namespaces ...string) *BackupBuilder { + b.object.Spec.IncludedNamespaces = namespaces + return b +} + +// ExcludedNamespaces sets the Backup's excluded namespaces. +func (b *BackupBuilder) ExcludedNamespaces(namespaces ...string) *BackupBuilder { + b.object.Spec.ExcludedNamespaces = namespaces + return b +} + +// IncludedResources sets the Backup's included resources. +func (b *BackupBuilder) IncludedResources(resources ...string) *BackupBuilder { + b.object.Spec.IncludedResources = resources + return b +} + +// ExcludedResources sets the Backup's excluded resources. 
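+//
+// Illustrative usage sketch (the namespace, name, and resource values below are
+// example placeholders only):
+//
+//	builder.ForBackup("velero", "backup-1").
+//		IncludedNamespaces("ns-1").
+//		ExcludedResources("secrets").
+//		Result()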
+func (b *BackupBuilder) ExcludedResources(resources ...string) *BackupBuilder { + b.object.Spec.ExcludedResources = resources + return b +} + +// IncludeClusterResources sets the Backup's "include cluster resources" flag. +func (b *BackupBuilder) IncludeClusterResources(val bool) *BackupBuilder { + b.object.Spec.IncludeClusterResources = &val + return b +} + +// LabelSelector sets the Backup's label selector. +func (b *BackupBuilder) LabelSelector(selector *metav1.LabelSelector) *BackupBuilder { + b.object.Spec.LabelSelector = selector + return b +} + +// SnapshotVolumes sets the Backup's "snapshot volumes" flag. +func (b *BackupBuilder) SnapshotVolumes(val bool) *BackupBuilder { + b.object.Spec.SnapshotVolumes = &val + return b +} + +// Phase sets the Backup's phase. +func (b *BackupBuilder) Phase(phase velerov1api.BackupPhase) *BackupBuilder { + b.object.Status.Phase = phase + return b +} + +// StorageLocation sets the Backup's storage location. +func (b *BackupBuilder) StorageLocation(location string) *BackupBuilder { + b.object.Spec.StorageLocation = location + return b +} + +// VolumeSnapshotLocations sets the Backup's volume snapshot locations. +func (b *BackupBuilder) VolumeSnapshotLocations(locations ...string) *BackupBuilder { + b.object.Spec.VolumeSnapshotLocations = locations + return b +} + +// TTL sets the Backup's TTL. +func (b *BackupBuilder) TTL(ttl time.Duration) *BackupBuilder { + b.object.Spec.TTL.Duration = ttl + return b +} + +// Expiration sets the Backup's expiration. +func (b *BackupBuilder) Expiration(val time.Time) *BackupBuilder { + b.object.Status.Expiration.Time = val + return b +} + +// StartTimestamp sets the Backup's start timestamp. +func (b *BackupBuilder) StartTimestamp(val time.Time) *BackupBuilder { + b.object.Status.StartTimestamp.Time = val + return b +} + +// Hooks sets the Backup's hooks. +func (b *BackupBuilder) Hooks(hooks velerov1api.BackupHooks) *BackupBuilder { + b.object.Spec.Hooks = hooks + return b +} diff --git a/pkg/builder/backup_storage_location_builder.go b/pkg/builder/backup_storage_location_builder.go new file mode 100644 index 00000000000..30d4926b685 --- /dev/null +++ b/pkg/builder/backup_storage_location_builder.go @@ -0,0 +1,88 @@ +/* +Copyright 2017, 2019 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" +) + +// BackupStorageLocationBuilder builds BackupStorageLocation objects. +type BackupStorageLocationBuilder struct { + object *velerov1api.BackupStorageLocation +} + +// ForBackupStorageLocation is the constructor for a BackupStorageLocationBuilder. 
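+//
+// Illustrative usage sketch (the namespace, name, provider, bucket, and prefix
+// values below are example placeholders only):
+//
+//	builder.ForBackupStorageLocation("velero", "default").
+//		Provider("aws").
+//		Bucket("my-bucket").
+//		Prefix("backups").
+//		Result()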
+func ForBackupStorageLocation(ns, name string) *BackupStorageLocationBuilder { + return &BackupStorageLocationBuilder{ + object: &velerov1api.BackupStorageLocation{ + TypeMeta: metav1.TypeMeta{ + APIVersion: velerov1api.SchemeGroupVersion.String(), + Kind: "BackupStorageLocation", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns, + Name: name, + }, + }, + } +} + +// Result returns the built BackupStorageLocation. +func (b *BackupStorageLocationBuilder) Result() *velerov1api.BackupStorageLocation { + return b.object +} + +// ObjectMeta applies functional options to the BackupStorageLocation's ObjectMeta. +func (b *BackupStorageLocationBuilder) ObjectMeta(opts ...ObjectMetaOpt) *BackupStorageLocationBuilder { + for _, opt := range opts { + opt(b.object) + } + + return b +} + +// Provider sets the BackupStorageLocation's provider. +func (b *BackupStorageLocationBuilder) Provider(name string) *BackupStorageLocationBuilder { + b.object.Spec.Provider = name + return b +} + +// Bucket sets the BackupStorageLocation's object storage bucket. +func (b *BackupStorageLocationBuilder) Bucket(val string) *BackupStorageLocationBuilder { + if b.object.Spec.StorageType.ObjectStorage == nil { + b.object.Spec.StorageType.ObjectStorage = new(velerov1api.ObjectStorageLocation) + } + b.object.Spec.ObjectStorage.Bucket = val + return b +} + +// Prefix sets the BackupStorageLocation's object storage prefix. +func (b *BackupStorageLocationBuilder) Prefix(val string) *BackupStorageLocationBuilder { + if b.object.Spec.StorageType.ObjectStorage == nil { + b.object.Spec.StorageType.ObjectStorage = new(velerov1api.ObjectStorageLocation) + } + b.object.Spec.ObjectStorage.Prefix = val + return b +} + +// AccessMode sets the BackupStorageLocation's access mode. +func (b *BackupStorageLocationBuilder) AccessMode(accessMode velerov1api.BackupStorageLocationAccessMode) *BackupStorageLocationBuilder { + b.object.Spec.AccessMode = accessMode + return b +} diff --git a/pkg/builder/config_map_builder.go b/pkg/builder/config_map_builder.go new file mode 100644 index 00000000000..49881151273 --- /dev/null +++ b/pkg/builder/config_map_builder.go @@ -0,0 +1,63 @@ +/* +Copyright 2019 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + corev1api "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ConfigMapBuilder builds ConfigMap objects. +type ConfigMapBuilder struct { + object *corev1api.ConfigMap +} + +// ForConfigMap is the constructor for a ConfigMapBuilder. +func ForConfigMap(ns, name string) *ConfigMapBuilder { + return &ConfigMapBuilder{ + object: &corev1api.ConfigMap{ + TypeMeta: metav1.TypeMeta{ + APIVersion: corev1api.SchemeGroupVersion.String(), + Kind: "ConfigMap", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns, + Name: name, + }, + }, + } +} + +// Result returns the built ConfigMap. +func (b *ConfigMapBuilder) Result() *corev1api.ConfigMap { + return b.object +} + +// ObjectMeta applies functional options to the ConfigMap's ObjectMeta. 
+func (b *ConfigMapBuilder) ObjectMeta(opts ...ObjectMetaOpt) *ConfigMapBuilder {
+ for _, opt := range opts {
+ opt(b.object)
+ }
+
+ return b
+}
+
+// Data sets the ConfigMap's data.
+func (b *ConfigMapBuilder) Data(vals ...string) *ConfigMapBuilder {
+ b.object.Data = setMapEntries(b.object.Data, vals...)
+ return b
+}
diff --git a/pkg/builder/container_builder.go b/pkg/builder/container_builder.go
new file mode 100644
index 00000000000..84834776346
--- /dev/null
+++ b/pkg/builder/container_builder.go
@@ -0,0 +1,68 @@
+/*
+Copyright 2019 the Velero contributors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package builder
+
+import (
+ corev1api "k8s.io/api/core/v1"
+)
+
+// ContainerBuilder builds Container objects.
+type ContainerBuilder struct {
+ object *corev1api.Container
+}
+
+// ForContainer is the constructor for ContainerBuilder.
+func ForContainer(name, image string) *ContainerBuilder {
+ return &ContainerBuilder{
+ object: &corev1api.Container{
+ Name: name,
+ Image: image,
+ },
+ }
+}
+
+// Result returns the built Container.
+func (b *ContainerBuilder) Result() *corev1api.Container {
+ return b.object
+}
+
+// Args sets the container's Args.
+func (b *ContainerBuilder) Args(args ...string) *ContainerBuilder {
+ b.object.Args = append(b.object.Args, args...)
+ return b
+}
+
+// VolumeMounts sets the container's VolumeMounts.
+func (b *ContainerBuilder) VolumeMounts(volumeMounts ...*corev1api.VolumeMount) *ContainerBuilder {
+ for _, v := range volumeMounts {
+ b.object.VolumeMounts = append(b.object.VolumeMounts, *v)
+ }
+ return b
+}
+
+// Resources sets the container's Resources.
+func (b *ContainerBuilder) Resources(resources *corev1api.ResourceRequirements) *ContainerBuilder {
+ b.object.Resources = *resources
+ return b
+}
+
+// Env appends to the container's Env.
+func (b *ContainerBuilder) Env(vars ...*corev1api.EnvVar) *ContainerBuilder {
+ for _, v := range vars {
+ b.object.Env = append(b.object.Env, *v)
+ }
+ return b
+}
diff --git a/pkg/builder/deployment_builder.go b/pkg/builder/deployment_builder.go
new file mode 100644
index 00000000000..294d2f70596
--- /dev/null
+++ b/pkg/builder/deployment_builder.go
@@ -0,0 +1,57 @@
+/*
+Copyright 2019 the Velero contributors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package builder
+
+import (
+ appsv1api "k8s.io/api/apps/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// DeploymentBuilder builds Deployment objects.
+type DeploymentBuilder struct {
+ object *appsv1api.Deployment
+}
+
+// ForDeployment is the constructor for a DeploymentBuilder.
+func ForDeployment(ns, name string) *DeploymentBuilder { + return &DeploymentBuilder{ + object: &appsv1api.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: appsv1api.SchemeGroupVersion.String(), + Kind: "Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns, + Name: name, + }, + }, + } +} + +// Result returns the built Deployment. +func (b *DeploymentBuilder) Result() *appsv1api.Deployment { + return b.object +} + +// ObjectMeta applies functional options to the Deployment's ObjectMeta. +func (b *DeploymentBuilder) ObjectMeta(opts ...ObjectMetaOpt) *DeploymentBuilder { + for _, opt := range opts { + opt(b.object) + } + + return b +} diff --git a/pkg/builder/namespace_builder.go b/pkg/builder/namespace_builder.go new file mode 100644 index 00000000000..6f6de9a2fdd --- /dev/null +++ b/pkg/builder/namespace_builder.go @@ -0,0 +1,62 @@ +/* +Copyright 2019 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + corev1api "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// NamespaceBuilder builds Namespace objects. +type NamespaceBuilder struct { + object *corev1api.Namespace +} + +// ForNamespace is the constructor for a NamespaceBuilder. +func ForNamespace(name string) *NamespaceBuilder { + return &NamespaceBuilder{ + object: &corev1api.Namespace{ + TypeMeta: metav1.TypeMeta{ + APIVersion: corev1api.SchemeGroupVersion.String(), + Kind: "Namespace", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + }, + } +} + +// Result returns the built Namespace. +func (b *NamespaceBuilder) Result() *corev1api.Namespace { + return b.object +} + +// ObjectMeta applies functional options to the Namespace's ObjectMeta. +func (b *NamespaceBuilder) ObjectMeta(opts ...ObjectMetaOpt) *NamespaceBuilder { + for _, opt := range opts { + opt(b.object) + } + + return b +} + +// Phase sets the namespace's phase +func (b *NamespaceBuilder) Phase(val corev1api.NamespacePhase) *NamespaceBuilder { + b.object.Status.Phase = val + return b +} diff --git a/pkg/builder/object_meta.go b/pkg/builder/object_meta.go new file mode 100644 index 00000000000..931da8034f9 --- /dev/null +++ b/pkg/builder/object_meta.go @@ -0,0 +1,130 @@ +/* +Copyright 2019 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +// ObjectMetaOpt is a functional option for ObjectMeta. 
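+//
+// Illustrative usage sketch (label and annotation values below are example
+// placeholders only); options compose with any builder's ObjectMeta method:
+//
+//	builder.ForPod("ns-1", "pod-1").
+//		ObjectMeta(builder.WithLabels("app", "web"), builder.WithAnnotations("owner", "dev")).
+//		Result()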
+type ObjectMetaOpt func(metav1.Object) + +// WithName is a functional option that applies the specified +// name to an object. +func WithName(val string) func(obj metav1.Object) { + return func(obj metav1.Object) { + obj.SetName(val) + } +} + +// WithLabels is a functional option that applies the specified +// label keys/values to an object. +func WithLabels(vals ...string) func(obj metav1.Object) { + return func(obj metav1.Object) { + obj.SetLabels(setMapEntries(obj.GetLabels(), vals...)) + } +} + +// WithLabelsMap is a functional option that applies the specified labels map to +// an object. +func WithLabelsMap(labels map[string]string) func(obj metav1.Object) { + return func(obj metav1.Object) { + objLabels := obj.GetLabels() + if objLabels == nil { + objLabels = make(map[string]string) + } + + // If the label already exists in the object, it will be overwritten + for k, v := range labels { + objLabels[k] = v + } + + obj.SetLabels(objLabels) + } +} + +// WithAnnotations is a functional option that applies the specified +// annotation keys/values to an object. +func WithAnnotations(vals ...string) func(obj metav1.Object) { + return func(obj metav1.Object) { + obj.SetAnnotations(setMapEntries(obj.GetAnnotations(), vals...)) + } +} + +func setMapEntries(m map[string]string, vals ...string) map[string]string { + if m == nil { + m = make(map[string]string) + } + + // if we don't have a value for every key, add an empty + // string at the end to serve as the value for the last + // key. + if len(vals)%2 != 0 { + vals = append(vals, "") + } + + for i := 0; i < len(vals); i += 2 { + key := vals[i] + val := vals[i+1] + + // If the label already exists in the object, it will be overwritten + m[key] = val + } + + return m +} + +// WithClusterName is a functional option that applies the specified +// cluster name to an object. +func WithClusterName(val string) func(obj metav1.Object) { + return func(obj metav1.Object) { + obj.SetClusterName(val) + } +} + +// WithFinalizers is a functional option that applies the specified +// finalizers to an object. +func WithFinalizers(vals ...string) func(obj metav1.Object) { + return func(obj metav1.Object) { + obj.SetFinalizers(vals) + } +} + +// WithDeletionTimestamp is a functional option that applies the specified +// deletion timestamp to an object. +func WithDeletionTimestamp(val time.Time) func(obj metav1.Object) { + return func(obj metav1.Object) { + obj.SetDeletionTimestamp(&metav1.Time{Time: val}) + } +} + +// WithUID is a functional option that applies the specified UID to an object. +func WithUID(val string) func(obj metav1.Object) { + return func(obj metav1.Object) { + obj.SetUID(types.UID(val)) + } +} + +// WithGenerateName is a functional option that applies the specified generate name to an object. +func WithGenerateName(val string) func(obj metav1.Object) { + return func(obj metav1.Object) { + obj.SetGenerateName(val) + } +} diff --git a/pkg/builder/persistent_volume_builder.go b/pkg/builder/persistent_volume_builder.go new file mode 100644 index 00000000000..648778bae08 --- /dev/null +++ b/pkg/builder/persistent_volume_builder.go @@ -0,0 +1,96 @@ +/* +Copyright 2019 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + corev1api "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// PersistentVolumeBuilder builds PersistentVolume objects. +type PersistentVolumeBuilder struct { + object *corev1api.PersistentVolume +} + +// ForPersistentVolume is the constructor for a PersistentVolumeBuilder. +func ForPersistentVolume(name string) *PersistentVolumeBuilder { + return &PersistentVolumeBuilder{ + object: &corev1api.PersistentVolume{ + TypeMeta: metav1.TypeMeta{ + APIVersion: corev1api.SchemeGroupVersion.String(), + Kind: "PersistentVolume", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + }, + } +} + +// Result returns the built PersistentVolume. +func (b *PersistentVolumeBuilder) Result() *corev1api.PersistentVolume { + return b.object +} + +// ObjectMeta applies functional options to the PersistentVolume's ObjectMeta. +func (b *PersistentVolumeBuilder) ObjectMeta(opts ...ObjectMetaOpt) *PersistentVolumeBuilder { + for _, opt := range opts { + opt(b.object) + } + + return b +} + +// ReclaimPolicy sets the PersistentVolume's reclaim policy. +func (b *PersistentVolumeBuilder) ReclaimPolicy(policy corev1api.PersistentVolumeReclaimPolicy) *PersistentVolumeBuilder { + b.object.Spec.PersistentVolumeReclaimPolicy = policy + return b +} + +// ClaimRef sets the PersistentVolume's claim ref. +func (b *PersistentVolumeBuilder) ClaimRef(ns, name string) *PersistentVolumeBuilder { + b.object.Spec.ClaimRef = &corev1api.ObjectReference{ + Namespace: ns, + Name: name, + } + return b +} + +// AWSEBSVolumeID sets the PersistentVolume's AWSElasticBlockStore volume ID. +func (b *PersistentVolumeBuilder) AWSEBSVolumeID(volumeID string) *PersistentVolumeBuilder { + if b.object.Spec.AWSElasticBlockStore == nil { + b.object.Spec.AWSElasticBlockStore = new(corev1api.AWSElasticBlockStoreVolumeSource) + } + b.object.Spec.AWSElasticBlockStore.VolumeID = volumeID + return b +} + +// CSI sets the PersistentVolume's CSI. +func (b *PersistentVolumeBuilder) CSI(driver, volumeHandle string) *PersistentVolumeBuilder { + if b.object.Spec.CSI == nil { + b.object.Spec.CSI = new(corev1api.CSIPersistentVolumeSource) + } + b.object.Spec.CSI.Driver = driver + b.object.Spec.CSI.VolumeHandle = volumeHandle + return b +} + +// StorageClass sets the PersistentVolume's storage class name. +func (b *PersistentVolumeBuilder) StorageClass(name string) *PersistentVolumeBuilder { + b.object.Spec.StorageClassName = name + return b +} diff --git a/pkg/builder/persistent_volume_claim_builder.go b/pkg/builder/persistent_volume_claim_builder.go new file mode 100644 index 00000000000..a96cdcfa0a2 --- /dev/null +++ b/pkg/builder/persistent_volume_claim_builder.go @@ -0,0 +1,69 @@ +/* +Copyright 2019 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + corev1api "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// PersistentVolumeClaimBuilder builds PersistentVolumeClaim objects. +type PersistentVolumeClaimBuilder struct { + object *corev1api.PersistentVolumeClaim +} + +// ForPersistentVolumeClaim is the constructor for a PersistentVolumeClaimBuilder. +func ForPersistentVolumeClaim(ns, name string) *PersistentVolumeClaimBuilder { + return &PersistentVolumeClaimBuilder{ + object: &corev1api.PersistentVolumeClaim{ + TypeMeta: metav1.TypeMeta{ + APIVersion: corev1api.SchemeGroupVersion.String(), + Kind: "PersistentVolumeClaim", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns, + Name: name, + }, + }, + } +} + +// Result returns the built PersistentVolumeClaim. +func (b *PersistentVolumeClaimBuilder) Result() *corev1api.PersistentVolumeClaim { + return b.object +} + +// ObjectMeta applies functional options to the PersistentVolumeClaim's ObjectMeta. +func (b *PersistentVolumeClaimBuilder) ObjectMeta(opts ...ObjectMetaOpt) *PersistentVolumeClaimBuilder { + for _, opt := range opts { + opt(b.object) + } + + return b +} + +// VolumeName sets the PersistentVolumeClaim's volume name. +func (b *PersistentVolumeClaimBuilder) VolumeName(name string) *PersistentVolumeClaimBuilder { + b.object.Spec.VolumeName = name + return b +} + +// StorageClass sets the PersistentVolumeClaim's storage class name. +func (b *PersistentVolumeClaimBuilder) StorageClass(name string) *PersistentVolumeClaimBuilder { + b.object.Spec.StorageClassName = &name + return b +} diff --git a/pkg/builder/pod_builder.go b/pkg/builder/pod_builder.go new file mode 100644 index 00000000000..77c6be04399 --- /dev/null +++ b/pkg/builder/pod_builder.go @@ -0,0 +1,78 @@ +/* +Copyright 2019 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + corev1api "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// PodBuilder builds Pod objects. +type PodBuilder struct { + object *corev1api.Pod +} + +// ForPod is the constructor for a PodBuilder. +func ForPod(ns, name string) *PodBuilder { + return &PodBuilder{ + object: &corev1api.Pod{ + TypeMeta: metav1.TypeMeta{ + APIVersion: corev1api.SchemeGroupVersion.String(), + Kind: "Pod", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns, + Name: name, + }, + }, + } +} + +// Result returns the built Pod. +func (b *PodBuilder) Result() *corev1api.Pod { + return b.object +} + +// ObjectMeta applies functional options to the Pod's ObjectMeta. 
+func (b *PodBuilder) ObjectMeta(opts ...ObjectMetaOpt) *PodBuilder { + for _, opt := range opts { + opt(b.object) + } + + return b +} + +// Volumes appends to the pod's volumes +func (b *PodBuilder) Volumes(volumes ...*corev1api.Volume) *PodBuilder { + for _, v := range volumes { + b.object.Spec.Volumes = append(b.object.Spec.Volumes, *v) + } + return b +} + +// NodeName sets the pod's node name +func (b *PodBuilder) NodeName(val string) *PodBuilder { + b.object.Spec.NodeName = val + return b +} + +func (b *PodBuilder) InitContainers(containers ...*corev1api.Container) *PodBuilder { + for _, c := range containers { + b.object.Spec.InitContainers = append(b.object.Spec.InitContainers, *c) + } + return b +} diff --git a/pkg/builder/pod_volume_backup_builder.go b/pkg/builder/pod_volume_backup_builder.go new file mode 100644 index 00000000000..08ffd169abb --- /dev/null +++ b/pkg/builder/pod_volume_backup_builder.go @@ -0,0 +1,82 @@ +/* +Copyright 2019 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" +) + +// PodVolumeBackupBuilder builds PodVolumeBackup objects +type PodVolumeBackupBuilder struct { + object *velerov1api.PodVolumeBackup +} + +// ForPodVolumeBackup is the constructor for a PodVolumeBackupBuilder. +func ForPodVolumeBackup(ns, name string) *PodVolumeBackupBuilder { + return &PodVolumeBackupBuilder{ + object: &velerov1api.PodVolumeBackup{ + TypeMeta: metav1.TypeMeta{ + APIVersion: velerov1api.SchemeGroupVersion.String(), + Kind: "PodVolumeBackup", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns, + Name: name, + }, + }, + } +} + +// Result returns the built PodVolumeBackup. +func (b *PodVolumeBackupBuilder) Result() *velerov1api.PodVolumeBackup { + return b.object +} + +// ObjectMeta applies functional options to the PodVolumeBackup's ObjectMeta. +func (b *PodVolumeBackupBuilder) ObjectMeta(opts ...ObjectMetaOpt) *PodVolumeBackupBuilder { + for _, opt := range opts { + opt(b.object) + } + + return b +} + +// Phase sets the PodVolumeBackup's phase. +func (b *PodVolumeBackupBuilder) Phase(phase velerov1api.PodVolumeBackupPhase) *PodVolumeBackupBuilder { + b.object.Status.Phase = phase + return b +} + +// SnapshotID sets the PodVolumeBackup's snapshot ID. +func (b *PodVolumeBackupBuilder) SnapshotID(snapshotID string) *PodVolumeBackupBuilder { + b.object.Status.SnapshotID = snapshotID + return b +} + +// PodName sets the name of the pod associated with this PodVolumeBackup. +func (b *PodVolumeBackupBuilder) PodName(name string) *PodVolumeBackupBuilder { + b.object.Spec.Pod.Name = name + return b +} + +// Volume sets the name of the volume associated with this PodVolumeBackup. 
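+//
+// Illustrative usage sketch (the namespace, name, pod, and volume values below
+// are example placeholders only):
+//
+//	builder.ForPodVolumeBackup("velero", "pvb-1").
+//		PodName("my-pod").
+//		Volume("data").
+//		Result()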
+func (b *PodVolumeBackupBuilder) Volume(volume string) *PodVolumeBackupBuilder { + b.object.Spec.Volume = volume + return b +} diff --git a/pkg/builder/restore_builder.go b/pkg/builder/restore_builder.go new file mode 100644 index 00000000000..e1a8fe0e8a2 --- /dev/null +++ b/pkg/builder/restore_builder.go @@ -0,0 +1,135 @@ +/* +Copyright 2019 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" +) + +// RestoreBuilder builds Restore objects. +type RestoreBuilder struct { + object *velerov1api.Restore +} + +// ForRestore is the constructor for a RestoreBuilder. +func ForRestore(ns, name string) *RestoreBuilder { + return &RestoreBuilder{ + object: &velerov1api.Restore{ + TypeMeta: metav1.TypeMeta{ + APIVersion: velerov1api.SchemeGroupVersion.String(), + Kind: "Restore", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns, + Name: name, + }, + }, + } +} + +// Result returns the built Restore. +func (b *RestoreBuilder) Result() *velerov1api.Restore { + return b.object +} + +// ObjectMeta applies functional options to the Restore's ObjectMeta. +func (b *RestoreBuilder) ObjectMeta(opts ...ObjectMetaOpt) *RestoreBuilder { + for _, opt := range opts { + opt(b.object) + } + + return b +} + +// Backup sets the Restore's backup name. +func (b *RestoreBuilder) Backup(name string) *RestoreBuilder { + b.object.Spec.BackupName = name + return b +} + +// Schedule sets the Restore's schedule name. +func (b *RestoreBuilder) Schedule(name string) *RestoreBuilder { + b.object.Spec.ScheduleName = name + return b +} + +// IncludedNamespaces appends to the Restore's included namespaces. +func (b *RestoreBuilder) IncludedNamespaces(namespaces ...string) *RestoreBuilder { + b.object.Spec.IncludedNamespaces = append(b.object.Spec.IncludedNamespaces, namespaces...) + return b +} + +// ExcludedNamespaces appends to the Restore's excluded namespaces. +func (b *RestoreBuilder) ExcludedNamespaces(namespaces ...string) *RestoreBuilder { + b.object.Spec.ExcludedNamespaces = append(b.object.Spec.ExcludedNamespaces, namespaces...) + return b +} + +// IncludedResources appends to the Restore's included resources. +func (b *RestoreBuilder) IncludedResources(resources ...string) *RestoreBuilder { + b.object.Spec.IncludedResources = append(b.object.Spec.IncludedResources, resources...) + return b +} + +// ExcludedResources appends to the Restore's excluded resources. +func (b *RestoreBuilder) ExcludedResources(resources ...string) *RestoreBuilder { + b.object.Spec.ExcludedResources = append(b.object.Spec.ExcludedResources, resources...) + return b +} + +// IncludeClusterResources sets the Restore's "include cluster resources" flag. +func (b *RestoreBuilder) IncludeClusterResources(val bool) *RestoreBuilder { + b.object.Spec.IncludeClusterResources = &val + return b +} + +// LabelSelector sets the Restore's label selector. 
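+//
+// Illustrative usage sketch (the backup name and label values below are example
+// placeholders only):
+//
+//	builder.ForRestore("velero", "restore-1").
+//		Backup("backup-1").
+//		LabelSelector(&metav1.LabelSelector{MatchLabels: map[string]string{"app": "web"}}).
+//		Result()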
+func (b *RestoreBuilder) LabelSelector(selector *metav1.LabelSelector) *RestoreBuilder { + b.object.Spec.LabelSelector = selector + return b +} + +// NamespaceMappings sets the Restore's namespace mappings. +func (b *RestoreBuilder) NamespaceMappings(mapping ...string) *RestoreBuilder { + if b.object.Spec.NamespaceMapping == nil { + b.object.Spec.NamespaceMapping = make(map[string]string) + } + + if len(mapping)%2 != 0 { + panic("mapping must contain an even number of values") + } + + for i := 0; i < len(mapping); i += 2 { + b.object.Spec.NamespaceMapping[mapping[i]] = mapping[i+1] + } + + return b +} + +// Phase sets the Restore's phase. +func (b *RestoreBuilder) Phase(phase velerov1api.RestorePhase) *RestoreBuilder { + b.object.Status.Phase = phase + return b +} + +// RestorePVs sets the Restore's restore PVs. +func (b *RestoreBuilder) RestorePVs(val bool) *RestoreBuilder { + b.object.Spec.RestorePVs = &val + return b +} diff --git a/pkg/builder/role_builder.go b/pkg/builder/role_builder.go new file mode 100644 index 00000000000..4c167b2bc77 --- /dev/null +++ b/pkg/builder/role_builder.go @@ -0,0 +1,57 @@ +/* +Copyright 2019 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + rbacv1api "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// RoleBuilder builds Role objects. +type RoleBuilder struct { + object *rbacv1api.Role +} + +// ForRole is the constructor for a RoleBuilder. +func ForRole(ns, name string) *RoleBuilder { + return &RoleBuilder{ + object: &rbacv1api.Role{ + TypeMeta: metav1.TypeMeta{ + APIVersion: rbacv1api.SchemeGroupVersion.String(), + Kind: "Role", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns, + Name: name, + }, + }, + } +} + +// Result returns the built Role. +func (b *RoleBuilder) Result() *rbacv1api.Role { + return b.object +} + +// ObjectMeta applies functional options to the Role's ObjectMeta. +func (b *RoleBuilder) ObjectMeta(opts ...ObjectMetaOpt) *RoleBuilder { + for _, opt := range opts { + opt(b.object) + } + + return b +} diff --git a/pkg/builder/schedule_builder.go b/pkg/builder/schedule_builder.go new file mode 100644 index 00000000000..9396d9c4645 --- /dev/null +++ b/pkg/builder/schedule_builder.go @@ -0,0 +1,91 @@ +/* +Copyright 2019 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" +) + +// ScheduleBuilder builds Schedule objects. 
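+//
+// Illustrative usage sketch (names and the cron expression below are example
+// placeholders only); a Schedule's template can seed a Backup via
+// BackupBuilder.FromSchedule:
+//
+//	schedule := builder.ForSchedule("velero", "daily").CronSchedule("0 1 * * *").Result()
+//	backup := builder.ForBackup("velero", "daily-20190101").FromSchedule(schedule).Result()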
+type ScheduleBuilder struct { + object *velerov1api.Schedule +} + +// ForSchedule is the constructor for a ScheduleBuilder. +func ForSchedule(ns, name string) *ScheduleBuilder { + return &ScheduleBuilder{ + object: &velerov1api.Schedule{ + TypeMeta: metav1.TypeMeta{ + APIVersion: velerov1api.SchemeGroupVersion.String(), + Kind: "Schedule", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns, + Name: name, + }, + }, + } +} + +// Result returns the built Schedule. +func (b *ScheduleBuilder) Result() *velerov1api.Schedule { + return b.object +} + +// ObjectMeta applies functional options to the Schedule's ObjectMeta. +func (b *ScheduleBuilder) ObjectMeta(opts ...ObjectMetaOpt) *ScheduleBuilder { + for _, opt := range opts { + opt(b.object) + } + + return b +} + +// Phase sets the Schedule's phase. +func (b *ScheduleBuilder) Phase(phase velerov1api.SchedulePhase) *ScheduleBuilder { + b.object.Status.Phase = phase + return b +} + +// ValidationError appends to the Schedule's validation errors. +func (b *ScheduleBuilder) ValidationError(err string) *ScheduleBuilder { + b.object.Status.ValidationErrors = append(b.object.Status.ValidationErrors, err) + return b +} + +// CronSchedule sets the Schedule's cron schedule. +func (b *ScheduleBuilder) CronSchedule(expression string) *ScheduleBuilder { + b.object.Spec.Schedule = expression + return b +} + +// LastBackupTime sets the Schedule's last backup time. +func (b *ScheduleBuilder) LastBackupTime(val string) *ScheduleBuilder { + t, _ := time.Parse("2006-01-02 15:04:05", val) + b.object.Status.LastBackup.Time = t + return b +} + +// Template sets the Schedule's template. +func (b *ScheduleBuilder) Template(spec velerov1api.BackupSpec) *ScheduleBuilder { + b.object.Spec.Template = spec + return b +} diff --git a/pkg/builder/secret_builder.go b/pkg/builder/secret_builder.go new file mode 100644 index 00000000000..405ef33cbd5 --- /dev/null +++ b/pkg/builder/secret_builder.go @@ -0,0 +1,57 @@ +/* +Copyright 2019 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + corev1api "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// SecretBuilder builds Secret objects. +type SecretBuilder struct { + object *corev1api.Secret +} + +// ForSecret is the constructor for a SecretBuilder. +func ForSecret(ns, name string) *SecretBuilder { + return &SecretBuilder{ + object: &corev1api.Secret{ + TypeMeta: metav1.TypeMeta{ + APIVersion: corev1api.SchemeGroupVersion.String(), + Kind: "Secret", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns, + Name: name, + }, + }, + } +} + +// Result returns the built Secret. +func (b *SecretBuilder) Result() *corev1api.Secret { + return b.object +} + +// ObjectMeta applies functional options to the Secret's ObjectMeta. 
+func (b *SecretBuilder) ObjectMeta(opts ...ObjectMetaOpt) *SecretBuilder {
+ for _, opt := range opts {
+ opt(b.object)
+ }
+
+ return b
+}
diff --git a/pkg/builder/server_status_request_builder.go b/pkg/builder/server_status_request_builder.go
new file mode 100644
index 00000000000..7b5d67234b8
--- /dev/null
+++ b/pkg/builder/server_status_request_builder.go
@@ -0,0 +1,84 @@
+/*
+Copyright 2018 the Velero contributors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package builder
+
+import (
+ "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ velerov1api "github.com/heptio/velero/pkg/apis/velero/v1"
+)
+
+// ServerStatusRequestBuilder builds ServerStatusRequest objects.
+type ServerStatusRequestBuilder struct {
+ object *velerov1api.ServerStatusRequest
+}
+
+// ForServerStatusRequest is the constructor for a ServerStatusRequestBuilder.
+func ForServerStatusRequest(ns, name string) *ServerStatusRequestBuilder {
+ return &ServerStatusRequestBuilder{
+ object: &velerov1api.ServerStatusRequest{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: velerov1api.SchemeGroupVersion.String(),
+ Kind: "ServerStatusRequest",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: ns,
+ Name: name,
+ },
+ },
+ }
+}
+
+// Result returns the built ServerStatusRequest.
+func (b *ServerStatusRequestBuilder) Result() *velerov1api.ServerStatusRequest {
+ return b.object
+}
+
+// ObjectMeta applies functional options to the ServerStatusRequest's ObjectMeta.
+func (b *ServerStatusRequestBuilder) ObjectMeta(opts ...ObjectMetaOpt) *ServerStatusRequestBuilder {
+ for _, opt := range opts {
+ opt(b.object)
+ }
+
+ return b
+}
+
+// Phase sets the ServerStatusRequest's phase.
+func (b *ServerStatusRequestBuilder) Phase(phase velerov1api.ServerStatusRequestPhase) *ServerStatusRequestBuilder {
+ b.object.Status.Phase = phase
+ return b
+}
+
+// ProcessedTimestamp sets the ServerStatusRequest's processed timestamp.
+func (b *ServerStatusRequestBuilder) ProcessedTimestamp(time time.Time) *ServerStatusRequestBuilder {
+ b.object.Status.ProcessedTimestamp.Time = time
+ return b
+}
+
+// ServerVersion sets the ServerStatusRequest's server version.
+func (b *ServerStatusRequestBuilder) ServerVersion(version string) *ServerStatusRequestBuilder {
+ b.object.Status.ServerVersion = version
+ return b
+}
+
+// Plugins sets the ServerStatusRequest's plugins.
+func (b *ServerStatusRequestBuilder) Plugins(plugins []velerov1api.PluginInfo) *ServerStatusRequestBuilder {
+ b.object.Status.Plugins = plugins
+ return b
+}
diff --git a/pkg/builder/service_account_builder.go b/pkg/builder/service_account_builder.go
new file mode 100644
index 00000000000..7042c560412
--- /dev/null
+++ b/pkg/builder/service_account_builder.go
@@ -0,0 +1,57 @@
+/*
+Copyright 2019 the Velero contributors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + corev1api "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ServiceAccountBuilder builds ServiceAccount objects. +type ServiceAccountBuilder struct { + object *corev1api.ServiceAccount +} + +// ForServiceAccount is the constructor for a ServiceAccountBuilder. +func ForServiceAccount(ns, name string) *ServiceAccountBuilder { + return &ServiceAccountBuilder{ + object: &corev1api.ServiceAccount{ + TypeMeta: metav1.TypeMeta{ + APIVersion: corev1api.SchemeGroupVersion.String(), + Kind: "ServiceAccount", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns, + Name: name, + }, + }, + } +} + +// Result returns the built ServiceAccount. +func (b *ServiceAccountBuilder) Result() *corev1api.ServiceAccount { + return b.object +} + +// ObjectMeta applies functional options to the ServiceAccount's ObjectMeta. +func (b *ServiceAccountBuilder) ObjectMeta(opts ...ObjectMetaOpt) *ServiceAccountBuilder { + for _, opt := range opts { + opt(b.object) + } + + return b +} diff --git a/pkg/builder/storage_class_builder.go b/pkg/builder/storage_class_builder.go new file mode 100644 index 00000000000..99454333258 --- /dev/null +++ b/pkg/builder/storage_class_builder.go @@ -0,0 +1,56 @@ +/* +Copyright 2019 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + storagev1api "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// StorageClassBuilder builds StorageClass objects. +type StorageClassBuilder struct { + object *storagev1api.StorageClass +} + +// ForStorageClass is the constructor for a StorageClassBuilder. +func ForStorageClass(name string) *StorageClassBuilder { + return &StorageClassBuilder{ + object: &storagev1api.StorageClass{ + TypeMeta: metav1.TypeMeta{ + APIVersion: storagev1api.SchemeGroupVersion.String(), + Kind: "StorageClass", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + }, + } +} + +// Result returns the built StorageClass. +func (b *StorageClassBuilder) Result() *storagev1api.StorageClass { + return b.object +} + +// ObjectMeta applies functional options to the StorageClass's ObjectMeta. +func (b *StorageClassBuilder) ObjectMeta(opts ...ObjectMetaOpt) *StorageClassBuilder { + for _, opt := range opts { + opt(b.object) + } + + return b +} diff --git a/pkg/builder/volume_builder.go b/pkg/builder/volume_builder.go new file mode 100644 index 00000000000..cdc373aa4a5 --- /dev/null +++ b/pkg/builder/volume_builder.go @@ -0,0 +1,56 @@ +/* +Copyright 2019 the Velero contributors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + corev1api "k8s.io/api/core/v1" +) + +// VolumeBuilder builds Volume objects. +type VolumeBuilder struct { + object *corev1api.Volume +} + +// ForVolume is the constructor for a VolumeBuilder. +func ForVolume(name string) *VolumeBuilder { + return &VolumeBuilder{ + object: &corev1api.Volume{ + Name: name, + }, + } +} + +// Result returns the built Volume. +func (b *VolumeBuilder) Result() *corev1api.Volume { + return b.object +} + +// PersistentVolumeClaimSource sets the Volume's persistent volume claim source. +func (b *VolumeBuilder) PersistentVolumeClaimSource(claimName string) *VolumeBuilder { + b.object.PersistentVolumeClaim = &corev1api.PersistentVolumeClaimVolumeSource{ + ClaimName: claimName, + } + return b +} + +// CSISource sets the Volume's CSI source. +func (b *VolumeBuilder) CSISource(driver string) *VolumeBuilder { + b.object.CSI = &corev1api.CSIVolumeSource{ + Driver: driver, + } + return b +} diff --git a/pkg/builder/volume_mount_builder.go b/pkg/builder/volume_mount_builder.go new file mode 100644 index 00000000000..9c794f5a127 --- /dev/null +++ b/pkg/builder/volume_mount_builder.go @@ -0,0 +1,41 @@ +/* +Copyright 2019 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + corev1api "k8s.io/api/core/v1" +) + +// VolumeMountBuilder builds VolumeMount objects. +type VolumeMountBuilder struct { + object *corev1api.VolumeMount +} + +// ForVolumeMount is the constructor for a VolumeMountBuilder. +func ForVolumeMount(name, mountPath string) *VolumeMountBuilder { + return &VolumeMountBuilder{ + object: &corev1api.VolumeMount{ + Name: name, + MountPath: mountPath, + }, + } +} + +// Result returns the built VolumeMount. +func (b *VolumeMountBuilder) Result() *corev1api.VolumeMount { + return b.object +} diff --git a/pkg/builder/volume_snapshot_location_builder.go b/pkg/builder/volume_snapshot_location_builder.go new file mode 100644 index 00000000000..4c028b2b6d9 --- /dev/null +++ b/pkg/builder/volume_snapshot_location_builder.go @@ -0,0 +1,64 @@ +/* +Copyright 2019 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package builder
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ velerov1api "github.com/heptio/velero/pkg/apis/velero/v1"
+)
+
+// VolumeSnapshotLocationBuilder builds VolumeSnapshotLocation objects.
+type VolumeSnapshotLocationBuilder struct {
+ object *velerov1api.VolumeSnapshotLocation
+}
+
+// ForVolumeSnapshotLocation is the constructor for a VolumeSnapshotLocationBuilder.
+func ForVolumeSnapshotLocation(ns, name string) *VolumeSnapshotLocationBuilder {
+ return &VolumeSnapshotLocationBuilder{
+ object: &velerov1api.VolumeSnapshotLocation{
+ TypeMeta: metav1.TypeMeta{
+ APIVersion: velerov1api.SchemeGroupVersion.String(),
+ Kind: "VolumeSnapshotLocation",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: ns,
+ Name: name,
+ },
+ },
+ }
+}
+
+// Result returns the built VolumeSnapshotLocation.
+func (b *VolumeSnapshotLocationBuilder) Result() *velerov1api.VolumeSnapshotLocation {
+ return b.object
+}
+
+// ObjectMeta applies functional options to the VolumeSnapshotLocation's ObjectMeta.
+func (b *VolumeSnapshotLocationBuilder) ObjectMeta(opts ...ObjectMetaOpt) *VolumeSnapshotLocationBuilder {
+ for _, opt := range opts {
+ opt(b.object)
+ }
+
+ return b
+}
+
+// Provider sets the VolumeSnapshotLocation's provider.
+func (b *VolumeSnapshotLocationBuilder) Provider(name string) *VolumeSnapshotLocationBuilder {
+ b.object.Spec.Provider = name
+ return b
+}
diff --git a/pkg/client/client.go b/pkg/client/client.go
index d158b31e3d8..28a77359011 100644
--- a/pkg/client/client.go
+++ b/pkg/client/client.go
@@ -1,5 +1,5 @@
 /*
-Copyright 2017 the Velero contributors.
+Copyright 2017, 2019 the Velero contributors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -29,12 +29,20 @@ import (
 
 // Config returns a *rest.Config, using either the kubeconfig (if specified) or an in-cluster
 // configuration.
-func Config(kubeconfig, kubecontext, baseName string) (*rest.Config, error) {
+func Config(kubeconfig, kubecontext, baseName string, qps float32, burst int) (*rest.Config, error) {
 loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
 loadingRules.ExplicitPath = kubeconfig
 configOverrides := &clientcmd.ConfigOverrides{CurrentContext: kubecontext}
 kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides)
 clientConfig, err := kubeConfig.ClientConfig()
 if err != nil {
 return nil, errors.WithStack(err)
 }
+
+ if qps > 0.0 {
+ clientConfig.QPS = qps
+ }
+
+ if burst > 0 {
+ clientConfig.Burst = burst
+ }
diff --git a/pkg/client/factory.go b/pkg/client/factory.go
index 9ecad61d0ca..aad003399f0 100644
--- a/pkg/client/factory.go
+++ b/pkg/client/factory.go
@@ -1,5 +1,5 @@
 /*
-Copyright 2017 the Velero contributors.
+Copyright 2017, 2019 the Velero contributors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -22,7 +22,9 @@ import ( "github.com/pkg/errors" "github.com/spf13/pflag" + "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" v1 "github.com/heptio/velero/pkg/apis/velero/v1" clientset "github.com/heptio/velero/pkg/generated/clientset/versioned" @@ -38,6 +40,20 @@ type Factory interface { // KubeClient returns a Kubernetes client. It uses the following priority to specify the cluster // configuration: --kubeconfig flag, KUBECONFIG environment variable, in-cluster configuration. KubeClient() (kubernetes.Interface, error) + // DynamicClient returns a Kubernetes dynamic client. It uses the following priority to specify the cluster + // configuration: --kubeconfig flag, KUBECONFIG environment variable, in-cluster configuration. + DynamicClient() (dynamic.Interface, error) + // SetBasename changes the basename for an already-constructed client. + // This is useful for generating clients that require a different user-agent string below the root `velero` + // command, such as the server subcommand. + SetBasename(string) + // SetClientQPS sets the Queries Per Second for a client. + SetClientQPS(float32) + // SetClientBurst sets the Burst for a client. + SetClientBurst(int) + // ClientConfig returns a rest.Config struct used for client-go clients. + ClientConfig() (*rest.Config, error) + // Namespace returns the namespace which the Factory will create clients for. Namespace() string } @@ -47,6 +63,8 @@ type factory struct { kubecontext string baseName string namespace string + clientQPS float32 + clientBurst int } // NewFactory returns a Factory. @@ -56,12 +74,19 @@ func NewFactory(baseName string) Factory { baseName: baseName, } + f.namespace = os.Getenv("VELERO_NAMESPACE") + if config, err := LoadConfig(); err == nil { - f.namespace = config[ConfigKeyNamespace] + // Only override the namespace if the config key is set + if _, ok := config[ConfigKeyNamespace]; ok { + f.namespace = config[ConfigKeyNamespace] + } } else { fmt.Fprintf(os.Stderr, "WARNING: error retrieving namespace from config file: %v\n", err) } + // We didn't get the namespace via env var or config file, so use the default. + // Command line flags will override when BindFlags is called. 
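+ // Namespace precedence, lowest to highest: the v1.DefaultNamespace fallback
+ // below, the VELERO_NAMESPACE environment variable, the client config file,
+ // and finally the --namespace flag once BindFlags is called.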
 if f.namespace == "" {
 f.namespace = v1.DefaultNamespace
 }
@@ -77,8 +102,12 @@ func (f *factory) BindFlags(flags *pflag.FlagSet) {
 flags.AddFlagSet(f.flags)
 }
 
+func (f *factory) ClientConfig() (*rest.Config, error) {
+ return Config(f.kubeconfig, f.kubecontext, f.baseName, f.clientQPS, f.clientBurst)
+}
+
 func (f *factory) Client() (clientset.Interface, error) {
- clientConfig, err := Config(f.kubeconfig, f.kubecontext, f.baseName)
+ clientConfig, err := f.ClientConfig()
 if err != nil {
 return nil, err
 }
@@ -91,7 +120,7 @@ func (f *factory) Client() (clientset.Interface, error) {
 }
 
 func (f *factory) KubeClient() (kubernetes.Interface, error) {
- clientConfig, err := Config(f.kubeconfig, f.kubecontext, f.baseName)
+ clientConfig, err := f.ClientConfig()
 if err != nil {
 return nil, err
 }
@@ -103,6 +132,30 @@ func (f *factory) KubeClient() (kubernetes.Interface, error) {
 return kubeClient, nil
 }
 
+func (f *factory) DynamicClient() (dynamic.Interface, error) {
+ clientConfig, err := f.ClientConfig()
+ if err != nil {
+ return nil, err
+ }
+ dynamicClient, err := dynamic.NewForConfig(clientConfig)
+ if err != nil {
+ return nil, errors.WithStack(err)
+ }
+ return dynamicClient, nil
+}
+
+func (f *factory) SetBasename(name string) {
+ f.baseName = name
+}
+
+func (f *factory) SetClientQPS(qps float32) {
+ f.clientQPS = qps
+}
+
+func (f *factory) SetClientBurst(burst int) {
+ f.clientBurst = burst
+}
+
 func (f *factory) Namespace() string {
 return f.namespace
 }
diff --git a/pkg/client/factory_test.go b/pkg/client/factory_test.go
new file mode 100644
index 00000000000..475269eb3ae
--- /dev/null
+++ b/pkg/client/factory_test.go
@@ -0,0 +1,61 @@
+/*
+Copyright 2019 the Velero contributors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package client
+
+import (
+ "os"
+ "testing"
+
+ "github.com/spf13/pflag"
+ "github.com/stretchr/testify/assert"
+)
+
+// TestFactory tests the client.Factory interface.
+func TestFactory(t *testing.T) {
+ // Velero client configuration is currently omitted due to requiring a
+ // test filesystem in pkg/test. This causes an import cycle as pkg/test
+ // uses pkg/client's interfaces to implement fakes.
+
+ // Env variable should set the namespace if no config or argument is used
+ os.Setenv("VELERO_NAMESPACE", "env-velero")
+ f := NewFactory("velero")
+
+ assert.Equal(t, "env-velero", f.Namespace())
+
+ os.Unsetenv("VELERO_NAMESPACE")
+
+ // Argument should change the namespace
+ f = NewFactory("velero")
+ s := "flag-velero"
+ flags := new(pflag.FlagSet)
+
+ f.BindFlags(flags)
+
+ flags.Parse([]string{"--namespace", s})
+
+ assert.Equal(t, s, f.Namespace())
+
+ // An argument overrides the env variable if both are set.
+ os.Setenv("VELERO_NAMESPACE", "env-velero") + f = NewFactory("velero") + flags = new(pflag.FlagSet) + + f.BindFlags(flags) + flags.Parse([]string{"--namespace", s}) + assert.Equal(t, s, f.Namespace()) + + os.Unsetenv("VELERO_NAMESPACE") +} diff --git a/pkg/cloudprovider/aws/object_store_test.go b/pkg/cloudprovider/aws/object_store_test.go index 0dbc0b5beb2..e66ebbe3cc5 100644 --- a/pkg/cloudprovider/aws/object_store_test.go +++ b/pkg/cloudprovider/aws/object_store_test.go @@ -28,7 +28,7 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "github.com/heptio/velero/pkg/util/test" + "github.com/heptio/velero/pkg/test" ) func TestIsValidSignatureVersion(t *testing.T) { diff --git a/pkg/cloudprovider/config.go b/pkg/cloudprovider/config.go index 4a02419c38b..7e604feef34 100644 --- a/pkg/cloudprovider/config.go +++ b/pkg/cloudprovider/config.go @@ -23,11 +23,11 @@ import ( // ValidateObjectStoreConfigKeys ensures that an object store's config // is valid by making sure each `config` key is in the `validKeys` list. -// The special key "bucket" is always considered valid. +// The special keys "bucket" and "prefix" are always considered valid. func ValidateObjectStoreConfigKeys(config map[string]string, validKeys ...string) error { - // `bucket` is automatically added to all object store config by - // velero, so add it as a valid key. - return validateConfigKeys(config, append(validKeys, "bucket")...) + // `bucket` and `prefix` are automatically added to all object + // store config by velero, so add them as valid keys. + return validateConfigKeys(config, append(validKeys, "bucket", "prefix")...) } // ValidateVolumeSnapshotterConfigKeys ensures that a volume snapshotter's diff --git a/pkg/cloudprovider/gcp/object_store_test.go b/pkg/cloudprovider/gcp/object_store_test.go index e9ca7c53a04..b483e1bc641 100644 --- a/pkg/cloudprovider/gcp/object_store_test.go +++ b/pkg/cloudprovider/gcp/object_store_test.go @@ -25,7 +25,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - velerotest "github.com/heptio/velero/pkg/util/test" + velerotest "github.com/heptio/velero/pkg/test" ) type mockWriteCloser struct { diff --git a/pkg/cloudprovider/gcp/volume_snapshotter_test.go b/pkg/cloudprovider/gcp/volume_snapshotter_test.go index c8d77f598b1..f14cdde72ba 100644 --- a/pkg/cloudprovider/gcp/volume_snapshotter_test.go +++ b/pkg/cloudprovider/gcp/volume_snapshotter_test.go @@ -27,7 +27,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - velerotest "github.com/heptio/velero/pkg/util/test" + velerotest "github.com/heptio/velero/pkg/test" ) func TestGetVolumeID(t *testing.T) { diff --git a/pkg/cloudprovider/in_memory_object_store.go b/pkg/cloudprovider/in_memory_object_store.go index 66be661aa25..c2249b33c77 100644 --- a/pkg/cloudprovider/in_memory_object_store.go +++ b/pkg/cloudprovider/in_memory_object_store.go @@ -76,11 +76,8 @@ func (o *InMemoryObjectStore) ObjectExists(bucket, key string) (bool, error) { return false, errors.New("bucket not found") } - if _, ok = bucketData[key]; !ok { - return false, errors.New("key not found") - } - - return true, nil + _, ok = bucketData[key] + return ok, nil } func (o *InMemoryObjectStore) GetObject(bucket, key string) (io.ReadCloser, error) { diff --git a/pkg/cmd/cli/backup/create.go b/pkg/cmd/cli/backup/create.go index 61bc105b1b3..aa46bdeced4 100644 --- a/pkg/cmd/cli/backup/create.go +++ b/pkg/cmd/cli/backup/create.go @@ -25,7 +25,8 @@ import ( 
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/cache" - api "github.com/heptio/velero/pkg/apis/velero/v1" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/builder" "github.com/heptio/velero/pkg/client" "github.com/heptio/velero/pkg/cmd" "github.com/heptio/velero/pkg/cmd/util/flag" @@ -34,6 +35,8 @@ import ( v1 "github.com/heptio/velero/pkg/generated/informers/externalversions/velero/v1" ) +const DefaultBackupTTL time.Duration = 30 * 24 * time.Hour + func NewCreateCommand(f client.Factory, use string) *cobra.Command { o := NewCreateOptions() @@ -64,6 +67,7 @@ func NewCreateCommand(f client.Factory, use string) *cobra.Command { o.BindFlags(c.Flags()) o.BindWait(c.Flags()) + o.BindFromSchedule(c.Flags()) output.BindFlags(c.Flags()) output.ClearOutputFlagDefault(c) @@ -84,13 +88,14 @@ type CreateOptions struct { Wait bool StorageLocation string SnapshotLocations []string + FromSchedule string client veleroclient.Interface } func NewCreateOptions() *CreateOptions { return &CreateOptions{ - TTL: 30 * 24 * time.Hour, + TTL: DefaultBackupTTL, IncludeNamespaces: flag.NewStringArray("*"), Labels: flag.NewMap(), SnapshotVolumes: flag.NewOptionalBool(nil), @@ -123,6 +128,12 @@ func (o *CreateOptions) BindWait(flags *pflag.FlagSet) { flags.BoolVarP(&o.Wait, "wait", "w", o.Wait, "wait for the operation to complete") } +// BindFromSchedule binds the from-schedule flag separately so it is not called +// by other create commands that reuse CreateOptions's BindFlags method. +func (o *CreateOptions) BindFromSchedule(flags *pflag.FlagSet) { + flags.StringVar(&o.FromSchedule, "from-schedule", "", "create a backup from the template of an existing schedule. Cannot be used with any other filters.") +} + func (o *CreateOptions) Validate(c *cobra.Command, args []string, f client.Factory) error { if err := output.ValidateFlags(c); err != nil { return err @@ -154,44 +165,33 @@ func (o *CreateOptions) Complete(args []string, f client.Factory) error { } func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error { - backup := &api.Backup{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: f.Namespace(), - Name: o.Name, - Labels: o.Labels.Data(), - }, - Spec: api.BackupSpec{ - IncludedNamespaces: o.IncludeNamespaces, - ExcludedNamespaces: o.ExcludeNamespaces, - IncludedResources: o.IncludeResources, - ExcludedResources: o.ExcludeResources, - LabelSelector: o.Selector.LabelSelector, - SnapshotVolumes: o.SnapshotVolumes.Value, - TTL: metav1.Duration{Duration: o.TTL}, - IncludeClusterResources: o.IncludeClusterResources.Value, - StorageLocation: o.StorageLocation, - VolumeSnapshotLocations: o.SnapshotLocations, - }, + backup, err := o.BuildBackup(f.Namespace()) + if err != nil { + return err } if printed, err := output.PrintWithFormat(c, backup); printed || err != nil { return err } + if o.FromSchedule != "" { + fmt.Println("Creating backup from schedule, all other filters are ignored.") + } + var backupInformer cache.SharedIndexInformer - var updates chan *api.Backup + var updates chan *velerov1api.Backup if o.Wait { stop := make(chan struct{}) defer close(stop) - updates = make(chan *api.Backup) + updates = make(chan *velerov1api.Backup) backupInformer = v1.NewBackupInformer(o.client, f.Namespace(), 0, nil) backupInformer.AddEventHandler( cache.FilteringResourceEventHandler{ FilterFunc: func(obj interface{}) bool { - backup, ok := obj.(*api.Backup) + backup, ok := obj.(*velerov1api.Backup) if !ok { return false } @@ -199,14 +199,14 @@ func (o 
*CreateOptions) Run(c *cobra.Command, f client.Factory) error { }, Handler: cache.ResourceEventHandlerFuncs{ UpdateFunc: func(_, obj interface{}) { - backup, ok := obj.(*api.Backup) + backup, ok := obj.(*velerov1api.Backup) if !ok { return } updates <- backup }, DeleteFunc: func(obj interface{}) { - backup, ok := obj.(*api.Backup) + backup, ok := obj.(*velerov1api.Backup) if !ok { return } @@ -218,7 +218,7 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error { go backupInformer.Run(stop) } - _, err := o.client.VeleroV1().Backups(backup.Namespace).Create(backup) + _, err = o.client.VeleroV1().Backups(backup.Namespace).Create(backup) if err != nil { return err } @@ -239,7 +239,7 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error { return nil } - if backup.Status.Phase != api.BackupPhaseNew && backup.Status.Phase != api.BackupPhaseInProgress { + if backup.Status.Phase != velerov1api.BackupPhaseNew && backup.Status.Phase != velerov1api.BackupPhaseInProgress { fmt.Printf("\nBackup completed with status: %s. You may check for more information using the commands `velero backup describe %s` and `velero backup logs %s`.\n", backup.Status.Phase, backup.Name, backup.Name) return nil } @@ -253,3 +253,35 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error { return nil } + +func (o *CreateOptions) BuildBackup(namespace string) (*velerov1api.Backup, error) { + backupBuilder := builder.ForBackup(namespace, o.Name) + + if o.FromSchedule != "" { + schedule, err := o.client.VeleroV1().Schedules(namespace).Get(o.FromSchedule, metav1.GetOptions{}) + if err != nil { + return nil, err + } + backupBuilder.FromSchedule(schedule) + } else { + backupBuilder. + IncludedNamespaces(o.IncludeNamespaces...). + ExcludedNamespaces(o.ExcludeNamespaces...). + IncludedResources(o.IncludeResources...). + ExcludedResources(o.ExcludeResources...). + LabelSelector(o.Selector.LabelSelector). + TTL(o.TTL). + StorageLocation(o.StorageLocation). + VolumeSnapshotLocations(o.SnapshotLocations...) + + if o.SnapshotVolumes.Value != nil { + backupBuilder.SnapshotVolumes(*o.SnapshotVolumes.Value) + } + if o.IncludeClusterResources.Value != nil { + backupBuilder.IncludeClusterResources(*o.IncludeClusterResources.Value) + } + } + + backup := backupBuilder.ObjectMeta(builder.WithLabelsMap(o.Labels.Data())).Result() + return backup, nil +} diff --git a/pkg/cmd/cli/backup/create_test.go b/pkg/cmd/cli/backup/create_test.go new file mode 100644 index 00000000000..4797a8d868e --- /dev/null +++ b/pkg/cmd/cli/backup/create_test.go @@ -0,0 +1,88 @@ +/* +Copyright 2019 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package backup
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	velerov1api "github.com/heptio/velero/pkg/apis/velero/v1"
+	"github.com/heptio/velero/pkg/builder"
+	"github.com/heptio/velero/pkg/generated/clientset/versioned/fake"
+)
+
+const testNamespace = "velero"
+
+func TestCreateOptions_BuildBackup(t *testing.T) {
+	o := NewCreateOptions()
+	o.Labels.Set("velero.io/test=true")
+
+	backup, err := o.BuildBackup(testNamespace)
+	assert.NoError(t, err)
+
+	assert.Equal(t, velerov1api.BackupSpec{
+		TTL:                     metav1.Duration{Duration: o.TTL},
+		IncludedNamespaces:      []string(o.IncludeNamespaces),
+		SnapshotVolumes:         o.SnapshotVolumes.Value,
+		IncludeClusterResources: o.IncludeClusterResources.Value,
+	}, backup.Spec)
+
+	assert.Equal(t, map[string]string{
+		"velero.io/test": "true",
+	}, backup.GetLabels())
+}
+
+func TestCreateOptions_BuildBackupFromSchedule(t *testing.T) {
+	o := NewCreateOptions()
+	o.FromSchedule = "test"
+	o.client = fake.NewSimpleClientset()
+
+	t.Run("nonexistent schedule", func(t *testing.T) {
+		_, err := o.BuildBackup(testNamespace)
+		assert.Error(t, err)
+	})
+
+	expectedBackupSpec := builder.ForBackup("test", testNamespace).IncludedNamespaces("test").Result().Spec
+	schedule := builder.ForSchedule(testNamespace, "test").Template(expectedBackupSpec).ObjectMeta(builder.WithLabels("velero.io/test", "true")).Result()
+	o.client.VeleroV1().Schedules(testNamespace).Create(schedule)
+
+	t.Run("existing schedule", func(t *testing.T) {
+		backup, err := o.BuildBackup(testNamespace)
+		assert.NoError(t, err)
+
+		assert.Equal(t, expectedBackupSpec, backup.Spec)
+		assert.Equal(t, map[string]string{
+			"velero.io/test":              "true",
+			velerov1api.ScheduleNameLabel: "test",
+		}, backup.GetLabels())
+	})
+
+	t.Run("command line labels take precedence over schedule labels", func(t *testing.T) {
+		o.Labels.Set("velero.io/test=yes,custom-label=true")
+		backup, err := o.BuildBackup(testNamespace)
+		assert.NoError(t, err)
+
+		assert.Equal(t, expectedBackupSpec, backup.Spec)
+		assert.Equal(t, map[string]string{
+			"velero.io/test":              "yes",
+			velerov1api.ScheduleNameLabel: "test",
+			"custom-label":                "true",
+		}, backup.GetLabels())
+	})
+}
diff --git a/pkg/cmd/cli/install/install.go b/pkg/cmd/cli/install/install.go
index 8b4374ff086..190dde93b4d 100644
--- a/pkg/cmd/cli/install/install.go
+++ b/pkg/cmd/cli/install/install.go
@@ -26,14 +26,14 @@ import (
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"
-	"k8s.io/client-go/dynamic"
 
-	api "github.com/heptio/velero/pkg/apis/velero/v1"
+	velerov1api "github.com/heptio/velero/pkg/apis/velero/v1"
 	"github.com/heptio/velero/pkg/client"
 	"github.com/heptio/velero/pkg/cmd"
 	"github.com/heptio/velero/pkg/cmd/util/flag"
 	"github.com/heptio/velero/pkg/cmd/util/output"
 	"github.com/heptio/velero/pkg/install"
+	kubeutil "github.com/heptio/velero/pkg/util/kube"
 )
 
 // InstallOptions collects all the options for installing Velero into a Kubernetes cluster.
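The hunks below add per-pod CPU and memory flags whose values are handed to kubeutil.ParseResourceRequirements; that helper's body is not part of this diff. As a rough sketch of what such a parser plausibly does (the lowercase function name, the error wrapping, and the omission of the documented "0 means unbounded" handling are assumptions here, not the PR's implementation):

package example

import (
	"github.com/pkg/errors"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// parseResourceRequirements turns CPU/memory request and limit strings into a
// corev1.ResourceRequirements. Sketch only; the real helper may differ.
func parseResourceRequirements(cpuRequest, memRequest, cpuLimit, memLimit string) (corev1.ResourceRequirements, error) {
	req := corev1.ResourceRequirements{
		Requests: corev1.ResourceList{},
		Limits:   corev1.ResourceList{},
	}

	// parse converts one quantity string and stores it under the given resource name.
	parse := func(val string, name corev1.ResourceName, list corev1.ResourceList) error {
		q, err := resource.ParseQuantity(val)
		if err != nil {
			return errors.Wrapf(err, "couldn't parse %s quantity %q", name, val)
		}
		list[name] = q
		return nil
	}

	if err := parse(cpuRequest, corev1.ResourceCPU, req.Requests); err != nil {
		return req, err
	}
	if err := parse(memRequest, corev1.ResourceMemory, req.Requests); err != nil {
		return req, err
	}
	if err := parse(cpuLimit, corev1.ResourceCPU, req.Limits); err != nil {
		return req, err
	}
	if err := parse(memLimit, corev1.ResourceMemory, req.Limits); err != nil {
		return req, err
	}

	return req, nil
}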
@@ -43,8 +43,18 @@ type InstallOptions struct { BucketName string Prefix string ProviderName string + PodAnnotations flag.Map + VeleroPodCPURequest string + VeleroPodMemRequest string + VeleroPodCPULimit string + VeleroPodMemLimit string + ResticPodCPURequest string + ResticPodMemRequest string + ResticPodCPULimit string + ResticPodMemLimit string RestoreOnly bool SecretFile string + NoSecret bool DryRun bool BackupStorageConfig flag.Map VolumeSnapshotConfig flag.Map @@ -57,10 +67,19 @@ type InstallOptions struct { func (o *InstallOptions) BindFlags(flags *pflag.FlagSet) { flags.StringVar(&o.ProviderName, "provider", o.ProviderName, "provider name for backup and volume storage") flags.StringVar(&o.BucketName, "bucket", o.BucketName, "name of the object storage bucket where backups should be stored") - flags.StringVar(&o.SecretFile, "secret-file", o.SecretFile, "file containing credentials for backup and volume provider") + flags.StringVar(&o.SecretFile, "secret-file", o.SecretFile, "file containing credentials for backup and volume provider. If not specified, --no-secret must be used for confirmation. Optional.") + flags.BoolVar(&o.NoSecret, "no-secret", o.NoSecret, "flag indicating if a secret should be created. Must be used as confirmation if --secret-file is not provided. Optional.") flags.StringVar(&o.Image, "image", o.Image, "image to use for the Velero and restic server pods. Optional.") flags.StringVar(&o.Prefix, "prefix", o.Prefix, "prefix under which all Velero data should be stored within the bucket. Optional.") - flags.StringVar(&o.Namespace, "namespace", o.Namespace, "namespace to install Velero and associated data into. Optional.") + flags.Var(&o.PodAnnotations, "pod-annotations", "annotations to add to the Velero and restic pods. Optional. Format is key1=value1,key2=value2") + flags.StringVar(&o.VeleroPodCPURequest, "velero-pod-cpu-request", o.VeleroPodCPURequest, `CPU request for Velero pod. A value of "0" is treated as unbounded. Optional.`) + flags.StringVar(&o.VeleroPodMemRequest, "velero-pod-mem-request", o.VeleroPodMemRequest, `memory request for Velero pod. A value of "0" is treated as unbounded. Optional.`) + flags.StringVar(&o.VeleroPodCPULimit, "velero-pod-cpu-limit", o.VeleroPodCPULimit, `CPU limit for Velero pod. A value of "0" is treated as unbounded. Optional.`) + flags.StringVar(&o.VeleroPodMemLimit, "velero-pod-mem-limit", o.VeleroPodMemLimit, `memory limit for Velero pod. A value of "0" is treated as unbounded. Optional.`) + flags.StringVar(&o.ResticPodCPURequest, "restic-pod-cpu-request", o.ResticPodCPURequest, `CPU request for restic pod. A value of "0" is treated as unbounded. Optional.`) + flags.StringVar(&o.ResticPodMemRequest, "restic-pod-mem-request", o.ResticPodMemRequest, `memory request for restic pod. A value of "0" is treated as unbounded. Optional.`) + flags.StringVar(&o.ResticPodCPULimit, "restic-pod-cpu-limit", o.ResticPodCPULimit, `CPU limit for restic pod. A value of "0" is treated as unbounded. Optional.`) + flags.StringVar(&o.ResticPodMemLimit, "restic-pod-mem-limit", o.ResticPodMemLimit, `memory limit for restic pod. A value of "0" is treated as unbounded. Optional.`) flags.Var(&o.BackupStorageConfig, "backup-location-config", "configuration to use for the backup storage location. Format is key1=value1,key2=value2") flags.Var(&o.VolumeSnapshotConfig, "snapshot-location-config", "configuration to use for the volume snapshot location. 
Format is key1=value1,key2=value2") flags.BoolVar(&o.UseVolumeSnapshots, "use-volume-snapshots", o.UseVolumeSnapshots, "whether or not to create snapshot location automatically. Set to false if you do not plan to create volume snapshots via a storage provider.") @@ -70,13 +89,22 @@ func (o *InstallOptions) BindFlags(flags *pflag.FlagSet) { flags.BoolVar(&o.Wait, "wait", o.Wait, "wait for Velero deployment to be ready. Optional.") } -// NewInstallOptions instantiates a new, default InstallOptions stuct. +// NewInstallOptions instantiates a new, default InstallOptions struct. func NewInstallOptions() *InstallOptions { return &InstallOptions{ - Namespace: api.DefaultNamespace, + Namespace: velerov1api.DefaultNamespace, Image: install.DefaultImage, BackupStorageConfig: flag.NewMap(), VolumeSnapshotConfig: flag.NewMap(), + PodAnnotations: flag.NewMap(), + VeleroPodCPURequest: install.DefaultVeleroPodCPURequest, + VeleroPodMemRequest: install.DefaultVeleroPodMemRequest, + VeleroPodCPULimit: install.DefaultVeleroPodCPULimit, + VeleroPodMemLimit: install.DefaultVeleroPodMemLimit, + ResticPodCPURequest: install.DefaultResticPodCPURequest, + ResticPodMemRequest: install.DefaultResticPodMemRequest, + ResticPodCPULimit: install.DefaultResticPodCPULimit, + ResticPodMemLimit: install.DefaultResticPodMemLimit, // Default to creating a VSL unless we're told otherwise UseVolumeSnapshots: true, } @@ -84,20 +112,35 @@ func NewInstallOptions() *InstallOptions { // AsVeleroOptions translates the values provided at the command line into values used to instantiate Kubernetes resources func (o *InstallOptions) AsVeleroOptions() (*install.VeleroOptions, error) { - realPath, err := filepath.Abs(o.SecretFile) + var secretData []byte + if o.SecretFile != "" && !o.NoSecret { + realPath, err := filepath.Abs(o.SecretFile) + if err != nil { + return nil, err + } + secretData, err = ioutil.ReadFile(realPath) + if err != nil { + return nil, err + } + } + veleroPodResources, err := kubeutil.ParseResourceRequirements(o.VeleroPodCPURequest, o.VeleroPodMemRequest, o.VeleroPodCPULimit, o.VeleroPodMemLimit) if err != nil { return nil, err } - secretData, err := ioutil.ReadFile(realPath) + resticPodResources, err := kubeutil.ParseResourceRequirements(o.ResticPodCPURequest, o.ResticPodMemRequest, o.ResticPodCPULimit, o.ResticPodMemLimit) if err != nil { return nil, err } + return &install.VeleroOptions{ Namespace: o.Namespace, Image: o.Image, ProviderName: o.ProviderName, Bucket: o.BucketName, Prefix: o.Prefix, + PodAnnotations: o.PodAnnotations.Data(), + VeleroPodResources: veleroPodResources, + ResticPodResources: resticPodResources, SecretData: secretData, RestoreOnly: o.RestoreOnly, UseRestic: o.UseRestic, @@ -124,7 +167,9 @@ Velero Deployment and associated Restic DaemonSet. The provided secret data will be created in a Secret named 'cloud-credentials'. -All namespaced resources will be placed in the 'velero' namespace. +All namespaced resources will be placed in the 'velero' namespace by default. + +The '--namespace' flag can be used to specify a different namespace to install into. Use '--wait' to wait for the Velero Deployment to be ready before proceeding. @@ -139,11 +184,17 @@ This is useful as a starting point for more customized installations. 
# velero install --bucket gcp-backups --provider gcp --secret-file ./gcp-creds.json --wait + # velero install --bucket backups --provider aws --backup-location-config region=us-west-2 --snapshot-location-config region=us-west-2 --no-secret --pod-annotations iam.amazonaws.com/role=arn:aws:iam:::role/ + + # velero install --bucket gcp-backups --provider gcp --secret-file ./gcp-creds.json --velero-pod-cpu-request=1000m --velero-pod-cpu-limit=5000m --velero-pod-mem-request=512Mi --velero-pod-mem-limit=1024Mi + + # velero install --bucket gcp-backups --provider gcp --secret-file ./gcp-creds.json --restic-pod-cpu-request=1000m --restic-pod-cpu-limit=5000m --restic-pod-mem-request=512Mi --restic-pod-mem-limit=1024Mi + `, Run: func(c *cobra.Command, args []string) { cmd.CheckError(o.Validate(c, args, f)) cmd.CheckError(o.Complete(args, f)) - cmd.CheckError(o.Run(c)) + cmd.CheckError(o.Run(c, f)) }, } @@ -155,7 +206,7 @@ This is useful as a starting point for more customized installations. } // Run executes a command in the context of the provided arguments. -func (o *InstallOptions) Run(c *cobra.Command) error { +func (o *InstallOptions) Run(c *cobra.Command, f client.Factory) error { vo, err := o.AsVeleroOptions() if err != nil { return err @@ -173,12 +224,7 @@ func (o *InstallOptions) Run(c *cobra.Command) error { if o.DryRun { return nil } - - clientConfig, err := client.Config("", "", fmt.Sprintf("%s-%s", c.Parent().Name(), c.Name())) - if err != nil { - return err - } - dynamicClient, err := dynamic.NewForConfig(clientConfig) + dynamicClient, err := f.DynamicClient() if err != nil { return err } @@ -197,12 +243,16 @@ func (o *InstallOptions) Run(c *cobra.Command) error { return errors.Wrap(err, errorMsg) } } + if o.SecretFile == "" { + fmt.Printf("\nNo secret file was specified, no Secret created.\n\n") + } fmt.Printf("Velero is installed! ⛵ Use 'kubectl logs deployment/velero -n %s' to view the status.\n", o.Namespace) return nil } //Complete completes options for a command. 
func (o *InstallOptions) Complete(args []string, f client.Factory) error { + o.Namespace = f.Namespace() return nil } @@ -227,8 +277,11 @@ func (o *InstallOptions) Validate(c *cobra.Command, args []string, f client.Fact return errors.New("--provider is required") } - if o.SecretFile == "" { - return errors.New("--secret-file is required") + switch { + case o.SecretFile == "" && !o.NoSecret: + return errors.New("One of --secret-file or --no-secret is required") + case o.SecretFile != "" && o.NoSecret: + return errors.New("Cannot use both --secret-file and --no-secret") } return nil diff --git a/pkg/cmd/cli/plugin/add.go b/pkg/cmd/cli/plugin/add.go index 276f624990c..ed8c94cd087 100644 --- a/pkg/cmd/cli/plugin/add.go +++ b/pkg/cmd/cli/plugin/add.go @@ -55,7 +55,7 @@ func NewAddCommand(f client.Factory) *cobra.Command { cmd.CheckError(err) } - veleroDeploy, err := kubeClient.AppsV1beta1().Deployments(f.Namespace()).Get(veleroDeployment, metav1.GetOptions{}) + veleroDeploy, err := kubeClient.AppsV1().Deployments(f.Namespace()).Get(veleroDeployment, metav1.GetOptions{}) if err != nil { cmd.CheckError(err) } @@ -125,7 +125,7 @@ func NewAddCommand(f client.Factory) *cobra.Command { patchBytes, err := jsonpatch.CreateMergePatch(original, updated) cmd.CheckError(err) - _, err = kubeClient.AppsV1beta1().Deployments(veleroDeploy.Namespace).Patch(veleroDeploy.Name, types.MergePatchType, patchBytes) + _, err = kubeClient.AppsV1().Deployments(veleroDeploy.Namespace).Patch(veleroDeploy.Name, types.MergePatchType, patchBytes) cmd.CheckError(err) }, } diff --git a/pkg/cmd/cli/plugin/remove.go b/pkg/cmd/cli/plugin/remove.go index 32f2d11a7b1..b4619815e80 100644 --- a/pkg/cmd/cli/plugin/remove.go +++ b/pkg/cmd/cli/plugin/remove.go @@ -40,7 +40,7 @@ func NewRemoveCommand(f client.Factory) *cobra.Command { cmd.CheckError(err) } - veleroDeploy, err := kubeClient.AppsV1beta1().Deployments(f.Namespace()).Get(veleroDeployment, metav1.GetOptions{}) + veleroDeploy, err := kubeClient.AppsV1().Deployments(f.Namespace()).Get(veleroDeployment, metav1.GetOptions{}) if err != nil { cmd.CheckError(err) } @@ -72,7 +72,7 @@ func NewRemoveCommand(f client.Factory) *cobra.Command { patchBytes, err := jsonpatch.CreateMergePatch(original, updated) cmd.CheckError(err) - _, err = kubeClient.AppsV1beta1().Deployments(veleroDeploy.Namespace).Patch(veleroDeploy.Name, types.MergePatchType, patchBytes) + _, err = kubeClient.AppsV1().Deployments(veleroDeploy.Namespace).Patch(veleroDeploy.Name, types.MergePatchType, patchBytes) cmd.CheckError(err) }, } diff --git a/pkg/cmd/cli/restic/server.go b/pkg/cmd/cli/restic/server.go index 49b067e9a13..c85bd2734ee 100644 --- a/pkg/cmd/cli/restic/server.go +++ b/pkg/cmd/cli/restic/server.go @@ -1,5 +1,5 @@ /* -Copyright 2018 the Velero contributors. +Copyright 2019 the Velero contributors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -25,7 +25,9 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/spf13/cobra" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" kubeinformers "k8s.io/client-go/informers" corev1informers "k8s.io/client-go/informers/core/v1" "k8s.io/client-go/kubernetes" @@ -39,11 +41,13 @@ import ( clientset "github.com/heptio/velero/pkg/generated/clientset/versioned" informers "github.com/heptio/velero/pkg/generated/informers/externalversions" "github.com/heptio/velero/pkg/restic" + "github.com/heptio/velero/pkg/util/filesystem" "github.com/heptio/velero/pkg/util/logging" ) func NewServerCommand(f client.Factory) *cobra.Command { logLevelFlag := logging.LogLevelFlag(logrus.InfoLevel) + formatFlag := logging.NewFormatFlag() command := &cobra.Command{ Use: "server", @@ -54,10 +58,11 @@ func NewServerCommand(f client.Factory) *cobra.Command { logLevel := logLevelFlag.Parse() logrus.Infof("Setting log-level to %s", strings.ToUpper(logLevel.String())) - logger := logging.DefaultLogger(logLevel) + logger := logging.DefaultLogger(logLevel, formatFlag.Parse()) logger.Infof("Starting Velero restic server %s (%s)", buildinfo.Version, buildinfo.FormattedGitSHA()) - s, err := newResticServer(logger, fmt.Sprintf("%s-%s", c.Parent().Name(), c.Name())) + f.SetBasename(fmt.Sprintf("%s-%s", c.Parent().Name(), c.Name())) + s, err := newResticServer(logger, f) cmd.CheckError(err) s.run() @@ -65,6 +70,7 @@ func NewServerCommand(f client.Factory) *cobra.Command { } command.Flags().Var(logLevelFlag, "log-level", fmt.Sprintf("the level at which to log. Valid values are %s.", strings.Join(logLevelFlag.AllowedValues(), ", "))) + command.Flags().Var(formatFlag, "log-format", fmt.Sprintf("the format for log output. Valid values are %s.", strings.Join(formatFlag.AllowedValues(), ", "))) return command } @@ -79,22 +85,19 @@ type resticServer struct { logger logrus.FieldLogger ctx context.Context cancelFunc context.CancelFunc + fileSystem filesystem.Interface } -func newResticServer(logger logrus.FieldLogger, baseName string) (*resticServer, error) { - clientConfig, err := client.Config("", "", baseName) - if err != nil { - return nil, err - } +func newResticServer(logger logrus.FieldLogger, factory client.Factory) (*resticServer, error) { - kubeClient, err := kubernetes.NewForConfig(clientConfig) + kubeClient, err := factory.KubeClient() if err != nil { - return nil, errors.WithStack(err) + return nil, err } - veleroClient, err := clientset.NewForConfig(clientConfig) + veleroClient, err := factory.Client() if err != nil { - return nil, errors.WithStack(err) + return nil, err } // use a stand-alone pod informer because we want to use a field selector to @@ -117,7 +120,7 @@ func newResticServer(logger logrus.FieldLogger, baseName string) (*resticServer, // to fully-encrypted backups and have unique keys per repository. 
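For reference, a stand-alone sketch (not from this diff) of the filtered-informer pattern this function relies on: an informer scoped by a field selector, in the same shape as the secret informer that follows. The helper name and the node-scoped pod example are assumptions for illustration.

package example

import (
	"fmt"
	"os"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	corev1informers "k8s.io/client-go/informers/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// newNodePodInformer builds a pod informer limited to pods scheduled on the
// current node by tweaking the list options with a field selector.
func newNodePodInformer(kubeClient kubernetes.Interface) cache.SharedIndexInformer {
	return corev1informers.NewFilteredPodInformer(
		kubeClient,
		metav1.NamespaceAll, // watch all namespaces
		0,                   // no resync
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
		func(opts *metav1.ListOptions) {
			// only pods scheduled onto this node
			opts.FieldSelector = fmt.Sprintf("spec.nodeName=%s", os.Getenv("NODE_NAME"))
		},
	)
}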
secretInformer := corev1informers.NewFilteredSecretInformer( kubeClient, - os.Getenv("VELERO_NAMESPACE"), + factory.Namespace(), 0, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, func(opts *metav1.ListOptions) { @@ -127,17 +130,24 @@ func newResticServer(logger logrus.FieldLogger, baseName string) (*resticServer, ctx, cancelFunc := context.WithCancel(context.Background()) - return &resticServer{ + s := &resticServer{ kubeClient: kubeClient, veleroClient: veleroClient, - veleroInformerFactory: informers.NewFilteredSharedInformerFactory(veleroClient, 0, os.Getenv("VELERO_NAMESPACE"), nil), + veleroInformerFactory: informers.NewFilteredSharedInformerFactory(veleroClient, 0, factory.Namespace(), nil), kubeInformerFactory: kubeinformers.NewSharedInformerFactory(kubeClient, 0), podInformer: podInformer, secretInformer: secretInformer, logger: logger, ctx: ctx, cancelFunc: cancelFunc, - }, nil + fileSystem: filesystem.NewFileSystem(), + } + + if err := s.validatePodVolumesHostPath(); err != nil { + return nil, err + } + + return s, nil } func (s *resticServer) run() { @@ -154,6 +164,7 @@ func (s *resticServer) run() { s.podInformer, s.secretInformer, s.kubeInformerFactory.Core().V1().PersistentVolumeClaims(), + s.kubeInformerFactory.Core().V1().PersistentVolumes(), s.veleroInformerFactory.Velero().V1().BackupStorageLocations(), os.Getenv("NODE_NAME"), ) @@ -170,6 +181,7 @@ func (s *resticServer) run() { s.podInformer, s.secretInformer, s.kubeInformerFactory.Core().V1().PersistentVolumeClaims(), + s.kubeInformerFactory.Core().V1().PersistentVolumes(), s.veleroInformerFactory.Velero().V1().BackupStorageLocations(), os.Getenv("NODE_NAME"), ) @@ -191,3 +203,50 @@ func (s *resticServer) run() { s.logger.Info("Waiting for all controllers to shut down gracefully") wg.Wait() } + +// validatePodVolumesHostPath validates that the pod volumes path contains a +// directory for each Pod running on this node +func (s *resticServer) validatePodVolumesHostPath() error { + files, err := s.fileSystem.ReadDir("/host_pods/") + if err != nil { + return errors.Wrap(err, "could not read pod volumes host path") + } + + // create a map of directory names inside the pod volumes path + dirs := sets.NewString() + for _, f := range files { + if f.IsDir() { + dirs.Insert(f.Name()) + } + } + + pods, err := s.kubeClient.CoreV1().Pods("").List(metav1.ListOptions{FieldSelector: fmt.Sprintf("spec.nodeName=%s,status.phase=Running", os.Getenv("NODE_NAME"))}) + if err != nil { + return errors.WithStack(err) + } + + valid := true + for _, pod := range pods.Items { + dirName := string(pod.GetUID()) + + // if the pod is a mirror pod, the directory name is the hash value of the + // mirror pod annotation + if hash, ok := pod.GetAnnotations()[v1.MirrorPodAnnotationKey]; ok { + dirName = hash + } + + if !dirs.Has(dirName) { + valid = false + s.logger.WithFields(logrus.Fields{ + "pod": fmt.Sprintf("%s/%s", pod.GetNamespace(), pod.GetName()), + "path": "/host_pods/" + dirName, + }).Debug("could not find volumes for pod in host path") + } + } + + if !valid { + return errors.New("unexpected directory structure for host-pods volume, ensure that the host-pods volume corresponds to the pods subdirectory of the kubelet root directory") + } + + return nil +} diff --git a/pkg/cmd/cli/restic/server_test.go b/pkg/cmd/cli/restic/server_test.go new file mode 100644 index 00000000000..b1c1367ffc9 --- /dev/null +++ b/pkg/cmd/cli/restic/server_test.go @@ -0,0 +1,109 @@ +/* +Copyright 2019 the Velero contributors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package restic + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes/fake" + + "github.com/heptio/velero/pkg/builder" + testutil "github.com/heptio/velero/pkg/test" +) + +func Test_validatePodVolumesHostPath(t *testing.T) { + tests := []struct { + name string + pods []*corev1.Pod + dirs []string + wantErr bool + }{ + { + name: "no error when pod volumes are present", + pods: []*corev1.Pod{ + builder.ForPod("foo", "bar").ObjectMeta(builder.WithUID("foo")).Result(), + builder.ForPod("zoo", "raz").ObjectMeta(builder.WithUID("zoo")).Result(), + }, + dirs: []string{"foo", "zoo"}, + wantErr: false, + }, + { + name: "no error when pod volumes are present and there are mirror pods", + pods: []*corev1.Pod{ + builder.ForPod("foo", "bar").ObjectMeta(builder.WithUID("foo")).Result(), + builder.ForPod("zoo", "raz").ObjectMeta(builder.WithUID("zoo"), builder.WithAnnotations(v1.MirrorPodAnnotationKey, "baz")).Result(), + }, + dirs: []string{"foo", "baz"}, + wantErr: false, + }, + { + name: "error when all pod volumes missing", + pods: []*corev1.Pod{ + builder.ForPod("foo", "bar").ObjectMeta(builder.WithUID("foo")).Result(), + builder.ForPod("zoo", "raz").ObjectMeta(builder.WithUID("zoo")).Result(), + }, + dirs: []string{"unexpected-dir"}, + wantErr: true, + }, + { + name: "error when some pod volumes missing", + pods: []*corev1.Pod{ + builder.ForPod("foo", "bar").ObjectMeta(builder.WithUID("foo")).Result(), + builder.ForPod("zoo", "raz").ObjectMeta(builder.WithUID("zoo")).Result(), + }, + dirs: []string{"foo"}, + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + fs := testutil.NewFakeFileSystem() + + for _, dir := range tt.dirs { + err := fs.MkdirAll(filepath.Join("/host_pods/", dir), os.ModePerm) + if err != nil { + t.Error(err) + } + } + + kubeClient := fake.NewSimpleClientset() + for _, pod := range tt.pods { + _, err := kubeClient.CoreV1().Pods(pod.GetNamespace()).Create(pod) + if err != nil { + t.Error(err) + } + } + + s := &resticServer{ + kubeClient: kubeClient, + logger: testutil.NewLogger(), + fileSystem: fs, + } + + err := s.validatePodVolumesHostPath() + if tt.wantErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} diff --git a/pkg/cmd/cli/schedule/create.go b/pkg/cmd/cli/schedule/create.go index 012ab36c42e..fc7350645a7 100644 --- a/pkg/cmd/cli/schedule/create.go +++ b/pkg/cmd/cli/schedule/create.go @@ -45,7 +45,11 @@ func NewCreateCommand(f client.Factory, use string) *cobra.Command { | 2 | Hour | 0-23,* | | 3 | Day of Month | 1-31,* | | 4 | Month | 1-12,* | -| 5 | Day of Week | 0-7,* |`, +| 5 | Day of Week | 0-7,* | + +The schedule can also be expressed using "@every " syntax. 
The duration +can be specified using a combination of seconds (s), minutes (m), and hours (h), for +example: "@every 2h30m".`, Example: ` # Create a backup every 6 hours velero create schedule NAME --schedule="0 */6 * * *" @@ -54,10 +58,10 @@ func NewCreateCommand(f client.Factory, use string) *cobra.Command { velero create schedule NAME --schedule="@every 6h" # Create a daily backup of the web namespace - velero create schedule NAME --schedule="@every 1d" --included-namespaces web + velero create schedule NAME --schedule="@every 24h" --include-namespaces web # Create a weekly backup, each living for 90 days (2160 hours) - velero create schedule NAME --schedules="@every 7d" --ttl 2160h0m0s + velero create schedule NAME --schedule="@every 168h" --ttl 2160h0m0s `, Args: cobra.ExactArgs(1), Run: func(c *cobra.Command, args []string) { diff --git a/pkg/cmd/cli/serverstatus/server_status.go b/pkg/cmd/cli/serverstatus/server_status.go index 5dc41c900d6..f80670de3ec 100644 --- a/pkg/cmd/cli/serverstatus/server_status.go +++ b/pkg/cmd/cli/serverstatus/server_status.go @@ -24,8 +24,8 @@ import ( "k8s.io/apimachinery/pkg/watch" velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/builder" velerov1client "github.com/heptio/velero/pkg/generated/clientset/versioned/typed/velero/v1" - "github.com/heptio/velero/pkg/serverstatusrequest" ) type ServerStatusGetter interface { @@ -38,7 +38,10 @@ type DefaultServerStatusGetter struct { } func (g *DefaultServerStatusGetter) GetServerStatus(client velerov1client.ServerStatusRequestsGetter) (*velerov1api.ServerStatusRequest, error) { - req := serverstatusrequest.NewBuilder().Namespace(g.Namespace).GenerateName("velero-cli-").ServerStatusRequest() + req := builder.ForServerStatusRequest(g.Namespace, ""). 
+ ObjectMeta( + builder.WithGenerateName("velero-cli-"), + ).Result() created, err := client.ServerStatusRequests(g.Namespace).Create(req) if err != nil { diff --git a/pkg/cmd/cli/version/version.go b/pkg/cmd/cli/version/version.go index fb0d344dcb5..138a4a668ea 100644 --- a/pkg/cmd/cli/version/version.go +++ b/pkg/cmd/cli/version/version.go @@ -50,7 +50,7 @@ func NewCommand(f client.Factory) *cobra.Command { veleroClient = client.VeleroV1() } - + serverStatusGetter.Namespace = f.Namespace() printVersion(os.Stdout, clientOnly, veleroClient, serverStatusGetter) }, } diff --git a/pkg/cmd/cli/version/version_test.go b/pkg/cmd/cli/version/version_test.go index 9bc90313d0c..be316d4843b 100644 --- a/pkg/cmd/cli/version/version_test.go +++ b/pkg/cmd/cli/version/version_test.go @@ -26,10 +26,10 @@ import ( "github.com/stretchr/testify/mock" velerov1 "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/builder" "github.com/heptio/velero/pkg/buildinfo" "github.com/heptio/velero/pkg/generated/clientset/versioned/fake" v1 "github.com/heptio/velero/pkg/generated/clientset/versioned/typed/velero/v1" - "github.com/heptio/velero/pkg/serverstatusrequest" ) func TestPrintVersion(t *testing.T) { @@ -73,7 +73,7 @@ func TestPrintVersion(t *testing.T) { { name: "server status getter returns normally", clientOnly: false, - serverStatusRequest: serverstatusrequest.NewBuilder().ServerVersion("v1.0.1").ServerStatusRequest(), + serverStatusRequest: builder.ForServerStatusRequest("velero", "ssr-1").ServerVersion("v1.0.1").Result(), getterError: nil, want: clientVersion + "Server:\n\tVersion: v1.0.1\n", }, diff --git a/pkg/cmd/errors.go b/pkg/cmd/errors.go index d6ff984b601..2ba4420a81d 100644 --- a/pkg/cmd/errors.go +++ b/pkg/cmd/errors.go @@ -27,7 +27,7 @@ import ( func CheckError(err error) { if err != nil { if err != context.Canceled { - fmt.Fprintf(os.Stderr, fmt.Sprintf("An error occurred: %v\n", err)) + fmt.Fprintf(os.Stderr, "An error occurred: %v\n", err) } os.Exit(1) } diff --git a/pkg/cmd/server/plugin/plugin.go b/pkg/cmd/server/plugin/plugin.go index 506bbddff77..10f0bbcb80c 100644 --- a/pkg/cmd/server/plugin/plugin.go +++ b/pkg/cmd/server/plugin/plugin.go @@ -46,14 +46,15 @@ func NewCommand(f client.Factory) *cobra.Command { RegisterVolumeSnapshotter("velero.io/gcp", newGcpVolumeSnapshotter). RegisterBackupItemAction("velero.io/pv", newPVBackupItemAction). RegisterBackupItemAction("velero.io/pod", newPodBackupItemAction). - RegisterBackupItemAction("velero.io/serviceaccount", newServiceAccountBackupItemAction(f)). + RegisterBackupItemAction("velero.io/service-account", newServiceAccountBackupItemAction(f)). RegisterRestoreItemAction("velero.io/job", newJobRestoreItemAction). RegisterRestoreItemAction("velero.io/pod", newPodRestoreItemAction). RegisterRestoreItemAction("velero.io/restic", newResticRestoreItemAction(f)). RegisterRestoreItemAction("velero.io/service", newServiceRestoreItemAction). - RegisterRestoreItemAction("velero.io/serviceaccount", newServiceAccountRestoreItemAction). - RegisterRestoreItemAction("velero.io/addPVCFromPod", newAddPVCFromPodRestoreItemAction). - RegisterRestoreItemAction("velero.io/addPVFromPVC", newAddPVFromPVCRestoreItemAction). + RegisterRestoreItemAction("velero.io/service-account", newServiceAccountRestoreItemAction). + RegisterRestoreItemAction("velero.io/add-pvc-from-pod", newAddPVCFromPodRestoreItemAction). + RegisterRestoreItemAction("velero.io/add-pv-from-pvc", newAddPVFromPVCRestoreItemAction). 
+ RegisterRestoreItemAction("velero.io/change-storage-class", newChangeStorageClassRestoreItemAction(f)). Serve() }, } @@ -135,7 +136,12 @@ func newResticRestoreItemAction(f client.Factory) veleroplugin.HandlerInitialize return nil, err } - return restore.NewResticRestoreAction(logger, client.CoreV1().ConfigMaps(f.Namespace())), nil + veleroClient, err := f.Client() + if err != nil { + return nil, err + } + + return restore.NewResticRestoreAction(logger, client.CoreV1().ConfigMaps(f.Namespace()), veleroClient.VeleroV1().PodVolumeBackups(f.Namespace())), nil } } @@ -154,3 +160,18 @@ func newAddPVCFromPodRestoreItemAction(logger logrus.FieldLogger) (interface{}, func newAddPVFromPVCRestoreItemAction(logger logrus.FieldLogger) (interface{}, error) { return restore.NewAddPVFromPVCAction(logger), nil } + +func newChangeStorageClassRestoreItemAction(f client.Factory) veleroplugin.HandlerInitializer { + return func(logger logrus.FieldLogger) (interface{}, error) { + client, err := f.KubeClient() + if err != nil { + return nil, err + } + + return restore.NewChangeStorageClassAction( + logger, + client.CoreV1().ConfigMaps(f.Namespace()), + client.StorageV1().StorageClasses(), + ), nil + } +} diff --git a/pkg/cmd/server/server.go b/pkg/cmd/server/server.go index 7d98b0264c6..158f2709c38 100644 --- a/pkg/cmd/server/server.go +++ b/pkg/cmd/server/server.go @@ -1,5 +1,5 @@ /* -Copyright 2017 the Velero contributors. +Copyright 2017, 2019 the Velero contributors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,7 +19,6 @@ package server import ( "context" "fmt" - "io/ioutil" "log" "net/http" "net/http/pprof" @@ -32,7 +31,6 @@ import ( "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "github.com/spf13/pflag" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kubeerrs "k8s.io/apimachinery/pkg/util/errors" @@ -119,6 +117,7 @@ type serverConfig struct { clientQPS float32 clientBurst int profilerAddress string + formatFlag *logging.FormatFlag } type controllerRunInfo struct { @@ -126,7 +125,7 @@ type controllerRunInfo struct { numWorkers int } -func NewCommand() *cobra.Command { +func NewCommand(f client.Factory) *cobra.Command { var ( volumeSnapshotLocations = flag.NewMap().WithKeyValueDelimiter(":") logLevelFlag = logging.LogLevelFlag(logrus.InfoLevel) @@ -143,6 +142,7 @@ func NewCommand() *cobra.Command { clientBurst: defaultClientBurst, profilerAddress: defaultProfilerAddress, resourceTerminatingTimeout: defaultResourceTerminatingTimeout, + formatFlag: logging.NewFormatFlag(), } ) @@ -157,33 +157,25 @@ func NewCommand() *cobra.Command { log.SetOutput(os.Stdout) logLevel := logLevelFlag.Parse() + format := config.formatFlag.Parse() + // Make sure we log to stdout so cloud log dashboards don't show this as an error. logrus.SetOutput(os.Stdout) - logrus.Infof("setting log-level to %s", strings.ToUpper(logLevel.String())) // Velero's DefaultLogger logs to stdout, so all is good there. - logger := logging.DefaultLogger(logLevel) - logger.Infof("Starting Velero server %s (%s)", buildinfo.Version, buildinfo.FormattedGitSHA()) + logger := logging.DefaultLogger(logLevel, format) - // NOTE: the namespace flag is bound to velero's persistent flags when the root velero command - // creates the client Factory and binds the Factory's flags. 
We're not using a Factory here in - // the server because the Factory gets its basename set at creation time, and the basename is - // used to construct the user-agent for clients. Also, the Factory's Namespace() method uses - // the client config file to determine the appropriate namespace to use, and that isn't - // applicable to the server (it uses the method directly below instead). We could potentially - // add a SetBasename() method to the Factory, and tweak how Namespace() works, if we wanted to - // have the server use the Factory. - namespaceFlag := c.Flag("namespace") - if namespaceFlag == nil { - cmd.CheckError(errors.New("unable to look up namespace flag")) - } - namespace := getServerNamespace(namespaceFlag) + logger.Infof("setting log-level to %s", strings.ToUpper(logLevel.String())) + + logger.Infof("Starting Velero server %s (%s)", buildinfo.Version, buildinfo.FormattedGitSHA()) if volumeSnapshotLocations.Data() != nil { config.defaultVolumeSnapshotLocations = volumeSnapshotLocations.Data() } - s, err := newServer(namespace, fmt.Sprintf("%s-%s", c.Parent().Name(), c.Name()), config, logger) + f.SetBasename(fmt.Sprintf("%s-%s", c.Parent().Name(), c.Name())) + + s, err := newServer(f, config, logger) cmd.CheckError(err) cmd.CheckError(s.run()) @@ -191,6 +183,7 @@ func NewCommand() *cobra.Command { } command.Flags().Var(logLevelFlag, "log-level", fmt.Sprintf("the level at which to log. Valid values are %s.", strings.Join(logLevelFlag.AllowedValues(), ", "))) + command.Flags().Var(config.formatFlag, "log-format", fmt.Sprintf("the format for log output. Valid values are %s.", strings.Join(config.formatFlag.AllowedValues(), ", "))) command.Flags().StringVar(&config.pluginDir, "plugin-dir", config.pluginDir, "directory containing Velero plugins") command.Flags().StringVar(&config.metricsAddress, "metrics-address", config.metricsAddress, "the address to expose prometheus metrics") command.Flags().DurationVar(&config.backupSyncPeriod, "backup-sync-period", config.backupSyncPeriod, "how often to ensure all Velero backups in object storage exist as Backup API objects in the cluster") @@ -209,20 +202,6 @@ func NewCommand() *cobra.Command { return command } -func getServerNamespace(namespaceFlag *pflag.Flag) string { - if namespaceFlag.Changed { - return namespaceFlag.Value.String() - } - - if data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil { - if ns := strings.TrimSpace(string(data)); len(ns) > 0 { - return ns - } - } - - return api.DefaultNamespace -} - type server struct { namespace string metricsAddress string @@ -244,29 +223,30 @@ type server struct { config serverConfig } -func newServer(namespace, baseName string, config serverConfig, logger *logrus.Logger) (*server, error) { - clientConfig, err := client.Config("", "", baseName) - if err != nil { - return nil, err - } +func newServer(f client.Factory, config serverConfig, logger *logrus.Logger) (*server, error) { if config.clientQPS < 0.0 { return nil, errors.New("client-qps must be positive") } - clientConfig.QPS = config.clientQPS + f.SetClientQPS(config.clientQPS) if config.clientBurst <= 0 { return nil, errors.New("client-burst must be positive") } - clientConfig.Burst = config.clientBurst + f.SetClientBurst(config.clientBurst) - kubeClient, err := kubernetes.NewForConfig(clientConfig) + kubeClient, err := f.KubeClient() if err != nil { - return nil, errors.WithStack(err) + return nil, err + } + + veleroClient, err := f.Client() + if err != nil { + return nil, err } - 
veleroClient, err := clientset.NewForConfig(clientConfig) + dynamicClient, err := f.DynamicClient() if err != nil { - return nil, errors.WithStack(err) + return nil, err } pluginRegistry := clientmgmt.NewRegistry(config.pluginDir, logger, logger.Level) @@ -278,22 +258,22 @@ func newServer(namespace, baseName string, config serverConfig, logger *logrus.L return nil, err } - dynamicClient, err := dynamic.NewForConfig(clientConfig) + ctx, cancelFunc := context.WithCancel(context.Background()) + + clientConfig, err := f.ClientConfig() if err != nil { return nil, err } - ctx, cancelFunc := context.WithCancel(context.Background()) - s := &server{ - namespace: namespace, + namespace: f.Namespace(), metricsAddress: config.metricsAddress, kubeClientConfig: clientConfig, kubeClient: kubeClient, veleroClient: veleroClient, discoveryClient: veleroClient.Discovery(), dynamicClient: dynamicClient, - sharedInformerFactory: informers.NewSharedInformerFactoryWithOptions(veleroClient, 0, informers.WithNamespace(namespace)), + sharedInformerFactory: informers.NewSharedInformerFactoryWithOptions(veleroClient, 0, informers.WithNamespace(f.Namespace())), ctx: ctx, cancelFunc: cancelFunc, logger: logger, @@ -557,10 +537,12 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string backupSyncControllerRunInfo := func() controllerRunInfo { backupSyncContoller := controller.NewBackupSyncController( + s.veleroClient.VeleroV1(), s.veleroClient.VeleroV1(), s.veleroClient.VeleroV1(), s.sharedInformerFactory.Velero().V1().Backups(), s.sharedInformerFactory.Velero().V1().BackupStorageLocations(), + s.sharedInformerFactory.Velero().V1().PodVolumeBackups(), s.config.backupSyncPeriod, s.namespace, s.config.defaultBackupLocation, @@ -600,6 +582,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string s.sharedInformerFactory.Velero().V1().VolumeSnapshotLocations(), defaultVolumeSnapshotLocations, s.metrics, + s.config.formatFlag.Parse(), ) return controllerRunInfo{ @@ -690,6 +673,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string newPluginManager, s.config.defaultBackupLocation, s.metrics, + s.config.formatFlag.Parse(), ) return controllerRunInfo{ diff --git a/pkg/cmd/server/server_test.go b/pkg/cmd/server/server_test.go index 4b15965cffb..fd9cd24a394 100644 --- a/pkg/cmd/server/server_test.go +++ b/pkg/cmd/server/server_test.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" v1 "github.com/heptio/velero/pkg/apis/velero/v1" - velerotest "github.com/heptio/velero/pkg/util/test" + velerotest "github.com/heptio/velero/pkg/test" ) func TestVeleroResourcesExist(t *testing.T) { diff --git a/pkg/cmd/util/downloadrequest/downloadrequest.go b/pkg/cmd/util/downloadrequest/downloadrequest.go index c5eb61a232e..2cf129e8322 100644 --- a/pkg/cmd/util/downloadrequest/downloadrequest.go +++ b/pkg/cmd/util/downloadrequest/downloadrequest.go @@ -32,6 +32,10 @@ import ( velerov1client "github.com/heptio/velero/pkg/generated/clientset/versioned/typed/velero/v1" ) +// ErrNotFound is exported for external packages to check for when a file is +// not found +var ErrNotFound = errors.New("file not found") + func Stream(client velerov1client.DownloadRequestsGetter, namespace, name string, kind v1.DownloadTargetKind, w io.Writer, timeout time.Duration) error { req := &v1.DownloadRequest{ ObjectMeta: metav1.ObjectMeta{ @@ -97,7 +101,7 @@ Loop: } if req.Status.DownloadURL == "" { - return errors.New("file not found") + return ErrNotFound } 
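A short sketch (not part of this change) of how a caller can branch on the exported sentinel error, in the same style the backup describer uses later in this diff; the download target kind, timeout, and function name are example values.

package example

import (
	"bytes"
	"time"

	velerov1api "github.com/heptio/velero/pkg/apis/velero/v1"
	"github.com/heptio/velero/pkg/cmd/util/downloadrequest"
	velerov1client "github.com/heptio/velero/pkg/generated/clientset/versioned/typed/velero/v1"
)

// fetchBackupLog streams a backup log into memory, treating a missing file as
// an empty result rather than a fatal error. Illustrative only.
func fetchBackupLog(client velerov1client.DownloadRequestsGetter, ns, backupName string) (string, error) {
	buf := new(bytes.Buffer)

	err := downloadrequest.Stream(client, ns, backupName, velerov1api.DownloadTargetKindBackupLog, buf, time.Minute)
	if err == downloadrequest.ErrNotFound {
		// nothing has been uploaded yet; callers can decide this is not fatal
		return "", nil
	}
	if err != nil {
		return "", err
	}

	return buf.String(), nil
}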
httpClient := new(http.Client) @@ -124,6 +128,10 @@ Loop: return errors.Wrapf(err, "request failed: unable to decode response body") } + if resp.StatusCode == http.StatusNotFound { + return ErrNotFound + } + return errors.Errorf("request failed: %v", string(body)) } diff --git a/pkg/cmd/util/output/backup_describer.go b/pkg/cmd/util/output/backup_describer.go index 0bfdb2be5ce..c1fd8b1bfc7 100644 --- a/pkg/cmd/util/output/backup_describer.go +++ b/pkg/cmd/util/output/backup_describer.go @@ -233,6 +233,11 @@ func DescribeBackupStatus(d *Describer, backup *velerov1api.Backup, details bool d.Printf("Expiration:\t%s\n", status.Expiration.Time) d.Println() + if details { + describeBackupResourceList(d, backup, veleroClient) + d.Println() + } + if status.VolumeSnapshotsAttempted > 0 { if !details { d.Printf("Persistent Volumes:\t%d of %d snapshots completed successfully (specify --details for more information)\n", status.VolumeSnapshotsCompleted, status.VolumeSnapshotsAttempted) @@ -253,7 +258,7 @@ func DescribeBackupStatus(d *Describer, backup *velerov1api.Backup, details bool d.Printf("Persistent Volumes:\n") for _, snap := range snapshots { - printSnapshot(d, snap.Spec.PersistentVolumeName, snap.Status.ProviderSnapshotID, snap.Spec.VolumeType, snap.Spec.VolumeAZ, snap.Spec.VolumeIOPS) + describeSnapshot(d, snap.Spec.PersistentVolumeName, snap.Status.ProviderSnapshotID, snap.Spec.VolumeType, snap.Spec.VolumeAZ, snap.Spec.VolumeIOPS) } return } @@ -261,7 +266,38 @@ func DescribeBackupStatus(d *Describer, backup *velerov1api.Backup, details bool d.Printf("Persistent Volumes: \n") } -func printSnapshot(d *Describer, pvName, snapshotID, volumeType, volumeAZ string, iops *int64) { +func describeBackupResourceList(d *Describer, backup *velerov1api.Backup, veleroClient clientset.Interface) { + buf := new(bytes.Buffer) + if err := downloadrequest.Stream(veleroClient.VeleroV1(), backup.Namespace, backup.Name, velerov1api.DownloadTargetKindBackupResourceList, buf, downloadRequestTimeout); err != nil { + if err == downloadrequest.ErrNotFound { + d.Println("Resource List:\t") + } else { + d.Printf("Resource List:\t\n", err) + } + return + } + + var resourceList map[string][]string + if err := json.NewDecoder(buf).Decode(&resourceList); err != nil { + d.Printf("Resource List:\t\n", err) + return + } + + d.Println("Resource List:") + + // Sort GVKs in output + gvks := make([]string, 0, len(resourceList)) + for gvk := range resourceList { + gvks = append(gvks, gvk) + } + sort.Strings(gvks) + + for _, gvk := range gvks { + d.Printf("\t%s:\n\t\t- %s\n", gvk, strings.Join(resourceList[gvk], "\n\t\t- ")) + } +} + +func describeSnapshot(d *Describer, pvName, snapshotID, volumeType, volumeAZ string, iops *int64) { d.Printf("\t%s:\n", pvName) d.Printf("\t\tSnapshot ID:\t%s\n", snapshotID) d.Printf("\t\tType:\t%s\n", volumeType) diff --git a/pkg/cmd/util/output/backup_printer.go b/pkg/cmd/util/output/backup_printer.go index 336bb3bf394..1f20f3f4a61 100644 --- a/pkg/cmd/util/output/backup_printer.go +++ b/pkg/cmd/util/output/backup_printer.go @@ -18,12 +18,12 @@ package output import ( "fmt" - "io" "regexp" "sort" "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/duration" "k8s.io/kubernetes/pkg/printers" @@ -31,18 +31,30 @@ import ( ) var ( - backupColumns = []string{"NAME", "STATUS", "CREATED", "EXPIRES", "STORAGE LOCATION", "SELECTOR"} + backupColumns = []metav1.TableColumnDefinition{ + // name needs Type and Format defined for the decorator to 
identify it: + // https://github.com/kubernetes/kubernetes/blob/v1.15.3/pkg/printers/tableprinter.go#L204 + {Name: "Name", Type: "string", Format: "name"}, + {Name: "Status"}, + {Name: "Created"}, + {Name: "Expires"}, + {Name: "Storage Location"}, + {Name: "Selector"}, + } ) -func printBackupList(list *velerov1api.BackupList, w io.Writer, options printers.PrintOptions) error { +func printBackupList(list *velerov1api.BackupList, options printers.PrintOptions) ([]metav1.TableRow, error) { sortBackupsByPrefixAndTimestamp(list) + rows := make([]metav1.TableRow, 0, len(list.Items)) for i := range list.Items { - if err := printBackup(&list.Items[i], w, options); err != nil { - return err + r, err := printBackup(&list.Items[i], options) + if err != nil { + return nil, err } + rows = append(rows, r...) } - return nil + return rows, nil } // sort by default alphabetically, but if backups stem from a common schedule @@ -71,13 +83,9 @@ func sortBackupsByPrefixAndTimestamp(list *velerov1api.BackupList) { }) } -func printBackup(backup *velerov1api.Backup, w io.Writer, options printers.PrintOptions) error { - name := printers.FormatResourceName(options.Kind, backup.Name, options.WithKind) - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", backup.Namespace); err != nil { - return err - } +func printBackup(backup *velerov1api.Backup, options printers.PrintOptions) ([]metav1.TableRow, error) { + row := metav1.TableRow{ + Object: runtime.RawExtension{Object: backup}, } expiration := backup.Status.Expiration.Time @@ -103,16 +111,9 @@ func printBackup(backup *velerov1api.Backup, w io.Writer, options printers.Print location := backup.Spec.StorageLocation - if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s", name, status, backup.Status.StartTimestamp.Time, humanReadableTimeFromNow(expiration), location, metav1.FormatLabelSelector(backup.Spec.LabelSelector)); err != nil { - return err - } - - if _, err := fmt.Fprint(w, printers.AppendLabels(backup.Labels, options.ColumnLabels)); err != nil { - return err - } + row.Cells = append(row.Cells, backup.Name, status, backup.Status.StartTimestamp.Time, humanReadableTimeFromNow(expiration), location, metav1.FormatLabelSelector(backup.Spec.LabelSelector)) - _, err := fmt.Fprint(w, printers.AppendAllLabels(options.ShowLabels, backup.Labels)) - return err + return []metav1.TableRow{row}, nil } func humanReadableTimeFromNow(when time.Time) string { diff --git a/pkg/cmd/util/output/backup_storage_location_printer.go b/pkg/cmd/util/output/backup_storage_location_printer.go index 47dd8555b77..3eb29d17adc 100644 --- a/pkg/cmd/util/output/backup_storage_location_printer.go +++ b/pkg/cmd/util/output/backup_storage_location_printer.go @@ -17,34 +17,40 @@ limitations under the License. 
package output import ( - "fmt" - "io" - + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/kubernetes/pkg/printers" v1 "github.com/heptio/velero/pkg/apis/velero/v1" ) var ( - backupStorageLocationColumns = []string{"NAME", "PROVIDER", "BUCKET/PREFIX", "ACCESS MODE"} + backupStorageLocationColumns = []metav1.TableColumnDefinition{ + // name needs Type and Format defined for the decorator to identify it: + // https://github.com/kubernetes/kubernetes/blob/v1.15.3/pkg/printers/tableprinter.go#L204 + {Name: "Name", Type: "string", Format: "name"}, + {Name: "Provider"}, + {Name: "Bucket/Prefix"}, + {Name: "Access Mode"}, + } ) -func printBackupStorageLocationList(list *v1.BackupStorageLocationList, w io.Writer, options printers.PrintOptions) error { +func printBackupStorageLocationList(list *v1.BackupStorageLocationList, options printers.PrintOptions) ([]metav1.TableRow, error) { + rows := make([]metav1.TableRow, 0, len(list.Items)) + for i := range list.Items { - if err := printBackupStorageLocation(&list.Items[i], w, options); err != nil { - return err + r, err := printBackupStorageLocation(&list.Items[i], options) + if err != nil { + return nil, err } + rows = append(rows, r...) } - return nil + return rows, nil } -func printBackupStorageLocation(location *v1.BackupStorageLocation, w io.Writer, options printers.PrintOptions) error { - name := printers.FormatResourceName(options.Kind, location.Name, options.WithKind) - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", location.Namespace); err != nil { - return err - } +func printBackupStorageLocation(location *v1.BackupStorageLocation, options printers.PrintOptions) ([]metav1.TableRow, error) { + row := metav1.TableRow{ + Object: runtime.RawExtension{Object: location}, } bucketAndPrefix := location.Spec.ObjectStorage.Bucket @@ -57,21 +63,12 @@ func printBackupStorageLocation(location *v1.BackupStorageLocation, w io.Writer, accessMode = v1.BackupStorageLocationAccessModeReadWrite } - if _, err := fmt.Fprintf( - w, - "%s\t%s\t%s\t%s", - name, + row.Cells = append(row.Cells, + location.Name, location.Spec.Provider, bucketAndPrefix, accessMode, - ); err != nil { - return err - } - - if _, err := fmt.Fprint(w, printers.AppendLabels(location.Labels, options.ColumnLabels)); err != nil { - return err - } + ) - _, err := fmt.Fprint(w, printers.AppendAllLabels(options.ShowLabels, location.Labels)) - return err + return []metav1.TableRow{row}, nil } diff --git a/pkg/cmd/util/output/output.go b/pkg/cmd/util/output/output.go index ff6d6fe3db7..0a1f2f70f71 100644 --- a/pkg/cmd/util/output/output.go +++ b/pkg/cmd/util/output/output.go @@ -146,19 +146,19 @@ func printTable(cmd *cobra.Command, obj runtime.Object) (bool, error) { return false, err } - printer.Handler(backupColumns, nil, printBackup) - printer.Handler(backupColumns, nil, printBackupList) - printer.Handler(restoreColumns, nil, printRestore) - printer.Handler(restoreColumns, nil, printRestoreList) - printer.Handler(scheduleColumns, nil, printSchedule) - printer.Handler(scheduleColumns, nil, printScheduleList) - printer.Handler(resticRepoColumns, nil, printResticRepo) - printer.Handler(resticRepoColumns, nil, printResticRepoList) - printer.Handler(backupStorageLocationColumns, nil, printBackupStorageLocation) - printer.Handler(backupStorageLocationColumns, nil, printBackupStorageLocationList) - printer.Handler(volumeSnapshotLocationColumns, nil, printVolumeSnapshotLocation) - printer.Handler(volumeSnapshotLocationColumns, nil, 
printVolumeSnapshotLocationList) - printer.Handler(pluginColumns, nil, printPluginList) + printer.TableHandler(backupColumns, printBackup) + printer.TableHandler(backupColumns, printBackupList) + printer.TableHandler(restoreColumns, printRestore) + printer.TableHandler(restoreColumns, printRestoreList) + printer.TableHandler(scheduleColumns, printSchedule) + printer.TableHandler(scheduleColumns, printScheduleList) + printer.TableHandler(resticRepoColumns, printResticRepo) + printer.TableHandler(resticRepoColumns, printResticRepoList) + printer.TableHandler(backupStorageLocationColumns, printBackupStorageLocation) + printer.TableHandler(backupStorageLocationColumns, printBackupStorageLocationList) + printer.TableHandler(volumeSnapshotLocationColumns, printVolumeSnapshotLocation) + printer.TableHandler(volumeSnapshotLocationColumns, printVolumeSnapshotLocationList) + printer.TableHandler(pluginColumns, printPluginList) err = printer.PrintObj(obj, os.Stdout) if err != nil { @@ -172,15 +172,11 @@ func printTable(cmd *cobra.Command, obj runtime.Object) (bool, error) { // Velero objects. func NewPrinter(cmd *cobra.Command) (*printers.HumanReadablePrinter, error) { options := printers.PrintOptions{ - NoHeaders: flag.GetOptionalBoolFlag(cmd, "no-headers"), ShowLabels: GetShowLabelsValue(cmd), ColumnLabels: GetLabelColumnsValues(cmd), } - printer := printers.NewHumanReadablePrinter( - nil, // decoder, only needed if we want/need to convert unstructured/unknown to typed objects - options, - ) + printer := printers.NewTablePrinter(options) return printer, nil } diff --git a/pkg/cmd/util/output/plugin_printer.go b/pkg/cmd/util/output/plugin_printer.go index dfb8c54ddc8..1e3e951c6d0 100644 --- a/pkg/cmd/util/output/plugin_printer.go +++ b/pkg/cmd/util/output/plugin_printer.go @@ -17,29 +17,37 @@ limitations under the License. package output import ( - "fmt" - "io" "sort" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/printers" velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" ) var ( - pluginColumns = []string{"NAME", "KIND"} + pluginColumns = []metav1.TableColumnDefinition{ + // name needs Type and Format defined for the decorator to identify it: + // https://github.com/kubernetes/kubernetes/blob/v1.15.3/pkg/printers/tableprinter.go#L204 + {Name: "Name", Type: "string", Format: "name"}, + {Name: "Kind"}, + } ) -func printPluginList(list *velerov1api.ServerStatusRequest, w io.Writer, options printers.PrintOptions) error { +func printPluginList(list *velerov1api.ServerStatusRequest, options printers.PrintOptions) ([]metav1.TableRow, error) { plugins := list.Status.Plugins sortByKindAndName(plugins) + rows := make([]metav1.TableRow, 0, len(plugins)) + for _, plugin := range plugins { - if err := printPlugin(plugin, w, options); err != nil { - return err + r, err := printPlugin(plugin, options) + if err != nil { + return nil, err } + rows = append(rows, r...) 
} - return nil + return rows, nil } func sortByKindAndName(plugins []velerov1api.PluginInfo) { @@ -51,12 +59,10 @@ func sortByKindAndName(plugins []velerov1api.PluginInfo) { }) } -func printPlugin(plugin velerov1api.PluginInfo, w io.Writer, options printers.PrintOptions) error { - name := printers.FormatResourceName(options.Kind, plugin.Name, options.WithKind) +func printPlugin(plugin velerov1api.PluginInfo, options printers.PrintOptions) ([]metav1.TableRow, error) { + row := metav1.TableRow{} - if _, err := fmt.Fprintf(w, "%s\t%s\n", name, plugin.Kind); err != nil { - return err - } + row.Cells = append(row.Cells, plugin.Name, plugin.Kind) - return nil + return []metav1.TableRow{row}, nil } diff --git a/pkg/cmd/util/output/restic_repo_printer.go b/pkg/cmd/util/output/restic_repo_printer.go index 42efec2aab8..20388e18c69 100644 --- a/pkg/cmd/util/output/restic_repo_printer.go +++ b/pkg/cmd/util/output/restic_repo_printer.go @@ -17,34 +17,39 @@ limitations under the License. package output import ( - "fmt" - "io" - + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/kubernetes/pkg/printers" v1 "github.com/heptio/velero/pkg/apis/velero/v1" ) var ( - resticRepoColumns = []string{"NAME", "STATUS", "LAST MAINTENANCE"} + resticRepoColumns = []metav1.TableColumnDefinition{ + // name needs Type and Format defined for the decorator to identify it: + // https://github.com/kubernetes/kubernetes/blob/v1.15.3/pkg/printers/tableprinter.go#L204 + {Name: "Name", Type: "string", Format: "name"}, + {Name: "Status"}, + {Name: "Last Maintenance"}, + } ) -func printResticRepoList(list *v1.ResticRepositoryList, w io.Writer, options printers.PrintOptions) error { +func printResticRepoList(list *v1.ResticRepositoryList, options printers.PrintOptions) ([]metav1.TableRow, error) { + rows := make([]metav1.TableRow, 0, len(list.Items)) + for i := range list.Items { - if err := printResticRepo(&list.Items[i], w, options); err != nil { - return err + r, err := printResticRepo(&list.Items[i], options) + if err != nil { + return nil, err } + rows = append(rows, r...) } - return nil + return rows, nil } -func printResticRepo(repo *v1.ResticRepository, w io.Writer, options printers.PrintOptions) error { - name := printers.FormatResourceName(options.Kind, repo.Name, options.WithKind) - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", repo.Namespace); err != nil { - return err - } +func printResticRepo(repo *v1.ResticRepository, options printers.PrintOptions) ([]metav1.TableRow, error) { + row := metav1.TableRow{ + Object: runtime.RawExtension{Object: repo}, } status := repo.Status.Phase @@ -57,20 +62,11 @@ func printResticRepo(repo *v1.ResticRepository, w io.Writer, options printers.Pr lastMaintenance = "" } - if _, err := fmt.Fprintf( - w, - "%s\t%s\t%s", - name, + row.Cells = append(row.Cells, + repo.Name, status, lastMaintenance, - ); err != nil { - return err - } - - if _, err := fmt.Fprint(w, printers.AppendLabels(repo.Labels, options.ColumnLabels)); err != nil { - return err - } + ) - _, err := fmt.Fprint(w, printers.AppendAllLabels(options.ShowLabels, repo.Labels)) - return err + return []metav1.TableRow{row}, nil } diff --git a/pkg/cmd/util/output/restore_printer.go b/pkg/cmd/util/output/restore_printer.go index ff158586a3b..37a4fd1baf4 100644 --- a/pkg/cmd/util/output/restore_printer.go +++ b/pkg/cmd/util/output/restore_printer.go @@ -17,35 +17,43 @@ limitations under the License. 
package output import ( - "fmt" - "io" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/kubernetes/pkg/printers" v1 "github.com/heptio/velero/pkg/apis/velero/v1" ) var ( - restoreColumns = []string{"NAME", "BACKUP", "STATUS", "WARNINGS", "ERRORS", "CREATED", "SELECTOR"} + restoreColumns = []metav1.TableColumnDefinition{ + // name needs Type and Format defined for the decorator to identify it: + // https://github.com/kubernetes/kubernetes/blob/v1.15.3/pkg/printers/tableprinter.go#L204 + {Name: "Name", Type: "string", Format: "name"}, + {Name: "Backup"}, + {Name: "Status"}, + {Name: "Warnings"}, + {Name: "Errors"}, + {Name: "Created"}, + {Name: "Selector"}, + } ) -func printRestoreList(list *v1.RestoreList, w io.Writer, options printers.PrintOptions) error { +func printRestoreList(list *v1.RestoreList, options printers.PrintOptions) ([]metav1.TableRow, error) { + rows := make([]metav1.TableRow, 0, len(list.Items)) + for i := range list.Items { - if err := printRestore(&list.Items[i], w, options); err != nil { - return err + r, err := printRestore(&list.Items[i], options) + if err != nil { + return nil, err } + rows = append(rows, r...) } - return nil + return rows, nil } -func printRestore(restore *v1.Restore, w io.Writer, options printers.PrintOptions) error { - name := printers.FormatResourceName(options.Kind, restore.Name, options.WithKind) - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", restore.Namespace); err != nil { - return err - } +func printRestore(restore *v1.Restore, options printers.PrintOptions) ([]metav1.TableRow, error) { + row := metav1.TableRow{ + Object: runtime.RawExtension{Object: restore}, } status := restore.Status.Phase @@ -53,24 +61,15 @@ func printRestore(restore *v1.Restore, w io.Writer, options printers.PrintOption status = v1.RestorePhaseNew } - if _, err := fmt.Fprintf( - w, - "%s\t%s\t%s\t%d\t%d\t%s\t%s", - name, + row.Cells = append(row.Cells, + restore.Name, restore.Spec.BackupName, status, restore.Status.Warnings, restore.Status.Errors, restore.CreationTimestamp.Time, metav1.FormatLabelSelector(restore.Spec.LabelSelector), - ); err != nil { - return err - } - - if _, err := fmt.Fprint(w, printers.AppendLabels(restore.Labels, options.ColumnLabels)); err != nil { - return err - } + ) - _, err := fmt.Fprint(w, printers.AppendAllLabels(options.ShowLabels, restore.Labels)) - return err + return []metav1.TableRow{row}, nil } diff --git a/pkg/cmd/util/output/schedule_printer.go b/pkg/cmd/util/output/schedule_printer.go index ce516684aa0..b2514b081a0 100644 --- a/pkg/cmd/util/output/schedule_printer.go +++ b/pkg/cmd/util/output/schedule_printer.go @@ -17,35 +17,43 @@ limitations under the License. 
package output import ( - "fmt" - "io" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/kubernetes/pkg/printers" v1 "github.com/heptio/velero/pkg/apis/velero/v1" ) var ( - scheduleColumns = []string{"NAME", "STATUS", "CREATED", "SCHEDULE", "BACKUP TTL", "LAST BACKUP", "SELECTOR"} + scheduleColumns = []metav1.TableColumnDefinition{ + // name needs Type and Format defined for the decorator to identify it: + // https://github.com/kubernetes/kubernetes/blob/v1.15.3/pkg/printers/tableprinter.go#L204 + {Name: "Name", Type: "string", Format: "name"}, + {Name: "Status"}, + {Name: "Created"}, + {Name: "Schedule"}, + {Name: "Backup TTL"}, + {Name: "Last Backup"}, + {Name: "Selector"}, + } ) -func printScheduleList(list *v1.ScheduleList, w io.Writer, options printers.PrintOptions) error { +func printScheduleList(list *v1.ScheduleList, options printers.PrintOptions) ([]metav1.TableRow, error) { + rows := make([]metav1.TableRow, 0, len(list.Items)) + for i := range list.Items { - if err := printSchedule(&list.Items[i], w, options); err != nil { - return err + r, err := printSchedule(&list.Items[i], options) + if err != nil { + return nil, err } + rows = append(rows, r...) } - return nil + return rows, nil } -func printSchedule(schedule *v1.Schedule, w io.Writer, options printers.PrintOptions) error { - name := printers.FormatResourceName(options.Kind, schedule.Name, options.WithKind) - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", schedule.Namespace); err != nil { - return err - } +func printSchedule(schedule *v1.Schedule, options printers.PrintOptions) ([]metav1.TableRow, error) { + row := metav1.TableRow{ + Object: runtime.RawExtension{Object: schedule}, } status := schedule.Status.Phase @@ -53,10 +61,8 @@ func printSchedule(schedule *v1.Schedule, w io.Writer, options printers.PrintOpt status = v1.SchedulePhaseNew } - _, err := fmt.Fprintf( - w, - "%s\t%s\t%s\t%s\t%s\t%s\t%s", - name, + row.Cells = append(row.Cells, + schedule.Name, status, schedule.CreationTimestamp.Time, schedule.Spec.Schedule, @@ -65,14 +71,5 @@ func printSchedule(schedule *v1.Schedule, w io.Writer, options printers.PrintOpt metav1.FormatLabelSelector(schedule.Spec.Template.LabelSelector), ) - if err != nil { - return err - } - - if _, err := fmt.Fprint(w, printers.AppendLabels(schedule.Labels, options.ColumnLabels)); err != nil { - return err - } - - _, err = fmt.Fprint(w, printers.AppendAllLabels(options.ShowLabels, schedule.Labels)) - return err + return []metav1.TableRow{row}, nil } diff --git a/pkg/cmd/util/output/volume_snapshot_location_printer.go b/pkg/cmd/util/output/volume_snapshot_location_printer.go index 05afae8e2be..c57e202d12a 100644 --- a/pkg/cmd/util/output/volume_snapshot_location_printer.go +++ b/pkg/cmd/util/output/volume_snapshot_location_printer.go @@ -17,49 +17,44 @@ limitations under the License. 
package output import ( - "fmt" - "io" - + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/kubernetes/pkg/printers" v1 "github.com/heptio/velero/pkg/apis/velero/v1" ) var ( - volumeSnapshotLocationColumns = []string{"NAME", "PROVIDER"} + volumeSnapshotLocationColumns = []metav1.TableColumnDefinition{ + // name needs Type and Format defined for the decorator to identify it: + // https://github.com/kubernetes/kubernetes/blob/v1.15.3/pkg/printers/tableprinter.go#L204 + {Name: "Name", Type: "string", Format: "name"}, + {Name: "Provider"}, + } ) -func printVolumeSnapshotLocationList(list *v1.VolumeSnapshotLocationList, w io.Writer, options printers.PrintOptions) error { +func printVolumeSnapshotLocationList(list *v1.VolumeSnapshotLocationList, options printers.PrintOptions) ([]metav1.TableRow, error) { + rows := make([]metav1.TableRow, 0, len(list.Items)) + for i := range list.Items { - if err := printVolumeSnapshotLocation(&list.Items[i], w, options); err != nil { - return err + r, err := printVolumeSnapshotLocation(&list.Items[i], options) + if err != nil { + return nil, err } + rows = append(rows, r...) } - return nil + return rows, nil } -func printVolumeSnapshotLocation(location *v1.VolumeSnapshotLocation, w io.Writer, options printers.PrintOptions) error { - name := printers.FormatResourceName(options.Kind, location.Name, options.WithKind) - - if options.WithNamespace { - if _, err := fmt.Fprintf(w, "%s\t", location.Namespace); err != nil { - return err - } +func printVolumeSnapshotLocation(location *v1.VolumeSnapshotLocation, options printers.PrintOptions) ([]metav1.TableRow, error) { + row := metav1.TableRow{ + Object: runtime.RawExtension{Object: location}, } - if _, err := fmt.Fprintf( - w, - "%s\t%s", - name, + row.Cells = append(row.Cells, + location.Name, location.Spec.Provider, - ); err != nil { - return err - } - - if _, err := fmt.Fprint(w, printers.AppendLabels(location.Labels, options.ColumnLabels)); err != nil { - return err - } + ) - _, err := fmt.Fprint(w, printers.AppendAllLabels(options.ShowLabels, location.Labels)) - return err + return []metav1.TableRow{row}, nil } diff --git a/pkg/cmd/velero/velero.go b/pkg/cmd/velero/velero.go index 1b364b8c705..73decbbbd06 100644 --- a/pkg/cmd/velero/velero.go +++ b/pkg/cmd/velero/velero.go @@ -63,7 +63,7 @@ operations can also be performed as 'velero backup get' and 'velero schedule cre backup.NewCommand(f), schedule.NewCommand(f), restore.NewCommand(f), - server.NewCommand(), + server.NewCommand(f), version.NewCommand(f), get.NewCommand(f), install.NewCommand(f), diff --git a/pkg/controller/backup_controller.go b/pkg/controller/backup_controller.go index def1dac135a..e9b49b83c50 100644 --- a/pkg/controller/backup_controller.go +++ b/pkg/controller/backup_controller.go @@ -70,6 +70,7 @@ type backupController struct { defaultSnapshotLocations map[string]string metrics *metrics.ServerMetrics newBackupStore func(*velerov1api.BackupStorageLocation, persistence.ObjectStoreGetter, logrus.FieldLogger) (persistence.BackupStore, error) + formatFlag logging.Format } func NewBackupController( @@ -86,6 +87,7 @@ func NewBackupController( volumeSnapshotLocationInformer informers.VolumeSnapshotLocationInformer, defaultSnapshotLocations map[string]string, metrics *metrics.ServerMetrics, + formatFlag logging.Format, ) Interface { c := &backupController{ genericController: newGenericController("backup", logger), @@ -102,6 +104,7 @@ func NewBackupController( snapshotLocationLister: 
volumeSnapshotLocationInformer.Lister(), defaultSnapshotLocations: defaultSnapshotLocations, metrics: metrics, + formatFlag: formatFlag, newBackupStore: persistence.NewObjectBackupStore, } @@ -448,7 +451,7 @@ func (c *backupController) runBackup(backup *pkgbackup.Request) error { // Log the backup to both a backup log file and to stdout. This will help see what happened if the upload of the // backup log failed for whatever reason. - logger := logging.DefaultLogger(c.backupLogLevel) + logger := logging.DefaultLogger(c.backupLogLevel, c.formatFlag) logger.Out = io.MultiWriter(os.Stdout, gzippedLogFile) logCounter := logging.NewLogCounterHook() @@ -567,7 +570,6 @@ func persistBackup(backup *pkgbackup.Request, backupContents, backupLog *os.File volumeSnapshots := new(bytes.Buffer) gzw := gzip.NewWriter(volumeSnapshots) - defer gzw.Close() if err := json.NewEncoder(gzw).Encode(backup.VolumeSnapshots); err != nil { errs = append(errs, errors.Wrap(err, "error encoding list of volume snapshots")) @@ -576,14 +578,44 @@ func persistBackup(backup *pkgbackup.Request, backupContents, backupLog *os.File errs = append(errs, errors.Wrap(err, "error closing gzip writer")) } + podVolumeBackups := new(bytes.Buffer) + gzw = gzip.NewWriter(podVolumeBackups) + + if err := json.NewEncoder(gzw).Encode(backup.PodVolumeBackups); err != nil { + errs = append(errs, errors.Wrap(err, "error encoding pod volume backups")) + } + if err := gzw.Close(); err != nil { + errs = append(errs, errors.Wrap(err, "error closing gzip writer")) + } + + backupResourceList := new(bytes.Buffer) + gzw = gzip.NewWriter(backupResourceList) + + if err := json.NewEncoder(gzw).Encode(backup.BackupResourceList()); err != nil { + errs = append(errs, errors.Wrap(err, "error encoding backup resource list")) + } + if err := gzw.Close(); err != nil { + errs = append(errs, errors.Wrap(err, "error closing gzip writer")) + } + if len(errs) > 0 { // Don't upload the JSON files or backup tarball if encoding to json fails. 
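// Note that backupLog is left untouched here, so the log can still be uploaded even when
// encoding fails; the nil values below are presumably treated by the backup store as
// nothing-to-upload rather than written as empty objects.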
backupJSON = nil backupContents = nil volumeSnapshots = nil + backupResourceList = nil } - if err := backupStore.PutBackup(backup.Name, backupJSON, backupContents, backupLog, volumeSnapshots); err != nil { + backupInfo := persistence.BackupInfo{ + Name: backup.Name, + Metadata: backupJSON, + Contents: backupContents, + Log: backupLog, + PodVolumeBackups: podVolumeBackups, + VolumeSnapshots: volumeSnapshots, + BackupResourceList: backupResourceList, + } + if err := backupStore.PutBackup(backupInfo); err != nil { errs = append(errs, err) } diff --git a/pkg/controller/backup_controller_test.go b/pkg/controller/backup_controller_test.go index d032ea447aa..7a53d8d2cb1 100644 --- a/pkg/controller/backup_controller_test.go +++ b/pkg/controller/backup_controller_test.go @@ -33,9 +33,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/clock" - v1 "github.com/heptio/velero/pkg/apis/velero/v1" velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" pkgbackup "github.com/heptio/velero/pkg/backup" + "github.com/heptio/velero/pkg/builder" "github.com/heptio/velero/pkg/generated/clientset/versioned/fake" informers "github.com/heptio/velero/pkg/generated/informers/externalversions" "github.com/heptio/velero/pkg/metrics" @@ -45,7 +45,6 @@ import ( pluginmocks "github.com/heptio/velero/pkg/plugin/mocks" "github.com/heptio/velero/pkg/plugin/velero" "github.com/heptio/velero/pkg/util/logging" - velerotest "github.com/heptio/velero/pkg/util/test" ) type fakeBackupper struct { @@ -57,11 +56,15 @@ func (b *fakeBackupper) Backup(logger logrus.FieldLogger, backup *pkgbackup.Requ return args.Error(0) } +func defaultBackup() *builder.BackupBuilder { + return builder.ForBackup(velerov1api.DefaultNamespace, "backup-1") +} + func TestProcessBackupNonProcessedItems(t *testing.T) { tests := []struct { name string key string - backup *v1.Backup + backup *velerov1api.Backup }{ { name: "bad key does not return error", @@ -74,35 +77,37 @@ func TestProcessBackupNonProcessedItems(t *testing.T) { { name: "FailedValidation backup is not processed", key: "velero/backup-1", - backup: velerotest.NewTestBackup().WithName("backup-1").WithPhase(v1.BackupPhaseFailedValidation).Backup, + backup: defaultBackup().Phase(velerov1api.BackupPhaseFailedValidation).Result(), }, { name: "InProgress backup is not processed", key: "velero/backup-1", - backup: velerotest.NewTestBackup().WithName("backup-1").WithPhase(v1.BackupPhaseInProgress).Backup, + backup: defaultBackup().Phase(velerov1api.BackupPhaseInProgress).Result(), }, { name: "Completed backup is not processed", key: "velero/backup-1", - backup: velerotest.NewTestBackup().WithName("backup-1").WithPhase(v1.BackupPhaseCompleted).Backup, + backup: defaultBackup().Phase(velerov1api.BackupPhaseCompleted).Result(), }, { name: "Failed backup is not processed", key: "velero/backup-1", - backup: velerotest.NewTestBackup().WithName("backup-1").WithPhase(v1.BackupPhaseFailed).Backup, + backup: defaultBackup().Phase(velerov1api.BackupPhaseFailed).Result(), }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { + formatFlag := logging.FormatText var ( sharedInformers = informers.NewSharedInformerFactory(fake.NewSimpleClientset(), 0) - logger = logging.DefaultLogger(logrus.DebugLevel) + logger = logging.DefaultLogger(logrus.DebugLevel, formatFlag) ) c := &backupController{ genericController: newGenericController("backup-test", logger), lister: sharedInformers.Velero().V1().Backups().Lister(), + formatFlag: formatFlag, } if test.backup != nil { 
@@ -121,45 +126,46 @@ func TestProcessBackupNonProcessedItems(t *testing.T) { } func TestProcessBackupValidationFailures(t *testing.T) { - defaultBackupLocation := velerotest.NewTestBackupStorageLocation().WithName("loc-1").BackupStorageLocation + defaultBackupLocation := builder.ForBackupStorageLocation("velero", "loc-1").Result() tests := []struct { name string - backup *v1.Backup - backupLocation *v1.BackupStorageLocation + backup *velerov1api.Backup + backupLocation *velerov1api.BackupStorageLocation expectedErrs []string }{ { name: "invalid included/excluded resources fails validation", - backup: velerotest.NewTestBackup().WithName("backup-1").WithIncludedResources("foo").WithExcludedResources("foo").Backup, + backup: defaultBackup().IncludedResources("foo").ExcludedResources("foo").Result(), backupLocation: defaultBackupLocation, expectedErrs: []string{"Invalid included/excluded resource lists: excludes list cannot contain an item in the includes list: foo"}, }, { name: "invalid included/excluded namespaces fails validation", - backup: velerotest.NewTestBackup().WithName("backup-1").WithIncludedNamespaces("foo").WithExcludedNamespaces("foo").Backup, + backup: defaultBackup().IncludedNamespaces("foo").ExcludedNamespaces("foo").Result(), backupLocation: defaultBackupLocation, expectedErrs: []string{"Invalid included/excluded namespace lists: excludes list cannot contain an item in the includes list: foo"}, }, { name: "non-existent backup location fails validation", - backup: velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("nonexistent").Backup, + backup: defaultBackup().StorageLocation("nonexistent").Result(), expectedErrs: []string{"a BackupStorageLocation CRD with the name specified in the backup spec needs to be created before this backup can be executed. 
Error: backupstoragelocation.velero.io \"nonexistent\" not found"}, }, { name: "backup for read-only backup location fails validation", - backup: velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("read-only").Backup, - backupLocation: velerotest.NewTestBackupStorageLocation().WithName("read-only").WithAccessMode(velerov1api.BackupStorageLocationAccessModeReadOnly).BackupStorageLocation, + backup: defaultBackup().StorageLocation("read-only").Result(), + backupLocation: builder.ForBackupStorageLocation("velero", "read-only").AccessMode(velerov1api.BackupStorageLocationAccessModeReadOnly).Result(), expectedErrs: []string{"backup can't be created because backup storage location read-only is currently in read-only mode"}, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { + formatFlag := logging.FormatText var ( clientset = fake.NewSimpleClientset(test.backup) sharedInformers = informers.NewSharedInformerFactory(clientset, 0) - logger = logging.DefaultLogger(logrus.DebugLevel) + logger = logging.DefaultLogger(logrus.DebugLevel, formatFlag) ) c := &backupController{ @@ -170,6 +176,7 @@ func TestProcessBackupValidationFailures(t *testing.T) { snapshotLocationLister: sharedInformers.Velero().V1().VolumeSnapshotLocations().Lister(), defaultBackupLocation: defaultBackupLocation.Name, clock: &clock.RealClock{}, + formatFlag: formatFlag, } require.NotNil(t, test.backup) @@ -187,7 +194,7 @@ func TestProcessBackupValidationFailures(t *testing.T) { res, err := clientset.VeleroV1().Backups(test.backup.Namespace).Get(test.backup.Name, metav1.GetOptions{}) require.NoError(t, err) - assert.Equal(t, v1.BackupPhaseFailedValidation, res.Status.Phase) + assert.Equal(t, velerov1api.BackupPhaseFailedValidation, res.Status.Phase) assert.Equal(t, test.expectedErrs, res.Status.ValidationErrors) // Any backup that would actually proceed to processing will cause a segfault because this @@ -201,31 +208,32 @@ func TestProcessBackupValidationFailures(t *testing.T) { func TestBackupLocationLabel(t *testing.T) { tests := []struct { name string - backup *v1.Backup - backupLocation *v1.BackupStorageLocation + backup *velerov1api.Backup + backupLocation *velerov1api.BackupStorageLocation expectedBackupLocation string }{ { name: "valid backup location name should be used as a label", - backup: velerotest.NewTestBackup().WithName("backup-1").Backup, - backupLocation: velerotest.NewTestBackupStorageLocation().WithName("loc-1").BackupStorageLocation, + backup: defaultBackup().Result(), + backupLocation: builder.ForBackupStorageLocation("velero", "loc-1").Result(), expectedBackupLocation: "loc-1", }, { - name: "invalid storage location name should be handled while creating label", - backup: velerotest.NewTestBackup().WithName("backup-1").Backup, - backupLocation: velerotest.NewTestBackupStorageLocation(). 
- WithName("defaultdefaultdefaultdefaultdefaultdefaultdefaultdefaultdefaultdefault").BackupStorageLocation, + name: "invalid storage location name should be handled while creating label", + backup: defaultBackup().Result(), + backupLocation: builder.ForBackupStorageLocation("velero", "defaultdefaultdefaultdefaultdefaultdefaultdefaultdefaultdefaultdefault").Result(), expectedBackupLocation: "defaultdefaultdefaultdefaultdefaultdefaultdefaultdefaultd58343f", }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { + formatFlag := logging.FormatText + var ( clientset = fake.NewSimpleClientset(test.backup) sharedInformers = informers.NewSharedInformerFactory(clientset, 0) - logger = logging.DefaultLogger(logrus.DebugLevel) + logger = logging.DefaultLogger(logrus.DebugLevel, formatFlag) ) c := &backupController{ @@ -236,6 +244,7 @@ func TestBackupLocationLabel(t *testing.T) { snapshotLocationLister: sharedInformers.Velero().V1().VolumeSnapshotLocations().Lister(), defaultBackupLocation: test.backupLocation.Name, clock: &clock.RealClock{}, + formatFlag: formatFlag, } res := c.prepareBackupRequest(test.backup) @@ -246,7 +255,6 @@ func TestBackupLocationLabel(t *testing.T) { } func TestDefaultBackupTTL(t *testing.T) { - var ( defaultBackupTTL = metav1.Duration{Duration: 24 * 30 * time.Hour} ) @@ -257,29 +265,30 @@ func TestDefaultBackupTTL(t *testing.T) { tests := []struct { name string - backup *v1.Backup - backupLocation *v1.BackupStorageLocation + backup *velerov1api.Backup + backupLocation *velerov1api.BackupStorageLocation expectedTTL metav1.Duration expectedExpiration metav1.Time }{ { name: "backup with no TTL specified", - backup: velerotest.NewTestBackup().WithName("backup-1").Backup, + backup: defaultBackup().Result(), expectedTTL: defaultBackupTTL, expectedExpiration: metav1.NewTime(now.Add(defaultBackupTTL.Duration)), }, { name: "backup with TTL specified", - backup: velerotest.NewTestBackup().WithName("backup-1").WithTTL(1 * time.Hour).Backup, + backup: defaultBackup().TTL(time.Hour).Result(), expectedTTL: metav1.Duration{Duration: 1 * time.Hour}, expectedExpiration: metav1.NewTime(now.Add(1 * time.Hour)), }, } for _, test := range tests { + formatFlag := logging.FormatText var ( clientset = fake.NewSimpleClientset(test.backup) - logger = logging.DefaultLogger(logrus.DebugLevel) + logger = logging.DefaultLogger(logrus.DebugLevel, formatFlag) sharedInformers = informers.NewSharedInformerFactory(clientset, 0) ) @@ -290,6 +299,7 @@ func TestDefaultBackupTTL(t *testing.T) { snapshotLocationLister: sharedInformers.Velero().V1().VolumeSnapshotLocations().Lister(), defaultBackupTTL: defaultBackupTTL.Duration, clock: clock.NewFakeClock(now), + formatFlag: formatFlag, } res := c.prepareBackupRequest(test.backup) @@ -301,7 +311,7 @@ func TestDefaultBackupTTL(t *testing.T) { } func TestProcessBackupCompletions(t *testing.T) { - defaultBackupLocation := velerotest.NewTestBackupStorageLocation().WithName("loc-1").WithObjectStorage("store-1").BackupStorageLocation + defaultBackupLocation := builder.ForBackupStorageLocation("velero", "loc-1").Bucket("store-1").Result() now, err := time.Parse(time.RFC1123Z, time.RFC1123Z) require.NoError(t, err) @@ -309,30 +319,34 @@ func TestProcessBackupCompletions(t *testing.T) { tests := []struct { name string - backup *v1.Backup - backupLocation *v1.BackupStorageLocation - expectedResult *v1.Backup + backup *velerov1api.Backup + backupLocation *velerov1api.BackupStorageLocation + expectedResult *velerov1api.Backup backupExists bool 
existenceCheckError error }{ // Completed { name: "backup with no backup location gets the default", - backup: velerotest.NewTestBackup().WithName("backup-1").Backup, + backup: defaultBackup().Result(), backupLocation: defaultBackupLocation, - expectedResult: &v1.Backup{ + expectedResult: &velerov1api.Backup{ + TypeMeta: metav1.TypeMeta{ + Kind: "Backup", + APIVersion: "velero.io/v1", + }, ObjectMeta: metav1.ObjectMeta{ - Namespace: v1.DefaultNamespace, + Namespace: velerov1api.DefaultNamespace, Name: "backup-1", Labels: map[string]string{ "velero.io/storage-location": "loc-1", }, }, - Spec: v1.BackupSpec{ + Spec: velerov1api.BackupSpec{ StorageLocation: defaultBackupLocation.Name, }, - Status: v1.BackupStatus{ - Phase: v1.BackupPhaseCompleted, + Status: velerov1api.BackupStatus{ + Phase: velerov1api.BackupPhaseCompleted, Version: 1, StartTimestamp: metav1.NewTime(now), CompletionTimestamp: metav1.NewTime(now), @@ -342,21 +356,25 @@ func TestProcessBackupCompletions(t *testing.T) { }, { name: "backup with a specific backup location keeps it", - backup: velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("alt-loc").Backup, - backupLocation: velerotest.NewTestBackupStorageLocation().WithName("alt-loc").WithObjectStorage("store-1").BackupStorageLocation, - expectedResult: &v1.Backup{ + backup: defaultBackup().StorageLocation("alt-loc").Result(), + backupLocation: builder.ForBackupStorageLocation("velero", "alt-loc").Bucket("store-1").Result(), + expectedResult: &velerov1api.Backup{ + TypeMeta: metav1.TypeMeta{ + Kind: "Backup", + APIVersion: "velero.io/v1", + }, ObjectMeta: metav1.ObjectMeta{ - Namespace: v1.DefaultNamespace, + Namespace: velerov1api.DefaultNamespace, Name: "backup-1", Labels: map[string]string{ "velero.io/storage-location": "alt-loc", }, }, - Spec: v1.BackupSpec{ + Spec: velerov1api.BackupSpec{ StorageLocation: "alt-loc", }, - Status: v1.BackupStatus{ - Phase: v1.BackupPhaseCompleted, + Status: velerov1api.BackupStatus{ + Phase: velerov1api.BackupPhaseCompleted, Version: 1, StartTimestamp: metav1.NewTime(now), CompletionTimestamp: metav1.NewTime(now), @@ -366,25 +384,28 @@ func TestProcessBackupCompletions(t *testing.T) { }, { name: "backup for a location with ReadWrite access mode gets processed", - backup: velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("read-write").Backup, - backupLocation: velerotest.NewTestBackupStorageLocation(). - WithName("read-write"). - WithObjectStorage("store-1"). - WithAccessMode(v1.BackupStorageLocationAccessModeReadWrite). - BackupStorageLocation, - expectedResult: &v1.Backup{ + backup: defaultBackup().StorageLocation("read-write").Result(), + backupLocation: builder.ForBackupStorageLocation("velero", "read-write"). + Bucket("store-1"). + AccessMode(velerov1api.BackupStorageLocationAccessModeReadWrite). 
+ Result(), + expectedResult: &velerov1api.Backup{ + TypeMeta: metav1.TypeMeta{ + Kind: "Backup", + APIVersion: "velero.io/v1", + }, ObjectMeta: metav1.ObjectMeta{ - Namespace: v1.DefaultNamespace, + Namespace: velerov1api.DefaultNamespace, Name: "backup-1", Labels: map[string]string{ "velero.io/storage-location": "read-write", }, }, - Spec: v1.BackupSpec{ + Spec: velerov1api.BackupSpec{ StorageLocation: "read-write", }, - Status: v1.BackupStatus{ - Phase: v1.BackupPhaseCompleted, + Status: velerov1api.BackupStatus{ + Phase: velerov1api.BackupPhaseCompleted, Version: 1, StartTimestamp: metav1.NewTime(now), CompletionTimestamp: metav1.NewTime(now), @@ -394,22 +415,26 @@ func TestProcessBackupCompletions(t *testing.T) { }, { name: "backup with a TTL has expiration set", - backup: velerotest.NewTestBackup().WithName("backup-1").WithTTL(10 * time.Minute).Backup, + backup: defaultBackup().TTL(10 * time.Minute).Result(), backupLocation: defaultBackupLocation, - expectedResult: &v1.Backup{ + expectedResult: &velerov1api.Backup{ + TypeMeta: metav1.TypeMeta{ + Kind: "Backup", + APIVersion: "velero.io/v1", + }, ObjectMeta: metav1.ObjectMeta{ - Namespace: v1.DefaultNamespace, + Namespace: velerov1api.DefaultNamespace, Name: "backup-1", Labels: map[string]string{ "velero.io/storage-location": "loc-1", }, }, - Spec: v1.BackupSpec{ + Spec: velerov1api.BackupSpec{ TTL: metav1.Duration{Duration: 10 * time.Minute}, StorageLocation: defaultBackupLocation.Name, }, - Status: v1.BackupStatus{ - Phase: v1.BackupPhaseCompleted, + Status: velerov1api.BackupStatus{ + Phase: velerov1api.BackupPhaseCompleted, Version: 1, Expiration: metav1.NewTime(now.Add(10 * time.Minute)), StartTimestamp: metav1.NewTime(now), @@ -420,21 +445,25 @@ func TestProcessBackupCompletions(t *testing.T) { { name: "backup without an existing backup will succeed", backupExists: false, - backup: velerotest.NewTestBackup().WithName("backup-1").Backup, + backup: defaultBackup().Result(), backupLocation: defaultBackupLocation, - expectedResult: &v1.Backup{ + expectedResult: &velerov1api.Backup{ + TypeMeta: metav1.TypeMeta{ + Kind: "Backup", + APIVersion: "velero.io/v1", + }, ObjectMeta: metav1.ObjectMeta{ - Namespace: v1.DefaultNamespace, + Namespace: velerov1api.DefaultNamespace, Name: "backup-1", Labels: map[string]string{ "velero.io/storage-location": "loc-1", }, }, - Spec: v1.BackupSpec{ + Spec: velerov1api.BackupSpec{ StorageLocation: defaultBackupLocation.Name, }, - Status: v1.BackupStatus{ - Phase: v1.BackupPhaseCompleted, + Status: velerov1api.BackupStatus{ + Phase: velerov1api.BackupPhaseCompleted, Version: 1, StartTimestamp: metav1.NewTime(now), CompletionTimestamp: metav1.NewTime(now), @@ -447,21 +476,25 @@ func TestProcessBackupCompletions(t *testing.T) { { name: "backup with existing backup will fail", backupExists: true, - backup: velerotest.NewTestBackup().WithName("backup-1").Backup, + backup: defaultBackup().Result(), backupLocation: defaultBackupLocation, - expectedResult: &v1.Backup{ + expectedResult: &velerov1api.Backup{ + TypeMeta: metav1.TypeMeta{ + Kind: "Backup", + APIVersion: "velero.io/v1", + }, ObjectMeta: metav1.ObjectMeta{ - Namespace: v1.DefaultNamespace, + Namespace: velerov1api.DefaultNamespace, Name: "backup-1", Labels: map[string]string{ "velero.io/storage-location": "loc-1", }, }, - Spec: v1.BackupSpec{ + Spec: velerov1api.BackupSpec{ StorageLocation: defaultBackupLocation.Name, }, - Status: v1.BackupStatus{ - Phase: v1.BackupPhaseFailed, + Status: velerov1api.BackupStatus{ + Phase: 
velerov1api.BackupPhaseFailed, Version: 1, StartTimestamp: metav1.NewTime(now), CompletionTimestamp: metav1.NewTime(now), @@ -471,22 +504,26 @@ func TestProcessBackupCompletions(t *testing.T) { }, { name: "error when checking if backup exists will cause backup to fail", - backup: velerotest.NewTestBackup().WithName("backup-1").Backup, + backup: defaultBackup().Result(), existenceCheckError: errors.New("Backup already exists in object storage"), backupLocation: defaultBackupLocation, - expectedResult: &v1.Backup{ + expectedResult: &velerov1api.Backup{ + TypeMeta: metav1.TypeMeta{ + Kind: "Backup", + APIVersion: "velero.io/v1", + }, ObjectMeta: metav1.ObjectMeta{ - Namespace: v1.DefaultNamespace, + Namespace: velerov1api.DefaultNamespace, Name: "backup-1", Labels: map[string]string{ "velero.io/storage-location": "loc-1", }, }, - Spec: v1.BackupSpec{ + Spec: velerov1api.BackupSpec{ StorageLocation: defaultBackupLocation.Name, }, - Status: v1.BackupStatus{ - Phase: v1.BackupPhaseFailed, + Status: velerov1api.BackupStatus{ + Phase: velerov1api.BackupPhaseFailed, Version: 1, StartTimestamp: metav1.NewTime(now), CompletionTimestamp: metav1.NewTime(now), @@ -498,10 +535,11 @@ func TestProcessBackupCompletions(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + formatFlag := logging.FormatText var ( clientset = fake.NewSimpleClientset(test.backup) sharedInformers = informers.NewSharedInformerFactory(clientset, 0) - logger = logging.DefaultLogger(logrus.DebugLevel) + logger = logging.DefaultLogger(logrus.DebugLevel, formatFlag) pluginManager = new(pluginmocks.Manager) backupStore = new(persistencemocks.BackupStore) backupper = new(fakeBackupper) @@ -518,24 +556,27 @@ func TestProcessBackupCompletions(t *testing.T) { metrics: metrics.NewServerMetrics(), clock: clock.NewFakeClock(now), newPluginManager: func(logrus.FieldLogger) clientmgmt.Manager { return pluginManager }, - newBackupStore: func(*v1.BackupStorageLocation, persistence.ObjectStoreGetter, logrus.FieldLogger) (persistence.BackupStore, error) { + newBackupStore: func(*velerov1api.BackupStorageLocation, persistence.ObjectStoreGetter, logrus.FieldLogger) (persistence.BackupStore, error) { return backupStore, nil }, - backupper: backupper, + backupper: backupper, + formatFlag: formatFlag, } pluginManager.On("GetBackupItemActions").Return(nil, nil) pluginManager.On("CleanupClients").Return(nil) - backupper.On("Backup", mock.Anything, mock.Anything, mock.Anything, []velero.BackupItemAction(nil), pluginManager).Return(nil) + backupStore.On("BackupExists", test.backupLocation.Spec.StorageType.ObjectStorage.Bucket, test.backup.Name).Return(test.backupExists, test.existenceCheckError) - // Ensure we have a CompletionTimestamp when uploading. + // Ensure we have a CompletionTimestamp when uploading and that the backup name matches the backup in the object store. // Failures will display the bytes in buf. 
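// mock.MatchedBy wraps a one-argument func returning bool (here func(persistence.BackupInfo) bool);
// the PutBackup expectation below only matches calls for which that func returns true, so both the
// backup name and the metadata contents get verified.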
- completionTimestampIsPresent := func(buf *bytes.Buffer) bool { - return strings.Contains(buf.String(), `"completionTimestamp": "2006-01-02T22:04:05Z"`) + hasNameAndCompletionTimestamp := func(info persistence.BackupInfo) bool { + buf := new(bytes.Buffer) + buf.ReadFrom(info.Metadata) + return info.Name == test.backup.Name && + strings.Contains(buf.String(), `"completionTimestamp": "2006-01-02T22:04:05Z"`) } - backupStore.On("BackupExists", test.backupLocation.Spec.StorageType.ObjectStorage.Bucket, test.backup.Name).Return(test.backupExists, test.existenceCheckError) - backupStore.On("PutBackup", test.backup.Name, mock.MatchedBy(completionTimestampIsPresent), mock.Anything, mock.Anything, mock.Anything).Return(nil) + backupStore.On("PutBackup", mock.MatchedBy(hasNameAndCompletionTimestamp)).Return(nil) // add the test's backup to the informer/lister store require.NotNil(t, test.backup) @@ -569,8 +610,8 @@ func TestProcessBackupCompletions(t *testing.T) { func TestValidateAndGetSnapshotLocations(t *testing.T) { tests := []struct { name string - backup *velerotest.TestBackup - locations []*velerotest.TestVolumeSnapshotLocation + backup *velerov1api.Backup + locations []*velerov1api.VolumeSnapshotLocation defaultLocations map[string]string expectedVolumeSnapshotLocationNames []string // adding these in the expected order will allow to test with better msgs in case of a test failure expectedErrors string @@ -578,76 +619,76 @@ func TestValidateAndGetSnapshotLocations(t *testing.T) { }{ { name: "location name does not correspond to any existing location", - backup: velerotest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithVolumeSnapshotLocations("random-name"), - locations: []*velerotest.TestVolumeSnapshotLocation{ - velerotest.NewTestVolumeSnapshotLocation().WithProvider("aws").WithName("aws-us-east-1"), - velerotest.NewTestVolumeSnapshotLocation().WithProvider("aws").WithName("aws-us-west-1"), - velerotest.NewTestVolumeSnapshotLocation().WithProvider("fake-provider").WithName("some-name"), + backup: defaultBackup().Phase(velerov1api.BackupPhaseNew).VolumeSnapshotLocations("random-name").Result(), + locations: []*velerov1api.VolumeSnapshotLocation{ + builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "aws-us-east-1").Provider("aws").Result(), + builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "aws-us-west-1").Provider("aws").Result(), + builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "some-name").Provider("fake-provider").Result(), }, expectedErrors: "a VolumeSnapshotLocation CRD for the location random-name with the name specified in the backup spec needs to be created before this snapshot can be executed. 
Error: volumesnapshotlocation.velero.io \"random-name\" not found", expectedSuccess: false, }, { name: "duplicate locationName per provider: should filter out dups", - backup: velerotest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithVolumeSnapshotLocations("aws-us-west-1", "aws-us-west-1"), - locations: []*velerotest.TestVolumeSnapshotLocation{ - velerotest.NewTestVolumeSnapshotLocation().WithProvider("aws").WithName("aws-us-east-1"), - velerotest.NewTestVolumeSnapshotLocation().WithProvider("aws").WithName("aws-us-west-1"), + backup: defaultBackup().Phase(velerov1api.BackupPhaseNew).VolumeSnapshotLocations("aws-us-west-1", "aws-us-west-1").Result(), + locations: []*velerov1api.VolumeSnapshotLocation{ + builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "aws-us-east-1").Provider("aws").Result(), + builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "aws-us-west-1").Provider("aws").Result(), }, expectedVolumeSnapshotLocationNames: []string{"aws-us-west-1"}, expectedSuccess: true, }, { name: "multiple non-dupe location names per provider should error", - backup: velerotest.NewTestBackup().WithName("backup1").WithVolumeSnapshotLocations("aws-us-east-1", "aws-us-west-1"), - locations: []*velerotest.TestVolumeSnapshotLocation{ - velerotest.NewTestVolumeSnapshotLocation().WithProvider("aws").WithName("aws-us-east-1"), - velerotest.NewTestVolumeSnapshotLocation().WithProvider("aws").WithName("aws-us-west-1"), - velerotest.NewTestVolumeSnapshotLocation().WithProvider("fake-provider").WithName("some-name"), + backup: defaultBackup().Phase(velerov1api.BackupPhaseNew).VolumeSnapshotLocations("aws-us-east-1", "aws-us-west-1").Result(), + locations: []*velerov1api.VolumeSnapshotLocation{ + builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "aws-us-east-1").Provider("aws").Result(), + builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "aws-us-west-1").Provider("aws").Result(), + builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "some-name").Provider("fake-provider").Result(), }, expectedErrors: "more than one VolumeSnapshotLocation name specified for provider aws: aws-us-west-1; unexpected name was aws-us-east-1", expectedSuccess: false, }, { name: "no location name for the provider exists, only one VSL for the provider: use it", - backup: velerotest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew), - locations: []*velerotest.TestVolumeSnapshotLocation{ - velerotest.NewTestVolumeSnapshotLocation().WithProvider("aws").WithName("aws-us-east-1"), + backup: defaultBackup().Phase(velerov1api.BackupPhaseNew).Result(), + locations: []*velerov1api.VolumeSnapshotLocation{ + builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "aws-us-east-1").Provider("aws").Result(), }, expectedVolumeSnapshotLocationNames: []string{"aws-us-east-1"}, expectedSuccess: true, }, { name: "no location name for the provider exists, no default, more than one VSL for the provider: error", - backup: velerotest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew), - locations: []*velerotest.TestVolumeSnapshotLocation{ - velerotest.NewTestVolumeSnapshotLocation().WithProvider("aws").WithName("aws-us-east-1"), - velerotest.NewTestVolumeSnapshotLocation().WithProvider("aws").WithName("aws-us-west-1"), + backup: defaultBackup().Phase(velerov1api.BackupPhaseNew).Result(), + locations: []*velerov1api.VolumeSnapshotLocation{ + builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, 
"aws-us-east-1").Provider("aws").Result(), + builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "aws-us-west-1").Provider("aws").Result(), }, expectedErrors: "provider aws has more than one possible volume snapshot location, and none were specified explicitly or as a default", }, { name: "no location name for the provider exists, more than one VSL for the provider: the provider's default should be added", - backup: velerotest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew), + backup: defaultBackup().Phase(velerov1api.BackupPhaseNew).Result(), defaultLocations: map[string]string{"aws": "aws-us-east-1"}, - locations: []*velerotest.TestVolumeSnapshotLocation{ - velerotest.NewTestVolumeSnapshotLocation().WithName("aws-us-east-1").WithProvider("aws"), - velerotest.NewTestVolumeSnapshotLocation().WithName("aws-us-west-1").WithProvider("aws"), + locations: []*velerov1api.VolumeSnapshotLocation{ + builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "aws-us-east-1").Provider("aws").Result(), + builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "aws-us-west-1").Provider("aws").Result(), }, expectedVolumeSnapshotLocationNames: []string{"aws-us-east-1"}, expectedSuccess: true, }, { name: "no existing location name and no default location name given", - backup: velerotest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew), + backup: defaultBackup().Phase(velerov1api.BackupPhaseNew).Result(), expectedSuccess: true, }, { name: "multiple location names for a provider, default location name for another provider", - backup: velerotest.NewTestBackup().WithName("backup1").WithVolumeSnapshotLocations("aws-us-west-1", "aws-us-west-1"), + backup: defaultBackup().Phase(velerov1api.BackupPhaseNew).VolumeSnapshotLocations("aws-us-west-1", "aws-us-west-1").Result(), defaultLocations: map[string]string{"fake-provider": "some-name"}, - locations: []*velerotest.TestVolumeSnapshotLocation{ - velerotest.NewTestVolumeSnapshotLocation().WithProvider("aws").WithName("aws-us-west-1"), - velerotest.NewTestVolumeSnapshotLocation().WithProvider("fake-provider").WithName("some-name"), + locations: []*velerov1api.VolumeSnapshotLocation{ + builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "aws-us-west-1").Provider("aws").Result(), + builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "some-name").Provider("fake-provider").Result(), }, expectedVolumeSnapshotLocationNames: []string{"aws-us-west-1", "some-name"}, expectedSuccess: true, @@ -670,7 +711,7 @@ func TestValidateAndGetSnapshotLocations(t *testing.T) { backup := test.backup.DeepCopy() backup.Spec.VolumeSnapshotLocations = test.backup.Spec.VolumeSnapshotLocations for _, location := range test.locations { - require.NoError(t, sharedInformers.Velero().V1().VolumeSnapshotLocations().Informer().GetStore().Add(location.VolumeSnapshotLocation)) + require.NoError(t, sharedInformers.Velero().V1().VolumeSnapshotLocations().Informer().GetStore().Add(location)) } providerLocations, errs := c.validateAndGetSnapshotLocations(backup) diff --git a/pkg/controller/backup_deletion_controller_test.go b/pkg/controller/backup_deletion_controller_test.go index 12eb9a8b84b..b888776ac15 100644 --- a/pkg/controller/backup_deletion_controller_test.go +++ b/pkg/controller/backup_deletion_controller_test.go @@ -34,6 +34,7 @@ import ( v1 "github.com/heptio/velero/pkg/apis/velero/v1" pkgbackup "github.com/heptio/velero/pkg/backup" + "github.com/heptio/velero/pkg/builder" 
"github.com/heptio/velero/pkg/generated/clientset/versioned/fake" informers "github.com/heptio/velero/pkg/generated/informers/externalversions" "github.com/heptio/velero/pkg/metrics" @@ -41,7 +42,7 @@ import ( persistencemocks "github.com/heptio/velero/pkg/persistence/mocks" "github.com/heptio/velero/pkg/plugin/clientmgmt" pluginmocks "github.com/heptio/velero/pkg/plugin/mocks" - velerotest "github.com/heptio/velero/pkg/util/test" + velerotest "github.com/heptio/velero/pkg/test" "github.com/heptio/velero/pkg/volume" ) @@ -266,8 +267,8 @@ func TestBackupDeletionControllerProcessRequest(t *testing.T) { }) t.Run("patching to InProgress fails", func(t *testing.T) { - backup := velerotest.NewTestBackup().WithName("foo").WithStorageLocation("default").Backup - location := velerotest.NewTestBackupStorageLocation().WithName("default").BackupStorageLocation + backup := builder.ForBackup(v1.DefaultNamespace, "foo").StorageLocation("default").Result() + location := builder.ForBackupStorageLocation("velero", "default").Result() td := setupBackupDeletionControllerTest(backup) @@ -298,8 +299,8 @@ func TestBackupDeletionControllerProcessRequest(t *testing.T) { }) t.Run("patching backup to Deleting fails", func(t *testing.T) { - backup := velerotest.NewTestBackup().WithName("foo").WithStorageLocation("default").Backup - location := velerotest.NewTestBackupStorageLocation().WithName("default").BackupStorageLocation + backup := builder.ForBackup(v1.DefaultNamespace, "foo").StorageLocation("default").Result() + location := builder.ForBackupStorageLocation("velero", "default").Result() td := setupBackupDeletionControllerTest(backup) @@ -364,7 +365,7 @@ func TestBackupDeletionControllerProcessRequest(t *testing.T) { }) t.Run("unable to find backup storage location", func(t *testing.T) { - backup := velerotest.NewTestBackup().WithName("foo").WithStorageLocation("default").Backup + backup := builder.ForBackup(v1.DefaultNamespace, "foo").StorageLocation("default").Result() td := setupBackupDeletionControllerTest(backup) @@ -390,8 +391,8 @@ func TestBackupDeletionControllerProcessRequest(t *testing.T) { }) t.Run("backup storage location is in read-only mode", func(t *testing.T) { - backup := velerotest.NewTestBackup().WithName("foo").WithStorageLocation("default").Backup - location := velerotest.NewTestBackupStorageLocation().WithName("default").WithAccessMode(v1.BackupStorageLocationAccessModeReadOnly).BackupStorageLocation + backup := builder.ForBackup(v1.DefaultNamespace, "foo").StorageLocation("default").Result() + location := builder.ForBackupStorageLocation("velero", "default").AccessMode(v1.BackupStorageLocationAccessModeReadOnly).Result() td := setupBackupDeletionControllerTest(backup) @@ -419,13 +420,13 @@ func TestBackupDeletionControllerProcessRequest(t *testing.T) { }) t.Run("full delete, no errors", func(t *testing.T) { - backup := velerotest.NewTestBackup().WithName("foo").Backup + backup := builder.ForBackup(v1.DefaultNamespace, "foo").Result() backup.UID = "uid" backup.Spec.StorageLocation = "primary" - restore1 := velerotest.NewTestRestore("velero", "restore-1", v1.RestorePhaseCompleted).WithBackup("foo").Restore - restore2 := velerotest.NewTestRestore("velero", "restore-2", v1.RestorePhaseCompleted).WithBackup("foo").Restore - restore3 := velerotest.NewTestRestore("velero", "restore-3", v1.RestorePhaseCompleted).WithBackup("some-other-backup").Restore + restore1 := builder.ForRestore("velero", "restore-1").Phase(v1.RestorePhaseCompleted).Backup("foo").Result() + restore2 := 
builder.ForRestore("velero", "restore-2").Phase(v1.RestorePhaseCompleted).Backup("foo").Result() + restore3 := builder.ForRestore("velero", "restore-3").Phase(v1.RestorePhaseCompleted).Backup("some-other-backup").Result() td := setupBackupDeletionControllerTest(backup, restore1, restore2, restore3) @@ -565,16 +566,26 @@ func TestBackupDeletionControllerProcessRequest(t *testing.T) { }) t.Run("full delete, no errors, with backup name greater than 63 chars", func(t *testing.T) { - backup := velerotest.NewTestBackup().WithName("the-really-long-backup-name-that-is-much-more-than-63-characters").Backup + backup := defaultBackup(). + ObjectMeta( + builder.WithName("the-really-long-backup-name-that-is-much-more-than-63-characters"), + ). + Result() backup.UID = "uid" backup.Spec.StorageLocation = "primary" - restore1 := velerotest.NewTestRestore("velero", "restore-1", v1.RestorePhaseCompleted). - WithBackup("the-really-long-backup-name-that-is-much-more-than-63-characters").Restore - restore2 := velerotest.NewTestRestore("velero", "restore-2", v1.RestorePhaseCompleted). - WithBackup("the-really-long-backup-name-that-is-much-more-than-63-characters").Restore - restore3 := velerotest.NewTestRestore("velero", "restore-3", v1.RestorePhaseCompleted). - WithBackup("some-other-backup").Restore + restore1 := builder.ForRestore("velero", "restore-1"). + Phase(v1.RestorePhaseCompleted). + Backup("the-really-long-backup-name-that-is-much-more-than-63-characters"). + Result() + restore2 := builder.ForRestore("velero", "restore-2"). + Phase(v1.RestorePhaseCompleted). + Backup("the-really-long-backup-name-that-is-much-more-than-63-characters"). + Result() + restore3 := builder.ForRestore("velero", "restore-3"). + Phase(v1.RestorePhaseCompleted). + Backup("some-other-backup"). 
+ Result() td := setupBackupDeletionControllerTest(backup, restore1, restore2, restore3) td.req = pkgbackup.NewDeleteBackupRequest(backup.Name, string(backup.UID)) diff --git a/pkg/controller/backup_sync_controller.go b/pkg/controller/backup_sync_controller.go index b2bdd1adfdf..b67cc7d00ef 100644 --- a/pkg/controller/backup_sync_controller.go +++ b/pkg/controller/backup_sync_controller.go @@ -43,8 +43,10 @@ type backupSyncController struct { backupClient velerov1client.BackupsGetter backupLocationClient velerov1client.BackupStorageLocationsGetter + podVolumeBackupClient velerov1client.PodVolumeBackupsGetter backupLister listers.BackupLister backupStorageLocationLister listers.BackupStorageLocationLister + podVolumeBackupLister listers.PodVolumeBackupLister namespace string defaultBackupLocation string newPluginManager func(logrus.FieldLogger) clientmgmt.Manager @@ -54,8 +56,10 @@ type backupSyncController struct { func NewBackupSyncController( backupClient velerov1client.BackupsGetter, backupLocationClient velerov1client.BackupStorageLocationsGetter, + podVolumeBackupClient velerov1client.PodVolumeBackupsGetter, backupInformer informers.BackupInformer, backupStorageLocationInformer informers.BackupStorageLocationInformer, + podVolumeBackupInformer informers.PodVolumeBackupInformer, syncPeriod time.Duration, namespace string, defaultBackupLocation string, @@ -71,10 +75,12 @@ func NewBackupSyncController( genericController: newGenericController("backup-sync", logger), backupClient: backupClient, backupLocationClient: backupLocationClient, + podVolumeBackupClient: podVolumeBackupClient, namespace: namespace, defaultBackupLocation: defaultBackupLocation, backupLister: backupInformer.Lister(), backupStorageLocationLister: backupStorageLocationInformer.Lister(), + podVolumeBackupLister: podVolumeBackupInformer.Lister(), // use variables to refer to these functions so they can be // replaced with fakes for testing. @@ -159,7 +165,7 @@ func (c *backupSyncController) run() { backupStore, err := c.newBackupStore(location, pluginManager, log) if err != nil { - log.WithError(err).Error("Error getting backup store for location") + log.WithError(err).Error("Error getting backup store for this location") continue } @@ -167,7 +173,7 @@ func (c *backupSyncController) run() { if !ok { continue } - log.Infof("Syncing contents of backup store into cluster") + log.Info("Syncing contents of backup store into cluster") res, err := backupStore.ListBackups() if err != nil { @@ -179,26 +185,16 @@ func (c *backupSyncController) run() { for backupName := range backupStoreBackups { log = log.WithField("backup", backupName) - log.Debug("Checking backup store backup to see if it needs to be synced into the cluster") + log.Debug("Checking this backup to see if it needs to be synced into the cluster") // use the controller's namespace when getting the backup because that's where we // are syncing backups to, regardless of the namespace of the cloud backup. 
backup, err := c.backupClient.Backups(c.namespace).Get(backupName, metav1.GetOptions{}) if err == nil { log.Debug("Backup already exists in cluster") - - if backup.Spec.StorageLocation != "" { - continue - } - - // pre-v0.10 backups won't initially have a .spec.storageLocation so fill it in - log.Debug("Patching backup's .spec.storageLocation because it's missing") - if err := patchStorageLocation(backup, c.backupClient.Backups(c.namespace), location.Name); err != nil { - log.WithError(err).Error("Error patching backup's .spec.storageLocation") - } - continue } + if !kuberrs.IsNotFound(err) { log.WithError(errors.WithStack(err)).Error("Error getting backup from client, proceeding with sync into cluster") } @@ -220,8 +216,8 @@ func (c *backupSyncController) run() { backup.Labels = make(map[string]string) } backup.Labels[velerov1api.StorageLocationLabel] = label.GetValidName(backup.Spec.StorageLocation) - - _, err = c.backupClient.Backups(backup.Namespace).Create(backup) + // process the regular velero backup + backup, err = c.backupClient.Backups(backup.Namespace).Create(backup) switch { case err != nil && kuberrs.IsAlreadyExists(err): log.Debug("Backup already exists in cluster") @@ -232,6 +228,44 @@ func (c *backupSyncController) run() { default: log.Debug("Synced backup into cluster") } + + // process the pod volume backups from object store, if any + podVolumeBackups, err := backupStore.GetPodVolumeBackups(backupName) + if err != nil { + log.WithError(errors.WithStack(err)).Error("Error getting pod volume backups for this backup from backup store") + continue + } + + for _, podVolumeBackup := range podVolumeBackups { + log = log.WithField("podVolumeBackup", podVolumeBackup.Name) + log.Debug("Checking this pod volume backup to see if it needs to be synced into the cluster") + + for i, ownerRef := range podVolumeBackup.OwnerReferences { + if ownerRef.APIVersion == velerov1api.SchemeGroupVersion.String() && ownerRef.Kind == "Backup" && ownerRef.Name == backup.Name { + log.WithField("uid", backup.UID).Debugf("Updating pod volume backup's owner reference UID") + podVolumeBackup.OwnerReferences[i].UID = backup.UID + } + } + + if _, ok := podVolumeBackup.Labels[velerov1api.BackupUIDLabel]; ok { + podVolumeBackup.Labels[velerov1api.BackupUIDLabel] = string(backup.UID) + } + + podVolumeBackup.Namespace = backup.Namespace + podVolumeBackup.ResourceVersion = "" + + _, err = c.podVolumeBackupClient.PodVolumeBackups(backup.Namespace).Create(podVolumeBackup) + switch { + case err != nil && kuberrs.IsAlreadyExists(err): + log.Debug("Pod volume backup already exists in cluster") + continue + case err != nil && !kuberrs.IsAlreadyExists(err): + log.WithError(errors.WithStack(err)).Error("Error syncing pod volume backup into cluster") + continue + default: + log.Debug("Synced pod volume backup into cluster") + } + } } c.deleteOrphanedBackups(location.Name, backupStoreBackups, log) @@ -280,9 +314,9 @@ func patchStorageLocation(backup *velerov1api.Backup, client velerov1client.Back return nil } -// deleteOrphanedBackups deletes backup objects from Kubernetes that have the specified location +// deleteOrphanedBackups deletes backup objects (CRDs) from Kubernetes that have the specified location // and a phase of Completed, but no corresponding backup in object storage. 
-func (c *backupSyncController) deleteOrphanedBackups(locationName string, cloudBackupNames sets.String, log logrus.FieldLogger) { +func (c *backupSyncController) deleteOrphanedBackups(locationName string, backupStoreBackups sets.String, log logrus.FieldLogger) { locationSelector := labels.Set(map[string]string{ velerov1api.StorageLocationLabel: label.GetValidName(locationName), }).AsSelector() @@ -298,7 +332,7 @@ func (c *backupSyncController) deleteOrphanedBackups(locationName string, cloudB for _, backup := range backups { log = log.WithField("backup", backup.Name) - if backup.Status.Phase != velerov1api.BackupPhaseCompleted || cloudBackupNames.Has(backup.Name) { + if backup.Status.Phase != velerov1api.BackupPhaseCompleted || backupStoreBackups.Has(backup.Name) { continue } diff --git a/pkg/controller/backup_sync_controller_test.go b/pkg/controller/backup_sync_controller_test.go index 8ce19162cf3..e1bc9e06667 100644 --- a/pkg/controller/backup_sync_controller_test.go +++ b/pkg/controller/backup_sync_controller_test.go @@ -32,6 +32,7 @@ import ( core "k8s.io/client-go/testing" velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/builder" "github.com/heptio/velero/pkg/generated/clientset/versioned/fake" informers "github.com/heptio/velero/pkg/generated/informers/externalversions" "github.com/heptio/velero/pkg/label" @@ -39,7 +40,7 @@ import ( persistencemocks "github.com/heptio/velero/pkg/persistence/mocks" "github.com/heptio/velero/pkg/plugin/clientmgmt" pluginmocks "github.com/heptio/velero/pkg/plugin/mocks" - velerotest "github.com/heptio/velero/pkg/util/test" + velerotest "github.com/heptio/velero/pkg/test" ) func defaultLocationsList(namespace string) []*velerov1api.BackupStorageLocation { @@ -109,13 +110,19 @@ func defaultLocationsListWithLongerLocationName(namespace string) []*velerov1api } func TestBackupSyncControllerRun(t *testing.T) { + type cloudBackupData struct { + backup *velerov1api.Backup + podVolumeBackups []*velerov1api.PodVolumeBackup + } + tests := []struct { - name string - namespace string - locations []*velerov1api.BackupStorageLocation - cloudBackups map[string][]*velerov1api.Backup - existingBackups []*velerov1api.Backup - longLocationNameEnabled bool + name string + namespace string + locations []*velerov1api.BackupStorageLocation + cloudBuckets map[string][]*cloudBackupData + existingBackups []*velerov1api.Backup + existingPodVolumeBackups []*velerov1api.PodVolumeBackup + longLocationNameEnabled bool }{ { name: "no cloud backups", @@ -124,13 +131,19 @@ func TestBackupSyncControllerRun(t *testing.T) { name: "normal case", namespace: "ns-1", locations: defaultLocationsList("ns-1"), - cloudBackups: map[string][]*velerov1api.Backup{ + cloudBuckets: map[string][]*cloudBackupData{ "bucket-1": { - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").Backup, - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").Backup, + &cloudBackupData{ + backup: builder.ForBackup("ns-1", "backup-1").Result(), + }, + &cloudBackupData{ + backup: builder.ForBackup("ns-1", "backup-2").Result(), + }, }, "bucket-2": { - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-3").Backup, + &cloudBackupData{ + backup: builder.ForBackup("ns-1", "backup-3").Result(), + }, }, }, }, @@ -138,14 +151,22 @@ func TestBackupSyncControllerRun(t *testing.T) { name: "all synced backups get created in Velero server's namespace", namespace: "velero", locations: defaultLocationsList("velero"), - cloudBackups: 
map[string][]*velerov1api.Backup{ + cloudBuckets: map[string][]*cloudBackupData{ "bucket-1": { - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").Backup, - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").Backup, + &cloudBackupData{ + backup: builder.ForBackup("ns-1", "backup-1").Result(), + }, + &cloudBackupData{ + backup: builder.ForBackup("ns-1", "backup-2").Result(), + }, }, "bucket-2": { - velerotest.NewTestBackup().WithNamespace("ns-2").WithName("backup-3").Backup, - velerotest.NewTestBackup().WithNamespace("velero").WithName("backup-4").Backup, + &cloudBackupData{ + backup: builder.ForBackup("ns-2", "backup-3").Result(), + }, + &cloudBackupData{ + backup: builder.ForBackup("velero", "backup-4").Result(), + }, }, }, }, @@ -153,49 +174,65 @@ func TestBackupSyncControllerRun(t *testing.T) { name: "new backups get synced when some cloud backups already exist in the cluster", namespace: "ns-1", locations: defaultLocationsList("ns-1"), - cloudBackups: map[string][]*velerov1api.Backup{ + cloudBuckets: map[string][]*cloudBackupData{ "bucket-1": { - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").Backup, - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").Backup, + &cloudBackupData{ + backup: builder.ForBackup("ns-1", "backup-1").Result(), + }, + &cloudBackupData{ + backup: builder.ForBackup("ns-1", "backup-2").Result(), + }, }, "bucket-2": { - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-3").Backup, - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-4").Backup, + &cloudBackupData{ + backup: builder.ForBackup("ns-1", "backup-3").Result(), + }, + &cloudBackupData{ + backup: builder.ForBackup("ns-1", "backup-4").Result(), + }, }, }, existingBackups: []*velerov1api.Backup{ // add a label to each existing backup so we can differentiate it from the cloud // backup during verification - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").WithLabel("i-exist", "true").WithStorageLocation("location-1").Backup, - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-3").WithLabel("i-exist", "true").WithStorageLocation("location-2").Backup, + builder.ForBackup("ns-1", "backup-1").StorageLocation("location-1").ObjectMeta(builder.WithLabels("i-exist", "true")).Result(), + builder.ForBackup("ns-1", "backup-3").StorageLocation("location-2").ObjectMeta(builder.WithLabels("i-exist", "true")).Result(), }, }, { name: "existing backups without a StorageLocation get it filled in", namespace: "ns-1", locations: defaultLocationsList("ns-1"), - cloudBackups: map[string][]*velerov1api.Backup{ + cloudBuckets: map[string][]*cloudBackupData{ "bucket-1": { - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").Backup, + &cloudBackupData{ + backup: builder.ForBackup("ns-1", "backup-1").Result(), + }, }, }, existingBackups: []*velerov1api.Backup{ // add a label to each existing backup so we can differentiate it from the cloud // backup during verification - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").WithLabel("i-exist", "true").Backup, + builder.ForBackup("ns-1", "backup-1").ObjectMeta(builder.WithLabels("i-exist", "true")).StorageLocation("location-1").Result(), }, }, { name: "backup storage location names and labels get updated", namespace: "ns-1", locations: defaultLocationsList("ns-1"), - cloudBackups: map[string][]*velerov1api.Backup{ + cloudBuckets: map[string][]*cloudBackupData{ "bucket-1": { - 
velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").WithStorageLocation("foo").WithLabel(velerov1api.StorageLocationLabel, "foo").Backup, - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").Backup, + &cloudBackupData{ + backup: builder.ForBackup("ns-1", "backup-1").StorageLocation("foo").ObjectMeta(builder.WithLabels(velerov1api.StorageLocationLabel, "foo")).Result(), + }, + &cloudBackupData{ + backup: builder.ForBackup("ns-1", "backup-2").Result(), + }, }, "bucket-2": { - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-3").WithStorageLocation("bar").WithLabel(velerov1api.StorageLocationLabel, "bar").Backup, + &cloudBackupData{ + backup: builder.ForBackup("ns-1", "backup-3").StorageLocation("bar").ObjectMeta(builder.WithLabels(velerov1api.StorageLocationLabel, "bar")).Result(), + }, }, }, }, @@ -204,15 +241,93 @@ func TestBackupSyncControllerRun(t *testing.T) { namespace: "ns-1", locations: defaultLocationsListWithLongerLocationName("ns-1"), longLocationNameEnabled: true, - cloudBackups: map[string][]*velerov1api.Backup{ + cloudBuckets: map[string][]*cloudBackupData{ + "bucket-1": { + &cloudBackupData{ + backup: builder.ForBackup("ns-1", "backup-1").StorageLocation("foo").ObjectMeta(builder.WithLabels(velerov1api.StorageLocationLabel, "foo")).Result(), + }, + &cloudBackupData{ + backup: builder.ForBackup("ns-1", "backup-2").Result(), + }, + }, + "bucket-2": { + &cloudBackupData{ + backup: builder.ForBackup("ns-1", "backup-3").StorageLocation("bar").ObjectMeta(builder.WithLabels(velerov1api.StorageLocationLabel, "bar")).Result(), + }, + }, + }, + }, + { + name: "all synced backups and pod volume backups get created in Velero server's namespace", + namespace: "ns-1", + locations: defaultLocationsList("ns-1"), + cloudBuckets: map[string][]*cloudBackupData{ + "bucket-1": { + &cloudBackupData{ + backup: builder.ForBackup("ns-1", "backup-1").Result(), + podVolumeBackups: []*velerov1api.PodVolumeBackup{ + builder.ForPodVolumeBackup("ns-1", "pvb-1").Result(), + }, + }, + &cloudBackupData{ + backup: builder.ForBackup("ns-1", "backup-2").Result(), + podVolumeBackups: []*velerov1api.PodVolumeBackup{ + builder.ForPodVolumeBackup("ns-1", "pvb-2").Result(), + }, + }, + }, + "bucket-2": { + &cloudBackupData{ + backup: builder.ForBackup("ns-1", "backup-3").Result(), + }, + &cloudBackupData{ + backup: builder.ForBackup("ns-1", "backup-4").Result(), + podVolumeBackups: []*velerov1api.PodVolumeBackup{ + builder.ForPodVolumeBackup("ns-1", "pvb-1").Result(), + builder.ForPodVolumeBackup("ns-1", "pvb-2").Result(), + builder.ForPodVolumeBackup("ns-1", "pvb-3").Result(), + }, + }, + }, + }, + }, + { + name: "new pod volume backups get synched when some pod volume backups already exist in the cluster", + namespace: "ns-1", + locations: defaultLocationsList("ns-1"), + cloudBuckets: map[string][]*cloudBackupData{ "bucket-1": { - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").WithStorageLocation("foo").WithLabel(velerov1api.StorageLocationLabel, "foo").Backup, - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").Backup, + &cloudBackupData{ + backup: builder.ForBackup("ns-1", "backup-1").Result(), + podVolumeBackups: []*velerov1api.PodVolumeBackup{ + builder.ForPodVolumeBackup("ns-1", "pvb-1").Result(), + }, + }, + &cloudBackupData{ + backup: builder.ForBackup("ns-1", "backup-2").Result(), + podVolumeBackups: []*velerov1api.PodVolumeBackup{ + builder.ForPodVolumeBackup("ns-1", "pvb-3").Result(), + }, + }, }, "bucket-2": { 
- velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-3").WithStorageLocation("bar").WithLabel(velerov1api.StorageLocationLabel, "bar").Backup, + &cloudBackupData{ + backup: builder.ForBackup("ns-1", "backup-3").Result(), + }, + &cloudBackupData{ + backup: builder.ForBackup("ns-1", "backup-4").Result(), + podVolumeBackups: []*velerov1api.PodVolumeBackup{ + builder.ForPodVolumeBackup("ns-1", "pvb-1").Result(), + builder.ForPodVolumeBackup("ns-1", "pvb-5").Result(), + builder.ForPodVolumeBackup("ns-1", "pvb-6").Result(), + }, + }, }, }, + existingPodVolumeBackups: []*velerov1api.PodVolumeBackup{ + builder.ForPodVolumeBackup("ns-1", "pvb-1").Result(), + builder.ForPodVolumeBackup("ns-1", "pvb-2").Result(), + }, }, } @@ -226,10 +341,12 @@ func TestBackupSyncControllerRun(t *testing.T) { ) c := NewBackupSyncController( + client.VeleroV1(), client.VeleroV1(), client.VeleroV1(), sharedInformers.Velero().V1().Backups(), sharedInformers.Velero().V1().BackupStorageLocations(), + sharedInformers.Velero().V1().PodVolumeBackups(), time.Duration(0), test.namespace, "", @@ -256,9 +373,10 @@ func TestBackupSyncControllerRun(t *testing.T) { backupStore.On("GetRevision").Return("foo", nil) var backupNames []string - for _, b := range test.cloudBackups[location.Spec.ObjectStorage.Bucket] { - backupNames = append(backupNames, b.Name) - backupStore.On("GetBackupMetadata", b.Name).Return(b, nil) + for _, bucket := range test.cloudBuckets[location.Spec.ObjectStorage.Bucket] { + backupNames = append(backupNames, bucket.backup.Name) + backupStore.On("GetBackupMetadata", bucket.backup.Name).Return(bucket.backup, nil) + backupStore.On("GetPodVolumeBackups", bucket.backup.Name).Return(bucket.podVolumeBackups, nil) } backupStore.On("ListBackups").Return(backupNames, nil) } @@ -269,11 +387,18 @@ func TestBackupSyncControllerRun(t *testing.T) { _, err := client.VeleroV1().Backups(test.namespace).Create(existingBackup) require.NoError(t, err) } + + for _, existingPodVolumeBackup := range test.existingPodVolumeBackups { + require.NoError(t, sharedInformers.Velero().V1().PodVolumeBackups().Informer().GetStore().Add(existingPodVolumeBackup)) + + _, err := client.VeleroV1().PodVolumeBackups(test.namespace).Create(existingPodVolumeBackup) + require.NoError(t, err) + } client.ClearActions() c.run() - for bucket, backups := range test.cloudBackups { + for bucket, backupDataSet := range test.cloudBuckets { // figure out which location this bucket is for; we need this for verification // purposes later var location *velerov1api.BackupStorageLocation @@ -285,14 +410,15 @@ func TestBackupSyncControllerRun(t *testing.T) { } require.NotNil(t, location) - for _, cloudBackup := range backups { - obj, err := client.VeleroV1().Backups(test.namespace).Get(cloudBackup.Name, metav1.GetOptions{}) + // process the cloud backups + for _, cloudBackupData := range backupDataSet { + obj, err := client.VeleroV1().Backups(test.namespace).Get(cloudBackupData.backup.Name, metav1.GetOptions{}) require.NoError(t, err) // did this cloud backup already exist in the cluster? 
var existing *velerov1api.Backup for _, obj := range test.existingBackups { - if obj.Name == cloudBackup.Name { + if obj.Name == cloudBackupData.backup.Name { existing = obj break } @@ -318,6 +444,28 @@ func TestBackupSyncControllerRun(t *testing.T) { assert.Equal(t, locationName, obj.Labels[velerov1api.StorageLocationLabel]) assert.Equal(t, true, len(obj.Labels[velerov1api.StorageLocationLabel]) <= validation.DNS1035LabelMaxLength) } + + // process the cloud pod volume backups for this backup, if any + for _, podVolumeBackup := range cloudBackupData.podVolumeBackups { + objPodVolumeBackup, err := client.VeleroV1().PodVolumeBackups(test.namespace).Get(podVolumeBackup.Name, metav1.GetOptions{}) + require.NoError(t, err) + + // did this cloud pod volume backup already exist in the cluster? + var existingPodVolumeBackup *velerov1api.PodVolumeBackup + for _, objPodVolumeBackup := range test.existingPodVolumeBackups { + if objPodVolumeBackup.Name == podVolumeBackup.Name { + existingPodVolumeBackup = objPodVolumeBackup + break + } + } + + if existingPodVolumeBackup != nil { + // if this cloud pod volume backup already exists in the cluster, make sure that what we get from the + // client is the existing backup, not the cloud one. + expected := existingPodVolumeBackup.DeepCopy() + assert.Equal(t, expected, objPodVolumeBackup) + } + } } } }) @@ -325,10 +473,14 @@ func TestBackupSyncControllerRun(t *testing.T) { } func TestDeleteOrphanedBackups(t *testing.T) { + baseBuilder := func(name string) *builder.BackupBuilder { + return builder.ForBackup("ns-1", name).ObjectMeta(builder.WithLabels(velerov1api.StorageLocationLabel, "default")) + } + tests := []struct { name string cloudBackups sets.String - k8sBackups []*velerotest.TestBackup + k8sBackups []*velerov1api.Backup namespace string expectedDeletes sets.String }{ @@ -336,10 +488,10 @@ func TestDeleteOrphanedBackups(t *testing.T) { name: "no overlapping backups", namespace: "ns-1", cloudBackups: sets.NewString("backup-1", "backup-2", "backup-3"), - k8sBackups: []*velerotest.TestBackup{ - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backupA").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseCompleted), - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backupB").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseCompleted), - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backupC").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseCompleted), + k8sBackups: []*velerov1api.Backup{ + baseBuilder("backupA").Phase(velerov1api.BackupPhaseCompleted).Result(), + baseBuilder("backupB").Phase(velerov1api.BackupPhaseCompleted).Result(), + baseBuilder("backupC").Phase(velerov1api.BackupPhaseCompleted).Result(), }, expectedDeletes: sets.NewString("backupA", "backupB", "backupC"), }, @@ -347,10 +499,10 @@ func TestDeleteOrphanedBackups(t *testing.T) { name: "some overlapping backups", namespace: "ns-1", cloudBackups: sets.NewString("backup-1", "backup-2", "backup-3"), - k8sBackups: []*velerotest.TestBackup{ - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseCompleted), - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseCompleted), - 
velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-C").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseCompleted), + k8sBackups: []*velerov1api.Backup{ + baseBuilder("backup-1").Phase(velerov1api.BackupPhaseCompleted).Result(), + baseBuilder("backup-2").Phase(velerov1api.BackupPhaseCompleted).Result(), + baseBuilder("backup-C").Phase(velerov1api.BackupPhaseCompleted).Result(), }, expectedDeletes: sets.NewString("backup-C"), }, @@ -358,10 +510,10 @@ func TestDeleteOrphanedBackups(t *testing.T) { name: "all overlapping backups", namespace: "ns-1", cloudBackups: sets.NewString("backup-1", "backup-2", "backup-3"), - k8sBackups: []*velerotest.TestBackup{ - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseCompleted), - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseCompleted), - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-3").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseCompleted), + k8sBackups: []*velerov1api.Backup{ + baseBuilder("backup-1").Phase(velerov1api.BackupPhaseCompleted).Result(), + baseBuilder("backup-2").Phase(velerov1api.BackupPhaseCompleted).Result(), + baseBuilder("backup-3").Phase(velerov1api.BackupPhaseCompleted).Result(), }, expectedDeletes: sets.NewString(), }, @@ -369,13 +521,13 @@ func TestDeleteOrphanedBackups(t *testing.T) { name: "no overlapping backups but including backups that are not complete", namespace: "ns-1", cloudBackups: sets.NewString("backup-1", "backup-2", "backup-3"), - k8sBackups: []*velerotest.TestBackup{ - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backupA").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseCompleted), - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("Deleting").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseDeleting), - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("Failed").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseFailed), - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("FailedValidation").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseFailedValidation), - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("InProgress").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseInProgress), - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("New").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseNew), + k8sBackups: []*velerov1api.Backup{ + baseBuilder("backupA").Phase(velerov1api.BackupPhaseCompleted).Result(), + baseBuilder("Deleting").Phase(velerov1api.BackupPhaseDeleting).Result(), + baseBuilder("Failed").Phase(velerov1api.BackupPhaseFailed).Result(), + baseBuilder("FailedValidation").Phase(velerov1api.BackupPhaseFailedValidation).Result(), + baseBuilder("InProgress").Phase(velerov1api.BackupPhaseInProgress).Result(), + baseBuilder("New").Phase(velerov1api.BackupPhaseNew).Result(), }, expectedDeletes: sets.NewString("backupA"), }, @@ -383,10 +535,10 @@ func TestDeleteOrphanedBackups(t *testing.T) { name: "all overlapping backups and all backups that are not complete", namespace: "ns-1", cloudBackups: 
sets.NewString("backup-1", "backup-2", "backup-3"), - k8sBackups: []*velerotest.TestBackup{ - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseFailed), - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseFailedValidation), - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-3").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseInProgress), + k8sBackups: []*velerov1api.Backup{ + baseBuilder("backup-1").Phase(velerov1api.BackupPhaseFailed).Result(), + baseBuilder("backup-2").Phase(velerov1api.BackupPhaseFailedValidation).Result(), + baseBuilder("backup-3").Phase(velerov1api.BackupPhaseInProgress).Result(), }, expectedDeletes: sets.NewString(), }, @@ -394,13 +546,14 @@ func TestDeleteOrphanedBackups(t *testing.T) { name: "no completed backups in other locations are deleted", namespace: "ns-1", cloudBackups: sets.NewString("backup-1", "backup-2", "backup-3"), - k8sBackups: []*velerotest.TestBackup{ - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseCompleted), - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseCompleted), - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-C").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseCompleted), - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-4").WithLabel(velerov1api.StorageLocationLabel, "alternate").WithPhase(velerov1api.BackupPhaseCompleted), - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-5").WithLabel(velerov1api.StorageLocationLabel, "alternate").WithPhase(velerov1api.BackupPhaseCompleted), - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-6").WithLabel(velerov1api.StorageLocationLabel, "alternate").WithPhase(velerov1api.BackupPhaseCompleted), + k8sBackups: []*velerov1api.Backup{ + baseBuilder("backup-1").Phase(velerov1api.BackupPhaseCompleted).Result(), + baseBuilder("backup-2").Phase(velerov1api.BackupPhaseCompleted).Result(), + baseBuilder("backup-C").Phase(velerov1api.BackupPhaseCompleted).Result(), + + baseBuilder("backup-4").ObjectMeta(builder.WithLabels(velerov1api.StorageLocationLabel, "alternate")).Phase(velerov1api.BackupPhaseCompleted).Result(), + baseBuilder("backup-5").ObjectMeta(builder.WithLabels(velerov1api.StorageLocationLabel, "alternate")).Phase(velerov1api.BackupPhaseCompleted).Result(), + baseBuilder("backup-6").ObjectMeta(builder.WithLabels(velerov1api.StorageLocationLabel, "alternate")).Phase(velerov1api.BackupPhaseCompleted).Result(), }, expectedDeletes: sets.NewString("backup-C"), }, @@ -414,10 +567,12 @@ func TestDeleteOrphanedBackups(t *testing.T) { ) c := NewBackupSyncController( + client.VeleroV1(), client.VeleroV1(), client.VeleroV1(), sharedInformers.Velero().V1().Backups(), sharedInformers.Velero().V1().BackupStorageLocations(), + sharedInformers.Velero().V1().PodVolumeBackups(), time.Duration(0), test.namespace, "", @@ -429,10 +584,10 @@ func TestDeleteOrphanedBackups(t *testing.T) { for _, backup := range test.k8sBackups { // add test backup to informer - require.NoError(t, 
sharedInformers.Velero().V1().Backups().Informer().GetStore().Add(backup.Backup), "Error adding backup to informer") + require.NoError(t, sharedInformers.Velero().V1().Backups().Informer().GetStore().Add(backup), "Error adding backup to informer") // add test backup to client - _, err := client.VeleroV1().Backups(test.namespace).Create(backup.Backup) + _, err := client.VeleroV1().Backups(test.namespace).Create(backup) require.NoError(t, err, "Error adding backup to clientset") // if we expect this backup to be deleted, set up the expected DeleteAction @@ -464,7 +619,7 @@ func TestStorageLabelsInDeleteOrphanedBackups(t *testing.T) { tests := []struct { name string cloudBackups sets.String - k8sBackups []*velerotest.TestBackup + k8sBackups []*velerov1api.Backup namespace string expectedDeletes sets.String }{ @@ -472,13 +627,25 @@ func TestStorageLabelsInDeleteOrphanedBackups(t *testing.T) { name: "some overlapping backups", namespace: "ns-1", cloudBackups: sets.NewString("backup-1", "backup-2", "backup-3"), - k8sBackups: []*velerotest.TestBackup{ - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1"). - WithLabel(velerov1api.StorageLocationLabel, "the-really-long-location-name-that-is-much-more-than-63-c69e779").WithPhase(velerov1api.BackupPhaseCompleted), - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2"). - WithLabel(velerov1api.StorageLocationLabel, "the-really-long-location-name-that-is-much-more-than-63-c69e779").WithPhase(velerov1api.BackupPhaseCompleted), - velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-C"). - WithLabel(velerov1api.StorageLocationLabel, "the-really-long-location-name-that-is-much-more-than-63-c69e779").WithPhase(velerov1api.BackupPhaseCompleted), + k8sBackups: []*velerov1api.Backup{ + builder.ForBackup("ns-1", "backup-1"). + ObjectMeta( + builder.WithLabels(velerov1api.StorageLocationLabel, "the-really-long-location-name-that-is-much-more-than-63-c69e779"), + ). + Phase(velerov1api.BackupPhaseCompleted). + Result(), + builder.ForBackup("ns-1", "backup-2"). + ObjectMeta( + builder.WithLabels(velerov1api.StorageLocationLabel, "the-really-long-location-name-that-is-much-more-than-63-c69e779"), + ). + Phase(velerov1api.BackupPhaseCompleted). + Result(), + builder.ForBackup("ns-1", "backup-C"). + ObjectMeta( + builder.WithLabels(velerov1api.StorageLocationLabel, "the-really-long-location-name-that-is-much-more-than-63-c69e779"), + ). + Phase(velerov1api.BackupPhaseCompleted). 
+ Result(), }, expectedDeletes: sets.NewString("backup-C"), }, @@ -492,10 +659,12 @@ func TestStorageLabelsInDeleteOrphanedBackups(t *testing.T) { ) c := NewBackupSyncController( + client.VeleroV1(), client.VeleroV1(), client.VeleroV1(), sharedInformers.Velero().V1().Backups(), sharedInformers.Velero().V1().BackupStorageLocations(), + sharedInformers.Velero().V1().PodVolumeBackups(), time.Duration(0), test.namespace, "", @@ -507,10 +676,10 @@ func TestStorageLabelsInDeleteOrphanedBackups(t *testing.T) { for _, backup := range test.k8sBackups { // add test backup to informer - require.NoError(t, sharedInformers.Velero().V1().Backups().Informer().GetStore().Add(backup.Backup), "Error adding backup to informer") + require.NoError(t, sharedInformers.Velero().V1().Backups().Informer().GetStore().Add(backup), "Error adding backup to informer") // add test backup to client - _, err := client.VeleroV1().Backups(test.namespace).Create(backup.Backup) + _, err := client.VeleroV1().Backups(test.namespace).Create(backup) require.NoError(t, err, "Error adding backup to clientset") // if we expect this backup to be deleted, set up the expected DeleteAction @@ -645,3 +814,13 @@ func numBackups(t *testing.T, c *fake.Clientset, ns string) (int, error) { return len(existingK8SBackups.Items), nil } + +func numPodVolumeBackups(t *testing.T, c *fake.Clientset, ns string) (int, error) { + t.Helper() + existingK8SPodvolumeBackups, err := c.VeleroV1().PodVolumeBackups(ns).List(metav1.ListOptions{}) + if err != nil { + return 0, err + } + + return len(existingK8SPodvolumeBackups.Items), nil +} diff --git a/pkg/controller/download_request_controller_test.go b/pkg/controller/download_request_controller_test.go index b166cb4a46d..7a0dcc36cd3 100644 --- a/pkg/controller/download_request_controller_test.go +++ b/pkg/controller/download_request_controller_test.go @@ -28,14 +28,15 @@ import ( "k8s.io/apimachinery/pkg/util/clock" v1 "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/builder" "github.com/heptio/velero/pkg/generated/clientset/versioned/fake" informers "github.com/heptio/velero/pkg/generated/informers/externalversions" "github.com/heptio/velero/pkg/persistence" persistencemocks "github.com/heptio/velero/pkg/persistence/mocks" "github.com/heptio/velero/pkg/plugin/clientmgmt" pluginmocks "github.com/heptio/velero/pkg/plugin/mocks" + velerotest "github.com/heptio/velero/pkg/test" kubeutil "github.com/heptio/velero/pkg/util/kube" - velerotest "github.com/heptio/velero/pkg/util/test" ) type downloadRequestTestHarness struct { @@ -119,6 +120,10 @@ func newBackupLocation(name, provider, bucket string) *v1.BackupStorageLocation } func TestProcessDownloadRequest(t *testing.T) { + defaultBackup := func() *v1.Backup { + return builder.ForBackup(v1.DefaultNamespace, "a-backup").StorageLocation("a-location").Result() + } + tests := []struct { name string key string @@ -145,94 +150,94 @@ func TestProcessDownloadRequest(t *testing.T) { { name: "backup contents request for nonexistent backup returns an error", downloadRequest: newDownloadRequest("", v1.DownloadTargetKindBackupContents, "a-backup"), - backup: velerotest.NewTestBackup().WithName("non-matching-backup").WithStorageLocation("a-location").Backup, + backup: builder.ForBackup(v1.DefaultNamespace, "non-matching-backup").StorageLocation("a-location").Result(), backupLocation: newBackupLocation("a-location", "a-provider", "a-bucket"), expectedErr: "backup.velero.io \"a-backup\" not found", }, { name: "restore log request for nonexistent 
restore returns an error", downloadRequest: newDownloadRequest("", v1.DownloadTargetKindRestoreLog, "a-backup-20170912150214"), - restore: velerotest.NewTestRestore(v1.DefaultNamespace, "non-matching-restore", v1.RestorePhaseCompleted).WithBackup("a-backup").Restore, - backup: velerotest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup, + restore: builder.ForRestore(v1.DefaultNamespace, "non-matching-restore").Phase(v1.RestorePhaseCompleted).Backup("a-backup").Result(), + backup: defaultBackup(), backupLocation: newBackupLocation("a-location", "a-provider", "a-bucket"), expectedErr: "error getting Restore: restore.velero.io \"a-backup-20170912150214\" not found", }, { name: "backup contents request for backup with nonexistent location returns an error", downloadRequest: newDownloadRequest("", v1.DownloadTargetKindBackupContents, "a-backup"), - backup: velerotest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup, + backup: defaultBackup(), backupLocation: newBackupLocation("non-matching-location", "a-provider", "a-bucket"), expectedErr: "backupstoragelocation.velero.io \"a-location\" not found", }, { name: "backup contents request with phase '' gets a url", downloadRequest: newDownloadRequest("", v1.DownloadTargetKindBackupContents, "a-backup"), - backup: velerotest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup, + backup: defaultBackup(), backupLocation: newBackupLocation("a-location", "a-provider", "a-bucket"), expectGetsURL: true, }, { name: "backup contents request with phase 'New' gets a url", downloadRequest: newDownloadRequest(v1.DownloadRequestPhaseNew, v1.DownloadTargetKindBackupContents, "a-backup"), - backup: velerotest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup, + backup: defaultBackup(), backupLocation: newBackupLocation("a-location", "a-provider", "a-bucket"), expectGetsURL: true, }, { name: "backup log request with phase '' gets a url", downloadRequest: newDownloadRequest("", v1.DownloadTargetKindBackupLog, "a-backup"), - backup: velerotest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup, + backup: defaultBackup(), backupLocation: newBackupLocation("a-location", "a-provider", "a-bucket"), expectGetsURL: true, }, { name: "backup log request with phase 'New' gets a url", downloadRequest: newDownloadRequest(v1.DownloadRequestPhaseNew, v1.DownloadTargetKindBackupLog, "a-backup"), - backup: velerotest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup, + backup: defaultBackup(), backupLocation: newBackupLocation("a-location", "a-provider", "a-bucket"), expectGetsURL: true, }, { name: "restore log request with phase '' gets a url", downloadRequest: newDownloadRequest("", v1.DownloadTargetKindRestoreLog, "a-backup-20170912150214"), - restore: velerotest.NewTestRestore(v1.DefaultNamespace, "a-backup-20170912150214", v1.RestorePhaseCompleted).WithBackup("a-backup").Restore, - backup: velerotest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup, + restore: builder.ForRestore(v1.DefaultNamespace, "a-backup-20170912150214").Phase(v1.RestorePhaseCompleted).Backup("a-backup").Result(), + backup: defaultBackup(), backupLocation: newBackupLocation("a-location", "a-provider", "a-bucket"), expectGetsURL: true, }, { name: "restore log request with phase 'New' gets a url", downloadRequest: newDownloadRequest(v1.DownloadRequestPhaseNew, v1.DownloadTargetKindRestoreLog, 
"a-backup-20170912150214"), - restore: velerotest.NewTestRestore(v1.DefaultNamespace, "a-backup-20170912150214", v1.RestorePhaseCompleted).WithBackup("a-backup").Restore, - backup: velerotest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup, + restore: builder.ForRestore(v1.DefaultNamespace, "a-backup-20170912150214").Phase(v1.RestorePhaseCompleted).Backup("a-backup").Result(), + backup: defaultBackup(), backupLocation: newBackupLocation("a-location", "a-provider", "a-bucket"), expectGetsURL: true, }, { name: "restore results request with phase '' gets a url", downloadRequest: newDownloadRequest("", v1.DownloadTargetKindRestoreResults, "a-backup-20170912150214"), - restore: velerotest.NewTestRestore(v1.DefaultNamespace, "a-backup-20170912150214", v1.RestorePhaseCompleted).WithBackup("a-backup").Restore, - backup: velerotest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup, + restore: builder.ForRestore(v1.DefaultNamespace, "a-backup-20170912150214").Phase(v1.RestorePhaseCompleted).Backup("a-backup").Result(), + backup: defaultBackup(), backupLocation: newBackupLocation("a-location", "a-provider", "a-bucket"), expectGetsURL: true, }, { name: "restore results request with phase 'New' gets a url", downloadRequest: newDownloadRequest(v1.DownloadRequestPhaseNew, v1.DownloadTargetKindRestoreResults, "a-backup-20170912150214"), - restore: velerotest.NewTestRestore(v1.DefaultNamespace, "a-backup-20170912150214", v1.RestorePhaseCompleted).WithBackup("a-backup").Restore, - backup: velerotest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup, + restore: builder.ForRestore(v1.DefaultNamespace, "a-backup-20170912150214").Phase(v1.RestorePhaseCompleted).Backup("a-backup").Result(), + backup: defaultBackup(), backupLocation: newBackupLocation("a-location", "a-provider", "a-bucket"), expectGetsURL: true, }, { name: "request with phase 'Processed' is not deleted if not expired", downloadRequest: newDownloadRequest(v1.DownloadRequestPhaseProcessed, v1.DownloadTargetKindBackupLog, "a-backup-20170912150214"), - backup: velerotest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup, + backup: defaultBackup(), }, { name: "request with phase 'Processed' is deleted if expired", downloadRequest: newDownloadRequest(v1.DownloadRequestPhaseProcessed, v1.DownloadTargetKindBackupLog, "a-backup-20170912150214"), - backup: velerotest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup, + backup: defaultBackup(), expired: true, }, } diff --git a/pkg/controller/gc_controller_test.go b/pkg/controller/gc_controller_test.go index 647b0bc0a58..a25c48e97be 100644 --- a/pkg/controller/gc_controller_test.go +++ b/pkg/controller/gc_controller_test.go @@ -33,10 +33,11 @@ import ( core "k8s.io/client-go/testing" api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/builder" "github.com/heptio/velero/pkg/generated/clientset/versioned/fake" informers "github.com/heptio/velero/pkg/generated/informers/externalversions" + velerotest "github.com/heptio/velero/pkg/test" "github.com/heptio/velero/pkg/util/kube" - velerotest "github.com/heptio/velero/pkg/util/test" ) func TestGCControllerEnqueueAllBackups(t *testing.T) { @@ -66,7 +67,7 @@ func TestGCControllerEnqueueAllBackups(t *testing.T) { var expected []string for i := 0; i < 3; i++ { - backup := velerotest.NewTestBackup().WithName(fmt.Sprintf("backup-%d", i)).Backup + backup := builder.ForBackup(api.DefaultNamespace, 
fmt.Sprintf("backup-%d", i)).Result() sharedInformers.Velero().V1().Backups().Informer().GetStore().Add(backup) expected = append(expected, kube.NamespaceAndName(backup)) } @@ -97,7 +98,7 @@ Loop: } func TestGCControllerHasUpdateFunc(t *testing.T) { - backup := velerotest.NewTestBackup().WithName("backup").Backup + backup := defaultBackup().Result() expected := kube.NamespaceAndName(backup) client := fake.NewSimpleClientset(backup) @@ -151,7 +152,7 @@ func TestGCControllerHasUpdateFunc(t *testing.T) { func TestGCControllerProcessQueueItem(t *testing.T) { fakeClock := clock.NewFakeClock(time.Now()) - defaultBackupLocation := velerotest.NewTestBackupStorageLocation().WithName("default").BackupStorageLocation + defaultBackupLocation := builder.ForBackupStorageLocation("velero", "default").Result() tests := []struct { name string @@ -166,52 +167,32 @@ func TestGCControllerProcessQueueItem(t *testing.T) { name: "can't find backup - no error", }, { - name: "unexpired backup is not deleted", - backup: velerotest.NewTestBackup(). - WithName("backup-1"). - WithExpiration(fakeClock.Now().Add(1 * time.Minute)). - WithStorageLocation("default"). - Backup, + name: "unexpired backup is not deleted", + backup: defaultBackup().Expiration(fakeClock.Now().Add(time.Minute)).StorageLocation("default").Result(), backupLocation: defaultBackupLocation, expectDeletion: false, }, { - name: "expired backup in read-only storage location is not deleted", - backup: velerotest.NewTestBackup(). - WithName("backup-1"). - WithExpiration(fakeClock.Now().Add(-1 * time.Minute)). - WithStorageLocation("read-only"). - Backup, - backupLocation: velerotest.NewTestBackupStorageLocation().WithName("read-only").WithAccessMode(api.BackupStorageLocationAccessModeReadOnly).BackupStorageLocation, + name: "expired backup in read-only storage location is not deleted", + backup: defaultBackup().Expiration(fakeClock.Now().Add(-time.Minute)).StorageLocation("read-only").Result(), + backupLocation: builder.ForBackupStorageLocation("velero", "read-only").AccessMode(api.BackupStorageLocationAccessModeReadOnly).Result(), expectDeletion: false, }, { - name: "expired backup in read-write storage location is deleted", - backup: velerotest.NewTestBackup(). - WithName("backup-1"). - WithExpiration(fakeClock.Now().Add(-1 * time.Minute)). - WithStorageLocation("read-write"). - Backup, - backupLocation: velerotest.NewTestBackupStorageLocation().WithName("read-write").WithAccessMode(api.BackupStorageLocationAccessModeReadWrite).BackupStorageLocation, + name: "expired backup in read-write storage location is deleted", + backup: defaultBackup().Expiration(fakeClock.Now().Add(-time.Minute)).StorageLocation("read-write").Result(), + backupLocation: builder.ForBackupStorageLocation("velero", "read-write").AccessMode(api.BackupStorageLocationAccessModeReadWrite).Result(), expectDeletion: true, }, { - name: "expired backup with no pending deletion requests is deleted", - backup: velerotest.NewTestBackup(). - WithName("backup-1"). - WithExpiration(fakeClock.Now().Add(-1 * time.Second)). - WithStorageLocation("default"). - Backup, + name: "expired backup with no pending deletion requests is deleted", + backup: defaultBackup().Expiration(fakeClock.Now().Add(-time.Second)).StorageLocation("default").Result(), backupLocation: defaultBackupLocation, expectDeletion: true, }, { - name: "expired backup with a pending deletion request is not deleted", - backup: velerotest.NewTestBackup(). - WithName("backup-1"). - WithExpiration(fakeClock.Now().Add(-1 * time.Second)). 
- WithStorageLocation("default"). - Backup, + name: "expired backup with a pending deletion request is not deleted", + backup: defaultBackup().Expiration(fakeClock.Now().Add(-time.Second)).StorageLocation("default").Result(), backupLocation: defaultBackupLocation, deleteBackupRequests: []*api.DeleteBackupRequest{ { @@ -231,12 +212,8 @@ func TestGCControllerProcessQueueItem(t *testing.T) { expectDeletion: false, }, { - name: "expired backup with only processed deletion requests is deleted", - backup: velerotest.NewTestBackup(). - WithName("backup-1"). - WithExpiration(fakeClock.Now().Add(-1 * time.Second)). - WithStorageLocation("default"). - Backup, + name: "expired backup with only processed deletion requests is deleted", + backup: defaultBackup().Expiration(fakeClock.Now().Add(-time.Second)).StorageLocation("default").Result(), backupLocation: defaultBackupLocation, deleteBackupRequests: []*api.DeleteBackupRequest{ { @@ -256,12 +233,8 @@ func TestGCControllerProcessQueueItem(t *testing.T) { expectDeletion: true, }, { - name: "create DeleteBackupRequest error returns an error", - backup: velerotest.NewTestBackup(). - WithName("backup-1"). - WithExpiration(fakeClock.Now().Add(-1 * time.Second)). - WithStorageLocation("default"). - Backup, + name: "create DeleteBackupRequest error returns an error", + backup: defaultBackup().Expiration(fakeClock.Now().Add(-time.Second)).StorageLocation("default").Result(), backupLocation: defaultBackupLocation, expectDeletion: true, createDeleteBackupRequestError: true, diff --git a/pkg/controller/pod_volume_backup_controller.go b/pkg/controller/pod_volume_backup_controller.go index 458f6a7f2ff..9b3e8cbcb00 100644 --- a/pkg/controller/pod_volume_backup_controller.go +++ b/pkg/controller/pod_volume_backup_controller.go @@ -27,7 +27,9 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/clock" corev1informers "k8s.io/client-go/informers/core/v1" corev1listers "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" @@ -50,11 +52,13 @@ type podVolumeBackupController struct { secretLister corev1listers.SecretLister podLister corev1listers.PodLister pvcLister corev1listers.PersistentVolumeClaimLister + pvLister corev1listers.PersistentVolumeLister backupLocationLister listers.BackupStorageLocationLister nodeName string processBackupFunc func(*velerov1api.PodVolumeBackup) error fileSystem filesystem.Interface + clock clock.Clock } // NewPodVolumeBackupController creates a new pod volume backup controller. 
@@ -65,6 +69,7 @@ func NewPodVolumeBackupController( podInformer cache.SharedIndexInformer, secretInformer cache.SharedIndexInformer, pvcInformer corev1informers.PersistentVolumeClaimInformer, + pvInformer corev1informers.PersistentVolumeInformer, backupLocationInformer informers.BackupStorageLocationInformer, nodeName string, ) Interface { @@ -75,10 +80,12 @@ func NewPodVolumeBackupController( podLister: corev1listers.NewPodLister(podInformer.GetIndexer()), secretLister: corev1listers.NewSecretLister(secretInformer.GetIndexer()), pvcLister: pvcInformer.Lister(), + pvLister: pvInformer.Lister(), backupLocationLister: backupLocationInformer.Lister(), nodeName: nodeName, fileSystem: filesystem.NewFileSystem(), + clock: &clock.RealClock{}, } c.syncHandler = c.processQueueItem @@ -173,9 +180,12 @@ func (c *podVolumeBackupController) processBackup(req *velerov1api.PodVolumeBack var err error // update status to InProgress - req, err = c.patchPodVolumeBackup(req, updatePhaseFunc(velerov1api.PodVolumeBackupPhaseInProgress)) + req, err = c.patchPodVolumeBackup(req, func(r *velerov1api.PodVolumeBackup) { + r.Status.Phase = velerov1api.PodVolumeBackupPhaseInProgress + r.Status.StartTimestamp.Time = c.clock.Now() + }) if err != nil { - log.WithError(err).Error("Error setting phase to InProgress") + log.WithError(err).Error("Error setting PodVolumeBackup StartTimestamp and phase to InProgress") return errors.WithStack(err) } @@ -185,7 +195,7 @@ func (c *podVolumeBackupController) processBackup(req *velerov1api.PodVolumeBack return c.fail(req, errors.Wrap(err, "error getting pod").Error(), log) } - volumeDir, err := kube.GetVolumeDirectory(pod, req.Spec.Volume, c.pvcLister) + volumeDir, err := kube.GetVolumeDirectory(pod, req.Spec.Volume, c.pvcLister, c.pvLister) if err != nil { log.WithError(err).Error("Error getting volume directory name") return c.fail(req, errors.Wrap(err, "error getting volume directory name").Error(), log) @@ -226,6 +236,21 @@ func (c *podVolumeBackupController) processBackup(req *velerov1api.PodVolumeBack resticCmd.Env = env } + // If this is a PVC, look for the most recent completed pod volume backup for it and get + // its restic snapshot ID to use as the value of the `--parent` flag. Without this, + // if the pod using the PVC (and therefore the directory path under /host_pods/) has + // changed since the PVC's last backup, restic will not be able to identify a suitable + // parent snapshot to use, and will have to do a full rescan of the contents of the PVC. 
+ if pvcUID, ok := req.Labels[velerov1api.PVCUIDLabel]; ok { + parentSnapshotID := getParentSnapshot(log, pvcUID, c.podVolumeBackupLister.PodVolumeBackups(req.Namespace)) + if parentSnapshotID == "" { + log.Info("No parent snapshot found for PVC, not using --parent flag for this backup") + } else { + log.WithField("parentSnapshotID", parentSnapshotID).Info("Setting --parent flag for this backup") + resticCmd.ExtraFlags = append(resticCmd.ExtraFlags, fmt.Sprintf("--parent=%s", parentSnapshotID)) + } + } + var stdout, stderr string var emptySnapshot bool @@ -253,12 +278,13 @@ func (c *podVolumeBackupController) processBackup(req *velerov1api.PodVolumeBack r.Status.Path = path r.Status.Phase = velerov1api.PodVolumeBackupPhaseCompleted r.Status.SnapshotID = snapshotID + r.Status.CompletionTimestamp.Time = c.clock.Now() if emptySnapshot { r.Status.Message = "volume was empty so no snapshot was taken" } }) if err != nil { - log.WithError(err).Error("Error setting phase to Completed") + log.WithError(err).Error("Error setting PodVolumeBackup phase to Completed") return err } @@ -267,6 +293,45 @@ func (c *podVolumeBackupController) processBackup(req *velerov1api.PodVolumeBack return nil } +// getParentSnapshot finds the most recent completed pod volume backup for the specified PVC and returns its +// restic snapshot ID. Any errors encountered are logged but not returned since they do not prevent a backup +// from proceeding. +func getParentSnapshot(log logrus.FieldLogger, pvcUID string, podVolumeBackupLister listers.PodVolumeBackupNamespaceLister) string { + log = log.WithField("pvcUID", pvcUID) + log.Infof("Looking for most recent completed pod volume backup for this PVC") + + pvcBackups, err := podVolumeBackupLister.List(labels.SelectorFromSet(map[string]string{velerov1api.PVCUIDLabel: pvcUID})) + if err != nil { + log.WithError(errors.WithStack(err)).Error("Error listing pod volume backups for PVC") + return "" + } + + // go through all the pod volume backups for the PVC and look for the most recent completed one + // to use as the parent. 
+ var mostRecentBackup *velerov1api.PodVolumeBackup + for _, backup := range pvcBackups { + if backup.Status.Phase != velerov1api.PodVolumeBackupPhaseCompleted { + continue + } + + if mostRecentBackup == nil || backup.Status.StartTimestamp.After(mostRecentBackup.Status.StartTimestamp.Time) { + mostRecentBackup = backup + } + } + + if mostRecentBackup == nil { + log.Info("No completed pod volume backup found for PVC") + return "" + } + + log.WithFields(map[string]interface{}{ + "parentPodVolumeBackup": mostRecentBackup.Name, + "parentSnapshotID": mostRecentBackup.Status.SnapshotID, + }).Info("Found most recent completed pod volume backup for PVC") + + return mostRecentBackup.Status.SnapshotID +} + func (c *podVolumeBackupController) patchPodVolumeBackup(req *velerov1api.PodVolumeBackup, mutate func(*velerov1api.PodVolumeBackup)) (*velerov1api.PodVolumeBackup, error) { // Record original json oldData, err := json.Marshal(req) @@ -300,19 +365,14 @@ func (c *podVolumeBackupController) fail(req *velerov1api.PodVolumeBackup, msg s if _, err := c.patchPodVolumeBackup(req, func(r *velerov1api.PodVolumeBackup) { r.Status.Phase = velerov1api.PodVolumeBackupPhaseFailed r.Status.Message = msg + r.Status.CompletionTimestamp.Time = c.clock.Now() }); err != nil { - log.WithError(err).Error("Error setting phase to Failed") + log.WithError(err).Error("Error setting PodVolumeBackup phase to Failed") return err } return nil } -func updatePhaseFunc(phase velerov1api.PodVolumeBackupPhase) func(r *velerov1api.PodVolumeBackup) { - return func(r *velerov1api.PodVolumeBackup) { - r.Status.Phase = phase - } -} - func singlePathMatch(path string) (string, error) { matches, err := filepath.Glob(path) if err != nil { diff --git a/pkg/controller/pod_volume_backup_controller_test.go b/pkg/controller/pod_volume_backup_controller_test.go index e051aa751b0..9d17ae64789 100644 --- a/pkg/controller/pod_volume_backup_controller_test.go +++ b/pkg/controller/pod_volume_backup_controller_test.go @@ -23,7 +23,7 @@ import ( "github.com/stretchr/testify/require" velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" - velerotest "github.com/heptio/velero/pkg/util/test" + velerotest "github.com/heptio/velero/pkg/test" ) func TestPVBHandler(t *testing.T) { diff --git a/pkg/controller/pod_volume_restore_controller.go b/pkg/controller/pod_volume_restore_controller.go index 444b142debb..c3ff87bbb18 100644 --- a/pkg/controller/pod_volume_restore_controller.go +++ b/pkg/controller/pod_volume_restore_controller.go @@ -31,6 +31,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/clock" corev1informers "k8s.io/client-go/informers/core/v1" corev1listers "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" @@ -54,11 +55,13 @@ type podVolumeRestoreController struct { podLister corev1listers.PodLister secretLister corev1listers.SecretLister pvcLister corev1listers.PersistentVolumeClaimLister + pvLister corev1listers.PersistentVolumeLister backupLocationLister listers.BackupStorageLocationLister nodeName string processRestoreFunc func(*velerov1api.PodVolumeRestore) error fileSystem filesystem.Interface + clock clock.Clock } // NewPodVolumeRestoreController creates a new pod volume restore controller. 
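
Both pod volume controllers above gain a clock clock.Clock field, wired to &clock.RealClock{} in the constructor, and record the new StartTimestamp/CompletionTimestamp status fields via c.clock.Now() instead of calling time.Now() directly. The short sketch below illustrates why this seam matters: the same k8s.io/apimachinery clock package provides a fake clock (already used by the GC controller tests above), so timestamp assertions stay deterministic. The controller type and field names here are placeholders, not Velero's actual types.

// Illustrative sketch of the injected-clock pattern; type names are hypothetical.
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/clock"
)

type controller struct {
	// clock is a seam for tests: production code wires in a RealClock,
	// tests wire in a FakeClock seeded with a known time.
	clock clock.Clock
}

func (c *controller) startTimestamp() time.Time {
	// Equivalent to the r.Status.StartTimestamp.Time = c.clock.Now() calls above.
	return c.clock.Now()
}

func main() {
	// Production wiring: real wall-clock time.
	prod := &controller{clock: &clock.RealClock{}}
	fmt.Println(prod.startTimestamp())

	// Test wiring: the fake clock returns the seeded time until it is advanced,
	// so assertions on recorded timestamps are stable across runs.
	fake := clock.NewFakeClock(time.Date(2019, time.August, 1, 0, 0, 0, 0, time.UTC))
	test := &controller{clock: fake}
	fmt.Println(test.startTimestamp()) // 2019-08-01 00:00:00 +0000 UTC
}
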
@@ -69,6 +72,7 @@ func NewPodVolumeRestoreController( podInformer cache.SharedIndexInformer, secretInformer cache.SharedIndexInformer, pvcInformer corev1informers.PersistentVolumeClaimInformer, + pvInformer corev1informers.PersistentVolumeInformer, backupLocationInformer informers.BackupStorageLocationInformer, nodeName string, ) Interface { @@ -79,10 +83,12 @@ func NewPodVolumeRestoreController( podLister: corev1listers.NewPodLister(podInformer.GetIndexer()), secretLister: corev1listers.NewSecretLister(secretInformer.GetIndexer()), pvcLister: pvcInformer.Lister(), + pvLister: pvInformer.Lister(), backupLocationLister: backupLocationInformer.Lister(), nodeName: nodeName, fileSystem: filesystem.NewFileSystem(), + clock: &clock.RealClock{}, } c.syncHandler = c.processQueueItem @@ -258,9 +264,12 @@ func (c *podVolumeRestoreController) processRestore(req *velerov1api.PodVolumeRe var err error // update status to InProgress - req, err = c.patchPodVolumeRestore(req, updatePodVolumeRestorePhaseFunc(velerov1api.PodVolumeRestorePhaseInProgress)) + req, err = c.patchPodVolumeRestore(req, func(r *velerov1api.PodVolumeRestore) { + r.Status.Phase = velerov1api.PodVolumeRestorePhaseInProgress + r.Status.StartTimestamp.Time = c.clock.Now() + }) if err != nil { - log.WithError(err).Error("Error setting phase to InProgress") + log.WithError(err).Error("Error setting PodVolumeRestore startTimestamp and phase to InProgress") return errors.WithStack(err) } @@ -270,7 +279,7 @@ func (c *podVolumeRestoreController) processRestore(req *velerov1api.PodVolumeRe return c.failRestore(req, errors.Wrap(err, "error getting pod").Error(), log) } - volumeDir, err := kube.GetVolumeDirectory(pod, req.Spec.Volume, c.pvcLister) + volumeDir, err := kube.GetVolumeDirectory(pod, req.Spec.Volume, c.pvcLister, c.pvLister) if err != nil { log.WithError(err).Error("Error getting volume directory name") return c.failRestore(req, errors.Wrap(err, "error getting volume directory name").Error(), log) @@ -291,8 +300,11 @@ func (c *podVolumeRestoreController) processRestore(req *velerov1api.PodVolumeRe } // update status to Completed - if _, err = c.patchPodVolumeRestore(req, updatePodVolumeRestorePhaseFunc(velerov1api.PodVolumeRestorePhaseCompleted)); err != nil { - log.WithError(err).Error("Error setting phase to Completed") + if _, err = c.patchPodVolumeRestore(req, func(r *velerov1api.PodVolumeRestore) { + r.Status.Phase = velerov1api.PodVolumeRestorePhaseCompleted + r.Status.CompletionTimestamp.Time = c.clock.Now() + }); err != nil { + log.WithError(err).Error("Error setting PodVolumeRestore completionTimestamp and phase to Completed") return err } @@ -397,15 +409,10 @@ func (c *podVolumeRestoreController) failRestore(req *velerov1api.PodVolumeResto if _, err := c.patchPodVolumeRestore(req, func(pvr *velerov1api.PodVolumeRestore) { pvr.Status.Phase = velerov1api.PodVolumeRestorePhaseFailed pvr.Status.Message = msg + pvr.Status.CompletionTimestamp.Time = c.clock.Now() }); err != nil { - log.WithError(err).Error("Error setting phase to Failed") + log.WithError(err).Error("Error setting PodVolumeRestore phase to Failed") return err } return nil } - -func updatePodVolumeRestorePhaseFunc(phase velerov1api.PodVolumeRestorePhase) func(r *velerov1api.PodVolumeRestore) { - return func(r *velerov1api.PodVolumeRestore) { - r.Status.Phase = phase - } -} diff --git a/pkg/controller/pod_volume_restore_controller_test.go b/pkg/controller/pod_volume_restore_controller_test.go index 01e08f9893e..e50ee0e9459 100644 --- 
a/pkg/controller/pod_volume_restore_controller_test.go +++ b/pkg/controller/pod_volume_restore_controller_test.go @@ -34,7 +34,7 @@ import ( veleroinformers "github.com/heptio/velero/pkg/generated/informers/externalversions" velerov1listers "github.com/heptio/velero/pkg/generated/listers/velero/v1" "github.com/heptio/velero/pkg/restic" - velerotest "github.com/heptio/velero/pkg/util/test" + velerotest "github.com/heptio/velero/pkg/test" ) func TestPVRHandler(t *testing.T) { diff --git a/pkg/controller/restic_repository_controller.go b/pkg/controller/restic_repository_controller.go index b74814dc98f..7be6ad5d9c5 100644 --- a/pkg/controller/restic_repository_controller.go +++ b/pkg/controller/restic_repository_controller.go @@ -18,6 +18,7 @@ package controller import ( "encoding/json" + "strings" "time" jsonpatch "github.com/evanphx/json-patch" @@ -120,9 +121,19 @@ func (c *resticRepositoryController) processQueueItem(key string) error { // Don't mutate the shared cache reqCopy := req.DeepCopy() - switch req.Status.Phase { - case "", v1.ResticRepositoryPhaseNew: + if req.Status.Phase == "" || req.Status.Phase == v1.ResticRepositoryPhaseNew { return c.initializeRepo(reqCopy, log) + } + + // If the repository is ready or not-ready, check it for stale locks, but if + // this fails for any reason, it's non-critical so we still continue on to the + // rest of the "process" logic. + log.Debug("Checking repository for stale locks") + if err := c.repositoryManager.UnlockRepo(reqCopy); err != nil { + log.WithError(err).Error("Error checking repository for stale locks") + } + + switch req.Status.Phase { case v1.ResticRepositoryPhaseReady: return c.runMaintenanceIfDue(reqCopy, log) case v1.ResticRepositoryPhaseNotReady: @@ -141,9 +152,21 @@ func (c *resticRepositoryController) initializeRepo(req *v1.ResticRepository, lo return c.patchResticRepository(req, repoNotReady(err.Error())) } + repoIdentifier, err := restic.GetRepoIdentifier(loc, req.Spec.VolumeNamespace) + if err != nil { + return c.patchResticRepository(req, func(r *v1.ResticRepository) { + r.Status.Message = err.Error() + r.Status.Phase = v1.ResticRepositoryPhaseNotReady + + if r.Spec.MaintenanceFrequency.Duration <= 0 { + r.Spec.MaintenanceFrequency = metav1.Duration{Duration: restic.DefaultMaintenanceFrequency} + } + }) + } + // defaulting - if the patch fails, return an error so the item is returned to the queue if err := c.patchResticRepository(req, func(r *v1.ResticRepository) { - r.Spec.ResticIdentifier = restic.GetRepoIdentifier(loc, r.Spec.VolumeNamespace) + r.Spec.ResticIdentifier = repoIdentifier if r.Spec.MaintenanceFrequency.Duration <= 0 { r.Spec.MaintenanceFrequency = metav1.Duration{Duration: restic.DefaultMaintenanceFrequency} @@ -162,14 +185,23 @@ func (c *resticRepositoryController) initializeRepo(req *v1.ResticRepository, lo }) } -// ensureRepo first tries to connect to the repo, and returns if it succeeds. If it fails, -// it attempts to init the repo, and returns the result. +// ensureRepo checks to see if a repository exists, and attempts to initialize it if +// it does not exist. An error is returned if the repository can't be connected to +// or initialized. func ensureRepo(repo *v1.ResticRepository, repoManager restic.RepositoryManager) error { - if repoManager.ConnectToRepo(repo) == nil { - return nil + if err := repoManager.ConnectToRepo(repo); err != nil { + // If the repository has not yet been initialized, the error message will always include + // the following string. 
This is the only scenario where we should try to initialize it. + // Other errors (e.g. "already locked") should be returned as-is since the repository + // does already exist, but it can't be connected to. + if strings.Contains(err.Error(), "Is there a repository at the following location?") { + return repoManager.InitRepo(repo) + } + + return err } - return repoManager.InitRepo(repo) + return nil } func (c *resticRepositoryController) runMaintenanceIfDue(req *v1.ResticRepository, log logrus.FieldLogger) error { @@ -184,11 +216,6 @@ func (c *resticRepositoryController) runMaintenanceIfDue(req *v1.ResticRepositor log.Info("Running maintenance on restic repository") - log.Debug("Checking repo before prune") - if err := c.repositoryManager.CheckRepo(req); err != nil { - return c.patchResticRepository(req, repoNotReady(err.Error())) - } - // prune failures should be displayed in the `.status.message` field but // should not cause the repo to move to `NotReady`. log.Debug("Pruning repo") @@ -201,11 +228,6 @@ func (c *resticRepositoryController) runMaintenanceIfDue(req *v1.ResticRepositor } } - log.Debug("Checking repo after prune") - if err := c.repositoryManager.CheckRepo(req); err != nil { - return c.patchResticRepository(req, repoNotReady(err.Error())) - } - return c.patchResticRepository(req, func(req *v1.ResticRepository) { req.Status.LastMaintenanceTime = metav1.Time{Time: now} }) @@ -216,6 +238,11 @@ func dueForMaintenance(req *v1.ResticRepository, now time.Time) bool { } func (c *resticRepositoryController) checkNotReadyRepo(req *v1.ResticRepository, log logrus.FieldLogger) error { + // no identifier: can't possibly be ready, so just return + if req.Spec.ResticIdentifier == "" { + return nil + } + log.Info("Checking restic repository for readiness") // we need to ensure it (first check, if check fails, attempt to init) diff --git a/pkg/controller/restore_controller.go b/pkg/controller/restore_controller.go index 610ceab272d..b8a6fcd7396 100644 --- a/pkg/controller/restore_controller.go +++ b/pkg/controller/restore_controller.go @@ -43,6 +43,7 @@ import ( "github.com/heptio/velero/pkg/metrics" "github.com/heptio/velero/pkg/persistence" "github.com/heptio/velero/pkg/plugin/clientmgmt" + "github.com/heptio/velero/pkg/restic" pkgrestore "github.com/heptio/velero/pkg/restore" "github.com/heptio/velero/pkg/util/collections" kubeutil "github.com/heptio/velero/pkg/util/kube" @@ -75,7 +76,7 @@ type restoreController struct { namespace string restoreClient velerov1client.RestoresGetter - backupClient velerov1client.BackupsGetter + podVolumeBackupClient velerov1client.PodVolumeBackupsGetter restorer pkgrestore.Restorer backupLister listers.BackupLister restoreLister listers.RestoreLister @@ -84,6 +85,7 @@ type restoreController struct { restoreLogLevel logrus.Level defaultBackupLocation string metrics *metrics.ServerMetrics + logFormat logging.Format newPluginManager func(logger logrus.FieldLogger) clientmgmt.Manager newBackupStore func(*api.BackupStorageLocation, persistence.ObjectStoreGetter, logrus.FieldLogger) (persistence.BackupStore, error) @@ -93,7 +95,7 @@ func NewRestoreController( namespace string, restoreInformer informers.RestoreInformer, restoreClient velerov1client.RestoresGetter, - backupClient velerov1client.BackupsGetter, + podVolumeBackupClient velerov1client.PodVolumeBackupsGetter, restorer pkgrestore.Restorer, backupInformer informers.BackupInformer, backupLocationInformer informers.BackupStorageLocationInformer, @@ -103,12 +105,13 @@ func NewRestoreController( 
newPluginManager func(logrus.FieldLogger) clientmgmt.Manager, defaultBackupLocation string, metrics *metrics.ServerMetrics, + logFormat logging.Format, ) Interface { c := &restoreController{ genericController: newGenericController("restore", logger), namespace: namespace, restoreClient: restoreClient, - backupClient: backupClient, + podVolumeBackupClient: podVolumeBackupClient, restorer: restorer, backupLister: backupInformer.Lister(), restoreLister: restoreInformer.Lister(), @@ -117,6 +120,7 @@ func NewRestoreController( restoreLogLevel: restoreLogLevel, defaultBackupLocation: defaultBackupLocation, metrics: metrics, + logFormat: logFormat, // use variables to refer to these functions so they can be // replaced with fakes for testing. @@ -412,7 +416,7 @@ func (c *restoreController) fetchBackupInfo(backupName string, pluginManager cli func (c *restoreController) runValidatedRestore(restore *api.Restore, info backupInfo) error { // instantiate the per-restore logger that will output both to a temp file // (for upload to object storage) and to stdout. - restoreLog, err := newRestoreLogger(restore, c.logger, c.restoreLogLevel) + restoreLog, err := newRestoreLogger(restore, c.logger, c.restoreLogLevel, c.logFormat) if err != nil { return err } @@ -432,13 +436,32 @@ func (c *restoreController) runValidatedRestore(restore *api.Restore, info backu } defer closeAndRemoveFile(backupFile, c.logger) + opts := restic.NewPodVolumeBackupListOptions(restore.Spec.BackupName) + podVolumeBackupList, err := c.podVolumeBackupClient.PodVolumeBackups(c.namespace).List(opts) + if err != nil { + return errors.WithStack(err) + } + volumeSnapshots, err := info.backupStore.GetBackupVolumeSnapshots(restore.Spec.BackupName) if err != nil { return errors.Wrap(err, "error fetching volume snapshots metadata") } restoreLog.Info("starting restore") - restoreWarnings, restoreErrors := c.restorer.Restore(restoreLog, restore, info.backup, volumeSnapshots, backupFile, actions, c.snapshotLocationLister, pluginManager) + + var podVolumeBackups []*velerov1api.PodVolumeBackup + for i := range podVolumeBackupList.Items { + podVolumeBackups = append(podVolumeBackups, &podVolumeBackupList.Items[i]) + } + restoreReq := pkgrestore.Request{ + Log: restoreLog, + Restore: restore, + Backup: info.backup, + PodVolumeBackups: podVolumeBackups, + VolumeSnapshots: volumeSnapshots, + BackupReader: backupFile, + } + restoreWarnings, restoreErrors := c.restorer.Restore(restoreReq, actions, c.snapshotLocationLister, pluginManager) restoreLog.Info("restore completed") if logReader, err := restoreLog.done(c.logger); err != nil { @@ -555,14 +578,14 @@ type restoreLogger struct { w *gzip.Writer } -func newRestoreLogger(restore *api.Restore, baseLogger logrus.FieldLogger, logLevel logrus.Level) (*restoreLogger, error) { +func newRestoreLogger(restore *api.Restore, baseLogger logrus.FieldLogger, logLevel logrus.Level, logFormat logging.Format) (*restoreLogger, error) { file, err := ioutil.TempFile("", "") if err != nil { return nil, errors.Wrap(err, "error creating temp file") } w := gzip.NewWriter(file) - logger := logging.DefaultLogger(logLevel) + logger := logging.DefaultLogger(logLevel, logFormat) logger.Out = io.MultiWriter(os.Stdout, w) return &restoreLogger{ diff --git a/pkg/controller/restore_controller_test.go b/pkg/controller/restore_controller_test.go index 01783251bd4..af10f0a424b 100644 --- a/pkg/controller/restore_controller_test.go +++ b/pkg/controller/restore_controller_test.go @@ -19,7 +19,6 @@ package controller import ( "bytes" 
"encoding/json" - "io" "io/ioutil" "testing" "time" @@ -36,7 +35,7 @@ import ( "k8s.io/client-go/tools/cache" api "github.com/heptio/velero/pkg/apis/velero/v1" - velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/builder" "github.com/heptio/velero/pkg/generated/clientset/versioned/fake" informers "github.com/heptio/velero/pkg/generated/informers/externalversions" listers "github.com/heptio/velero/pkg/generated/listers/velero/v1" @@ -47,7 +46,8 @@ import ( pluginmocks "github.com/heptio/velero/pkg/plugin/mocks" "github.com/heptio/velero/pkg/plugin/velero" pkgrestore "github.com/heptio/velero/pkg/restore" - velerotest "github.com/heptio/velero/pkg/util/test" + velerotest "github.com/heptio/velero/pkg/test" + "github.com/heptio/velero/pkg/util/logging" "github.com/heptio/velero/pkg/volume" ) @@ -65,17 +65,17 @@ func TestFetchBackupInfo(t *testing.T) { { name: "lister has backup", backupName: "backup-1", - informerLocations: []*api.BackupStorageLocation{velerotest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation}, - informerBackups: []*api.Backup{velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup}, - expectedRes: velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, + informerLocations: []*api.BackupStorageLocation{builder.ForBackupStorageLocation("velero", "default").Provider("myCloud").Bucket("bucket").Result()}, + informerBackups: []*api.Backup{defaultBackup().StorageLocation("default").Result()}, + expectedRes: defaultBackup().StorageLocation("default").Result(), }, { name: "lister does not have a backup, but backupSvc does", backupName: "backup-1", - backupStoreBackup: velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, - informerLocations: []*api.BackupStorageLocation{velerotest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation}, - informerBackups: []*api.Backup{velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup}, - expectedRes: velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, + backupStoreBackup: defaultBackup().StorageLocation("default").Result(), + informerLocations: []*api.BackupStorageLocation{builder.ForBackupStorageLocation("velero", "default").Provider("myCloud").Bucket("bucket").Result()}, + informerBackups: []*api.Backup{defaultBackup().StorageLocation("default").Result()}, + expectedRes: defaultBackup().StorageLocation("default").Result(), }, { name: "no backup", @@ -85,6 +85,8 @@ func TestFetchBackupInfo(t *testing.T) { }, } + formatFlag := logging.FormatText + for _, test := range tests { t.Run(test.name, func(t *testing.T) { var ( @@ -113,6 +115,7 @@ func TestFetchBackupInfo(t *testing.T) { func(logrus.FieldLogger) clientmgmt.Manager { return pluginManager }, "default", metrics.NewServerMetrics(), + formatFlag, ).(*restoreController) c.newBackupStore = func(*api.BackupStorageLocation, persistence.ObjectStoreGetter, logrus.FieldLogger) (persistence.BackupStore, error) { @@ -169,20 +172,22 @@ func TestProcessQueueItemSkips(t *testing.T) { { name: "restore with phase InProgress does not get processed", restoreKey: "foo/bar", - restore: velerotest.NewTestRestore("foo", "bar", api.RestorePhaseInProgress).Restore, + restore: builder.ForRestore("foo", "bar").Phase(api.RestorePhaseInProgress).Result(), }, { name: "restore 
with phase Completed does not get processed", restoreKey: "foo/bar", - restore: velerotest.NewTestRestore("foo", "bar", api.RestorePhaseCompleted).Restore, + restore: builder.ForRestore("foo", "bar").Phase(api.RestorePhaseCompleted).Result(), }, { name: "restore with phase FailedValidation does not get processed", restoreKey: "foo/bar", - restore: velerotest.NewTestRestore("foo", "bar", api.RestorePhaseFailedValidation).Restore, + restore: builder.ForRestore("foo", "bar").Phase(api.RestorePhaseFailedValidation).Result(), }, } + formatFlag := logging.FormatText + for _, test := range tests { t.Run(test.name, func(t *testing.T) { var ( @@ -206,6 +211,7 @@ func TestProcessQueueItemSkips(t *testing.T) { nil, "default", metrics.NewServerMetrics(), + formatFlag, ).(*restoreController) if test.restore != nil { @@ -220,6 +226,8 @@ func TestProcessQueueItemSkips(t *testing.T) { } func TestProcessQueueItem(t *testing.T) { + defaultStorageLocation := builder.ForBackupStorageLocation("velero", "default").Provider("myCloud").Bucket("bucket").Result() + tests := []struct { name string restoreKey string @@ -239,54 +247,48 @@ func TestProcessQueueItem(t *testing.T) { }{ { name: "restore with both namespace in both includedNamespaces and excludedNamespaces fails validation", - location: velerotest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation, - restore: NewRestore("foo", "bar", "backup-1", "another-1", "*", api.RestorePhaseNew).WithExcludedNamespace("another-1").Restore, - backup: velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, + location: defaultStorageLocation, + restore: NewRestore("foo", "bar", "backup-1", "another-1", "*", api.RestorePhaseNew).ExcludedNamespaces("another-1").Result(), + backup: defaultBackup().StorageLocation("default").Result(), expectedErr: false, expectedPhase: string(api.RestorePhaseFailedValidation), expectedValidationErrors: []string{"Invalid included/excluded namespace lists: excludes list cannot contain an item in the includes list: another-1"}, }, { name: "restore with resource in both includedResources and excludedResources fails validation", - location: velerotest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation, - restore: NewRestore("foo", "bar", "backup-1", "*", "a-resource", api.RestorePhaseNew).WithExcludedResource("a-resource").Restore, - backup: velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, + location: defaultStorageLocation, + restore: NewRestore("foo", "bar", "backup-1", "*", "a-resource", api.RestorePhaseNew).ExcludedResources("a-resource").Result(), + backup: defaultBackup().StorageLocation("default").Result(), expectedErr: false, expectedPhase: string(api.RestorePhaseFailedValidation), expectedValidationErrors: []string{"Invalid included/excluded resource lists: excludes list cannot contain an item in the includes list: a-resource"}, }, { name: "new restore with empty backup and schedule names fails validation", - restore: NewRestore("foo", "bar", "", "ns-1", "", api.RestorePhaseNew).Restore, + restore: NewRestore("foo", "bar", "", "ns-1", "", api.RestorePhaseNew).Result(), expectedErr: false, expectedPhase: string(api.RestorePhaseFailedValidation), expectedValidationErrors: []string{"Either a backup or schedule must be specified as a source for the restore, but not both"}, }, { name: "new restore with backup and schedule names 
provided fails validation", - restore: NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseNew).WithSchedule("sched-1").Restore, + restore: NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseNew).Schedule("sched-1").Result(), expectedErr: false, expectedPhase: string(api.RestorePhaseFailedValidation), expectedValidationErrors: []string{"Either a backup or schedule must be specified as a source for the restore, but not both"}, }, { - name: "valid restore with schedule name gets executed", - location: velerotest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation, - restore: NewRestore("foo", "bar", "", "ns-1", "", api.RestorePhaseNew).WithSchedule("sched-1").Restore, - backup: velerotest. - NewTestBackup(). - WithName("backup-1"). - WithStorageLocation("default"). - WithLabel(velerov1api.ScheduleNameLabel, "sched-1"). - WithPhase(api.BackupPhaseCompleted). - Backup, + name: "valid restore with schedule name gets executed", + location: defaultStorageLocation, + restore: NewRestore("foo", "bar", "", "ns-1", "", api.RestorePhaseNew).Schedule("sched-1").Result(), + backup: defaultBackup().StorageLocation("default").ObjectMeta(builder.WithLabels(api.ScheduleNameLabel, "sched-1")).Phase(api.BackupPhaseCompleted).Result(), expectedErr: false, expectedPhase: string(api.RestorePhaseInProgress), - expectedRestorerCall: NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseInProgress).WithSchedule("sched-1").Restore, + expectedRestorerCall: NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseInProgress).Schedule("sched-1").Result(), }, { name: "restore with non-existent backup name fails", - restore: NewRestore("foo", "bar", "backup-1", "ns-1", "*", api.RestorePhaseNew).Restore, + restore: NewRestore("foo", "bar", "backup-1", "ns-1", "*", api.RestorePhaseNew).Result(), expectedErr: false, expectedPhase: string(api.RestorePhaseFailedValidation), expectedValidationErrors: []string{"Error retrieving backup: backup.velero.io \"backup-1\" not found"}, @@ -294,30 +296,30 @@ func TestProcessQueueItem(t *testing.T) { }, { name: "restorer throwing an error causes the restore to fail", - location: velerotest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation, - restore: NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseNew).Restore, - backup: velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, + location: defaultStorageLocation, + restore: NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseNew).Result(), + backup: defaultBackup().StorageLocation("default").Result(), restorerError: errors.New("blarg"), expectedErr: false, expectedPhase: string(api.RestorePhaseInProgress), expectedFinalPhase: string(api.RestorePhasePartiallyFailed), expectedRestoreErrors: 1, - expectedRestorerCall: NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseInProgress).Restore, + expectedRestorerCall: NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseInProgress).Result(), }, { name: "valid restore gets executed", - location: velerotest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation, - restore: NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseNew).Restore, - backup: velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, + 
location: defaultStorageLocation, + restore: NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseNew).Result(), + backup: defaultBackup().StorageLocation("default").Result(), expectedErr: false, expectedPhase: string(api.RestorePhaseInProgress), - expectedRestorerCall: NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseInProgress).Restore, + expectedRestorerCall: NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseInProgress).Result(), }, { name: "restoration of nodes is not supported", - location: velerotest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation, - restore: NewRestore("foo", "bar", "backup-1", "ns-1", "nodes", api.RestorePhaseNew).Restore, - backup: velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, + location: defaultStorageLocation, + restore: NewRestore("foo", "bar", "backup-1", "ns-1", "nodes", api.RestorePhaseNew).Result(), + backup: defaultBackup().StorageLocation("default").Result(), expectedErr: false, expectedPhase: string(api.RestorePhaseFailedValidation), expectedValidationErrors: []string{ @@ -327,9 +329,9 @@ func TestProcessQueueItem(t *testing.T) { }, { name: "restoration of events is not supported", - location: velerotest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation, - restore: NewRestore("foo", "bar", "backup-1", "ns-1", "events", api.RestorePhaseNew).Restore, - backup: velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, + location: defaultStorageLocation, + restore: NewRestore("foo", "bar", "backup-1", "ns-1", "events", api.RestorePhaseNew).Result(), + backup: defaultBackup().StorageLocation("default").Result(), expectedErr: false, expectedPhase: string(api.RestorePhaseFailedValidation), expectedValidationErrors: []string{ @@ -339,9 +341,9 @@ func TestProcessQueueItem(t *testing.T) { }, { name: "restoration of events.events.k8s.io is not supported", - location: velerotest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation, - restore: NewRestore("foo", "bar", "backup-1", "ns-1", "events.events.k8s.io", api.RestorePhaseNew).Restore, - backup: velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, + location: defaultStorageLocation, + restore: NewRestore("foo", "bar", "backup-1", "ns-1", "events.events.k8s.io", api.RestorePhaseNew).Result(), + backup: defaultBackup().StorageLocation("default").Result(), expectedErr: false, expectedPhase: string(api.RestorePhaseFailedValidation), expectedValidationErrors: []string{ @@ -351,9 +353,9 @@ func TestProcessQueueItem(t *testing.T) { }, { name: "restoration of backups.velero.io is not supported", - location: velerotest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation, - restore: NewRestore("foo", "bar", "backup-1", "ns-1", "backups.velero.io", api.RestorePhaseNew).Restore, - backup: velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, + location: defaultStorageLocation, + restore: NewRestore("foo", "bar", "backup-1", "ns-1", "backups.velero.io", api.RestorePhaseNew).Result(), + backup: defaultBackup().StorageLocation("default").Result(), expectedErr: false, expectedPhase: string(api.RestorePhaseFailedValidation), expectedValidationErrors: []string{ 
@@ -363,9 +365,9 @@ func TestProcessQueueItem(t *testing.T) { }, { name: "restoration of restores.velero.io is not supported", - location: velerotest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation, - restore: NewRestore("foo", "bar", "backup-1", "ns-1", "restores.velero.io", api.RestorePhaseNew).Restore, - backup: velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, + location: defaultStorageLocation, + restore: NewRestore("foo", "bar", "backup-1", "ns-1", "restores.velero.io", api.RestorePhaseNew).Result(), + backup: defaultBackup().StorageLocation("default").Result(), expectedErr: false, expectedPhase: string(api.RestorePhaseFailedValidation), expectedValidationErrors: []string{ @@ -375,15 +377,17 @@ func TestProcessQueueItem(t *testing.T) { }, { name: "backup download error results in failed restore", - location: velerotest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation, - restore: NewRestore(api.DefaultNamespace, "bar", "backup-1", "ns-1", "", api.RestorePhaseNew).Restore, + location: defaultStorageLocation, + restore: NewRestore(api.DefaultNamespace, "bar", "backup-1", "ns-1", "", api.RestorePhaseNew).Result(), expectedPhase: string(api.RestorePhaseInProgress), expectedFinalPhase: string(api.RestorePhaseFailed), backupStoreGetBackupContentsErr: errors.New("Couldn't download backup"), - backup: velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, + backup: defaultBackup().StorageLocation("default").Result(), }, } + formatFlag := logging.FormatText + for _, test := range tests { t.Run(test.name, func(t *testing.T) { var ( @@ -412,6 +416,7 @@ func TestProcessQueueItem(t *testing.T) { func(logrus.FieldLogger) clientmgmt.Manager { return pluginManager }, "default", metrics.NewServerMetrics(), + formatFlag, ).(*restoreController) c.newBackupStore = func(*api.BackupStorageLocation, persistence.ObjectStoreGetter, logrus.FieldLogger) (persistence.BackupStore, error) { @@ -602,7 +607,7 @@ func TestProcessQueueItem(t *testing.T) { } } - velerotest.ValidatePatch(t, actions[1], expected, decode) + velerotest.ValidatePatch(t, actions[2], expected, decode) // explicitly capturing the argument passed to Restore myself because // I want to validate the called arg as of the time of calling, but @@ -613,6 +618,8 @@ func TestProcessQueueItem(t *testing.T) { } func TestvalidateAndCompleteWhenScheduleNameSpecified(t *testing.T) { + formatFlag := logging.FormatText + var ( client = fake.NewSimpleClientset() sharedInformers = informers.NewSharedInformerFactory(client, 0) @@ -634,6 +641,7 @@ func TestvalidateAndCompleteWhenScheduleNameSpecified(t *testing.T) { nil, "default", nil, + formatFlag, ).(*restoreController) restore := &api.Restore{ @@ -647,12 +655,11 @@ func TestvalidateAndCompleteWhenScheduleNameSpecified(t *testing.T) { } // no backups created from the schedule: fail validation - require.NoError(t, sharedInformers.Velero().V1().Backups().Informer().GetStore().Add(velerotest. - NewTestBackup(). - WithName("backup-1"). - WithLabel(velerov1api.ScheduleNameLabel, "non-matching-schedule"). - WithPhase(api.BackupPhaseCompleted). - Backup, + require.NoError(t, sharedInformers.Velero().V1().Backups().Informer().GetStore().Add( + defaultBackup(). + ObjectMeta(builder.WithLabels(api.ScheduleNameLabel, "non-matching-schedule")). + Phase(api.BackupPhaseCompleted). 
+ Result(), )) errs := c.validateAndComplete(restore, pluginManager) @@ -660,12 +667,14 @@ func TestvalidateAndCompleteWhenScheduleNameSpecified(t *testing.T) { assert.Empty(t, restore.Spec.BackupName) // no completed backups created from the schedule: fail validation - require.NoError(t, sharedInformers.Velero().V1().Backups().Informer().GetStore().Add(velerotest. - NewTestBackup(). - WithName("backup-2"). - WithLabel(velerov1api.ScheduleNameLabel, "schedule-1"). - WithPhase(api.BackupPhaseInProgress). - Backup, + require.NoError(t, sharedInformers.Velero().V1().Backups().Informer().GetStore().Add( + defaultBackup(). + ObjectMeta( + builder.WithName("backup-2"), + builder.WithLabels(api.ScheduleNameLabel, "schedule-1"), + ). + Phase(api.BackupPhaseInProgress). + Result(), )) errs = c.validateAndComplete(restore, pluginManager) @@ -675,21 +684,25 @@ func TestvalidateAndCompleteWhenScheduleNameSpecified(t *testing.T) { // multiple completed backups created from the schedule: use most recent now := time.Now() - require.NoError(t, sharedInformers.Velero().V1().Backups().Informer().GetStore().Add(velerotest. - NewTestBackup(). - WithName("foo"). - WithLabel(velerov1api.ScheduleNameLabel, "schedule-1"). - WithPhase(api.BackupPhaseCompleted). - WithStartTimestamp(now). - Backup, + require.NoError(t, sharedInformers.Velero().V1().Backups().Informer().GetStore().Add( + defaultBackup(). + ObjectMeta( + builder.WithName("foo"), + builder.WithLabels(api.ScheduleNameLabel, "schedule-1"), + ). + Phase(api.BackupPhaseCompleted). + StartTimestamp(now). + Result(), )) - require.NoError(t, sharedInformers.Velero().V1().Backups().Informer().GetStore().Add(velerotest. - NewTestBackup(). - WithName("bar"). - WithLabel(velerov1api.ScheduleNameLabel, "schedule-1"). - WithPhase(api.BackupPhaseCompleted). - WithStartTimestamp(now.Add(time.Second)). - Backup, + require.NoError(t, sharedInformers.Velero().V1().Backups().Informer().GetStore().Add( + defaultBackup(). + ObjectMeta( + builder.WithName("bar"), + builder.WithLabels(api.ScheduleNameLabel, "schedule-1"), + ). + Phase(api.BackupPhaseCompleted). + StartTimestamp(now.Add(time.Second)). + Result(), )) errs = c.validateAndComplete(restore, pluginManager) @@ -712,7 +725,6 @@ func TestBackupXorScheduleProvided(t *testing.T) { r.Spec.BackupName = "" r.Spec.ScheduleName = "schedule-1" assert.True(t, backupXorScheduleProvided(r)) - } func TestMostRecentCompletedBackup(t *testing.T) { @@ -787,20 +799,18 @@ assert.Equal(t, expected, mostRecentCompletedBackup(backups)) } -func NewRestore(ns, name, backup, includeNS, includeResource string, phase api.RestorePhase) *velerotest.TestRestore { - restore := velerotest.NewTestRestore(ns, name, phase).WithBackup(backup) +func NewRestore(ns, name, backup, includeNS, includeResource string, phase api.RestorePhase) *builder.RestoreBuilder { + restore := builder.ForRestore(ns, name).Phase(phase).Backup(backup) if includeNS != "" { - restore = restore.WithIncludedNamespace(includeNS) + restore = restore.IncludedNamespaces(includeNS) } if includeResource != "" { - restore = restore.WithIncludedResource(includeResource) + restore = restore.IncludedResources(includeResource) } - for _, n := range nonRestorableResources { - restore = restore.WithExcludedResource(n) - } + restore.ExcludedResources(nonRestorableResources...)
return restore } @@ -811,18 +821,14 @@ type fakeRestorer struct { } func (r *fakeRestorer) Restore( - log logrus.FieldLogger, - restore *api.Restore, - backup *api.Backup, - volumeSnapshots []*volume.Snapshot, - backupReader io.Reader, + info pkgrestore.Request, actions []velero.RestoreItemAction, snapshotLocationLister listers.VolumeSnapshotLocationLister, volumeSnapshotterGetter pkgrestore.VolumeSnapshotterGetter, ) (pkgrestore.Result, pkgrestore.Result) { - res := r.Called(log, restore, backup, backupReader, actions) + res := r.Called(info.Log, info.Restore, info.Backup, info.BackupReader, actions) - r.calledWithArg = *restore + r.calledWithArg = *info.Restore return res.Get(0).(pkgrestore.Result), res.Get(1).(pkgrestore.Result) } diff --git a/pkg/controller/schedule_controller.go b/pkg/controller/schedule_controller.go index ad0336e87fd..e0bf31c7619 100644 --- a/pkg/controller/schedule_controller.go +++ b/pkg/controller/schedule_controller.go @@ -33,7 +33,7 @@ import ( "k8s.io/client-go/tools/cache" api "github.com/heptio/velero/pkg/apis/velero/v1" - velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/builder" velerov1client "github.com/heptio/velero/pkg/generated/clientset/versioned/typed/velero/v1" informers "github.com/heptio/velero/pkg/generated/informers/externalversions/velero/v1" listers "github.com/heptio/velero/pkg/generated/listers/velero/v1" @@ -286,29 +286,15 @@ func getNextRunTime(schedule *api.Schedule, cronSchedule cron.Schedule, asOf tim } func getBackup(item *api.Schedule, timestamp time.Time) *api.Backup { - backup := &api.Backup{ - Spec: item.Spec.Template, - ObjectMeta: metav1.ObjectMeta{ - Namespace: item.Namespace, - Name: fmt.Sprintf("%s-%s", item.Name, timestamp.Format("20060102150405")), - }, - } - - addLabelsToBackup(item, backup) + name := fmt.Sprintf("%s-%s", item.Name, timestamp.Format("20060102150405")) + backup := builder. + ForBackup(item.Namespace, name). + FromSchedule(item). 
+ Result() return backup } -func addLabelsToBackup(item *api.Schedule, backup *api.Backup) { - labels := item.Labels - if labels == nil { - labels = make(map[string]string) - } - labels[velerov1api.ScheduleNameLabel] = item.Name - - backup.Labels = labels -} - func patchSchedule(original, updated *api.Schedule, client velerov1client.SchedulesGetter) (*api.Schedule, error) { origBytes, err := json.Marshal(original) if err != nil { diff --git a/pkg/controller/schedule_controller_test.go b/pkg/controller/schedule_controller_test.go index 616686e9ec5..1edee490578 100644 --- a/pkg/controller/schedule_controller_test.go +++ b/pkg/controller/schedule_controller_test.go @@ -31,24 +31,28 @@ import ( core "k8s.io/client-go/testing" "k8s.io/client-go/tools/cache" - api "github.com/heptio/velero/pkg/apis/velero/v1" velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/builder" "github.com/heptio/velero/pkg/generated/clientset/versioned/fake" informers "github.com/heptio/velero/pkg/generated/informers/externalversions" "github.com/heptio/velero/pkg/metrics" - velerotest "github.com/heptio/velero/pkg/util/test" + velerotest "github.com/heptio/velero/pkg/test" ) func TestProcessSchedule(t *testing.T) { + newScheduleBuilder := func(phase velerov1api.SchedulePhase) *builder.ScheduleBuilder { + return builder.ForSchedule("ns", "name").Phase(phase) + } + tests := []struct { name string scheduleKey string - schedule *api.Schedule + schedule *velerov1api.Schedule fakeClockTime string expectedErr bool expectedPhase string expectedValidationErrors []string - expectedBackupCreate *api.Backup + expectedBackupCreate *velerov1api.Backup expectedLastBackup string }{ { @@ -63,54 +67,53 @@ func TestProcessSchedule(t *testing.T) { }, { name: "schedule with phase FailedValidation does not get processed", - schedule: velerotest.NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseFailedValidation).Schedule, + schedule: newScheduleBuilder(velerov1api.SchedulePhaseFailedValidation).Result(), expectedErr: false, }, { name: "schedule with phase New gets validated and failed if invalid", - schedule: velerotest.NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseNew).Schedule, + schedule: newScheduleBuilder(velerov1api.SchedulePhaseNew).Result(), expectedErr: false, - expectedPhase: string(api.SchedulePhaseFailedValidation), + expectedPhase: string(velerov1api.SchedulePhaseFailedValidation), expectedValidationErrors: []string{"Schedule must be a non-empty valid Cron expression"}, }, { name: "schedule with phase gets validated and failed if invalid", - schedule: velerotest.NewTestSchedule("ns", "name").Schedule, + schedule: newScheduleBuilder(velerov1api.SchedulePhase("")).Result(), expectedErr: false, - expectedPhase: string(api.SchedulePhaseFailedValidation), + expectedPhase: string(velerov1api.SchedulePhaseFailedValidation), expectedValidationErrors: []string{"Schedule must be a non-empty valid Cron expression"}, }, { name: "schedule with phase Enabled gets re-validated and failed if invalid", - schedule: velerotest.NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseEnabled).Schedule, + schedule: newScheduleBuilder(velerov1api.SchedulePhaseEnabled).Result(), expectedErr: false, - expectedPhase: string(api.SchedulePhaseFailedValidation), + expectedPhase: string(velerov1api.SchedulePhaseFailedValidation), expectedValidationErrors: []string{"Schedule must be a non-empty valid Cron expression"}, }, { name: "schedule with phase New gets validated and triggers a backup", - 
schedule: velerotest.NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseNew).WithCronSchedule("@every 5m").Schedule, + schedule: newScheduleBuilder(velerov1api.SchedulePhaseNew).CronSchedule("@every 5m").Result(), fakeClockTime: "2017-01-01 12:00:00", expectedErr: false, - expectedPhase: string(api.SchedulePhaseEnabled), - expectedBackupCreate: velerotest.NewTestBackup().WithNamespace("ns").WithName("name-20170101120000").WithLabel(velerov1api.ScheduleNameLabel, "name").Backup, + expectedPhase: string(velerov1api.SchedulePhaseEnabled), + expectedBackupCreate: builder.ForBackup("ns", "name-20170101120000").ObjectMeta(builder.WithLabels(velerov1api.ScheduleNameLabel, "name")).Result(), expectedLastBackup: "2017-01-01 12:00:00", }, { name: "schedule with phase Enabled gets re-validated and triggers a backup if valid", - schedule: velerotest.NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseEnabled).WithCronSchedule("@every 5m").Schedule, + schedule: newScheduleBuilder(velerov1api.SchedulePhaseEnabled).CronSchedule("@every 5m").Result(), fakeClockTime: "2017-01-01 12:00:00", expectedErr: false, - expectedBackupCreate: velerotest.NewTestBackup().WithNamespace("ns").WithName("name-20170101120000").WithLabel(velerov1api.ScheduleNameLabel, "name").Backup, + expectedBackupCreate: builder.ForBackup("ns", "name-20170101120000").ObjectMeta(builder.WithLabels(velerov1api.ScheduleNameLabel, "name")).Result(), expectedLastBackup: "2017-01-01 12:00:00", }, { - name: "schedule that's already run gets LastBackup updated", - schedule: velerotest.NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseEnabled). - WithCronSchedule("@every 5m").WithLastBackupTime("2000-01-01 00:00:00").Schedule, + name: "schedule that's already run gets LastBackup updated", + schedule: newScheduleBuilder(velerov1api.SchedulePhaseEnabled).CronSchedule("@every 5m").LastBackupTime("2000-01-01 00:00:00").Result(), fakeClockTime: "2017-01-01 12:00:00", expectedErr: false, - expectedBackupCreate: velerotest.NewTestBackup().WithNamespace("ns").WithName("name-20170101120000").WithLabel(velerov1api.ScheduleNameLabel, "name").Backup, + expectedBackupCreate: builder.ForBackup("ns", "name-20170101120000").ObjectMeta(builder.WithLabels(velerov1api.ScheduleNameLabel, "name")).Result(), expectedLastBackup: "2017-01-01 12:00:00", }, } @@ -161,7 +164,7 @@ func TestProcessSchedule(t *testing.T) { // these are the fields that may be updated by the controller phase, found, err := unstructured.NestedString(patchMap, "status", "phase") if err == nil && found { - res.Status.Phase = api.SchedulePhase(phase) + res.Status.Phase = velerov1api.SchedulePhase(phase) } lastBackupStr, found, err := unstructured.NestedString(patchMap, "status", "lastBackup") @@ -192,9 +195,9 @@ func TestProcessSchedule(t *testing.T) { index := 0 type PatchStatus struct { - ValidationErrors []string `json:"validationErrors"` - Phase api.SchedulePhase `json:"phase"` - LastBackup time.Time `json:"lastBackup"` + ValidationErrors []string `json:"validationErrors"` + Phase velerov1api.SchedulePhase `json:"phase"` + LastBackup time.Time `json:"lastBackup"` } type Patch struct { @@ -214,7 +217,7 @@ func TestProcessSchedule(t *testing.T) { expected := Patch{ Status: PatchStatus{ ValidationErrors: test.expectedValidationErrors, - Phase: api.SchedulePhase(test.expectedPhase), + Phase: velerov1api.SchedulePhase(test.expectedPhase), }, } @@ -227,7 +230,7 @@ func TestProcessSchedule(t *testing.T) { require.True(t, len(actions) > index, "len(actions) is too small") action := 
core.NewCreateAction( - api.SchemeGroupVersion.WithResource("backups"), + velerov1api.SchemeGroupVersion.WithResource("backups"), created.Namespace, created) @@ -257,43 +260,47 @@ func parseTime(timeString string) time.Time { } func TestGetNextRunTime(t *testing.T) { + defaultSchedule := func() *velerov1api.Schedule { + return builder.ForSchedule("velero", "schedule-1").CronSchedule("@every 5m").Result() + } + tests := []struct { name string - schedule *api.Schedule + schedule *velerov1api.Schedule lastRanOffset string expectedDue bool expectedNextRunTimeOffset string }{ { name: "first run", - schedule: &api.Schedule{Spec: api.ScheduleSpec{Schedule: "@every 5m"}}, + schedule: defaultSchedule(), expectedDue: true, expectedNextRunTimeOffset: "5m", }, { name: "just ran", - schedule: &api.Schedule{Spec: api.ScheduleSpec{Schedule: "@every 5m"}}, + schedule: defaultSchedule(), lastRanOffset: "0s", expectedDue: false, expectedNextRunTimeOffset: "5m", }, { name: "almost but not quite time to run", - schedule: &api.Schedule{Spec: api.ScheduleSpec{Schedule: "@every 5m"}}, + schedule: defaultSchedule(), lastRanOffset: "4m59s", expectedDue: false, expectedNextRunTimeOffset: "5m", }, { name: "time to run again", - schedule: &api.Schedule{Spec: api.ScheduleSpec{Schedule: "@every 5m"}}, + schedule: defaultSchedule(), lastRanOffset: "5m", expectedDue: true, expectedNextRunTimeOffset: "5m", }, { name: "several runs missed", - schedule: &api.Schedule{Spec: api.ScheduleSpec{Schedule: "@every 5m"}}, + schedule: defaultSchedule(), lastRanOffset: "5h", expectedDue: true, expectedNextRunTimeOffset: "5m", @@ -339,14 +346,7 @@ func TestParseCronSchedule(t *testing.T) { // Start with a Schedule with: // - schedule: once a day at 9am // - last backup: 2017-08-10 12:27:00 (just happened) - s := &api.Schedule{ - Spec: api.ScheduleSpec{ - Schedule: "0 9 * * *", - }, - Status: api.ScheduleStatus{ - LastBackup: metav1.NewTime(now), - }, - } + s := builder.ForSchedule("velero", "schedule-1").CronSchedule("0 9 * * *").LastBackupTime(now.Format("2006-01-02 15:04:05")).Result() logger := velerotest.NewLogger() @@ -383,121 +383,51 @@ func TestParseCronSchedule(t *testing.T) { func TestGetBackup(t *testing.T) { tests := []struct { name string - schedule *api.Schedule + schedule *velerov1api.Schedule testClockTime string - expectedBackup *api.Backup + expectedBackup *velerov1api.Backup }{ { - name: "ensure name is formatted correctly (AM time)", - schedule: &api.Schedule{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "foo", - Name: "bar", - }, - Spec: api.ScheduleSpec{ - Template: api.BackupSpec{}, - }, - }, - testClockTime: "2017-07-25 09:15:00", - expectedBackup: &api.Backup{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "foo", - Name: "bar-20170725091500", - Labels: map[string]string{ - velerov1api.ScheduleNameLabel: "bar", - }, - }, - Spec: api.BackupSpec{}, - }, + name: "ensure name is formatted correctly (AM time)", + schedule: builder.ForSchedule("foo", "bar").Result(), + testClockTime: "2017-07-25 09:15:00", + expectedBackup: builder.ForBackup("foo", "bar-20170725091500").ObjectMeta(builder.WithLabels(velerov1api.ScheduleNameLabel, "bar")).Result(), }, { - name: "ensure name is formatted correctly (PM time)", - schedule: &api.Schedule{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "foo", - Name: "bar", - }, - Spec: api.ScheduleSpec{ - Template: api.BackupSpec{}, - }, - }, - testClockTime: "2017-07-25 14:15:00", - expectedBackup: &api.Backup{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "foo", - Name: 
"bar-20170725141500", - Labels: map[string]string{ - velerov1api.ScheduleNameLabel: "bar", - }, - }, - Spec: api.BackupSpec{}, - }, + name: "ensure name is formatted correctly (PM time)", + schedule: builder.ForSchedule("foo", "bar").Result(), + testClockTime: "2017-07-25 14:15:00", + expectedBackup: builder.ForBackup("foo", "bar-20170725141500").ObjectMeta(builder.WithLabels(velerov1api.ScheduleNameLabel, "bar")).Result(), }, { name: "ensure schedule backup template is copied", - schedule: &api.Schedule{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "foo", - Name: "bar", - }, - Spec: api.ScheduleSpec{ - Template: api.BackupSpec{ - IncludedNamespaces: []string{"ns-1", "ns-2"}, - ExcludedNamespaces: []string{"ns-3"}, - IncludedResources: []string{"foo", "bar"}, - ExcludedResources: []string{"baz"}, - LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"label": "value"}}, - TTL: metav1.Duration{Duration: time.Duration(300)}, - }, - }, - }, + schedule: builder.ForSchedule("foo", "bar"). + Template(builder.ForBackup("", ""). + IncludedNamespaces("ns-1", "ns-2"). + ExcludedNamespaces("ns-3"). + IncludedResources("foo", "bar"). + ExcludedResources("baz"). + LabelSelector(&metav1.LabelSelector{MatchLabels: map[string]string{"label": "value"}}). + TTL(time.Duration(300)). + Result(). + Spec). + Result(), testClockTime: "2017-07-25 09:15:00", - expectedBackup: &api.Backup{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "foo", - Name: "bar-20170725091500", - Labels: map[string]string{ - velerov1api.ScheduleNameLabel: "bar", - }, - }, - Spec: api.BackupSpec{ - IncludedNamespaces: []string{"ns-1", "ns-2"}, - ExcludedNamespaces: []string{"ns-3"}, - IncludedResources: []string{"foo", "bar"}, - ExcludedResources: []string{"baz"}, - LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"label": "value"}}, - TTL: metav1.Duration{Duration: time.Duration(300)}, - }, - }, + expectedBackup: builder.ForBackup("foo", "bar-20170725091500"). + ObjectMeta(builder.WithLabels(velerov1api.ScheduleNameLabel, "bar")). + IncludedNamespaces("ns-1", "ns-2"). + ExcludedNamespaces("ns-3"). + IncludedResources("foo", "bar"). + ExcludedResources("baz"). + LabelSelector(&metav1.LabelSelector{MatchLabels: map[string]string{"label": "value"}}). + TTL(time.Duration(300)). 
+ Result(), }, { - name: "ensure schedule labels is copied", - schedule: &api.Schedule{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "foo", - Name: "bar", - Labels: map[string]string{ - "foo": "bar", - "bar": "baz", - }, - }, - Spec: api.ScheduleSpec{ - Template: api.BackupSpec{}, - }, - }, - testClockTime: "2017-07-25 14:15:00", - expectedBackup: &api.Backup{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "foo", - Name: "bar-20170725141500", - Labels: map[string]string{ - velerov1api.ScheduleNameLabel: "bar", - "bar": "baz", - "foo": "bar", - }, - }, - Spec: api.BackupSpec{}, - }, + name: "ensure schedule labels is copied", + schedule: builder.ForSchedule("foo", "bar").ObjectMeta(builder.WithLabels("foo", "bar", "bar", "baz")).Result(), + testClockTime: "2017-07-25 14:15:00", + expectedBackup: builder.ForBackup("foo", "bar-20170725141500").ObjectMeta(builder.WithLabels(velerov1api.ScheduleNameLabel, "bar", "bar", "baz", "foo", "bar")).Result(), }, } diff --git a/pkg/discovery/helper_test.go b/pkg/discovery/helper_test.go index 55437071628..accbbffdab1 100644 --- a/pkg/discovery/helper_test.go +++ b/pkg/discovery/helper_test.go @@ -25,8 +25,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" + velerotest "github.com/heptio/velero/pkg/test" "github.com/heptio/velero/pkg/util/logging" - velerotest "github.com/heptio/velero/pkg/util/test" ) func TestSortResources(t *testing.T) { @@ -180,10 +180,12 @@ func TestRefreshServerPreferredResources(t *testing.T) { }, } + formatFlag := logging.FormatText + for _, test := range tests { fakeServer := velerotest.NewFakeServerResourcesInterface(test.resourceList, test.failedGroups, test.returnError) t.Run(test.name, func(t *testing.T) { - resources, err := refreshServerPreferredResources(fakeServer, logging.DefaultLogger(logrus.DebugLevel)) + resources, err := refreshServerPreferredResources(fakeServer, logging.DefaultLogger(logrus.DebugLevel, formatFlag)) if test.returnError != nil { assert.NotNil(t, err) } else { diff --git a/pkg/generated/clientset/versioned/fake/clientset_generated.go b/pkg/generated/clientset/versioned/fake/clientset_generated.go index 80d769e9b5e..8b0a72ca849 100644 --- a/pkg/generated/clientset/versioned/fake/clientset_generated.go +++ b/pkg/generated/clientset/versioned/fake/clientset_generated.go @@ -41,7 +41,7 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { } } - cs := &Clientset{} + cs := &Clientset{tracker: o} cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} cs.AddReactor("*", "*", testing.ObjectReaction(o)) cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { @@ -63,12 +63,17 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { type Clientset struct { testing.Fake discovery *fakediscovery.FakeDiscovery + tracker testing.ObjectTracker } func (c *Clientset) Discovery() discovery.DiscoveryInterface { return c.discovery } +func (c *Clientset) Tracker() testing.ObjectTracker { + return c.tracker +} + var _ clientset.Interface = &Clientset{} // VeleroV1 retrieves the VeleroV1Client diff --git a/pkg/generated/clientset/versioned/typed/velero/v1/velero_client.go b/pkg/generated/clientset/versioned/typed/velero/v1/velero_client.go index 828beab415b..0cb4351b3ec 100644 --- a/pkg/generated/clientset/versioned/typed/velero/v1/velero_client.go +++ b/pkg/generated/clientset/versioned/typed/velero/v1/velero_client.go @@ -21,7 +21,6 @@ package v1 import ( v1 "github.com/heptio/velero/pkg/apis/velero/v1" 
"github.com/heptio/velero/pkg/generated/clientset/versioned/scheme" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" rest "k8s.io/client-go/rest" ) @@ -121,7 +120,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/pkg/install/daemonset.go b/pkg/install/daemonset.go index a4c0bc6b47a..075fea6baa1 100644 --- a/pkg/install/daemonset.go +++ b/pkg/install/daemonset.go @@ -61,6 +61,7 @@ func DaemonSet(namespace string, opts ...podTemplateOption) *appsv1.DaemonSet { "name": "restic", "component": "velero", }, + Annotations: c.annotations, }, Spec: corev1.PodSpec{ ServiceAccountName: "velero", @@ -128,19 +129,8 @@ func DaemonSet(namespace string, opts ...podTemplateOption) *appsv1.DaemonSet { Name: "VELERO_SCRATCH_DIR", Value: "/scratch", }, - { - Name: "AZURE_CREDENTIALS_FILE", - Value: "/credentials/cloud", - }, - { - Name: "GOOGLE_APPLICATION_CREDENTIALS", - Value: "/credentials/cloud", - }, - { - Name: "AWS_SHARED_CREDENTIALS_FILE", - Value: "/credentials/cloud", - }, }, + Resources: c.resources, }, }, }, @@ -148,7 +138,7 @@ func DaemonSet(namespace string, opts ...podTemplateOption) *appsv1.DaemonSet { }, } - if !c.withoutCredentialsVolume { + if c.withSecret { daemonSet.Spec.Template.Spec.Volumes = append( daemonSet.Spec.Template.Spec.Volumes, corev1.Volume{ @@ -168,6 +158,21 @@ func DaemonSet(namespace string, opts ...podTemplateOption) *appsv1.DaemonSet { MountPath: "/credentials", }, ) + + daemonSet.Spec.Template.Spec.Containers[0].Env = append(daemonSet.Spec.Template.Spec.Containers[0].Env, []corev1.EnvVar{ + { + Name: "GOOGLE_APPLICATION_CREDENTIALS", + Value: "/credentials/cloud", + }, + { + Name: "AWS_SHARED_CREDENTIALS_FILE", + Value: "/credentials/cloud", + }, + { + Name: "AZURE_CREDENTIALS_FILE", + Value: "/credentials/cloud", + }, + }...) } daemonSet.Spec.Template.Spec.Containers[0].Env = append(daemonSet.Spec.Template.Spec.Containers[0].Env, c.envVars...) 
diff --git a/pkg/install/daemonset_test.go b/pkg/install/daemonset_test.go index efb3ae04ae9..8954bc9b298 100644 --- a/pkg/install/daemonset_test.go +++ b/pkg/install/daemonset_test.go @@ -29,10 +29,11 @@ func TestDaemonSet(t *testing.T) { assert.Equal(t, "restic", ds.Spec.Template.Spec.Containers[0].Name) assert.Equal(t, "velero", ds.ObjectMeta.Namespace) - ds = DaemonSet("velero", WithoutCredentialsVolume()) - assert.Equal(t, 2, len(ds.Spec.Template.Spec.Volumes)) - ds = DaemonSet("velero", WithImage("gcr.io/heptio-images/velero:v0.11")) assert.Equal(t, "gcr.io/heptio-images/velero:v0.11", ds.Spec.Template.Spec.Containers[0].Image) assert.Equal(t, corev1.PullIfNotPresent, ds.Spec.Template.Spec.Containers[0].ImagePullPolicy) + + ds = DaemonSet("velero", WithSecret(true)) + assert.Equal(t, 6, len(ds.Spec.Template.Spec.Containers[0].Env)) + assert.Equal(t, 3, len(ds.Spec.Template.Spec.Volumes)) } diff --git a/pkg/install/deployment.go b/pkg/install/deployment.go index 436f80fb446..c4882ee7a11 100644 --- a/pkg/install/deployment.go +++ b/pkg/install/deployment.go @@ -19,7 +19,7 @@ package install import ( "strings" - appsv1beta1 "k8s.io/api/apps/v1beta1" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -27,10 +27,12 @@ import ( type podTemplateOption func(*podTemplateConfig) type podTemplateConfig struct { - image string - withoutCredentialsVolume bool - envVars []corev1.EnvVar - restoreOnly bool + image string + envVars []corev1.EnvVar + restoreOnly bool + annotations map[string]string + resources corev1.ResourceRequirements + withSecret bool } func WithImage(image string) podTemplateOption { @@ -39,9 +41,9 @@ func WithImage(image string) podTemplateOption { } } -func WithoutCredentialsVolume() podTemplateOption { +func WithAnnotations(annotations map[string]string) podTemplateOption { return func(c *podTemplateConfig) { - c.withoutCredentialsVolume = true + c.annotations = annotations } } @@ -61,13 +63,25 @@ func WithEnvFromSecretKey(varName, secret, key string) podTemplateOption { } } +func WithSecret(secretPresent bool) podTemplateOption { + return func(c *podTemplateConfig) { + c.withSecret = secretPresent + } +} + func WithRestoreOnly() podTemplateOption { return func(c *podTemplateConfig) { c.restoreOnly = true } } -func Deployment(namespace string, opts ...podTemplateOption) *appsv1beta1.Deployment { +func WithResources(resources corev1.ResourceRequirements) podTemplateOption { + return func(c *podTemplateConfig) { + c.resources = resources + } +} + +func Deployment(namespace string, opts ...podTemplateOption) *appsv1.Deployment { // TODO: Add support for server args c := &podTemplateConfig{ image: DefaultImage, @@ -87,18 +101,18 @@ func Deployment(namespace string, opts ...podTemplateOption) *appsv1beta1.Deploy containerLabels := labels() containerLabels["deploy"] = "velero" - deployment := &appsv1beta1.Deployment{ + deployment := &appsv1.Deployment{ ObjectMeta: objectMeta(namespace, "velero"), TypeMeta: metav1.TypeMeta{ Kind: "Deployment", - APIVersion: appsv1beta1.SchemeGroupVersion.String(), + APIVersion: appsv1.SchemeGroupVersion.String(), }, - Spec: appsv1beta1.DeploymentSpec{ + Spec: appsv1.DeploymentSpec{ Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"deploy": "velero"}}, Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Labels: containerLabels, - Annotations: podAnnotations(), + Annotations: podAnnotations(c.annotations), }, Spec: corev1.PodSpec{ RestartPolicy: 
corev1.RestartPolicyAlways, @@ -131,18 +145,15 @@ func Deployment(namespace string, opts ...podTemplateOption) *appsv1beta1.Deploy Value: "/scratch", }, { - Name: "GOOGLE_APPLICATION_CREDENTIALS", - Value: "/credentials/cloud", - }, - { - Name: "AWS_SHARED_CREDENTIALS_FILE", - Value: "/credentials/cloud", - }, - { - Name: "AZURE_CREDENTIALS_FILE", - Value: "/credentials/cloud", + Name: "VELERO_NAMESPACE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.namespace", + }, + }, }, }, + Resources: c.resources, }, }, Volumes: []corev1.Volume{ @@ -164,7 +175,7 @@ func Deployment(namespace string, opts ...podTemplateOption) *appsv1beta1.Deploy }, } - if !c.withoutCredentialsVolume { + if c.withSecret { deployment.Spec.Template.Spec.Volumes = append( deployment.Spec.Template.Spec.Volumes, corev1.Volume{ @@ -184,6 +195,21 @@ func Deployment(namespace string, opts ...podTemplateOption) *appsv1beta1.Deploy MountPath: "/credentials", }, ) + + deployment.Spec.Template.Spec.Containers[0].Env = append(deployment.Spec.Template.Spec.Containers[0].Env, []corev1.EnvVar{ + { + Name: "GOOGLE_APPLICATION_CREDENTIALS", + Value: "/credentials/cloud", + }, + { + Name: "AWS_SHARED_CREDENTIALS_FILE", + Value: "/credentials/cloud", + }, + { + Name: "AZURE_CREDENTIALS_FILE", + Value: "/credentials/cloud", + }, + }...) } deployment.Spec.Template.Spec.Containers[0].Env = append(deployment.Spec.Template.Spec.Containers[0].Env, c.envVars...) diff --git a/pkg/install/deployment_test.go b/pkg/install/deployment_test.go index c87c8a318e0..8ad9dc688e0 100644 --- a/pkg/install/deployment_test.go +++ b/pkg/install/deployment_test.go @@ -32,15 +32,16 @@ func TestDeployment(t *testing.T) { assert.Equal(t, "--restore-only", deploy.Spec.Template.Spec.Containers[0].Args[1]) deploy = Deployment("velero", WithEnvFromSecretKey("my-var", "my-secret", "my-key")) - envSecret := deploy.Spec.Template.Spec.Containers[0].Env[4] + envSecret := deploy.Spec.Template.Spec.Containers[0].Env[2] assert.Equal(t, "my-var", envSecret.Name) assert.Equal(t, "my-secret", envSecret.ValueFrom.SecretKeyRef.LocalObjectReference.Name) assert.Equal(t, "my-key", envSecret.ValueFrom.SecretKeyRef.Key) - deploy = Deployment("velero", WithoutCredentialsVolume()) - assert.Equal(t, 2, len(deploy.Spec.Template.Spec.Volumes)) - deploy = Deployment("velero", WithImage("gcr.io/heptio-images/velero:v0.11")) assert.Equal(t, "gcr.io/heptio-images/velero:v0.11", deploy.Spec.Template.Spec.Containers[0].Image) assert.Equal(t, corev1.PullIfNotPresent, deploy.Spec.Template.Spec.Containers[0].ImagePullPolicy) + + deploy = Deployment("velero", WithSecret(true)) + assert.Equal(t, 5, len(deploy.Spec.Template.Spec.Containers[0].Env)) + assert.Equal(t, 3, len(deploy.Spec.Template.Spec.Volumes)) } diff --git a/pkg/install/install.go b/pkg/install/install.go index fb7777db7fd..3f605d864ee 100644 --- a/pkg/install/install.go +++ b/pkg/install/install.go @@ -23,7 +23,7 @@ import ( "time" "github.com/pkg/errors" - appsv1beta1 "k8s.io/api/apps/v1beta1" + appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -120,11 +120,11 @@ func crdsAreReady(factory client.DynamicFactory, crdKinds []string) (bool, error return areReady, nil } -func isAvailable(c appsv1beta1.DeploymentCondition) bool { +func isAvailable(c appsv1.DeploymentCondition) bool { // Make sure that the deployment has been available for at least 10 
seconds. // This is because the deployment can show as Ready momentarily before the pods fall into a CrashLoopBackOff. // See podutils.IsPodAvailable upstream for similar logic with pods - if c.Type == appsv1beta1.DeploymentAvailable && c.Status == corev1.ConditionTrue { + if c.Type == appsv1.DeploymentAvailable && c.Status == corev1.ConditionTrue { if !c.LastTransitionTime.IsZero() && c.LastTransitionTime.Add(10*time.Second).Before(time.Now()) { return true } @@ -134,7 +134,7 @@ func isAvailable(c appsv1beta1.DeploymentCondition) bool { // DeploymentIsReady will poll the kubernetes API server to see if the velero deployment is ready to service user requests. func DeploymentIsReady(factory client.DynamicFactory, namespace string) (bool, error) { - gvk := schema.FromAPIVersionAndKind(appsv1beta1.SchemeGroupVersion.String(), "Deployment") + gvk := schema.FromAPIVersionAndKind(appsv1.SchemeGroupVersion.String(), "Deployment") apiResource := metav1.APIResource{ Name: "deployments", Namespaced: true, @@ -154,7 +154,7 @@ func DeploymentIsReady(factory client.DynamicFactory, namespace string) (bool, e return false, errors.Wrap(err, "error waiting for deployment to be ready") } - deploy := new(appsv1beta1.Deployment) + deploy := new(appsv1.Deployment) if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstructuredDeployment.Object, deploy); err != nil { return false, errors.Wrap(err, "error converting deployment from unstructured") } diff --git a/pkg/install/resources.go b/pkg/install/resources.go index 18d4d589133..f272dfa6904 100644 --- a/pkg/install/resources.go +++ b/pkg/install/resources.go @@ -24,7 +24,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "github.com/heptio/velero/pkg/apis/velero/v1" + v1 "github.com/heptio/velero/pkg/apis/velero/v1" "github.com/heptio/velero/pkg/buildinfo" ) @@ -37,7 +37,17 @@ func imageVersion() string { } // DefaultImage is the default image to use for the Velero deployment and restic daemonset containers. 
-var DefaultImage = "gcr.io/heptio-images/velero:" + imageVersion() +var ( + DefaultImage = "gcr.io/heptio-images/velero:" + imageVersion() + DefaultVeleroPodCPURequest = "500m" + DefaultVeleroPodMemRequest = "128Mi" + DefaultVeleroPodCPULimit = "1000m" + DefaultVeleroPodMemLimit = "256Mi" + DefaultResticPodCPURequest = "0" + DefaultResticPodMemRequest = "0" + DefaultResticPodCPULimit = "0" + DefaultResticPodMemLimit = "0" +) func labels() map[string]string { return map[string]string{ @@ -45,12 +55,20 @@ func labels() map[string]string { } } -func podAnnotations() map[string]string { - return map[string]string{ +func podAnnotations(userAnnotations map[string]string) map[string]string { + // Use the default annotations as a starting point + base := map[string]string{ "prometheus.io/scrape": "true", "prometheus.io/port": "8085", "prometheus.io/path": "/metrics", } + + // Merge base annotations with user annotations to enforce CLI precedence + for k, v := range userAnnotations { + base[k] = v + } + + return base } func containerPorts() []corev1.ContainerPort { @@ -180,6 +198,9 @@ type VeleroOptions struct { ProviderName string Bucket string Prefix string + PodAnnotations map[string]string + VeleroPodResources corev1.ResourceRequirements + ResticPodResources corev1.ResourceRequirements SecretData []byte RestoreOnly bool UseRestic bool @@ -208,8 +229,10 @@ func AllResources(o *VeleroOptions) (*unstructured.UnstructuredList, error) { sa := ServiceAccount(o.Namespace) appendUnstructured(resources, sa) - sec := Secret(o.Namespace, o.SecretData) - appendUnstructured(resources, sec) + if o.SecretData != nil { + sec := Secret(o.Namespace, o.SecretData) + appendUnstructured(resources, sec) + } bsl := BackupStorageLocation(o.Namespace, o.ProviderName, o.Bucket, o.Prefix, o.BSLConfig) appendUnstructured(resources, bsl) @@ -220,12 +243,19 @@ func AllResources(o *VeleroOptions) (*unstructured.UnstructuredList, error) { appendUnstructured(resources, vsl) } + secretPresent := o.SecretData != nil + deploy := Deployment(o.Namespace, + WithAnnotations(o.PodAnnotations), WithImage(o.Image), + WithResources(o.VeleroPodResources), + WithSecret(secretPresent), ) if o.RestoreOnly { deploy = Deployment(o.Namespace, + WithAnnotations(o.PodAnnotations), WithImage(o.Image), + WithSecret(secretPresent), WithRestoreOnly(), ) } @@ -233,7 +263,11 @@ func AllResources(o *VeleroOptions) (*unstructured.UnstructuredList, error) { if o.UseRestic { ds := DaemonSet(o.Namespace, + + WithAnnotations(o.PodAnnotations), WithImage(o.Image), + WithResources(o.ResticPodResources), + WithSecret(secretPresent), ) appendUnstructured(resources, ds) } diff --git a/pkg/persistence/mocks/backup_store.go b/pkg/persistence/mocks/backup_store.go index 340ed9386ad..b0eb0d6d41b 100644 --- a/pkg/persistence/mocks/backup_store.go +++ b/pkg/persistence/mocks/backup_store.go @@ -1,9 +1,10 @@ -// Code generated by mockery v1.0.0 +// Code generated by mockery v1.0.0. DO NOT EDIT. 
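The regenerated mock that follows keeps mockery's usual shape: each method records the call through testify's mock.Mock and returns either a registered stub function's result or the stored return values. A short sketch of stubbing the new BackupExists method from a test in another package; the test name is illustrative:

    package mocks_test

    import (
        "testing"

        "github.com/stretchr/testify/assert"
        "github.com/stretchr/testify/require"

        "github.com/heptio/velero/pkg/persistence/mocks"
    )

    // TestBackupExistsStub shows the standard testify pattern for driving the mock.
    func TestBackupExistsStub(t *testing.T) {
        store := new(mocks.BackupStore)
        store.On("BackupExists", "my-bucket", "backup-1").Return(true, nil)

        exists, err := store.BackupExists("my-bucket", "backup-1")
        require.NoError(t, err)
        assert.True(t, exists)

        store.AssertExpectations(t)
    }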
+ package mocks import io "io" import mock "github.com/stretchr/testify/mock" - +import persistence "github.com/heptio/velero/pkg/persistence" import v1 "github.com/heptio/velero/pkg/apis/velero/v1" import volume "github.com/heptio/velero/pkg/volume" @@ -12,6 +13,27 @@ type BackupStore struct { mock.Mock } +// BackupExists provides a mock function with given fields: bucket, backupName +func (_m *BackupStore) BackupExists(bucket string, backupName string) (bool, error) { + ret := _m.Called(bucket, backupName) + + var r0 bool + if rf, ok := ret.Get(0).(func(string, string) bool); ok { + r0 = rf(bucket, backupName) + } else { + r0 = ret.Get(0).(bool) + } + + var r1 error + if rf, ok := ret.Get(1).(func(string, string) error); ok { + r1 = rf(bucket, backupName) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + // DeleteBackup provides a mock function with given fields: name func (_m *BackupStore) DeleteBackup(name string) error { ret := _m.Called(name) @@ -63,27 +85,6 @@ func (_m *BackupStore) GetBackupContents(name string) (io.ReadCloser, error) { return r0, r1 } -// BackupExists provides a mock function with given fields: bucket, backupName -func (_m *BackupStore) BackupExists(bucket string, backupName string) (bool, error) { - ret := _m.Called(bucket, backupName) - - var r0 bool - if rf, ok := ret.Get(0).(func(string, string) bool); ok { - r0 = rf(bucket, backupName) - } else { - r0 = ret.Get(0).(bool) - } - - var r1 error - if rf, ok := ret.Get(1).(func(string, string) error); ok { - r1 = rf(bucket, backupName) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - // GetBackupMetadata provides a mock function with given fields: name func (_m *BackupStore) GetBackupMetadata(name string) (*v1.Backup, error) { ret := _m.Called(name) @@ -151,18 +152,27 @@ func (_m *BackupStore) GetDownloadURL(target v1.DownloadTarget) (string, error) return r0, r1 } -// IsValid provides a mock function with given fields: -func (_m *BackupStore) IsValid() error { - ret := _m.Called() +// GetPodVolumeBackups provides a mock function with given fields: name +func (_m *BackupStore) GetPodVolumeBackups(name string) ([]*v1.PodVolumeBackup, error) { + ret := _m.Called(name) - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() + var r0 []*v1.PodVolumeBackup + if rf, ok := ret.Get(0).(func(string) []*v1.PodVolumeBackup); ok { + r0 = rf(name) } else { - r0 = ret.Error(0) + if ret.Get(0) != nil { + r0 = ret.Get(0).([]*v1.PodVolumeBackup) + } } - return r0 + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(name) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } // GetRevision provides a mock function with given fields: @@ -186,6 +196,20 @@ func (_m *BackupStore) GetRevision() (string, error) { return r0, r1 } +// IsValid provides a mock function with given fields: +func (_m *BackupStore) IsValid() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + // ListBackups provides a mock function with given fields: func (_m *BackupStore) ListBackups() ([]string, error) { ret := _m.Called() @@ -209,13 +233,13 @@ func (_m *BackupStore) ListBackups() ([]string, error) { return r0, r1 } -// PutBackup provides a mock function with given fields: name, metadata, contents, log, volumeSnapshots -func (_m *BackupStore) PutBackup(name string, metadata io.Reader, contents io.Reader, log io.Reader, volumeSnapshots io.Reader) error { - ret := _m.Called(name, metadata, 
contents, log, volumeSnapshots) +// PutBackup provides a mock function with given fields: info +func (_m *BackupStore) PutBackup(info persistence.BackupInfo) error { + ret := _m.Called(info) var r0 error - if rf, ok := ret.Get(0).(func(string, io.Reader, io.Reader, io.Reader, io.Reader) error); ok { - r0 = rf(name, metadata, contents, log, volumeSnapshots) + if rf, ok := ret.Get(0).(func(persistence.BackupInfo) error); ok { + r0 = rf(info) } else { r0 = ret.Error(0) } diff --git a/pkg/persistence/object_store.go b/pkg/persistence/object_store.go index bf3bae50aaa..69ef80e2ab2 100644 --- a/pkg/persistence/object_store.go +++ b/pkg/persistence/object_store.go @@ -35,6 +35,16 @@ import ( "github.com/heptio/velero/pkg/volume" ) +type BackupInfo struct { + Name string + Metadata, + Contents, + Log, + PodVolumeBackups, + VolumeSnapshots, + BackupResourceList io.Reader +} + // BackupStore defines operations for creating, retrieving, and deleting // Velero backup and restore data in/from a persistent backup store. type BackupStore interface { @@ -43,9 +53,10 @@ type BackupStore interface { ListBackups() ([]string, error) - PutBackup(name string, metadata, contents, log, volumeSnapshots io.Reader) error + PutBackup(info BackupInfo) error GetBackupMetadata(name string) (*velerov1api.Backup, error) GetBackupVolumeSnapshots(name string) ([]*volume.Snapshot, error) + GetPodVolumeBackups(name string) ([]*velerov1api.PodVolumeBackup, error) GetBackupContents(name string) (io.ReadCloser, error) // BackupExists checks if the backup metadata file exists in object storage. @@ -85,19 +96,31 @@ func NewObjectBackupStore(location *velerov1api.BackupStorageLocation, objectSto return nil, errors.New("object storage provider name must not be empty") } - objectStore, err := objectStoreGetter.GetObjectStore(location.Spec.Provider) - if err != nil { - return nil, err + // trim off any leading/trailing slashes + bucket := strings.Trim(location.Spec.ObjectStorage.Bucket, "/") + prefix := strings.Trim(location.Spec.ObjectStorage.Prefix, "/") + + // if there are any slashes in the middle of 'bucket', the user + // probably put / in the bucket field, which we + // don't support. + if strings.Contains(bucket, "/") { + return nil, errors.Errorf("backup storage location's bucket name %q must not contain a '/' (if using a prefix, put it in the 'Prefix' field instead)", location.Spec.ObjectStorage.Bucket) } - // add the bucket name to the config map so that object stores can use - // it when initializing. The AWS object store uses this to determine the - // bucket's region when setting up its client. + // add the bucket name and prefix to the config map so that object stores + // can use them when initializing. The AWS object store uses the bucket + // name to determine the bucket's region when setting up its client. 
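The BackupStore interface above now receives all backup artifacts through the single BackupInfo struct instead of a growing list of positional readers. A rough sketch of a caller populating it, assuming the reader variables are already opened elsewhere:

    package example

    import (
        "io"

        "github.com/heptio/velero/pkg/persistence"
    )

    // putBackup hands every artifact to the store in one struct.
    func putBackup(store persistence.BackupStore, name string, metadata, contents, log, podVolumeBackups, volumeSnapshots, resourceList io.Reader) error {
        return store.PutBackup(persistence.BackupInfo{
            Name:               name,
            Metadata:           metadata,
            Contents:           contents,
            Log:                log,
            PodVolumeBackups:   podVolumeBackups,
            VolumeSnapshots:    volumeSnapshots,
            BackupResourceList: resourceList,
        })
    }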
if location.Spec.ObjectStorage != nil { if location.Spec.Config == nil { location.Spec.Config = make(map[string]string) } - location.Spec.Config["bucket"] = location.Spec.ObjectStorage.Bucket + location.Spec.Config["bucket"] = bucket + location.Spec.Config["prefix"] = prefix + } + + objectStore, err := objectStoreGetter.GetObjectStore(location.Spec.Provider) + if err != nil { + return nil, err } if err := objectStore.Init(location.Spec.Config); err != nil { @@ -105,14 +128,14 @@ func NewObjectBackupStore(location *velerov1api.BackupStorageLocation, objectSto } log := logger.WithFields(logrus.Fields(map[string]interface{}{ - "bucket": location.Spec.ObjectStorage.Bucket, - "prefix": location.Spec.ObjectStorage.Prefix, + "bucket": bucket, + "prefix": prefix, })) return &objectBackupStore{ objectStore: objectStore, - bucket: location.Spec.ObjectStorage.Bucket, - layout: NewObjectStoreLayout(location.Spec.ObjectStorage.Prefix), + bucket: bucket, + layout: NewObjectStoreLayout(prefix), logger: log, }, nil } @@ -166,44 +189,68 @@ func (s *objectBackupStore) ListBackups() ([]string, error) { return output, nil } -func (s *objectBackupStore) PutBackup(name string, metadata, contents, log, volumeSnapshots io.Reader) error { - if err := seekAndPutObject(s.objectStore, s.bucket, s.layout.getBackupLogKey(name), log); err != nil { +func (s *objectBackupStore) PutBackup(info BackupInfo) error { + if err := seekAndPutObject(s.objectStore, s.bucket, s.layout.getBackupLogKey(info.Name), info.Log); err != nil { // Uploading the log file is best-effort; if it fails, we log the error but it doesn't impact the // backup's status. - s.logger.WithError(err).WithField("backup", name).Error("Error uploading log file") + s.logger.WithError(err).WithField("backup", info.Name).Error("Error uploading log file") } - if metadata == nil { + if info.Metadata == nil { // If we don't have metadata, something failed, and there's no point in continuing. An object // storage bucket that is missing the metadata file can't be restored, nor can its logs be // viewed. 
return nil } - if err := seekAndPutObject(s.objectStore, s.bucket, s.layout.getBackupMetadataKey(name), metadata); err != nil { + if err := seekAndPutObject(s.objectStore, s.bucket, s.layout.getBackupMetadataKey(info.Name), info.Metadata); err != nil { // failure to upload metadata file is a hard-stop return err } - if err := seekAndPutObject(s.objectStore, s.bucket, s.layout.getBackupContentsKey(name), contents); err != nil { - deleteErr := s.objectStore.DeleteObject(s.bucket, s.layout.getBackupMetadataKey(name)) + if err := seekAndPutObject(s.objectStore, s.bucket, s.layout.getBackupContentsKey(info.Name), info.Contents); err != nil { + deleteErr := s.objectStore.DeleteObject(s.bucket, s.layout.getBackupMetadataKey(info.Name)) return kerrors.NewAggregate([]error{err, deleteErr}) } - if err := seekAndPutObject(s.objectStore, s.bucket, s.layout.getBackupVolumeSnapshotsKey(name), volumeSnapshots); err != nil { + if err := seekAndPutObject(s.objectStore, s.bucket, s.layout.getPodVolumeBackupsKey(info.Name), info.PodVolumeBackups); err != nil { + errs := []error{err} + + deleteErr := s.objectStore.DeleteObject(s.bucket, s.layout.getBackupContentsKey(info.Name)) + errs = append(errs, deleteErr) + + deleteErr = s.objectStore.DeleteObject(s.bucket, s.layout.getBackupMetadataKey(info.Name)) + errs = append(errs, deleteErr) + + return kerrors.NewAggregate(errs) + } + + if err := seekAndPutObject(s.objectStore, s.bucket, s.layout.getBackupVolumeSnapshotsKey(info.Name), info.VolumeSnapshots); err != nil { errs := []error{err} - deleteErr := s.objectStore.DeleteObject(s.bucket, s.layout.getBackupContentsKey(name)) + deleteErr := s.objectStore.DeleteObject(s.bucket, s.layout.getBackupContentsKey(info.Name)) errs = append(errs, deleteErr) - deleteErr = s.objectStore.DeleteObject(s.bucket, s.layout.getBackupMetadataKey(name)) + deleteErr = s.objectStore.DeleteObject(s.bucket, s.layout.getBackupMetadataKey(info.Name)) + errs = append(errs, deleteErr) + + return kerrors.NewAggregate(errs) + } + + if err := seekAndPutObject(s.objectStore, s.bucket, s.layout.getBackupResourceListKey(info.Name), info.BackupResourceList); err != nil { + errs := []error{err} + + deleteErr := s.objectStore.DeleteObject(s.bucket, s.layout.getBackupContentsKey(info.Name)) + errs = append(errs, deleteErr) + + deleteErr = s.objectStore.DeleteObject(s.bucket, s.layout.getBackupMetadataKey(info.Name)) errs = append(errs, deleteErr) return kerrors.NewAggregate(errs) } if err := s.putRevision(); err != nil { - s.logger.WithField("backup", name).WithError(err).Warn("Error updating backup store revision") + s.logger.WithField("backup", info.Name).WithError(err).Warn("Error updating backup store revision") } return nil @@ -237,55 +284,76 @@ func (s *objectBackupStore) GetBackupMetadata(name string) (*velerov1api.Backup, return backupObj, nil } -func keyExists(objectStore velero.ObjectStore, bucket, prefix, key string) (bool, error) { - keys, err := objectStore.ListObjects(bucket, prefix) +func (s *objectBackupStore) GetBackupVolumeSnapshots(name string) ([]*volume.Snapshot, error) { + // if the volumesnapshots file doesn't exist, we don't want to return an error, since + // a legacy backup or a backup with no snapshots would not have this file, so check for + // its existence before attempting to get its contents. 
+ res, err := tryGet(s.objectStore, s.bucket, s.layout.getBackupVolumeSnapshotsKey(name)) if err != nil { - return false, err + return nil, err + } + if res == nil { + return nil, nil } + defer res.Close() - var found bool - for _, existing := range keys { - if key == existing { - found = true - break - } + var volumeSnapshots []*volume.Snapshot + if err := decode(res, &volumeSnapshots); err != nil { + return nil, err } - return found, nil + return volumeSnapshots, nil } -func (s *objectBackupStore) GetBackupVolumeSnapshots(name string) ([]*volume.Snapshot, error) { - key := s.layout.getBackupVolumeSnapshotsKey(name) - - // if the volumesnapshots file doesn't exist, we don't want to return an error, since - // a legacy backup or a backup with no snapshots would not have this file, so check for - // its existence before attempting to get its contents. - ok, err := keyExists(s.objectStore, s.bucket, s.layout.getBackupDir(name), key) +// tryGet returns the object with the given key if it exists, nil if it does not exist, +// or an error if it was unable to check existence or get the object. +func tryGet(objectStore velero.ObjectStore, bucket, key string) (io.ReadCloser, error) { + exists, err := objectStore.ObjectExists(bucket, key) if err != nil { return nil, errors.WithStack(err) } - if !ok { + if !exists { return nil, nil } - res, err := s.objectStore.GetObject(s.bucket, key) + return objectStore.GetObject(bucket, key) +} + +// decode extracts a .json.gz file reader into the object pointed to +// by 'into'. +func decode(jsongzReader io.Reader, into interface{}) error { + gzr, err := gzip.NewReader(jsongzReader) if err != nil { - return nil, err + return errors.WithStack(err) + } + defer gzr.Close() + + if err := json.NewDecoder(gzr).Decode(into); err != nil { + return errors.Wrap(err, "error decoding object data") } - defer res.Close() - gzr, err := gzip.NewReader(res) + return nil +} + +func (s *objectBackupStore) GetPodVolumeBackups(name string) ([]*velerov1api.PodVolumeBackup, error) { + // if the podvolumebackups file doesn't exist, we don't want to return an error, since + // a legacy backup or a backup with no pod volume backups would not have this file, so + // check for its existence before attempting to get its contents. 
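The tryGet and decode helpers above factor the "optional .json.gz artifact" pattern out of GetBackupVolumeSnapshots so it can be reused by other getters. A sketch of how they compose if a shared helper lived alongside them in object_store.go; the name getOptionalJSONGz is hypothetical and not part of this patch:

    // getOptionalJSONGz illustrates the pattern used by GetBackupVolumeSnapshots
    // and GetPodVolumeBackups: a missing object is not an error, just a nil result.
    func (s *objectBackupStore) getOptionalJSONGz(key string, into interface{}) error {
        res, err := tryGet(s.objectStore, s.bucket, key)
        if err != nil {
            return err
        }
        if res == nil {
            // e.g. a legacy backup that never wrote this artifact
            return nil
        }
        defer res.Close()

        return decode(res, into)
    }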
+ res, err := tryGet(s.objectStore, s.bucket, s.layout.getPodVolumeBackupsKey(name)) if err != nil { - return nil, errors.WithStack(err) + return nil, err } - defer gzr.Close() + if res == nil { + return nil, nil + } + defer res.Close() - var volumeSnapshots []*volume.Snapshot - if err := json.NewDecoder(gzr).Decode(&volumeSnapshots); err != nil { - return nil, errors.Wrap(err, "error decoding object data") + var podVolumeBackups []*velerov1api.PodVolumeBackup + if err := decode(res, &podVolumeBackups); err != nil { + return nil, err } - return volumeSnapshots, nil + return podVolumeBackups, nil } func (s *objectBackupStore) GetBackupContents(name string) (io.ReadCloser, error) { @@ -358,6 +426,8 @@ func (s *objectBackupStore) GetDownloadURL(target velerov1api.DownloadTarget) (s return s.objectStore.CreateSignedURL(s.bucket, s.layout.getBackupLogKey(target.Name), DownloadURLTTL) case velerov1api.DownloadTargetKindBackupVolumeSnapshots: return s.objectStore.CreateSignedURL(s.bucket, s.layout.getBackupVolumeSnapshotsKey(target.Name), DownloadURLTTL) + case velerov1api.DownloadTargetKindBackupResourceList: + return s.objectStore.CreateSignedURL(s.bucket, s.layout.getBackupResourceListKey(target.Name), DownloadURLTTL) case velerov1api.DownloadTargetKindRestoreLog: return s.objectStore.CreateSignedURL(s.bucket, s.layout.getRestoreLogKey(target.Name), DownloadURLTTL) case velerov1api.DownloadTargetKindRestoreResults: diff --git a/pkg/persistence/object_store_layout.go b/pkg/persistence/object_store_layout.go index 55dc77f6a29..40fe610c7d2 100644 --- a/pkg/persistence/object_store_layout.go +++ b/pkg/persistence/object_store_layout.go @@ -83,10 +83,18 @@ func (l *ObjectStoreLayout) getBackupLogKey(backup string) string { return path.Join(l.subdirs["backups"], backup, fmt.Sprintf("%s-logs.gz", backup)) } +func (l *ObjectStoreLayout) getPodVolumeBackupsKey(backup string) string { + return path.Join(l.subdirs["backups"], backup, fmt.Sprintf("%s-podvolumebackups.json.gz", backup)) +} + func (l *ObjectStoreLayout) getBackupVolumeSnapshotsKey(backup string) string { return path.Join(l.subdirs["backups"], backup, fmt.Sprintf("%s-volumesnapshots.json.gz", backup)) } +func (l *ObjectStoreLayout) getBackupResourceListKey(backup string) string { + return path.Join(l.subdirs["backups"], backup, fmt.Sprintf("%s-resource-list.json.gz", backup)) +} + func (l *ObjectStoreLayout) getRestoreLogKey(restore string) string { return path.Join(l.subdirs["restores"], restore, fmt.Sprintf("restore-%s-logs.gz", restore)) } diff --git a/pkg/persistence/object_store_test.go b/pkg/persistence/object_store_test.go index a7c093743dc..578ccc0c874 100644 --- a/pkg/persistence/object_store_test.go +++ b/pkg/persistence/object_store_test.go @@ -35,10 +35,12 @@ import ( "k8s.io/apimachinery/pkg/runtime" velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/builder" "github.com/heptio/velero/pkg/cloudprovider" cloudprovidermocks "github.com/heptio/velero/pkg/cloudprovider/mocks" + "github.com/heptio/velero/pkg/plugin/velero" + velerotest "github.com/heptio/velero/pkg/test" "github.com/heptio/velero/pkg/util/encode" - velerotest "github.com/heptio/velero/pkg/util/test" "github.com/heptio/velero/pkg/volume" ) @@ -170,8 +172,8 @@ func TestListBackups(t *testing.T) { { name: "normal case", storageData: map[string][]byte{ - "backups/backup-1/velero-backup.json": encodeToBytes(&velerov1api.Backup{ObjectMeta: metav1.ObjectMeta{Name: "backup-1"}}), - "backups/backup-2/velero-backup.json": 
encodeToBytes(&velerov1api.Backup{ObjectMeta: metav1.ObjectMeta{Name: "backup-2"}}), + "backups/backup-1/velero-backup.json": encodeToBytes(builder.ForBackup("", "backup-1").Result()), + "backups/backup-2/velero-backup.json": encodeToBytes(builder.ForBackup("", "backup-2").Result()), }, expectedRes: []string{"backup-1", "backup-2"}, }, @@ -179,8 +181,8 @@ func TestListBackups(t *testing.T) { name: "normal case with backup store prefix", prefix: "velero-backups/", storageData: map[string][]byte{ - "velero-backups/backups/backup-1/velero-backup.json": encodeToBytes(&velerov1api.Backup{ObjectMeta: metav1.ObjectMeta{Name: "backup-1"}}), - "velero-backups/backups/backup-2/velero-backup.json": encodeToBytes(&velerov1api.Backup{ObjectMeta: metav1.ObjectMeta{Name: "backup-2"}}), + "velero-backups/backups/backup-1/velero-backup.json": encodeToBytes(builder.ForBackup("", "backup-1").Result()), + "velero-backups/backups/backup-2/velero-backup.json": encodeToBytes(builder.ForBackup("", "backup-2").Result()), }, expectedRes: []string{"backup-1", "backup-2"}, }, @@ -208,54 +210,66 @@ func TestListBackups(t *testing.T) { func TestPutBackup(t *testing.T) { tests := []struct { - name string - prefix string - metadata io.Reader - contents io.Reader - log io.Reader - snapshots io.Reader - expectedErr string - expectedKeys []string + name string + prefix string + metadata io.Reader + contents io.Reader + log io.Reader + podVolumeBackup io.Reader + snapshots io.Reader + resourceList io.Reader + expectedErr string + expectedKeys []string }{ { - name: "normal case", - metadata: newStringReadSeeker("metadata"), - contents: newStringReadSeeker("contents"), - log: newStringReadSeeker("log"), - snapshots: newStringReadSeeker("snapshots"), - expectedErr: "", + name: "normal case", + metadata: newStringReadSeeker("metadata"), + contents: newStringReadSeeker("contents"), + log: newStringReadSeeker("log"), + podVolumeBackup: newStringReadSeeker("podVolumeBackup"), + snapshots: newStringReadSeeker("snapshots"), + resourceList: newStringReadSeeker("resourceList"), + expectedErr: "", expectedKeys: []string{ "backups/backup-1/velero-backup.json", "backups/backup-1/backup-1.tar.gz", "backups/backup-1/backup-1-logs.gz", + "backups/backup-1/backup-1-podvolumebackups.json.gz", "backups/backup-1/backup-1-volumesnapshots.json.gz", + "backups/backup-1/backup-1-resource-list.json.gz", "metadata/revision", }, }, { - name: "normal case with backup store prefix", - prefix: "prefix-1/", - metadata: newStringReadSeeker("metadata"), - contents: newStringReadSeeker("contents"), - log: newStringReadSeeker("log"), - snapshots: newStringReadSeeker("snapshots"), - expectedErr: "", + name: "normal case with backup store prefix", + prefix: "prefix-1/", + metadata: newStringReadSeeker("metadata"), + contents: newStringReadSeeker("contents"), + log: newStringReadSeeker("log"), + podVolumeBackup: newStringReadSeeker("podVolumeBackup"), + snapshots: newStringReadSeeker("snapshots"), + resourceList: newStringReadSeeker("resourceList"), + expectedErr: "", expectedKeys: []string{ "prefix-1/backups/backup-1/velero-backup.json", "prefix-1/backups/backup-1/backup-1.tar.gz", "prefix-1/backups/backup-1/backup-1-logs.gz", + "prefix-1/backups/backup-1/backup-1-podvolumebackups.json.gz", "prefix-1/backups/backup-1/backup-1-volumesnapshots.json.gz", + "prefix-1/backups/backup-1/backup-1-resource-list.json.gz", "prefix-1/metadata/revision", }, }, { - name: "error on metadata upload does not upload data", - metadata: new(errorReader), - contents: 
newStringReadSeeker("contents"), - log: newStringReadSeeker("log"), - snapshots: newStringReadSeeker("snapshots"), - expectedErr: "error readers return errors", - expectedKeys: []string{"backups/backup-1/backup-1-logs.gz"}, + name: "error on metadata upload does not upload data", + metadata: new(errorReader), + contents: newStringReadSeeker("contents"), + log: newStringReadSeeker("log"), + podVolumeBackup: newStringReadSeeker("podVolumeBackup"), + snapshots: newStringReadSeeker("snapshots"), + resourceList: newStringReadSeeker("resourceList"), + expectedErr: "error readers return errors", + expectedKeys: []string{"backups/backup-1/backup-1-logs.gz"}, }, { name: "error on data upload deletes metadata", @@ -263,31 +277,38 @@ func TestPutBackup(t *testing.T) { contents: new(errorReader), log: newStringReadSeeker("log"), snapshots: newStringReadSeeker("snapshots"), + resourceList: newStringReadSeeker("resourceList"), expectedErr: "error readers return errors", expectedKeys: []string{"backups/backup-1/backup-1-logs.gz"}, }, { - name: "error on log upload is ok", - metadata: newStringReadSeeker("foo"), - contents: newStringReadSeeker("bar"), - log: new(errorReader), - snapshots: newStringReadSeeker("snapshots"), - expectedErr: "", + name: "error on log upload is ok", + metadata: newStringReadSeeker("foo"), + contents: newStringReadSeeker("bar"), + log: new(errorReader), + podVolumeBackup: newStringReadSeeker("podVolumeBackup"), + snapshots: newStringReadSeeker("snapshots"), + resourceList: newStringReadSeeker("resourceList"), + expectedErr: "", expectedKeys: []string{ "backups/backup-1/velero-backup.json", "backups/backup-1/backup-1.tar.gz", + "backups/backup-1/backup-1-podvolumebackups.json.gz", "backups/backup-1/backup-1-volumesnapshots.json.gz", + "backups/backup-1/backup-1-resource-list.json.gz", "metadata/revision", }, }, { - name: "don't upload data when metadata is nil", - metadata: nil, - contents: newStringReadSeeker("contents"), - log: newStringReadSeeker("log"), - snapshots: newStringReadSeeker("snapshots"), - expectedErr: "", - expectedKeys: []string{"backups/backup-1/backup-1-logs.gz"}, + name: "don't upload data when metadata is nil", + metadata: nil, + contents: newStringReadSeeker("contents"), + log: newStringReadSeeker("log"), + podVolumeBackup: newStringReadSeeker("podVolumeBackup"), + snapshots: newStringReadSeeker("snapshots"), + resourceList: newStringReadSeeker("resourceList"), + expectedErr: "", + expectedKeys: []string{"backups/backup-1/backup-1-logs.gz"}, }, } @@ -295,7 +316,16 @@ func TestPutBackup(t *testing.T) { t.Run(tc.name, func(t *testing.T) { harness := newObjectBackupStoreTestHarness("foo", tc.prefix) - err := harness.PutBackup("backup-1", tc.metadata, tc.contents, tc.log, tc.snapshots) + backupInfo := BackupInfo{ + Name: "backup-1", + Metadata: tc.metadata, + Contents: tc.contents, + Log: tc.log, + PodVolumeBackups: tc.podVolumeBackup, + VolumeSnapshots: tc.snapshots, + BackupResourceList: tc.resourceList, + } + err := harness.PutBackup(backupInfo) velerotest.AssertErrorMatches(t, tc.expectedErr, err) assert.Len(t, harness.objectStore.Data[harness.bucket], len(tc.expectedKeys)) @@ -318,16 +348,7 @@ func TestGetBackupMetadata(t *testing.T) { name: "metadata file returns correctly", backupName: "foo", key: "backups/foo/velero-backup.json", - obj: &velerov1api.Backup{ - TypeMeta: metav1.TypeMeta{ - Kind: "Backup", - APIVersion: velerov1api.SchemeGroupVersion.String(), - }, - ObjectMeta: metav1.ObjectMeta{ - Namespace: velerov1api.DefaultNamespace, - Name: "foo", - 
}, - }, + obj: builder.ForBackup(velerov1api.DefaultNamespace, "foo").Result(), }, { name: "no metadata file returns an error", @@ -471,60 +492,87 @@ func TestDeleteBackup(t *testing.T) { func TestGetDownloadURL(t *testing.T) { tests := []struct { - name string - targetKind velerov1api.DownloadTargetKind - targetName string - prefix string - expectedKey string + name string + targetName string + expectedKeyByKind map[velerov1api.DownloadTargetKind]string + prefix string }{ { - name: "backup contents", - targetKind: velerov1api.DownloadTargetKindBackupContents, - targetName: "my-backup", - expectedKey: "backups/my-backup/my-backup.tar.gz", + name: "backup", + targetName: "my-backup", + expectedKeyByKind: map[velerov1api.DownloadTargetKind]string{ + velerov1api.DownloadTargetKindBackupContents: "backups/my-backup/my-backup.tar.gz", + velerov1api.DownloadTargetKindBackupLog: "backups/my-backup/my-backup-logs.gz", + velerov1api.DownloadTargetKindBackupVolumeSnapshots: "backups/my-backup/my-backup-volumesnapshots.json.gz", + velerov1api.DownloadTargetKindBackupResourceList: "backups/my-backup/my-backup-resource-list.json.gz", + }, }, { - name: "backup log", - targetKind: velerov1api.DownloadTargetKindBackupLog, - targetName: "my-backup", - expectedKey: "backups/my-backup/my-backup-logs.gz", + name: "backup with prefix", + targetName: "my-backup", + prefix: "velero-backups/", + expectedKeyByKind: map[velerov1api.DownloadTargetKind]string{ + velerov1api.DownloadTargetKindBackupContents: "velero-backups/backups/my-backup/my-backup.tar.gz", + velerov1api.DownloadTargetKindBackupLog: "velero-backups/backups/my-backup/my-backup-logs.gz", + velerov1api.DownloadTargetKindBackupVolumeSnapshots: "velero-backups/backups/my-backup/my-backup-volumesnapshots.json.gz", + velerov1api.DownloadTargetKindBackupResourceList: "velero-backups/backups/my-backup/my-backup-resource-list.json.gz", + }, }, { - name: "scheduled backup contents", - targetKind: velerov1api.DownloadTargetKindBackupContents, - targetName: "my-backup-20170913154901", - expectedKey: "backups/my-backup-20170913154901/my-backup-20170913154901.tar.gz", + name: "backup with multiple dashes", + targetName: "b-cool-20170913154901-20170913154902", + expectedKeyByKind: map[velerov1api.DownloadTargetKind]string{ + velerov1api.DownloadTargetKindBackupContents: "backups/b-cool-20170913154901-20170913154902/b-cool-20170913154901-20170913154902.tar.gz", + velerov1api.DownloadTargetKindBackupLog: "backups/b-cool-20170913154901-20170913154902/b-cool-20170913154901-20170913154902-logs.gz", + velerov1api.DownloadTargetKindBackupVolumeSnapshots: "backups/b-cool-20170913154901-20170913154902/b-cool-20170913154901-20170913154902-volumesnapshots.json.gz", + velerov1api.DownloadTargetKindBackupResourceList: "backups/b-cool-20170913154901-20170913154902/b-cool-20170913154901-20170913154902-resource-list.json.gz", + }, }, { - name: "scheduled backup log", - targetKind: velerov1api.DownloadTargetKindBackupLog, - targetName: "my-backup-20170913154901", - expectedKey: "backups/my-backup-20170913154901/my-backup-20170913154901-logs.gz", + name: "scheduled backup", + targetName: "my-backup-20170913154901", + expectedKeyByKind: map[velerov1api.DownloadTargetKind]string{ + velerov1api.DownloadTargetKindBackupContents: "backups/my-backup-20170913154901/my-backup-20170913154901.tar.gz", + velerov1api.DownloadTargetKindBackupLog: "backups/my-backup-20170913154901/my-backup-20170913154901-logs.gz", + velerov1api.DownloadTargetKindBackupVolumeSnapshots: 
"backups/my-backup-20170913154901/my-backup-20170913154901-volumesnapshots.json.gz", + velerov1api.DownloadTargetKindBackupResourceList: "backups/my-backup-20170913154901/my-backup-20170913154901-resource-list.json.gz", + }, }, { - name: "backup contents with backup store prefix", - targetKind: velerov1api.DownloadTargetKindBackupContents, - targetName: "my-backup", - prefix: "velero-backups/", - expectedKey: "velero-backups/backups/my-backup/my-backup.tar.gz", + name: "scheduled backup with prefix", + targetName: "my-backup-20170913154901", + prefix: "velero-backups/", + expectedKeyByKind: map[velerov1api.DownloadTargetKind]string{ + velerov1api.DownloadTargetKindBackupContents: "velero-backups/backups/my-backup-20170913154901/my-backup-20170913154901.tar.gz", + velerov1api.DownloadTargetKindBackupLog: "velero-backups/backups/my-backup-20170913154901/my-backup-20170913154901-logs.gz", + velerov1api.DownloadTargetKindBackupVolumeSnapshots: "velero-backups/backups/my-backup-20170913154901/my-backup-20170913154901-volumesnapshots.json.gz", + velerov1api.DownloadTargetKindBackupResourceList: "velero-backups/backups/my-backup-20170913154901/my-backup-20170913154901-resource-list.json.gz", + }, }, { - name: "restore log", - targetKind: velerov1api.DownloadTargetKindRestoreLog, - targetName: "b-20170913154901", - expectedKey: "restores/b-20170913154901/restore-b-20170913154901-logs.gz", + name: "restore", + targetName: "my-backup", + expectedKeyByKind: map[velerov1api.DownloadTargetKind]string{ + velerov1api.DownloadTargetKindRestoreLog: "restores/my-backup/restore-my-backup-logs.gz", + velerov1api.DownloadTargetKindRestoreResults: "restores/my-backup/restore-my-backup-results.gz", + }, }, { - name: "restore results", - targetKind: velerov1api.DownloadTargetKindRestoreResults, - targetName: "b-20170913154901", - expectedKey: "restores/b-20170913154901/restore-b-20170913154901-results.gz", + name: "restore with prefix", + targetName: "my-backup", + prefix: "velero-backups/", + expectedKeyByKind: map[velerov1api.DownloadTargetKind]string{ + velerov1api.DownloadTargetKindRestoreLog: "velero-backups/restores/my-backup/restore-my-backup-logs.gz", + velerov1api.DownloadTargetKindRestoreResults: "velero-backups/restores/my-backup/restore-my-backup-results.gz", + }, }, { - name: "restore results - backup has multiple dashes (e.g. 
restore of scheduled backup)", - targetKind: velerov1api.DownloadTargetKindRestoreResults, - targetName: "b-cool-20170913154901-20170913154902", - expectedKey: "restores/b-cool-20170913154901-20170913154902/restore-b-cool-20170913154901-20170913154902-results.gz", + name: "restore with multiple dashes", + targetName: "b-cool-20170913154901-20170913154902", + expectedKeyByKind: map[velerov1api.DownloadTargetKind]string{ + velerov1api.DownloadTargetKindRestoreLog: "restores/b-cool-20170913154901-20170913154902/restore-b-cool-20170913154901-20170913154902-logs.gz", + velerov1api.DownloadTargetKindRestoreResults: "restores/b-cool-20170913154901-20170913154902/restore-b-cool-20170913154901-20170913154902-results.gz", + }, }, } @@ -532,11 +580,99 @@ func TestGetDownloadURL(t *testing.T) { t.Run(test.name, func(t *testing.T) { harness := newObjectBackupStoreTestHarness("test-bucket", test.prefix) - require.NoError(t, harness.objectStore.PutObject("test-bucket", test.expectedKey, newStringReadSeeker("foo"))) + for kind, expectedKey := range test.expectedKeyByKind { + t.Run(string(kind), func(t *testing.T) { + require.NoError(t, harness.objectStore.PutObject("test-bucket", expectedKey, newStringReadSeeker("foo"))) + + url, err := harness.GetDownloadURL(velerov1api.DownloadTarget{Kind: kind, Name: test.targetName}) + require.NoError(t, err) + assert.Equal(t, "a-url", url) + }) + } + }) + } +} + +type objectStoreGetter map[string]velero.ObjectStore + +func (osg objectStoreGetter) GetObjectStore(provider string) (velero.ObjectStore, error) { + res, ok := osg[provider] + if !ok { + return nil, errors.New("object store not found") + } + + return res, nil +} - url, err := harness.GetDownloadURL(velerov1api.DownloadTarget{Kind: test.targetKind, Name: test.targetName}) - require.NoError(t, err) - assert.Equal(t, "a-url", url) +// TestNewObjectBackupStore runs the NewObjectBackupStore constructor and ensures +// that an ObjectBackupStore is constructed correctly or an appropriate error is +// returned. 
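The objectStoreGetter fake above maps a provider name to an in-memory object store, which is what lets the constructor test below run without any cloud dependency. Before the table-driven cases, a smaller sketch of the same wiring, reusing helpers already imported in this test file; newTestBackupStore is a hypothetical helper name:

    func newTestBackupStore(t *testing.T) BackupStore {
        getter := objectStoreGetter{
            "aws": cloudprovider.NewInMemoryObjectStore("my-bucket"),
        }

        location := builder.ForBackupStorageLocation("velero", "default").
            Provider("aws").
            Bucket("my-bucket").
            Prefix("backups").
            Result()

        store, err := NewObjectBackupStore(location, getter, velerotest.NewLogger())
        require.NoError(t, err)

        return store
    }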
+func TestNewObjectBackupStore(t *testing.T) { + tests := []struct { + name string + location *velerov1api.BackupStorageLocation + objectStoreGetter objectStoreGetter + wantBucket string + wantPrefix string + wantErr string + }{ + { + name: "location with no ObjectStorage field results in an error", + location: new(velerov1api.BackupStorageLocation), + wantErr: "backup storage location does not use object storage", + }, + { + name: "location with no Provider field results in an error", + location: builder.ForBackupStorageLocation("", "").Bucket("").Result(), + wantErr: "object storage provider name must not be empty", + }, + { + name: "location with a Bucket field with a '/' in the middle results in an error", + location: builder.ForBackupStorageLocation("", "").Provider("provider-1").Bucket("invalid/bucket").Result(), + wantErr: "backup storage location's bucket name \"invalid/bucket\" must not contain a '/' (if using a prefix, put it in the 'Prefix' field instead)", + }, + { + name: "when Bucket has a leading and trailing slash, they are both stripped", + location: builder.ForBackupStorageLocation("", "").Provider("provider-1").Bucket("/bucket/").Result(), + objectStoreGetter: objectStoreGetter{ + "provider-1": cloudprovider.NewInMemoryObjectStore("bucket"), + }, + wantBucket: "bucket", + }, + { + name: "when Prefix has a leading and trailing slash, the leading slash is stripped and the trailing slash is left", + location: builder.ForBackupStorageLocation("", "").Provider("provider-1").Bucket("bucket").Prefix("/prefix/").Result(), + objectStoreGetter: objectStoreGetter{ + "provider-1": cloudprovider.NewInMemoryObjectStore("bucket"), + }, + wantBucket: "bucket", + wantPrefix: "prefix/", + }, + { + name: "when Prefix has no leading or trailing slash, a trailing slash is added", + location: builder.ForBackupStorageLocation("", "").Provider("provider-1").Bucket("bucket").Prefix("prefix").Result(), + objectStoreGetter: objectStoreGetter{ + "provider-1": cloudprovider.NewInMemoryObjectStore("bucket"), + }, + wantBucket: "bucket", + wantPrefix: "prefix/", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + res, err := NewObjectBackupStore(tc.location, tc.objectStoreGetter, velerotest.NewLogger()) + if tc.wantErr != "" { + require.Equal(t, tc.wantErr, err.Error()) + } else { + require.Nil(t, err) + + store, ok := res.(*objectBackupStore) + require.True(t, ok) + + assert.Equal(t, tc.wantBucket, store.bucket) + assert.Equal(t, tc.wantPrefix, store.layout.rootPrefix) + } }) } } diff --git a/pkg/plugin/clientmgmt/client_builder_test.go b/pkg/plugin/clientmgmt/client_builder_test.go index 6b00bd83e8e..c5292b6d546 100644 --- a/pkg/plugin/clientmgmt/client_builder_test.go +++ b/pkg/plugin/clientmgmt/client_builder_test.go @@ -26,7 +26,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/heptio/velero/pkg/plugin/framework" - "github.com/heptio/velero/pkg/util/test" + "github.com/heptio/velero/pkg/test" ) func TestNewClientBuilder(t *testing.T) { diff --git a/pkg/plugin/clientmgmt/manager_test.go b/pkg/plugin/clientmgmt/manager_test.go index cf0c12439ef..97f43642462 100644 --- a/pkg/plugin/clientmgmt/manager_test.go +++ b/pkg/plugin/clientmgmt/manager_test.go @@ -27,7 +27,7 @@ import ( "github.com/stretchr/testify/require" "github.com/heptio/velero/pkg/plugin/framework" - "github.com/heptio/velero/pkg/util/test" + "github.com/heptio/velero/pkg/test" ) type mockRegistry struct { diff --git a/pkg/plugin/clientmgmt/registry_test.go 
b/pkg/plugin/clientmgmt/registry_test.go index 38804b913cd..87fbbd5131d 100644 --- a/pkg/plugin/clientmgmt/registry_test.go +++ b/pkg/plugin/clientmgmt/registry_test.go @@ -24,7 +24,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/heptio/velero/pkg/util/test" + "github.com/heptio/velero/pkg/test" ) func TestNewRegistry(t *testing.T) { diff --git a/pkg/plugin/framework/backup_item_action_test.go b/pkg/plugin/framework/backup_item_action_test.go index 8253ac0f64e..aff3b704910 100644 --- a/pkg/plugin/framework/backup_item_action_test.go +++ b/pkg/plugin/framework/backup_item_action_test.go @@ -32,7 +32,7 @@ import ( "github.com/heptio/velero/pkg/backup/mocks" proto "github.com/heptio/velero/pkg/plugin/generated" "github.com/heptio/velero/pkg/plugin/velero" - velerotest "github.com/heptio/velero/pkg/util/test" + velerotest "github.com/heptio/velero/pkg/test" ) func TestBackupItemActionGRPCServerExecute(t *testing.T) { diff --git a/pkg/plugin/framework/client_dispenser_test.go b/pkg/plugin/framework/client_dispenser_test.go index c68de328580..81baa27365a 100644 --- a/pkg/plugin/framework/client_dispenser_test.go +++ b/pkg/plugin/framework/client_dispenser_test.go @@ -22,7 +22,7 @@ import ( "github.com/stretchr/testify/require" "google.golang.org/grpc" - "github.com/heptio/velero/pkg/util/test" + "github.com/heptio/velero/pkg/test" ) type fakeClient struct { diff --git a/pkg/plugin/framework/plugin_base_test.go b/pkg/plugin/framework/plugin_base_test.go index 016e04f1500..64724bae317 100644 --- a/pkg/plugin/framework/plugin_base_test.go +++ b/pkg/plugin/framework/plugin_base_test.go @@ -20,7 +20,7 @@ import ( "github.com/stretchr/testify/assert" - "github.com/heptio/velero/pkg/util/test" + "github.com/heptio/velero/pkg/test" ) func TestClientLogger(t *testing.T) { diff --git a/pkg/podexec/pod_command_executor_test.go b/pkg/podexec/pod_command_executor_test.go index ea8442b485b..68f372b1cac 100644 --- a/pkg/podexec/pod_command_executor_test.go +++ b/pkg/podexec/pod_command_executor_test.go @@ -35,7 +35,7 @@ import ( "k8s.io/client-go/tools/remotecommand" v1 "github.com/heptio/velero/pkg/apis/velero/v1" - velerotest "github.com/heptio/velero/pkg/util/test" + velerotest "github.com/heptio/velero/pkg/test" ) func TestNewPodCommandExecutor(t *testing.T) { diff --git a/pkg/restic/backupper.go b/pkg/restic/backupper.go index 8df0faa1d16..15439b9af4a 100644 --- a/pkg/restic/backupper.go +++ b/pkg/restic/backupper.go @@ -36,7 +36,7 @@ import ( // Backupper can execute restic backups of volumes in a pod. type Backupper interface { // BackupPodVolumes backs up all annotated volumes in a pod. 
- BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api.Pod, log logrus.FieldLogger) (map[string]string, []error) + BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api.Pod, log logrus.FieldLogger) ([]*velerov1api.PodVolumeBackup, []error) } type backupper struct { @@ -96,7 +96,7 @@ func resultsKey(ns, name string) string { return fmt.Sprintf("%s/%s", ns, name) } -func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api.Pod, log logrus.FieldLogger) (map[string]string, []error) { +func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api.Pod, log logrus.FieldLogger) ([]*velerov1api.PodVolumeBackup, []error) { // get volumes to backup from pod's annotations volumesToBackup := GetVolumesToBackup(pod) if len(volumesToBackup) == 0 { @@ -120,9 +120,9 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api. b.resultsLock.Unlock() var ( - errs []error - volumeSnapshots = make(map[string]string) - podVolumes = make(map[string]corev1api.Volume) + errs []error + podVolumeBackups []*velerov1api.PodVolumeBackup + podVolumes = make(map[string]corev1api.Volume) ) // put the pod's volumes in a map for efficient lookup below @@ -130,6 +130,7 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api. podVolumes[podVolume.Name] = podVolume } + var numVolumeSnapshots int for _, volumeName := range volumesToBackup { volume, ok := podVolumes[volumeName] if !ok { @@ -137,9 +138,18 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api. continue } + var pvc *corev1api.PersistentVolumeClaim + if volume.PersistentVolumeClaim != nil { + pvc, err = b.pvcClient.PersistentVolumeClaims(pod.Namespace).Get(volume.PersistentVolumeClaim.ClaimName, metav1.GetOptions{}) + if err != nil { + errs = append(errs, errors.Wrap(err, "error getting persistent volume claim for volume")) + continue + } + } + // hostPath volumes are not supported because they're not mounted into /var/lib/kubelet/pods, so our // daemonset pod has no way to access their data. - isHostPath, err := isHostPathVolume(&volume, b.pvcClient.PersistentVolumeClaims(pod.Namespace), b.pvClient.PersistentVolumes()) + isHostPath, err := isHostPathVolume(&volume, pvc, b.pvClient.PersistentVolumes()) if err != nil { errs = append(errs, errors.Wrap(err, "error checking if volume is a hostPath volume")) continue @@ -149,18 +159,16 @@ func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api. 
continue } - volumeBackup := newPodVolumeBackup(backup, pod, volumeName, repo.Spec.ResticIdentifier) - - if err := errorOnly(b.repoManager.veleroClient.VeleroV1().PodVolumeBackups(volumeBackup.Namespace).Create(volumeBackup)); err != nil { + volumeBackup := newPodVolumeBackup(backup, pod, volume, repo.Spec.ResticIdentifier, pvc) + numVolumeSnapshots++ + if volumeBackup, err = b.repoManager.veleroClient.VeleroV1().PodVolumeBackups(volumeBackup.Namespace).Create(volumeBackup); err != nil { errs = append(errs, err) continue } - - volumeSnapshots[volumeName] = "" } ForEachVolume: - for i, count := 0, len(volumeSnapshots); i < count; i++ { + for i, count := 0, numVolumeSnapshots; i < count; i++ { select { case <-b.ctx.Done(): errs = append(errs, errors.New("timed out waiting for all PodVolumeBackups to complete")) @@ -169,13 +177,12 @@ ForEachVolume: switch res.Status.Phase { case velerov1api.PodVolumeBackupPhaseCompleted: if res.Status.SnapshotID == "" { // when the volume is empty there is no restic snapshot, so best to exclude it - delete(volumeSnapshots, res.Spec.Volume) break } - volumeSnapshots[res.Spec.Volume] = res.Status.SnapshotID + podVolumeBackups = append(podVolumeBackups, res) case velerov1api.PodVolumeBackupPhaseFailed: errs = append(errs, errors.Errorf("pod volume backup failed: %s", res.Status.Message)) - delete(volumeSnapshots, res.Spec.Volume) + podVolumeBackups = append(podVolumeBackups, res) } } } @@ -184,7 +191,7 @@ ForEachVolume: delete(b.results, resultsKey(pod.Namespace, pod.Name)) b.resultsLock.Unlock() - return volumeSnapshots, errs + return podVolumeBackups, errs } type pvcGetter interface { @@ -197,21 +204,12 @@ type pvGetter interface { // isHostPathVolume returns true if the volume is either a hostPath pod volume or a persistent // volume claim on a hostPath persistent volume, or false otherwise. 
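With the signature change above, BackupPodVolumes now hands callers the PodVolumeBackup objects themselves instead of a volume-to-snapshot-ID map. A rough fragment showing how a caller might consume them, assuming a restic Backupper, a Backup, a Pod, and a logrus.FieldLogger are already in scope; the variable names are illustrative:

    podVolumeBackups, errs := resticBackupper.BackupPodVolumes(backup, pod, log)
    for _, err := range errs {
        log.WithError(err).Error("error backing up pod volume")
    }
    for _, pvb := range podVolumeBackups {
        // failed PVBs are returned too, so check the phase before using the snapshot ID
        if pvb.Status.Phase == velerov1api.PodVolumeBackupPhaseCompleted {
            log.Infof("volume %s backed up to restic snapshot %s", pvb.Spec.Volume, pvb.Status.SnapshotID)
        }
    }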
-func isHostPathVolume(volume *corev1api.Volume, pvcGetter pvcGetter, pvGetter pvGetter) (bool, error) { +func isHostPathVolume(volume *corev1api.Volume, pvc *corev1api.PersistentVolumeClaim, pvGetter pvGetter) (bool, error) { if volume.HostPath != nil { return true, nil } - if volume.PersistentVolumeClaim == nil { - return false, nil - } - - pvc, err := pvcGetter.Get(volume.PersistentVolumeClaim.ClaimName, metav1.GetOptions{}) - if err != nil { - return false, errors.WithStack(err) - } - - if pvc.Spec.VolumeName == "" { + if pvc == nil || pvc.Spec.VolumeName == "" { return false, nil } @@ -223,8 +221,8 @@ func isHostPathVolume(volume *corev1api.Volume, pvcGetter pvcGetter, pvGetter pv return pv.Spec.HostPath != nil, nil } -func newPodVolumeBackup(backup *velerov1api.Backup, pod *corev1api.Pod, volumeName, repoIdentifier string) *velerov1api.PodVolumeBackup { - return &velerov1api.PodVolumeBackup{ +func newPodVolumeBackup(backup *velerov1api.Backup, pod *corev1api.Pod, volume corev1api.Volume, repoIdentifier string, pvc *corev1api.PersistentVolumeClaim) *velerov1api.PodVolumeBackup { + pvb := &velerov1api.PodVolumeBackup{ ObjectMeta: metav1.ObjectMeta{ Namespace: backup.Namespace, GenerateName: backup.Name + "-", @@ -250,19 +248,36 @@ func newPodVolumeBackup(backup *velerov1api.Backup, pod *corev1api.Pod, volumeNa Name: pod.Name, UID: pod.UID, }, - Volume: volumeName, + Volume: volume.Name, Tags: map[string]string{ "backup": backup.Name, "backup-uid": string(backup.UID), "pod": pod.Name, "pod-uid": string(pod.UID), "ns": pod.Namespace, - "volume": volumeName, + "volume": volume.Name, }, BackupStorageLocation: backup.Spec.StorageLocation, RepoIdentifier: repoIdentifier, }, } + + if pvc != nil { + // this annotation is used in pkg/restore to identify if a PVC + // has a restic backup. + pvb.Annotations = map[string]string{ + PVCNameAnnotation: pvc.Name, + } + + // this label is used by the pod volume backup controller to tell + // if a pod volume backup is for a PVC. + pvb.Labels[velerov1api.PVCUIDLabel] = string(pvc.UID) + + // this tag is not used by velero, but useful for debugging. 
+ pvb.Spec.Tags["pvc-uid"] = string(pvc.UID) + } + + return pvb } func errorOnly(_ interface{}, err error) error { diff --git a/pkg/restic/backupper_test.go b/pkg/restic/backupper_test.go index 0ba5c18a041..8b1e034587e 100644 --- a/pkg/restic/backupper_test.go +++ b/pkg/restic/backupper_test.go @@ -54,15 +54,13 @@ func TestIsHostPathVolume(t *testing.T) { }, }, } - pvcGetter := &fakePVCGetter{ - pvc: &corev1api.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "ns-1", - Name: "pvc-1", - }, + pvc := &corev1api.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-1", + Name: "pvc-1", }, } - isHostPath, err = isHostPathVolume(vol, pvcGetter, nil) + isHostPath, err = isHostPathVolume(vol, pvc, nil) assert.Nil(t, err) assert.False(t, isHostPath) @@ -74,15 +72,13 @@ func TestIsHostPathVolume(t *testing.T) { }, }, } - pvcGetter = &fakePVCGetter{ - pvc: &corev1api.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "ns-1", - Name: "pvc-1", - }, - Spec: corev1api.PersistentVolumeClaimSpec{ - VolumeName: "pv-1", - }, + pvc = &corev1api.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-1", + Name: "pvc-1", + }, + Spec: corev1api.PersistentVolumeClaimSpec{ + VolumeName: "pv-1", }, } pvGetter := &fakePVGetter{ @@ -93,7 +89,7 @@ func TestIsHostPathVolume(t *testing.T) { Spec: corev1api.PersistentVolumeSpec{}, }, } - isHostPath, err = isHostPathVolume(vol, pvcGetter, pvGetter) + isHostPath, err = isHostPathVolume(vol, pvc, pvGetter) assert.Nil(t, err) assert.False(t, isHostPath) @@ -105,15 +101,13 @@ func TestIsHostPathVolume(t *testing.T) { }, }, } - pvcGetter = &fakePVCGetter{ - pvc: &corev1api.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "ns-1", - Name: "pvc-1", - }, - Spec: corev1api.PersistentVolumeClaimSpec{ - VolumeName: "pv-1", - }, + pvc = &corev1api.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-1", + Name: "pvc-1", + }, + Spec: corev1api.PersistentVolumeClaimSpec{ + VolumeName: "pv-1", }, } pvGetter = &fakePVGetter{ @@ -128,23 +122,11 @@ func TestIsHostPathVolume(t *testing.T) { }, }, } - isHostPath, err = isHostPathVolume(vol, pvcGetter, pvGetter) + isHostPath, err = isHostPathVolume(vol, pvc, pvGetter) assert.Nil(t, err) assert.True(t, isHostPath) } -type fakePVCGetter struct { - pvc *corev1api.PersistentVolumeClaim -} - -func (g *fakePVCGetter) Get(name string, opts metav1.GetOptions) (*corev1api.PersistentVolumeClaim, error) { - if g.pvc != nil { - return g.pvc, nil - } - - return nil, errors.New("item not found") -} - type fakePVGetter struct { pv *corev1api.PersistentVolume } diff --git a/pkg/restic/command_factory.go b/pkg/restic/command_factory.go index da75f313d62..95a2d600973 100644 --- a/pkg/restic/command_factory.go +++ b/pkg/restic/command_factory.go @@ -1,5 +1,5 @@ /* -Copyright 2018 the Velero contributors. +Copyright 2018, 2019 the Velero contributors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -91,13 +91,6 @@ func SnapshotsCommand(repoIdentifier string) *Command { } } -func CheckCommand(repoIdentifier string) *Command { - return &Command{ - Command: "check", - RepoIdentifier: repoIdentifier, - } -} - func PruneCommand(repoIdentifier string) *Command { return &Command{ Command: "prune", @@ -112,3 +105,10 @@ func ForgetCommand(repoIdentifier, snapshotID string) *Command { Args: []string{snapshotID}, } } + +func UnlockCommand(repoIdentifier string) *Command { + return &Command{ + Command: "unlock", + RepoIdentifier: repoIdentifier, + } +} diff --git a/pkg/restic/command_factory_test.go b/pkg/restic/command_factory_test.go index 1b45a4024f4..6cd0ab961cf 100644 --- a/pkg/restic/command_factory_test.go +++ b/pkg/restic/command_factory_test.go @@ -103,13 +103,6 @@ func TestSnapshotsCommand(t *testing.T) { assert.Equal(t, "repo-id", c.RepoIdentifier) } -func TestCheckCommand(t *testing.T) { - c := CheckCommand("repo-id") - - assert.Equal(t, "check", c.Command) - assert.Equal(t, "repo-id", c.RepoIdentifier) -} - func TestPruneCommand(t *testing.T) { c := PruneCommand("repo-id") diff --git a/pkg/restic/common.go b/pkg/restic/common.go index 80ed99935b2..413c80a003b 100644 --- a/pkg/restic/common.go +++ b/pkg/restic/common.go @@ -35,30 +35,35 @@ import ( ) const ( - DaemonSet = "restic" - InitContainer = "restic-wait" + // DaemonSet is the name of the Velero restic daemonset. + DaemonSet = "restic" + + // InitContainer is the name of the init container added + // to workload pods to help with restores. + InitContainer = "restic-wait" + + // DefaultMaintenanceFrequency is the default time interval + // at which restic check & prune are run. DefaultMaintenanceFrequency = 24 * time.Hour - podAnnotationPrefix = "snapshot.velero.io/" - volumesToBackupAnnotation = "backup.velero.io/backup-volumes" -) + // PVCNameAnnotation is the key for the annotation added to + // pod volume backups when they're for a PVC. + PVCNameAnnotation = "velero.io/pvc-name" -// PodHasSnapshotAnnotation returns true if the object has an annotation -// indicating that there is a restic snapshot for a volume in this pod, -// or false otherwise. -func PodHasSnapshotAnnotation(obj metav1.Object) bool { - for key := range obj.GetAnnotations() { - if strings.HasPrefix(key, podAnnotationPrefix) { - return true - } - } + // Deprecated. + // + // TODO(2.0): remove + podAnnotationPrefix = "snapshot.velero.io/" - return false -} + volumesToBackupAnnotation = "backup.velero.io/backup-volumes" +) -// GetPodSnapshotAnnotations returns a map, of volume name -> snapshot id, +// getPodSnapshotAnnotations returns a map, of volume name -> snapshot id, // of all restic snapshots for this pod. -func GetPodSnapshotAnnotations(obj metav1.Object) map[string]string { +// TODO(2.0) to remove +// Deprecated: we will stop using pod annotations to record restic snapshot IDs after they're taken, +// therefore we won't need to check if these annotations exist. +func getPodSnapshotAnnotations(obj metav1.Object) map[string]string { var res map[string]string insertSafe := func(k, v string) { @@ -77,18 +82,22 @@ func GetPodSnapshotAnnotations(obj metav1.Object) map[string]string { return res } -// SetPodSnapshotAnnotation adds an annotation to a pod to indicate that -// the specified volume has a restic snapshot with the provided id. 
-func SetPodSnapshotAnnotation(obj metav1.Object, volumeName, snapshotID string) { - annotations := obj.GetAnnotations() +// GetVolumeBackupsForPod returns a map, of volume name -> snapshot id, +// of the PodVolumeBackups that exist for the provided pod. +func GetVolumeBackupsForPod(podVolumeBackups []*velerov1api.PodVolumeBackup, pod metav1.Object) map[string]string { + volumes := make(map[string]string) - if annotations == nil { - annotations = make(map[string]string) + for _, pvb := range podVolumeBackups { + if pod.GetName() == pvb.Spec.Pod.Name { + volumes[pvb.Spec.Volume] = pvb.Status.SnapshotID + } } - annotations[podAnnotationPrefix+volumeName] = snapshotID + if len(volumes) > 0 { + return volumes + } - obj.SetAnnotations(annotations) + return getPodSnapshotAnnotations(pod) } // GetVolumesToBackup returns a list of volume names to backup for diff --git a/pkg/restic/common_test.go b/pkg/restic/common_test.go index 1dc6cbc9984..60e57a244b0 100644 --- a/pkg/restic/common_test.go +++ b/pkg/restic/common_test.go @@ -28,143 +28,90 @@ import ( "k8s.io/client-go/tools/cache" velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/builder" "github.com/heptio/velero/pkg/generated/clientset/versioned/fake" informers "github.com/heptio/velero/pkg/generated/informers/externalversions" - velerotest "github.com/heptio/velero/pkg/util/test" + velerotest "github.com/heptio/velero/pkg/test" ) -func TestPodHasSnapshotAnnotation(t *testing.T) { +func TestGetVolumeBackupsForPod(t *testing.T) { tests := []struct { - name string - annotations map[string]string - expected bool + name string + podVolumeBackups []*velerov1api.PodVolumeBackup + podAnnotations map[string]string + podName string + expected map[string]string }{ { - name: "nil annotations", - annotations: nil, - expected: false, + name: "nil annotations", + podAnnotations: nil, + expected: nil, }, { - name: "empty annotations", - annotations: make(map[string]string), - expected: false, + name: "empty annotations", + podAnnotations: make(map[string]string), + expected: nil, }, { - name: "non-empty map, no snapshot annotation", - annotations: map[string]string{"foo": "bar"}, - expected: false, + name: "non-empty map, no snapshot annotation", + podAnnotations: map[string]string{"foo": "bar"}, + expected: nil, }, { - name: "has snapshot annotation only, no suffix", - annotations: map[string]string{podAnnotationPrefix: "bar"}, - expected: true, + name: "has snapshot annotation only, no suffix", + podAnnotations: map[string]string{podAnnotationPrefix: "bar"}, + expected: map[string]string{"": "bar"}, }, { - name: "has snapshot annotation only, with suffix", - annotations: map[string]string{podAnnotationPrefix + "foo": "bar"}, - expected: true, + name: "has snapshot annotation only, with suffix", + podAnnotations: map[string]string{podAnnotationPrefix + "foo": "bar"}, + expected: map[string]string{"foo": "bar"}, }, { - name: "has snapshot annotation, with suffix", - annotations: map[string]string{"foo": "bar", podAnnotationPrefix + "foo": "bar"}, - expected: true, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - pod := &corev1api.Pod{} - pod.Annotations = test.annotations - assert.Equal(t, test.expected, PodHasSnapshotAnnotation(pod)) - }) - } -} - -func TestGetPodSnapshotAnnotations(t *testing.T) { - tests := []struct { - name string - annotations map[string]string - expected map[string]string - }{ - { - name: "nil annotations", - annotations: nil, - expected: nil, + name: "has 
snapshot annotation, with suffix", + podAnnotations: map[string]string{"x": "y", podAnnotationPrefix + "foo": "bar", podAnnotationPrefix + "abc": "123"}, + expected: map[string]string{"foo": "bar", "abc": "123"}, }, { - name: "empty annotations", - annotations: make(map[string]string), - expected: nil, - }, - { - name: "non-empty map, no snapshot annotation", - annotations: map[string]string{"foo": "bar"}, - expected: nil, - }, - { - name: "has snapshot annotation only, no suffix", - annotations: map[string]string{podAnnotationPrefix: "bar"}, - expected: map[string]string{"": "bar"}, - }, - { - name: "has snapshot annotation only, with suffix", - annotations: map[string]string{podAnnotationPrefix + "foo": "bar"}, - expected: map[string]string{"foo": "bar"}, - }, - { - name: "has snapshot annotation, with suffix", - annotations: map[string]string{"x": "y", podAnnotationPrefix + "foo": "bar", podAnnotationPrefix + "abc": "123"}, - expected: map[string]string{"foo": "bar", "abc": "123"}, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - pod := &corev1api.Pod{} - pod.Annotations = test.annotations - assert.Equal(t, test.expected, GetPodSnapshotAnnotations(pod)) - }) - } -} - -func TestSetPodSnapshotAnnotation(t *testing.T) { - tests := []struct { - name string - annotations map[string]string - volumeName string - snapshotID string - expected map[string]string - }{ - { - name: "set snapshot annotation on pod with no annotations", - annotations: nil, - volumeName: "foo", - snapshotID: "bar", - expected: map[string]string{podAnnotationPrefix + "foo": "bar"}, + name: "has snapshot annotation, with suffix, and also PVBs", + podVolumeBackups: []*velerov1api.PodVolumeBackup{ + builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").SnapshotID("bar").Volume("pvbtest1-foo").Result(), + builder.ForPodVolumeBackup("velero", "pvb-2").PodName("TestPod").SnapshotID("123").Volume("pvbtest2-abc").Result(), + }, + podName: "TestPod", + podAnnotations: map[string]string{"x": "y", podAnnotationPrefix + "foo": "bar", podAnnotationPrefix + "abc": "123"}, + expected: map[string]string{"pvbtest1-foo": "bar", "pvbtest2-abc": "123"}, }, { - name: "set snapshot annotation on pod with existing annotations", - annotations: map[string]string{"existing": "annotation"}, - volumeName: "foo", - snapshotID: "bar", - expected: map[string]string{"existing": "annotation", podAnnotationPrefix + "foo": "bar"}, + name: "no snapshot annotation, no suffix, but with PVBs", + podVolumeBackups: []*velerov1api.PodVolumeBackup{ + builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").SnapshotID("bar").Volume("pvbtest1-foo").Result(), + builder.ForPodVolumeBackup("velero", "pvb-2").PodName("TestPod").SnapshotID("123").Volume("pvbtest2-abc").Result(), + }, + podName: "TestPod", + expected: map[string]string{"pvbtest1-foo": "bar", "pvbtest2-abc": "123"}, }, { - name: "snapshot annotation is overwritten if already exists", - annotations: map[string]string{podAnnotationPrefix + "foo": "existing"}, - volumeName: "foo", - snapshotID: "bar", - expected: map[string]string{podAnnotationPrefix + "foo": "bar"}, + name: "has snapshot annotation, with suffix, and with PVBs from current pod and a PVB from another pod", + podVolumeBackups: []*velerov1api.PodVolumeBackup{ + builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").SnapshotID("bar").Volume("pvbtest1-foo").Result(), + builder.ForPodVolumeBackup("velero", "pvb-2").PodName("TestPod").SnapshotID("123").Volume("pvbtest2-abc").Result(), 
+ builder.ForPodVolumeBackup("velero", "pvb-3").PodName("TestAnotherPod").SnapshotID("xyz").Volume("pvbtest3-xyz").Result(), + }, + podAnnotations: map[string]string{"x": "y", podAnnotationPrefix + "foo": "bar", podAnnotationPrefix + "abc": "123"}, + podName: "TestPod", + expected: map[string]string{"pvbtest1-foo": "bar", "pvbtest2-abc": "123"}, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { pod := &corev1api.Pod{} - pod.Annotations = test.annotations + pod.Annotations = test.podAnnotations + pod.Name = test.podName - SetPodSnapshotAnnotation(pod, test.volumeName, test.snapshotID) - assert.Equal(t, test.expected, pod.Annotations) + res := GetVolumeBackupsForPod(test.podVolumeBackups, pod) + assert.Equal(t, test.expected, res) }) } } diff --git a/pkg/restic/config.go b/pkg/restic/config.go index 440c0151299..37c74b58f06 100644 --- a/pkg/restic/config.go +++ b/pkg/restic/config.go @@ -21,6 +21,8 @@ import ( "path" "strings" + "github.com/pkg/errors" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" "github.com/heptio/velero/pkg/cloudprovider/aws" "github.com/heptio/velero/pkg/persistence" @@ -40,8 +42,8 @@ var getAWSBucketRegion = aws.GetBucketRegion // getRepoPrefix returns the prefix of the value of the --repo flag for // restic commands, i.e. everything except the "/". -func getRepoPrefix(location *velerov1api.BackupStorageLocation) string { - var provider, bucket, prefix, bucketAndPrefix string +func getRepoPrefix(location *velerov1api.BackupStorageLocation) (string, error) { + var bucket, prefix string if location.Spec.ObjectStorage != nil { layout := persistence.NewObjectStoreLayout(location.Spec.ObjectStorage.Prefix) @@ -49,14 +51,17 @@ func getRepoPrefix(location *velerov1api.BackupStorageLocation) string { bucket = location.Spec.ObjectStorage.Bucket prefix = layout.GetResticDir() } - bucketAndPrefix = path.Join(bucket, prefix) - var locationSpecProvider = location.Spec.Provider - if !strings.Contains(locationSpecProvider, "/") { - locationSpecProvider = "velero.io/" + locationSpecProvider + var provider = location.Spec.Provider + if !strings.Contains(provider, "/") { + provider = "velero.io/" + provider + } + + if repoPrefix := location.Spec.Config["resticRepoPrefix"]; repoPrefix != "" { + return repoPrefix, nil } - switch BackendType(locationSpecProvider) { + switch BackendType(provider) { case AWSBackend: var url string switch { @@ -73,20 +78,23 @@ func getRepoPrefix(location *velerov1api.BackupStorageLocation) string { url = fmt.Sprintf("s3-%s.amazonaws.com", region) } - return fmt.Sprintf("s3:%s/%s", strings.TrimSuffix(url, "/"), bucketAndPrefix) + return fmt.Sprintf("s3:%s/%s", strings.TrimSuffix(url, "/"), path.Join(bucket, prefix)), nil case AzureBackend: - provider = "azure" + return fmt.Sprintf("azure:%s:/%s", bucket, prefix), nil case GCPBackend: - provider = "gs" + return fmt.Sprintf("gs:%s:/%s", bucket, prefix), nil } - return fmt.Sprintf("%s:%s:/%s", provider, bucket, prefix) + return "", errors.New("restic repository prefix (resticRepoPrefix) not specified in backup storage location's config") } // GetRepoIdentifier returns the string to be used as the value of the --repo flag in // restic commands for the given repository. 
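Taken together with the GetRepoIdentifier change that follows, the new resticRepoPrefix key in the location's Config lets operators of providers without a built-in restic mapping point restic at a repository explicitly, instead of getting a silently malformed prefix. A minimal usage sketch; the provider name and prefix value are placeholders, and the printed identifier follows the TrimSuffix/join formatting shown in the hunk above.

    package main

    import (
        "fmt"

        velerov1api "github.com/heptio/velero/pkg/apis/velero/v1"
        "github.com/heptio/velero/pkg/restic"
    )

    func main() {
        // A location for a provider Velero has no native restic mapping for;
        // the operator supplies the repo prefix explicitly via Config.
        location := &velerov1api.BackupStorageLocation{
            Spec: velerov1api.BackupStorageLocationSpec{
                Provider: "example.com/custom-provider", // placeholder provider
                Config: map[string]string{
                    "resticRepoPrefix": "s3:https://minio.example.com/velero/restic", // placeholder
                },
            },
        }

        id, err := restic.GetRepoIdentifier(location, "repo-1")
        if err != nil {
            // Without resticRepoPrefix, unsupported providers now return an error
            // instead of a bogus "<provider>:<bucket>:/<prefix>" string.
            fmt.Println("error:", err)
            return
        }
        fmt.Println(id) // s3:https://minio.example.com/velero/restic/repo-1
    }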
-func GetRepoIdentifier(location *velerov1api.BackupStorageLocation, name string) string { - prefix := getRepoPrefix(location) +func GetRepoIdentifier(location *velerov1api.BackupStorageLocation, name string) (string, error) { + prefix, err := getRepoPrefix(location) + if err != nil { + return "", err + } - return fmt.Sprintf("%s/%s", strings.TrimSuffix(prefix, "/"), name) + return fmt.Sprintf("%s/%s", strings.TrimSuffix(prefix, "/"), name), nil } diff --git a/pkg/restic/config_test.go b/pkg/restic/config_test.go index d107433a7b9..e0c1d1f8983 100644 --- a/pkg/restic/config_test.go +++ b/pkg/restic/config_test.go @@ -42,7 +42,9 @@ func TestGetRepoIdentifier(t *testing.T) { }, }, } - assert.Equal(t, "s3:s3.amazonaws.com/bucket/prefix/restic/repo-1", GetRepoIdentifier(backupLocation, "repo-1")) + id, err := GetRepoIdentifier(backupLocation, "repo-1") + assert.NoError(t, err) + assert.Equal(t, "s3:s3.amazonaws.com/bucket/prefix/restic/repo-1", id) // stub implementation of getAWSBucketRegion getAWSBucketRegion = func(string) (string, error) { @@ -59,7 +61,9 @@ func TestGetRepoIdentifier(t *testing.T) { }, }, } - assert.Equal(t, "s3:s3-us-west-2.amazonaws.com/bucket/restic/repo-1", GetRepoIdentifier(backupLocation, "repo-1")) + id, err = GetRepoIdentifier(backupLocation, "repo-1") + assert.NoError(t, err) + assert.Equal(t, "s3:s3-us-west-2.amazonaws.com/bucket/restic/repo-1", id) backupLocation = &velerov1api.BackupStorageLocation{ Spec: velerov1api.BackupStorageLocationSpec{ @@ -72,7 +76,9 @@ func TestGetRepoIdentifier(t *testing.T) { }, }, } - assert.Equal(t, "s3:s3-us-west-2.amazonaws.com/bucket/prefix/restic/repo-1", GetRepoIdentifier(backupLocation, "repo-1")) + id, err = GetRepoIdentifier(backupLocation, "repo-1") + assert.NoError(t, err) + assert.Equal(t, "s3:s3-us-west-2.amazonaws.com/bucket/prefix/restic/repo-1", id) backupLocation = &velerov1api.BackupStorageLocation{ Spec: velerov1api.BackupStorageLocationSpec{ @@ -88,7 +94,9 @@ func TestGetRepoIdentifier(t *testing.T) { }, }, } - assert.Equal(t, "s3:alternate-url/bucket/prefix/restic/repo-1", GetRepoIdentifier(backupLocation, "repo-1")) + id, err = GetRepoIdentifier(backupLocation, "repo-1") + assert.NoError(t, err) + assert.Equal(t, "s3:alternate-url/bucket/prefix/restic/repo-1", id) backupLocation = &velerov1api.BackupStorageLocation{ Spec: velerov1api.BackupStorageLocationSpec{ @@ -104,7 +112,9 @@ func TestGetRepoIdentifier(t *testing.T) { }, }, } - assert.Equal(t, "s3:alternate-url-with-trailing-slash/bucket/prefix/restic/repo-1", GetRepoIdentifier(backupLocation, "repo-1")) + id, err = GetRepoIdentifier(backupLocation, "repo-1") + assert.NoError(t, err) + assert.Equal(t, "s3:alternate-url-with-trailing-slash/bucket/prefix/restic/repo-1", id) backupLocation = &velerov1api.BackupStorageLocation{ Spec: velerov1api.BackupStorageLocationSpec{ @@ -117,7 +127,9 @@ func TestGetRepoIdentifier(t *testing.T) { }, }, } - assert.Equal(t, "azure:bucket:/prefix/restic/repo-1", GetRepoIdentifier(backupLocation, "repo-1")) + id, err = GetRepoIdentifier(backupLocation, "repo-1") + assert.NoError(t, err) + assert.Equal(t, "azure:bucket:/prefix/restic/repo-1", id) backupLocation = &velerov1api.BackupStorageLocation{ Spec: velerov1api.BackupStorageLocationSpec{ @@ -130,5 +142,40 @@ func TestGetRepoIdentifier(t *testing.T) { }, }, } - assert.Equal(t, "gs:bucket-2:/prefix-2/restic/repo-2", GetRepoIdentifier(backupLocation, "repo-2")) + id, err = GetRepoIdentifier(backupLocation, "repo-2") + assert.NoError(t, err) + assert.Equal(t, 
"gs:bucket-2:/prefix-2/restic/repo-2", id) + + backupLocation = &velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "unsupported-provider", + StorageType: velerov1api.StorageType{ + ObjectStorage: &velerov1api.ObjectStorageLocation{ + Bucket: "bucket-2", + Prefix: "prefix-2", + }, + }, + }, + } + id, err = GetRepoIdentifier(backupLocation, "repo-1") + assert.EqualError(t, err, "restic repository prefix (resticRepoPrefix) not specified in backup storage location's config") + assert.Empty(t, id) + + backupLocation = &velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "custom-repo-identifier", + Config: map[string]string{ + "resticRepoPrefix": "custom:prefix:/restic", + }, + StorageType: velerov1api.StorageType{ + ObjectStorage: &velerov1api.ObjectStorageLocation{ + Bucket: "bucket", + Prefix: "prefix", + }, + }, + }, + } + id, err = GetRepoIdentifier(backupLocation, "repo-1") + assert.NoError(t, err) + assert.Equal(t, "custom:prefix:/restic/repo-1", id) } diff --git a/pkg/restic/mocks/backupper.go b/pkg/restic/mocks/backupper.go deleted file mode 100644 index 6065b3fb88e..00000000000 --- a/pkg/restic/mocks/backupper.go +++ /dev/null @@ -1,38 +0,0 @@ -// Code generated by mockery v1.0.0 -package mocks - -import corev1 "k8s.io/api/core/v1" -import logrus "github.com/sirupsen/logrus" -import mock "github.com/stretchr/testify/mock" - -import v1 "github.com/heptio/velero/pkg/apis/velero/v1" - -// Backupper is an autogenerated mock type for the Backupper type -type Backupper struct { - mock.Mock -} - -// BackupPodVolumes provides a mock function with given fields: backup, pod, log -func (_m *Backupper) BackupPodVolumes(backup *v1.Backup, pod *corev1.Pod, log logrus.FieldLogger) (map[string]string, []error) { - ret := _m.Called(backup, pod, log) - - var r0 map[string]string - if rf, ok := ret.Get(0).(func(*v1.Backup, *corev1.Pod, logrus.FieldLogger) map[string]string); ok { - r0 = rf(backup, pod, log) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(map[string]string) - } - } - - var r1 []error - if rf, ok := ret.Get(1).(func(*v1.Backup, *corev1.Pod, logrus.FieldLogger) []error); ok { - r1 = rf(backup, pod, log) - } else { - if ret.Get(1) != nil { - r1 = ret.Get(1).([]error) - } - } - - return r0, r1 -} diff --git a/pkg/restic/mocks/restorer.go b/pkg/restic/mocks/restorer.go new file mode 100644 index 00000000000..04b48375ddd --- /dev/null +++ b/pkg/restic/mocks/restorer.go @@ -0,0 +1,27 @@ +// Code generated by mockery v1.0.0. DO NOT EDIT. + +package mocks + +import mock "github.com/stretchr/testify/mock" +import restic "github.com/heptio/velero/pkg/restic" + +// Restorer is an autogenerated mock type for the Restorer type +type Restorer struct { + mock.Mock +} + +// RestorePodVolumes provides a mock function with given fields: _a0 +func (_m *Restorer) RestorePodVolumes(_a0 restic.RestoreData) []error { + ret := _m.Called(_a0) + + var r0 []error + if rf, ok := ret.Get(0).(func(restic.RestoreData) []error); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]error) + } + } + + return r0 +} diff --git a/pkg/restic/repository_manager.go b/pkg/restic/repository_manager.go index fe7b32a970f..1e58ef93089 100644 --- a/pkg/restic/repository_manager.go +++ b/pkg/restic/repository_manager.go @@ -1,5 +1,5 @@ /* -Copyright 2018 the Velero contributors. +Copyright 2018, 2019 the Velero contributors. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -49,12 +49,12 @@ type RepositoryManager interface { // authenticated to. ConnectToRepo(repo *velerov1api.ResticRepository) error - // CheckRepo checks the specified repo for errors. - CheckRepo(repo *velerov1api.ResticRepository) error - // PruneRepo deletes unused data from a repo. PruneRepo(repo *velerov1api.ResticRepository) error + // UnlockRepo removes stale locks from a repo. + UnlockRepo(repo *velerov1api.ResticRepository) error + // Forget removes a snapshot from the list of // available snapshots in a repo. Forget(context.Context, SnapshotIdentifier) error @@ -197,14 +197,6 @@ func (rm *repositoryManager) ConnectToRepo(repo *velerov1api.ResticRepository) e return rm.exec(snapshotsCmd, repo.Spec.BackupStorageLocation) } -func (rm *repositoryManager) CheckRepo(repo *velerov1api.ResticRepository) error { - // restic check requires an exclusive lock - rm.repoLocker.LockExclusive(repo.Name) - defer rm.repoLocker.UnlockExclusive(repo.Name) - - return rm.exec(CheckCommand(repo.Spec.ResticIdentifier), repo.Spec.BackupStorageLocation) -} - func (rm *repositoryManager) PruneRepo(repo *velerov1api.ResticRepository) error { // restic prune requires an exclusive lock rm.repoLocker.LockExclusive(repo.Name) @@ -213,6 +205,14 @@ func (rm *repositoryManager) PruneRepo(repo *velerov1api.ResticRepository) error return rm.exec(PruneCommand(repo.Spec.ResticIdentifier), repo.Spec.BackupStorageLocation) } +func (rm *repositoryManager) UnlockRepo(repo *velerov1api.ResticRepository) error { + // restic unlock requires a non-exclusive lock + rm.repoLocker.Lock(repo.Name) + defer rm.repoLocker.Unlock(repo.Name) + + return rm.exec(UnlockCommand(repo.Spec.ResticIdentifier), repo.Spec.BackupStorageLocation) +} + func (rm *repositoryManager) Forget(ctx context.Context, snapshot SnapshotIdentifier) error { // We can't wait for this in the constructor, because this informer is coming // from the shared informer factory, which isn't started until *after* the repo diff --git a/pkg/restic/restorer.go b/pkg/restic/restorer.go index f06c492da0b..6a310096d6c 100644 --- a/pkg/restic/restorer.go +++ b/pkg/restic/restorer.go @@ -31,10 +31,17 @@ import ( "github.com/heptio/velero/pkg/util/boolptr" ) +type RestoreData struct { + Restore *velerov1api.Restore + Pod *corev1api.Pod + PodVolumeBackups []*velerov1api.PodVolumeBackup + SourceNamespace, BackupLocation string +} + // Restorer can execute restic restores of volumes in a pod. type Restorer interface { // RestorePodVolumes restores all annotated volumes in a pod. 
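The RestoreData struct introduced above replaces the five positional parameters that RestorePodVolumes used to take, and it carries the backup's PodVolumeBackups explicitly rather than relying on pod annotations. A hedged caller-side sketch follows; the "default" location name and the source-namespace choice are assumptions, not values taken from this diff.

    package example

    import (
        "github.com/sirupsen/logrus"
        corev1api "k8s.io/api/core/v1"

        velerov1api "github.com/heptio/velero/pkg/apis/velero/v1"
        "github.com/heptio/velero/pkg/restic"
    )

    // restorePodVolumes shows the new call shape: one RestoreData value, with
    // PodVolumeBackups passed in instead of being read from pod annotations.
    func restorePodVolumes(
        r restic.Restorer,
        restore *velerov1api.Restore,
        pod *corev1api.Pod,
        pvbs []*velerov1api.PodVolumeBackup,
        log logrus.FieldLogger,
    ) {
        data := restic.RestoreData{
            Restore:          restore,
            Pod:              pod,
            PodVolumeBackups: pvbs,
            SourceNamespace:  pod.Namespace, // assumption: no namespace remapping
            BackupLocation:   "default",     // assumed BackupStorageLocation name
        }

        for _, err := range r.RestorePodVolumes(data) {
            log.WithError(err).Error("error restoring pod volumes")
        }
    }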
- RestorePodVolumes(restore *velerov1api.Restore, pod *corev1api.Pod, sourceNamespace, backupLocation string, log logrus.FieldLogger) []error + RestorePodVolumes(RestoreData) []error } type restorer struct { @@ -84,14 +91,13 @@ func newRestorer( return r } -func (r *restorer) RestorePodVolumes(restore *velerov1api.Restore, pod *corev1api.Pod, sourceNamespace, backupLocation string, log logrus.FieldLogger) []error { - // get volumes to restore from pod's annotations - volumesToRestore := GetPodSnapshotAnnotations(pod) +func (r *restorer) RestorePodVolumes(data RestoreData) []error { + volumesToRestore := GetVolumeBackupsForPod(data.PodVolumeBackups, data.Pod) if len(volumesToRestore) == 0 { return nil } - repo, err := r.repoEnsurer.EnsureRepo(r.ctx, restore.Namespace, sourceNamespace, backupLocation) + repo, err := r.repoEnsurer.EnsureRepo(r.ctx, data.Restore.Namespace, data.SourceNamespace, data.BackupLocation) if err != nil { return []error{err} } @@ -104,7 +110,7 @@ func (r *restorer) RestorePodVolumes(restore *velerov1api.Restore, pod *corev1ap resultsChan := make(chan *velerov1api.PodVolumeRestore) r.resultsLock.Lock() - r.results[resultsKey(pod.Namespace, pod.Name)] = resultsChan + r.results[resultsKey(data.Pod.Namespace, data.Pod.Name)] = resultsChan r.resultsLock.Unlock() var ( @@ -113,7 +119,7 @@ func (r *restorer) RestorePodVolumes(restore *velerov1api.Restore, pod *corev1ap ) for volume, snapshot := range volumesToRestore { - volumeRestore := newPodVolumeRestore(restore, pod, volume, snapshot, backupLocation, repo.Spec.ResticIdentifier) + volumeRestore := newPodVolumeRestore(data.Restore, data.Pod, data.BackupLocation, volume, snapshot, repo.Spec.ResticIdentifier) if err := errorOnly(r.repoManager.veleroClient.VeleroV1().PodVolumeRestores(volumeRestore.Namespace).Create(volumeRestore)); err != nil { errs = append(errs, errors.WithStack(err)) @@ -136,13 +142,13 @@ ForEachVolume: } r.resultsLock.Lock() - delete(r.results, resultsKey(pod.Namespace, pod.Name)) + delete(r.results, resultsKey(data.Pod.Namespace, data.Pod.Name)) r.resultsLock.Unlock() return errs } -func newPodVolumeRestore(restore *velerov1api.Restore, pod *corev1api.Pod, volume, snapshot, backupLocation, repoIdentifier string) *velerov1api.PodVolumeRestore { +func newPodVolumeRestore(restore *velerov1api.Restore, pod *corev1api.Pod, backupLocation, volume, snapshot, repoIdentifier string) *velerov1api.PodVolumeRestore { return &velerov1api.PodVolumeRestore{ ObjectMeta: metav1.ObjectMeta{ Namespace: restore.Namespace, diff --git a/pkg/restore/add_pv_from_pvc_action_test.go b/pkg/restore/add_pv_from_pvc_action_test.go index d9fcbd5e34b..bb86860b480 100644 --- a/pkg/restore/add_pv_from_pvc_action_test.go +++ b/pkg/restore/add_pv_from_pvc_action_test.go @@ -27,7 +27,7 @@ import ( "github.com/heptio/velero/pkg/kuberesource" "github.com/heptio/velero/pkg/plugin/velero" - velerotest "github.com/heptio/velero/pkg/util/test" + velerotest "github.com/heptio/velero/pkg/test" ) func TestAddPVFromPVCActionExecute(t *testing.T) { diff --git a/pkg/restore/add_pvc_from_pod_action_test.go b/pkg/restore/add_pvc_from_pod_action_test.go index 11e574dd96a..df2cdf46f3e 100644 --- a/pkg/restore/add_pvc_from_pod_action_test.go +++ b/pkg/restore/add_pvc_from_pod_action_test.go @@ -28,7 +28,7 @@ import ( "github.com/heptio/velero/pkg/kuberesource" "github.com/heptio/velero/pkg/plugin/velero" - velerotest "github.com/heptio/velero/pkg/util/test" + velerotest "github.com/heptio/velero/pkg/test" ) func TestAddPVCFromPodActionExecute(t *testing.T) 
{ diff --git a/pkg/restore/change_storageclass_action.go b/pkg/restore/change_storageclass_action.go new file mode 100644 index 00000000000..c874e388df6 --- /dev/null +++ b/pkg/restore/change_storageclass_action.go @@ -0,0 +1,117 @@ +/* +Copyright 2019 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restore + +import ( + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + storagev1client "k8s.io/client-go/kubernetes/typed/storage/v1" + + "github.com/heptio/velero/pkg/plugin/framework" + "github.com/heptio/velero/pkg/plugin/velero" +) + +// ChangeStorageClassAction updates a PV or PVC's storage class name +// if a mapping is found in the plugin's config map. +type ChangeStorageClassAction struct { + logger logrus.FieldLogger + configMapClient corev1client.ConfigMapInterface + storageClassClient storagev1client.StorageClassInterface +} + +// NewChangeStorageClassAction is the constructor for ChangeStorageClassAction. +func NewChangeStorageClassAction( + logger logrus.FieldLogger, + configMapClient corev1client.ConfigMapInterface, + storageClassClient storagev1client.StorageClassInterface, +) *ChangeStorageClassAction { + return &ChangeStorageClassAction{ + logger: logger, + configMapClient: configMapClient, + storageClassClient: storageClassClient, + } +} + +// AppliesTo returns the resources that ChangeStorageClassAction should +// be run for. +func (a *ChangeStorageClassAction) AppliesTo() (velero.ResourceSelector, error) { + return velero.ResourceSelector{ + IncludedResources: []string{"persistentvolumeclaims", "persistentvolumes"}, + }, nil +} + +// Execute updates the item's spec.storageClassName if a mapping is found +// in the config map for the plugin. +func (a *ChangeStorageClassAction) Execute(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) { + a.logger.Info("Executing ChangeStorageClassAction") + defer a.logger.Info("Done executing ChangeStorageClassAction") + + a.logger.Debug("Getting plugin config") + config, err := getPluginConfig(framework.PluginKindRestoreItemAction, "velero.io/change-storage-class", a.configMapClient) + if err != nil { + return nil, err + } + + if config == nil || len(config.Data) == 0 { + a.logger.Debug("No storage class mappings found") + return velero.NewRestoreItemActionExecuteOutput(input.Item), nil + } + + obj, ok := input.Item.(*unstructured.Unstructured) + if !ok { + return nil, errors.Errorf("object was of unexpected type %T", input.Item) + } + + log := a.logger.WithFields(map[string]interface{}{ + "kind": obj.GetKind(), + "namespace": obj.GetNamespace(), + "name": obj.GetName(), + }) + + // use the unstructured helpers here since this code is for both PVs and PVCs, and the + // field names are the same for both types. 
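For reference, the mapping consulted by this action is an ordinary plugin ConfigMap whose Data keys are old storage class names and whose values are their replacements, selected by the velero.io/plugin-config and velero.io/change-storage-class labels; the tests further down construct exactly this shape. A minimal sketch using the builder helpers from this PR (class names are placeholders):

    configMap := builder.ForConfigMap("velero", "change-storage-class").
        ObjectMeta(builder.WithLabels(
            "velero.io/plugin-config", "true",
            "velero.io/change-storage-class", "RestoreItemAction",
        )).
        Data("old-storage-class", "new-storage-class"). // placeholder mapping
        Result()

Any PV or PVC whose spec.storageClassName matches a key is rewritten to the corresponding value, after the target class is verified to exist.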
+ storageClass, _, err := unstructured.NestedString(obj.UnstructuredContent(), "spec", "storageClassName") + if err != nil { + return nil, errors.Wrap(err, "error getting item's spec.storageClassName") + } + if storageClass == "" { + log.Debug("Item has no storage class specified") + return velero.NewRestoreItemActionExecuteOutput(input.Item), nil + } + + newStorageClass, ok := config.Data[storageClass] + if !ok { + log.Debugf("No mapping found for storage class %s", storageClass) + return velero.NewRestoreItemActionExecuteOutput(input.Item), nil + } + + // validate that new storage class exists + if _, err := a.storageClassClient.Get(newStorageClass, metav1.GetOptions{}); err != nil { + return nil, errors.Wrapf(err, "error getting storage class %s from API", newStorageClass) + } + + log.Infof("Updating item's storage class name to %s", newStorageClass) + + if err := unstructured.SetNestedField(obj.UnstructuredContent(), newStorageClass, "spec", "storageClassName"); err != nil { + return nil, errors.Wrap(err, "unable to set item's spec.storageClassName") + } + + return velero.NewRestoreItemActionExecuteOutput(obj), nil +} diff --git a/pkg/restore/change_storageclass_action_test.go b/pkg/restore/change_storageclass_action_test.go new file mode 100644 index 00000000000..4a363e88e03 --- /dev/null +++ b/pkg/restore/change_storageclass_action_test.go @@ -0,0 +1,188 @@ +/* +Copyright 2019 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restore + +import ( + "testing" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1api "k8s.io/api/core/v1" + storagev1api "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/fake" + + "github.com/heptio/velero/pkg/builder" + "github.com/heptio/velero/pkg/plugin/velero" +) + +// TestChangeStorageClassActionExecute runs the ChangeStorageClassAction's Execute +// method and validates that the item's storage class is modified (or not) as expected. +// Validation is done by comparing the result of the Execute method to the test case's +// desired result. +func TestChangeStorageClassActionExecute(t *testing.T) { + tests := []struct { + name string + pvOrPVC interface{} + configMap *corev1api.ConfigMap + storageClass *storagev1api.StorageClass + want interface{} + wantErr error + }{ + { + name: "a valid mapping for a persistent volume is applied correctly", + pvOrPVC: builder.ForPersistentVolume("pv-1").StorageClass("storageclass-1").Result(), + configMap: builder.ForConfigMap("velero", "change-storage-classs"). + ObjectMeta(builder.WithLabels("velero.io/plugin-config", "true", "velero.io/change-storage-class", "RestoreItemAction")). + Data("storageclass-1", "storageclass-2"). 
+ Result(), + storageClass: builder.ForStorageClass("storageclass-2").Result(), + want: builder.ForPersistentVolume("pv-1").StorageClass("storageclass-2").Result(), + }, + { + name: "a valid mapping for a persistent volume claim is applied correctly", + pvOrPVC: builder.ForPersistentVolumeClaim("velero", "pvc-1").StorageClass("storageclass-1").Result(), + configMap: builder.ForConfigMap("velero", "change-storage-classs"). + ObjectMeta(builder.WithLabels("velero.io/plugin-config", "true", "velero.io/change-storage-class", "RestoreItemAction")). + Data("storageclass-1", "storageclass-2"). + Result(), + storageClass: builder.ForStorageClass("storageclass-2").Result(), + want: builder.ForPersistentVolumeClaim("velero", "pvc-1").StorageClass("storageclass-2").Result(), + }, + { + name: "when no config map exists for the plugin, the item is returned as-is", + pvOrPVC: builder.ForPersistentVolume("pv-1").StorageClass("storageclass-1").Result(), + configMap: builder.ForConfigMap("velero", "change-storage-classs"). + ObjectMeta(builder.WithLabels("velero.io/plugin-config", "true", "velero.io/some-other-plugin", "RestoreItemAction")). + Data("storageclass-1", "storageclass-2"). + Result(), + want: builder.ForPersistentVolume("pv-1").StorageClass("storageclass-1").Result(), + }, + { + name: "when no storage class mappings exist in the plugin config map, the item is returned as-is", + pvOrPVC: builder.ForPersistentVolume("pv-1").StorageClass("storageclass-1").Result(), + configMap: builder.ForConfigMap("velero", "change-storage-classs"). + ObjectMeta(builder.WithLabels("velero.io/plugin-config", "true", "velero.io/change-storage-class", "RestoreItemAction")). + Result(), + want: builder.ForPersistentVolume("pv-1").StorageClass("storageclass-1").Result(), + }, + { + name: "when persistent volume has no storage class, the item is returned as-is", + pvOrPVC: builder.ForPersistentVolume("pv-1").Result(), + configMap: builder.ForConfigMap("velero", "change-storage-classs"). + ObjectMeta(builder.WithLabels("velero.io/plugin-config", "true", "velero.io/change-storage-class", "RestoreItemAction")). + Data("storageclass-1", "storageclass-2"). + Result(), + want: builder.ForPersistentVolume("pv-1").Result(), + }, + { + name: "when persistent volume claim has no storage class, the item is returned as-is", + pvOrPVC: builder.ForPersistentVolumeClaim("velero", "pvc-1").Result(), + configMap: builder.ForConfigMap("velero", "change-storage-classs"). + ObjectMeta(builder.WithLabels("velero.io/plugin-config", "true", "velero.io/change-storage-class", "RestoreItemAction")). + Data("storageclass-1", "storageclass-2"). + Result(), + want: builder.ForPersistentVolumeClaim("velero", "pvc-1").Result(), + }, + { + name: "when persistent volume's storage class has no mapping in the config map, the item is returned as-is", + pvOrPVC: builder.ForPersistentVolume("pv-1").StorageClass("storageclass-1").Result(), + configMap: builder.ForConfigMap("velero", "change-storage-classs"). + ObjectMeta(builder.WithLabels("velero.io/plugin-config", "true", "velero.io/change-storage-class", "RestoreItemAction")). + Data("storageclass-3", "storageclass-4"). 
+ Result(), + want: builder.ForPersistentVolume("pv-1").StorageClass("storageclass-1").Result(), + }, + { + name: "when persistent volume claim's storage class has no mapping in the config map, the item is returned as-is", + pvOrPVC: builder.ForPersistentVolumeClaim("velero", "pvc-1").StorageClass("storageclass-1").Result(), + configMap: builder.ForConfigMap("velero", "change-storage-classs"). + ObjectMeta(builder.WithLabels("velero.io/plugin-config", "true", "velero.io/change-storage-class", "RestoreItemAction")). + Data("storageclass-3", "storageclass-4"). + Result(), + want: builder.ForPersistentVolumeClaim("velero", "pvc-1").StorageClass("storageclass-1").Result(), + }, + { + name: "when persistent volume's storage class is mapped to a nonexistent storage class, an error is returned", + pvOrPVC: builder.ForPersistentVolume("pv-1").StorageClass("storageclass-1").Result(), + configMap: builder.ForConfigMap("velero", "change-storage-classs"). + ObjectMeta(builder.WithLabels("velero.io/plugin-config", "true", "velero.io/change-storage-class", "RestoreItemAction")). + Data("storageclass-1", "nonexistent-storage-class"). + Result(), + wantErr: errors.New("error getting storage class nonexistent-storage-class from API: storageclasses.storage.k8s.io \"nonexistent-storage-class\" not found"), + }, + { + name: "when persistent volume claim's storage class is mapped to a nonexistent storage class, an error is returned", + pvOrPVC: builder.ForPersistentVolumeClaim("velero", "pvc-1").StorageClass("storageclass-1").Result(), + configMap: builder.ForConfigMap("velero", "change-storage-classs"). + ObjectMeta(builder.WithLabels("velero.io/plugin-config", "true", "velero.io/change-storage-class", "RestoreItemAction")). + Data("storageclass-1", "nonexistent-storage-class"). 
+ Result(), + wantErr: errors.New("error getting storage class nonexistent-storage-class from API: storageclasses.storage.k8s.io \"nonexistent-storage-class\" not found"), + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + clientset := fake.NewSimpleClientset() + a := NewChangeStorageClassAction( + logrus.StandardLogger(), + clientset.CoreV1().ConfigMaps("velero"), + clientset.StorageV1().StorageClasses(), + ) + + // set up test data + if tc.configMap != nil { + _, err := clientset.CoreV1().ConfigMaps(tc.configMap.Namespace).Create(tc.configMap) + require.NoError(t, err) + } + + if tc.storageClass != nil { + _, err := clientset.StorageV1().StorageClasses().Create(tc.storageClass) + require.NoError(t, err) + } + + unstructuredMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tc.pvOrPVC) + require.NoError(t, err) + + input := &velero.RestoreItemActionExecuteInput{ + Item: &unstructured.Unstructured{ + Object: unstructuredMap, + }, + } + + // execute method under test + res, err := a.Execute(input) + + // validate for both error and non-error cases + switch { + case tc.wantErr != nil: + assert.EqualError(t, err, tc.wantErr.Error()) + default: + assert.NoError(t, err) + + wantUnstructured, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tc.want) + require.NoError(t, err) + + assert.Equal(t, &unstructured.Unstructured{Object: wantUnstructured}, res.UpdatedItem) + } + }) + } +} diff --git a/pkg/restore/job_action_test.go b/pkg/restore/job_action_test.go index 360a9d77bfa..12fa1131efc 100644 --- a/pkg/restore/job_action_test.go +++ b/pkg/restore/job_action_test.go @@ -28,7 +28,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "github.com/heptio/velero/pkg/plugin/velero" - velerotest "github.com/heptio/velero/pkg/util/test" + velerotest "github.com/heptio/velero/pkg/test" ) func TestJobActionExecute(t *testing.T) { diff --git a/pkg/restore/merge_service_account_test.go b/pkg/restore/merge_service_account_test.go index 5783cdbae15..5b306ad7771 100644 --- a/pkg/restore/merge_service_account_test.go +++ b/pkg/restore/merge_service_account_test.go @@ -25,7 +25,7 @@ import ( corev1api "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - velerotest "github.com/heptio/velero/pkg/util/test" + velerotest "github.com/heptio/velero/pkg/test" ) var mergedServiceAccountsBenchmarkResult *unstructured.Unstructured diff --git a/pkg/restore/pod_action_test.go b/pkg/restore/pod_action_test.go index 5590e9f202e..7789db52153 100644 --- a/pkg/restore/pod_action_test.go +++ b/pkg/restore/pod_action_test.go @@ -27,7 +27,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "github.com/heptio/velero/pkg/plugin/velero" - velerotest "github.com/heptio/velero/pkg/util/test" + velerotest "github.com/heptio/velero/pkg/test" ) func TestPodActionExecute(t *testing.T) { diff --git a/pkg/restore/pv_restorer_test.go b/pkg/restore/pv_restorer_test.go index 014fa233414..2f218ba15db 100644 --- a/pkg/restore/pv_restorer_test.go +++ b/pkg/restore/pv_restorer_test.go @@ -26,14 +26,19 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/builder" cloudprovidermocks "github.com/heptio/velero/pkg/cloudprovider/mocks" "github.com/heptio/velero/pkg/generated/clientset/versioned/fake" informers "github.com/heptio/velero/pkg/generated/informers/externalversions" "github.com/heptio/velero/pkg/plugin/velero" - velerotest "github.com/heptio/velero/pkg/util/test" + velerotest 
"github.com/heptio/velero/pkg/test" "github.com/heptio/velero/pkg/volume" ) +func defaultBackup() *builder.BackupBuilder { + return builder.ForBackup(api.DefaultNamespace, "backup-1") +} + func TestExecutePVAction_NoSnapshotRestores(t *testing.T) { tests := []struct { name string @@ -48,46 +53,46 @@ func TestExecutePVAction_NoSnapshotRestores(t *testing.T) { { name: "no name should error", obj: NewTestUnstructured().WithMetadata().Unstructured, - restore: velerotest.NewDefaultTestRestore().Restore, + restore: builder.ForRestore(api.DefaultNamespace, "").Result(), expectedErr: true, }, { name: "no spec should error", obj: NewTestUnstructured().WithName("pv-1").Unstructured, - restore: velerotest.NewDefaultTestRestore().Restore, + restore: builder.ForRestore(api.DefaultNamespace, "").Result(), expectedErr: true, }, { name: "ensure spec.claimRef is deleted", obj: NewTestUnstructured().WithName("pv-1").WithAnnotations("a", "b").WithSpec("claimRef", "someOtherField").Unstructured, - restore: velerotest.NewDefaultTestRestore().WithRestorePVs(false).Restore, - backup: velerotest.NewTestBackup().WithName("backup1").WithPhase(api.BackupPhaseInProgress).Backup, + restore: builder.ForRestore(api.DefaultNamespace, "").RestorePVs(false).Result(), + backup: defaultBackup().Phase(api.BackupPhaseInProgress).Result(), expectedRes: NewTestUnstructured().WithAnnotations("a", "b").WithName("pv-1").WithSpec("someOtherField").Unstructured, }, { name: "ensure spec.storageClassName is retained", obj: NewTestUnstructured().WithName("pv-1").WithAnnotations("a", "b").WithSpec("storageClassName", "someOtherField").Unstructured, - restore: velerotest.NewDefaultTestRestore().WithRestorePVs(false).Restore, - backup: velerotest.NewTestBackup().WithName("backup1").WithPhase(api.BackupPhaseInProgress).Backup, + restore: builder.ForRestore(api.DefaultNamespace, "").RestorePVs(false).Result(), + backup: defaultBackup().Phase(api.BackupPhaseInProgress).Result(), expectedRes: NewTestUnstructured().WithAnnotations("a", "b").WithName("pv-1").WithSpec("storageClassName", "someOtherField").Unstructured, }, { name: "if backup.spec.snapshotVolumes is false, ignore restore.spec.restorePVs and return early", obj: NewTestUnstructured().WithName("pv-1").WithAnnotations("a", "b").WithSpec("claimRef", "storageClassName", "someOtherField").Unstructured, - restore: velerotest.NewDefaultTestRestore().WithRestorePVs(true).Restore, - backup: velerotest.NewTestBackup().WithName("backup1").WithPhase(api.BackupPhaseInProgress).WithSnapshotVolumes(false).Backup, + restore: builder.ForRestore(api.DefaultNamespace, "").RestorePVs(true).Result(), + backup: defaultBackup().Phase(api.BackupPhaseInProgress).SnapshotVolumes(false).Result(), expectedRes: NewTestUnstructured().WithName("pv-1").WithAnnotations("a", "b").WithSpec("storageClassName", "someOtherField").Unstructured, }, { name: "restore.spec.restorePVs=false, return early", obj: NewTestUnstructured().WithName("pv-1").WithSpec().Unstructured, - restore: velerotest.NewDefaultTestRestore().WithRestorePVs(false).Restore, - backup: velerotest.NewTestBackup().WithName("backup1").WithPhase(api.BackupPhaseInProgress).Backup, + restore: builder.ForRestore(api.DefaultNamespace, "").RestorePVs(false).Result(), + backup: defaultBackup().Phase(api.BackupPhaseInProgress).Result(), volumeSnapshots: []*volume.Snapshot{ newSnapshot("pv-1", "loc-1", "gp", "az-1", "snap-1", 1000), }, locations: []*api.VolumeSnapshotLocation{ - velerotest.NewTestVolumeSnapshotLocation().WithName("loc-1").VolumeSnapshotLocation, + 
builder.ForVolumeSnapshotLocation(api.DefaultNamespace, "loc-1").Result(), }, expectedErr: false, expectedRes: NewTestUnstructured().WithName("pv-1").WithSpec().Unstructured, @@ -95,11 +100,11 @@ func TestExecutePVAction_NoSnapshotRestores(t *testing.T) { { name: "volumeSnapshots is empty: return early", obj: NewTestUnstructured().WithName("pv-1").WithSpec().Unstructured, - restore: velerotest.NewDefaultTestRestore().WithRestorePVs(true).Restore, - backup: velerotest.NewTestBackup().WithName("backup-1").Backup, + restore: builder.ForRestore(api.DefaultNamespace, "").RestorePVs(true).Result(), + backup: defaultBackup().Result(), locations: []*api.VolumeSnapshotLocation{ - velerotest.NewTestVolumeSnapshotLocation().WithName("loc-1").VolumeSnapshotLocation, - velerotest.NewTestVolumeSnapshotLocation().WithName("loc-2").VolumeSnapshotLocation, + builder.ForVolumeSnapshotLocation(api.DefaultNamespace, "loc-1").Result(), + builder.ForVolumeSnapshotLocation(api.DefaultNamespace, "loc-2").Result(), }, volumeSnapshots: []*volume.Snapshot{}, expectedRes: NewTestUnstructured().WithName("pv-1").WithSpec().Unstructured, @@ -107,11 +112,11 @@ func TestExecutePVAction_NoSnapshotRestores(t *testing.T) { { name: "volumeSnapshots doesn't have a snapshot for PV: return early", obj: NewTestUnstructured().WithName("pv-1").WithSpec().Unstructured, - restore: velerotest.NewDefaultTestRestore().WithRestorePVs(true).Restore, - backup: velerotest.NewTestBackup().WithName("backup-1").Backup, + restore: builder.ForRestore(api.DefaultNamespace, "").RestorePVs(true).Result(), + backup: defaultBackup().Result(), locations: []*api.VolumeSnapshotLocation{ - velerotest.NewTestVolumeSnapshotLocation().WithName("loc-1").VolumeSnapshotLocation, - velerotest.NewTestVolumeSnapshotLocation().WithName("loc-2").VolumeSnapshotLocation, + builder.ForVolumeSnapshotLocation(api.DefaultNamespace, "loc-1").Result(), + builder.ForVolumeSnapshotLocation(api.DefaultNamespace, "loc-2").Result(), }, volumeSnapshots: []*volume.Snapshot{ newSnapshot("non-matching-pv-1", "loc-1", "type-1", "az-1", "snap-1", 1), @@ -173,11 +178,11 @@ func TestExecutePVAction_SnapshotRestores(t *testing.T) { { name: "backup with a matching volume.Snapshot for PV executes restore", obj: NewTestUnstructured().WithName("pv-1").WithSpec().Unstructured, - restore: velerotest.NewDefaultTestRestore().WithRestorePVs(true).Restore, - backup: velerotest.NewTestBackup().WithName("backup-1").Backup, + restore: builder.ForRestore(api.DefaultNamespace, "").RestorePVs(true).Result(), + backup: defaultBackup().Result(), locations: []*api.VolumeSnapshotLocation{ - velerotest.NewTestVolumeSnapshotLocation().WithName("loc-1").WithProvider("provider-1").VolumeSnapshotLocation, - velerotest.NewTestVolumeSnapshotLocation().WithName("loc-2").WithProvider("provider-2").VolumeSnapshotLocation, + builder.ForVolumeSnapshotLocation(api.DefaultNamespace, "loc-1").Provider("provider-1").Result(), + builder.ForVolumeSnapshotLocation(api.DefaultNamespace, "loc-2").Provider("provider-2").Result(), }, volumeSnapshots: []*volume.Snapshot{ newSnapshot("pv-1", "loc-1", "type-1", "az-1", "snap-1", 1), @@ -254,3 +259,94 @@ func int64Ptr(val int) *int64 { r := int64(val) return &r } + +type testUnstructured struct { + *unstructured.Unstructured +} + +func NewTestUnstructured() *testUnstructured { + obj := &testUnstructured{ + Unstructured: &unstructured.Unstructured{ + Object: make(map[string]interface{}), + }, + } + + return obj +} + +func (obj *testUnstructured) WithMetadata(fields ...string) 
*testUnstructured { + return obj.withMap("metadata", fields...) +} + +func (obj *testUnstructured) WithSpec(fields ...string) *testUnstructured { + if _, found := obj.Object["spec"]; found { + panic("spec already set - you probably didn't mean to do this twice!") + } + return obj.withMap("spec", fields...) +} + +func (obj *testUnstructured) WithStatus(fields ...string) *testUnstructured { + return obj.withMap("status", fields...) +} + +func (obj *testUnstructured) WithMetadataField(field string, value interface{}) *testUnstructured { + return obj.withMapEntry("metadata", field, value) +} + +func (obj *testUnstructured) WithSpecField(field string, value interface{}) *testUnstructured { + return obj.withMapEntry("spec", field, value) +} + +func (obj *testUnstructured) WithStatusField(field string, value interface{}) *testUnstructured { + return obj.withMapEntry("status", field, value) +} + +func (obj *testUnstructured) WithAnnotations(fields ...string) *testUnstructured { + vals := map[string]string{} + for _, field := range fields { + vals[field] = "foo" + } + + return obj.WithAnnotationValues(vals) +} + +func (obj *testUnstructured) WithAnnotationValues(fieldVals map[string]string) *testUnstructured { + annotations := make(map[string]interface{}) + for field, val := range fieldVals { + annotations[field] = val + } + + obj = obj.WithMetadataField("annotations", annotations) + + return obj +} + +func (obj *testUnstructured) WithName(name string) *testUnstructured { + return obj.WithMetadataField("name", name) +} + +func (obj *testUnstructured) withMap(name string, fields ...string) *testUnstructured { + m := make(map[string]interface{}) + obj.Object[name] = m + + for _, field := range fields { + m[field] = "foo" + } + + return obj +} + +func (obj *testUnstructured) withMapEntry(mapName, field string, value interface{}) *testUnstructured { + var m map[string]interface{} + + if res, ok := obj.Unstructured.Object[mapName]; !ok { + m = make(map[string]interface{}) + obj.Unstructured.Object[mapName] = m + } else { + m = res.(map[string]interface{}) + } + + m[field] = value + + return obj +} diff --git a/pkg/restore/restic_restore_action.go b/pkg/restore/restic_restore_action.go index 051b2c0c015..964255634d1 100644 --- a/pkg/restore/restic_restore_action.go +++ b/pkg/restore/restic_restore_action.go @@ -28,24 +28,33 @@ import ( "k8s.io/apimachinery/pkg/runtime" corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/builder" "github.com/heptio/velero/pkg/buildinfo" + velerov1client "github.com/heptio/velero/pkg/generated/clientset/versioned/typed/velero/v1" "github.com/heptio/velero/pkg/plugin/framework" "github.com/heptio/velero/pkg/plugin/velero" "github.com/heptio/velero/pkg/restic" "github.com/heptio/velero/pkg/util/kube" ) -const defaultImageBase = "gcr.io/heptio-images/velero-restic-restore-helper" +const ( + defaultImageBase = "gcr.io/heptio-images/velero-restic-restore-helper" + defaultCPURequestLimit = "100m" + defaultMemRequestLimit = "128Mi" +) type ResticRestoreAction struct { - logger logrus.FieldLogger - client corev1client.ConfigMapInterface + logger logrus.FieldLogger + client corev1client.ConfigMapInterface + podVolumeBackupClient velerov1client.PodVolumeBackupInterface } -func NewResticRestoreAction(logger logrus.FieldLogger, client corev1client.ConfigMapInterface) *ResticRestoreAction { +func NewResticRestoreAction(logger logrus.FieldLogger, client corev1client.ConfigMapInterface, 
podVolumeBackupClient velerov1client.PodVolumeBackupInterface) *ResticRestoreAction { return &ResticRestoreAction{ - logger: logger, - client: client, + logger: logger, + client: client, + podVolumeBackupClient: podVolumeBackupClient, } } @@ -66,13 +75,23 @@ func (a *ResticRestoreAction) Execute(input *velero.RestoreItemActionExecuteInpu log := a.logger.WithField("pod", kube.NamespaceAndName(&pod)) - volumeSnapshots := restic.GetPodSnapshotAnnotations(&pod) + opts := restic.NewPodVolumeBackupListOptions(input.Restore.Spec.BackupName) + podVolumeBackupList, err := a.podVolumeBackupClient.List(opts) + if err != nil { + return nil, errors.WithStack(err) + } + + var podVolumeBackups []*velerov1api.PodVolumeBackup + for i := range podVolumeBackupList.Items { + podVolumeBackups = append(podVolumeBackups, &podVolumeBackupList.Items[i]) + } + volumeSnapshots := restic.GetVolumeBackupsForPod(podVolumeBackups, &pod) if len(volumeSnapshots) == 0 { - log.Debug("No restic snapshot ID annotations found") + log.Debug("No restic backups found for pod") return velero.NewRestoreItemActionExecuteOutput(input.Item), nil } - log.Info("Restic snapshot ID annotations found") + log.Info("Restic backups for pod found") // TODO we might want/need to get plugin config at the top of this method at some point; for now, wait // until we know we're doing a restore before getting config. @@ -85,38 +104,30 @@ func (a *ResticRestoreAction) Execute(input *velero.RestoreItemActionExecuteInpu image := getImage(log, config) log.Infof("Using image %q", image) - initContainer := corev1.Container{ - Name: restic.InitContainer, - Image: image, - Args: []string{string(input.Restore.UID)}, - Env: []corev1.EnvVar{ - { - Name: "POD_NAMESPACE", - ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{ - FieldPath: "metadata.namespace", - }, - }, - }, - { - Name: "POD_NAME", - ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{ - FieldPath: "metadata.name", - }, - }, - }, - }, + cpuRequest, memRequest := getResourceRequests(log, config) + cpuLimit, memLimit := getResourceLimits(log, config) + + resourceReqs, err := kube.ParseResourceRequirements(cpuRequest, memRequest, cpuLimit, memLimit) + if err != nil { + log.Errorf("Using default resource values, couldn't parse resource requirements: %s.", err) + resourceReqs, _ = kube.ParseResourceRequirements( + defaultCPURequestLimit, defaultMemRequestLimit, // requests + defaultCPURequestLimit, defaultMemRequestLimit, // limits + ) } + initContainerBuilder := newResticInitContainerBuilder(image, string(input.Restore.UID)) + initContainerBuilder.Resources(&resourceReqs) + for volumeName := range volumeSnapshots { - mount := corev1.VolumeMount{ + mount := &corev1.VolumeMount{ Name: volumeName, MountPath: "/restores/" + volumeName, } - initContainer.VolumeMounts = append(initContainer.VolumeMounts, mount) + initContainerBuilder.VolumeMounts(mount) } + initContainer := *initContainerBuilder.Result() if len(pod.Spec.InitContainers) == 0 || pod.Spec.InitContainers[0].Name != restic.InitContainer { pod.Spec.InitContainers = append([]corev1.Container{initContainer}, pod.Spec.InitContainers...) } else { @@ -162,6 +173,28 @@ func getImage(log logrus.FieldLogger, config *corev1.ConfigMap) string { } } +// getResourceRequests extracts the CPU and memory requests from a ConfigMap. 
+// The 0 values are valid if the keys are not present +func getResourceRequests(log logrus.FieldLogger, config *corev1.ConfigMap) (string, string) { + if config == nil { + log.Debug("No config found for plugin") + return "", "" + } + + return config.Data["cpuRequest"], config.Data["memRequest"] +} + +// getResourceLimits extracts the CPU and memory limits from a ConfigMap. +// The 0 values are valid if the keys are not present +func getResourceLimits(log logrus.FieldLogger, config *corev1.ConfigMap) (string, string) { + if config == nil { + log.Debug("No config found for plugin") + return "", "" + } + + return config.Data["cpuLimit"], config.Data["memLimit"] +} + // TODO eventually this can move to pkg/plugin/framework since it'll be used across multiple // plugins. func getPluginConfig(kind framework.PluginKind, name string, client corev1client.ConfigMapInterface) (*corev1.ConfigMap, error) { @@ -191,6 +224,29 @@ func getPluginConfig(kind framework.PluginKind, name string, client corev1client return &list.Items[0], nil } +func newResticInitContainerBuilder(image, restoreUID string) *builder.ContainerBuilder { + return builder.ForContainer(restic.InitContainer, image). + Args(restoreUID). + Env([]*corev1.EnvVar{ + { + Name: "POD_NAMESPACE", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.namespace", + }, + }, + }, + { + Name: "POD_NAME", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, + }, + }, + }...) +} + func initContainerImage(imageBase string) string { tag := buildinfo.Version if tag == "" { diff --git a/pkg/restore/restic_restore_action_test.go b/pkg/restore/restic_restore_action_test.go index 4318771ddcd..938ef58e3e7 100644 --- a/pkg/restore/restic_restore_action_test.go +++ b/pkg/restore/restic_restore_action_test.go @@ -20,16 +20,26 @@ import ( "fmt" "testing" + "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" - corev1 "k8s.io/api/core/v1" + "github.com/stretchr/testify/require" + corev1api "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes/fake" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/builder" "github.com/heptio/velero/pkg/buildinfo" - velerotest "github.com/heptio/velero/pkg/util/test" + velerofake "github.com/heptio/velero/pkg/generated/clientset/versioned/fake" + "github.com/heptio/velero/pkg/plugin/velero" + velerotest "github.com/heptio/velero/pkg/test" + "github.com/heptio/velero/pkg/util/kube" ) func TestGetImage(t *testing.T) { - configMapWithData := func(key, val string) *corev1.ConfigMap { - return &corev1.ConfigMap{ + configMapWithData := func(key, val string) *corev1api.ConfigMap { + return &corev1api.ConfigMap{ Data: map[string]string{ key: val, }, @@ -44,7 +54,7 @@ func TestGetImage(t *testing.T) { tests := []struct { name string - configMap *corev1.ConfigMap + configMap *corev1api.ConfigMap want string }{ { @@ -80,3 +90,134 @@ func TestGetImage(t *testing.T) { }) } } + +// TestResticRestoreActionExecute tests the restic restore item action plugin's Execute method. 
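For context on the getResourceRequests/getResourceLimits lookups above: the init container's resources come from the same plugin ConfigMap that supplies the restore helper image. The sketch below shows such a ConfigMap; only the velero.io/plugin-config label and the cpu/mem keys appear in this diff, so the object name and the label identifying the plugin are assumptions, and the sizes are placeholders.

    restoreHelperConfig := &corev1api.ConfigMap{
        ObjectMeta: metav1.ObjectMeta{
            Namespace: "velero",
            Name:      "restic-restore-action-config", // arbitrary; lookup is label-based
            Labels: map[string]string{
                "velero.io/plugin-config": "true",
                "velero.io/restic":        "RestoreItemAction", // assumed plugin-name label
            },
        },
        Data: map[string]string{
            "cpuRequest": "200m",
            "memRequest": "128Mi",
            "cpuLimit":   "200m",
            "memLimit":   "128Mi",
        },
    }

If these values fail to parse, Execute logs the error and falls back to the 100m/128Mi defaults defined at the top of the file.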
+func TestResticRestoreActionExecute(t *testing.T) { + resourceReqs, _ := kube.ParseResourceRequirements( + defaultCPURequestLimit, defaultMemRequestLimit, // requests + defaultCPURequestLimit, defaultMemRequestLimit, // limits + ) + + var ( + restoreName = "my-restore" + backupName = "test-backup" + veleroNs = "velero" + ) + + tests := []struct { + name string + pod *corev1api.Pod + podVolumeBackups []*velerov1api.PodVolumeBackup + want *corev1api.Pod + }{ + { + name: "Restoring pod with no other initContainers adds the restic initContainer", + pod: builder.ForPod("ns-1", "my-pod").ObjectMeta( + builder.WithAnnotations("snapshot.velero.io/myvol", "")). + Result(), + want: builder.ForPod("ns-1", "my-pod"). + ObjectMeta( + builder.WithAnnotations("snapshot.velero.io/myvol", "")). + InitContainers( + newResticInitContainerBuilder(initContainerImage(defaultImageBase), ""). + Resources(&resourceReqs). + VolumeMounts(builder.ForVolumeMount("myvol", "/restores/myvol").Result()).Result()). + Result(), + }, + { + name: "Restoring pod with other initContainers adds the restic initContainer as the first one", + pod: builder.ForPod("ns-1", "my-pod"). + ObjectMeta( + builder.WithAnnotations("snapshot.velero.io/myvol", "")). + InitContainers(builder.ForContainer("first-container", "").Result()). + Result(), + want: builder.ForPod("ns-1", "my-pod"). + ObjectMeta( + builder.WithAnnotations("snapshot.velero.io/myvol", "")). + InitContainers( + newResticInitContainerBuilder(initContainerImage(defaultImageBase), ""). + Resources(&resourceReqs). + VolumeMounts(builder.ForVolumeMount("myvol", "/restores/myvol").Result()).Result(), + builder.ForContainer("first-container", "").Result()). + Result(), + }, + { + name: "Restoring pod with other initContainers adds the restic initContainer as the first one using PVB to identify the volumes and not annotations", + pod: builder.ForPod("ns-1", "my-pod"). + Volumes( + builder.ForVolume("vol-1").PersistentVolumeClaimSource("pvc-1").Result(), + builder.ForVolume("vol-2").PersistentVolumeClaimSource("pvc-2").Result(), + ). + ObjectMeta( + builder.WithAnnotations("snapshot.velero.io/not-used", "")). + InitContainers(builder.ForContainer("first-container", "").Result()). + Result(), + podVolumeBackups: []*velerov1api.PodVolumeBackup{ + builder.ForPodVolumeBackup(veleroNs, "pvb-1"). + PodName("my-pod"). + Volume("vol-1"). + ObjectMeta(builder.WithLabels(velerov1api.BackupNameLabel, backupName)). + Result(), + builder.ForPodVolumeBackup(veleroNs, "pvb-2"). + PodName("my-pod"). + Volume("vol-2"). + ObjectMeta(builder.WithLabels(velerov1api.BackupNameLabel, backupName)). + Result(), + }, + want: builder.ForPod("ns-1", "my-pod"). + Volumes( + builder.ForVolume("vol-1").PersistentVolumeClaimSource("pvc-1").Result(), + builder.ForVolume("vol-2").PersistentVolumeClaimSource("pvc-2").Result(), + ). + ObjectMeta( + builder.WithAnnotations("snapshot.velero.io/not-used", "")). + InitContainers( + newResticInitContainerBuilder(initContainerImage(defaultImageBase), ""). + Resources(&resourceReqs). + VolumeMounts(builder.ForVolumeMount("vol-1", "/restores/vol-1").Result(), builder.ForVolumeMount("vol-2", "/restores/vol-2").Result()).Result(), + builder.ForContainer("first-container", "").Result()). 
+ Result(), + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + clientset := fake.NewSimpleClientset() + clientsetVelero := velerofake.NewSimpleClientset() + + for _, podVolumeBackup := range tc.podVolumeBackups { + _, err := clientsetVelero.VeleroV1().PodVolumeBackups(veleroNs).Create(podVolumeBackup) + require.NoError(t, err) + } + + unstructuredMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tc.pod) + require.NoError(t, err) + + input := &velero.RestoreItemActionExecuteInput{ + Item: &unstructured.Unstructured{ + Object: unstructuredMap, + }, + Restore: builder.ForRestore(veleroNs, restoreName). + Backup(backupName). + Phase(velerov1api.RestorePhaseInProgress). + Result(), + } + + a := NewResticRestoreAction( + logrus.StandardLogger(), + clientset.CoreV1().ConfigMaps(veleroNs), + clientsetVelero.VeleroV1().PodVolumeBackups(veleroNs), + ) + + // method under test + res, err := a.Execute(input) + assert.NoError(t, err) + + wantUnstructured, err := runtime.DefaultUnstructuredConverter.ToUnstructured(tc.want) + require.NoError(t, err) + + assert.Equal(t, &unstructured.Unstructured{Object: wantUnstructured}, res.UpdatedItem) + }) + } + +} diff --git a/pkg/restore/restore.go b/pkg/restore/restore.go index 0bff67aa6f2..2f89dc04d61 100644 --- a/pkg/restore/restore.go +++ b/pkg/restore/restore.go @@ -25,9 +25,11 @@ import ( "os" "path/filepath" "sort" + "strings" "time" "github.com/pkg/errors" + uuid "github.com/satori/go.uuid" "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" @@ -42,7 +44,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" - api "github.com/heptio/velero/pkg/apis/velero/v1" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" "github.com/heptio/velero/pkg/client" "github.com/heptio/velero/pkg/discovery" listers "github.com/heptio/velero/pkg/generated/listers/velero/v1" @@ -50,6 +52,7 @@ import ( "github.com/heptio/velero/pkg/label" "github.com/heptio/velero/pkg/plugin/velero" "github.com/heptio/velero/pkg/restic" + "github.com/heptio/velero/pkg/util/boolptr" "github.com/heptio/velero/pkg/util/collections" "github.com/heptio/velero/pkg/util/filesystem" "github.com/heptio/velero/pkg/util/kube" @@ -61,14 +64,20 @@ type VolumeSnapshotterGetter interface { GetVolumeSnapshotter(name string) (velero.VolumeSnapshotter, error) } +type Request struct { + *velerov1api.Restore + + Log logrus.FieldLogger + Backup *velerov1api.Backup + PodVolumeBackups []*velerov1api.PodVolumeBackup + VolumeSnapshots []*volume.Snapshot + BackupReader io.Reader +} + // Restorer knows how to restore a backup. type Restorer interface { // Restore restores the backup data from backupReader, returning warnings and errors. 
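The signature change below folds the old per-argument inputs (log, restore, backup, volume snapshots, backup reader) into the new Request struct, and adds PodVolumeBackups alongside them. A rough sketch of a call site, not the actual restore controller; the function name runRestore and the import alias pkgrestore are illustrative:

package restoresketch

import (
	"io"

	"github.com/sirupsen/logrus"

	velerov1api "github.com/heptio/velero/pkg/apis/velero/v1"
	listers "github.com/heptio/velero/pkg/generated/listers/velero/v1"
	"github.com/heptio/velero/pkg/plugin/velero"
	pkgrestore "github.com/heptio/velero/pkg/restore"
	"github.com/heptio/velero/pkg/volume"
)

// runRestore shows how a caller now drives the restorer: the per-restore data
// is bundled into pkgrestore.Request and the trailing arguments are unchanged.
func runRestore(
	restorer pkgrestore.Restorer,
	log logrus.FieldLogger,
	restore *velerov1api.Restore,
	backup *velerov1api.Backup,
	podVolumeBackups []*velerov1api.PodVolumeBackup,
	snapshots []*volume.Snapshot,
	backupReader io.Reader, // gzipped tarball of the backup contents
	actions []velero.RestoreItemAction,
	locationLister listers.VolumeSnapshotLocationLister,
	snapshotterGetter pkgrestore.VolumeSnapshotterGetter,
) (warnings, errs pkgrestore.Result) {
	req := pkgrestore.Request{
		Log:              log,
		Restore:          restore,
		Backup:           backup,
		PodVolumeBackups: podVolumeBackups,
		VolumeSnapshots:  snapshots,
		BackupReader:     backupReader,
	}
	return restorer.Restore(req, actions, locationLister, snapshotterGetter)
}

Carrying PodVolumeBackups inside Request is what lets the restic-related logic further down (restorePodVolumeBackups, hasResticBackup) see them without widening the interface again.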
- Restore(log logrus.FieldLogger, - restore *api.Restore, - backup *api.Backup, - volumeSnapshots []*volume.Snapshot, - backupReader io.Reader, + Restore(req Request, actions []velero.RestoreItemAction, snapshotLocationLister listers.VolumeSnapshotLocationLister, volumeSnapshotterGetter VolumeSnapshotterGetter, @@ -85,6 +94,7 @@ type kubernetesRestorer struct { resourceTerminatingTimeout time.Duration resourcePriorities []string fileSystem filesystem.Interface + pvRenamer func(string) string logger logrus.FieldLogger } @@ -166,6 +176,7 @@ func NewKubernetesRestorer( resourceTerminatingTimeout: resourceTerminatingTimeout, resourcePriorities: resourcePriorities, logger: logger, + pvRenamer: func(string) string { return "velero-clone-" + uuid.NewV4().String() }, fileSystem: filesystem.NewFileSystem(), }, nil } @@ -174,11 +185,7 @@ func NewKubernetesRestorer( // and using data from the provided backup/backup reader. Returns a warnings and errors RestoreResult, // respectively, summarizing info about the restore. func (kr *kubernetesRestorer) Restore( - log logrus.FieldLogger, - restore *api.Restore, - backup *api.Backup, - volumeSnapshots []*volume.Snapshot, - backupReader io.Reader, + req Request, actions []velero.RestoreItemAction, snapshotLocationLister listers.VolumeSnapshotLocationLister, volumeSnapshotterGetter VolumeSnapshotterGetter, @@ -187,7 +194,7 @@ func (kr *kubernetesRestorer) Restore( // Nothing Selector, i.e. a selector that matches nothing. We want // a selector that matches everything. This can be accomplished by // passing a non-nil empty LabelSelector. - ls := restore.Spec.LabelSelector + ls := req.Restore.Spec.LabelSelector if ls == nil { ls = &metav1.LabelSelector{} } @@ -198,22 +205,27 @@ func (kr *kubernetesRestorer) Restore( } // get resource includes-excludes - resourceIncludesExcludes := getResourceIncludesExcludes(kr.discoveryHelper, restore.Spec.IncludedResources, restore.Spec.ExcludedResources) - prioritizedResources, err := prioritizeResources(kr.discoveryHelper, kr.resourcePriorities, resourceIncludesExcludes, log) + resourceIncludesExcludes := getResourceIncludesExcludes(kr.discoveryHelper, req.Restore.Spec.IncludedResources, req.Restore.Spec.ExcludedResources) + prioritizedResources, err := prioritizeResources(kr.discoveryHelper, kr.resourcePriorities, resourceIncludesExcludes, req.Log) if err != nil { return Result{}, Result{Velero: []string{err.Error()}} } + // get namespace includes-excludes + namespaceIncludesExcludes := collections.NewIncludesExcludes(). + Includes(req.Restore.Spec.IncludedNamespaces...). + Excludes(req.Restore.Spec.ExcludedNamespaces...) 
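The namespace filter built just above uses the package's standard include/exclude semantics: "*" matches every namespace and an explicit exclude always wins. A tiny sketch with hypothetical namespace names:

package main

import (
	"fmt"

	"github.com/heptio/velero/pkg/util/collections"
)

func main() {
	// Mirrors how Restore builds namespaceIncludesExcludes above.
	nsFilter := collections.NewIncludesExcludes().
		Includes("*").
		Excludes("ns-2")

	fmt.Println(nsFilter.ShouldInclude("ns-1")) // true
	fmt.Println(nsFilter.ShouldInclude("ns-2")) // false, explicitly excluded
}

The same ShouldInclude check is what restoreFromDir and restoreItem use further down to skip namespaces that the restore spec filters out.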
+ resolvedActions, err := resolveActions(actions, kr.discoveryHelper) if err != nil { return Result{}, Result{Velero: []string{err.Error()}} } podVolumeTimeout := kr.resticTimeout - if val := restore.Annotations[api.PodVolumeOperationTimeoutAnnotation]; val != "" { + if val := req.Restore.Annotations[velerov1api.PodVolumeOperationTimeoutAnnotation]; val != "" { parsed, err := time.ParseDuration(val) if err != nil { - log.WithError(errors.WithStack(err)).Errorf("Unable to parse pod volume timeout annotation %s, using server value.", val) + req.Log.WithError(errors.WithStack(err)).Errorf("Unable to parse pod volume timeout annotation %s, using server value.", val) } else { podVolumeTimeout = parsed } @@ -224,29 +236,31 @@ func (kr *kubernetesRestorer) Restore( var resticRestorer restic.Restorer if kr.resticRestorerFactory != nil { - resticRestorer, err = kr.resticRestorerFactory.NewRestorer(ctx, restore) + resticRestorer, err = kr.resticRestorerFactory.NewRestorer(ctx, req.Restore) if err != nil { return Result{}, Result{Velero: []string{err.Error()}} } } pvRestorer := &pvRestorer{ - logger: log, - backup: backup, - snapshotVolumes: backup.Spec.SnapshotVolumes, - restorePVs: restore.Spec.RestorePVs, - volumeSnapshots: volumeSnapshots, + logger: req.Log, + backup: req.Backup, + snapshotVolumes: req.Backup.Spec.SnapshotVolumes, + restorePVs: req.Restore.Spec.RestorePVs, + volumeSnapshots: req.VolumeSnapshots, volumeSnapshotterGetter: volumeSnapshotterGetter, snapshotLocationLister: snapshotLocationLister, } restoreCtx := &context{ - backup: backup, - backupReader: backupReader, - restore: restore, + backup: req.Backup, + backupReader: req.BackupReader, + restore: req.Restore, + resourceIncludesExcludes: resourceIncludesExcludes, + namespaceIncludesExcludes: namespaceIncludesExcludes, prioritizedResources: prioritizedResources, selector: selector, - log: log, + log: req.Log, dynamicFactory: kr.dynamicFactory, fileSystem: kr.fileSystem, namespaceClient: kr.namespaceClient, @@ -255,15 +269,17 @@ func (kr *kubernetesRestorer) Restore( resticRestorer: resticRestorer, pvsToProvision: sets.NewString(), pvRestorer: pvRestorer, - volumeSnapshots: volumeSnapshots, + volumeSnapshots: req.VolumeSnapshots, + podVolumeBackups: req.PodVolumeBackups, resourceTerminatingTimeout: kr.resourceTerminatingTimeout, extractor: &backupExtractor{ - log: log, + log: req.Log, fileSystem: kr.fileSystem, }, - applicableActions: make(map[schema.GroupResource][]resolvedAction), - resourceClients: make(map[resourceClientKey]client.Dynamic), - restoredItems: make(map[velero.ResourceIdentifier]struct{}), + resourceClients: make(map[resourceClientKey]client.Dynamic), + restoredItems: make(map[velero.ResourceIdentifier]struct{}), + renamedPVs: make(map[string]string), + pvRenamer: kr.pvRenamer, } return restoreCtx.execute() @@ -331,10 +347,12 @@ func resolveActions(actions []velero.RestoreItemAction, helper discovery.Helper) } type context struct { - backup *api.Backup + backup *velerov1api.Backup backupReader io.Reader - restore *api.Restore + restore *velerov1api.Restore restoreDir string + resourceIncludesExcludes *collections.IncludesExcludes + namespaceIncludesExcludes *collections.IncludesExcludes prioritizedResources []schema.GroupResource selector labels.Selector log logrus.FieldLogger @@ -348,11 +366,13 @@ type context struct { pvsToProvision sets.String pvRestorer PVRestorer volumeSnapshots []*volume.Snapshot + podVolumeBackups []*velerov1api.PodVolumeBackup resourceTerminatingTimeout time.Duration extractor 
*backupExtractor - applicableActions map[schema.GroupResource][]resolvedAction resourceClients map[resourceClientKey]client.Dynamic restoredItems map[velero.ResourceIdentifier]struct{} + renamedPVs map[string]string + pvRenamer func(string) string } type resourceClientKey struct { @@ -381,12 +401,8 @@ func (ctx *context) execute() (Result, Result) { func (ctx *context) restoreFromDir() (Result, Result) { warnings, errs := Result{}, Result{} - namespaceFilter := collections.NewIncludesExcludes(). - Includes(ctx.restore.Spec.IncludedNamespaces...). - Excludes(ctx.restore.Spec.ExcludedNamespaces...) - // Make sure the top level "resources" dir exists: - resourcesDir := filepath.Join(ctx.restoreDir, api.ResourcesDir) + resourcesDir := filepath.Join(ctx.restoreDir, velerov1api.ResourcesDir) rde, err := ctx.fileSystem.DirExists(resourcesDir) if err != nil { addVeleroError(&errs, err) @@ -425,7 +441,7 @@ func (ctx *context) restoreFromDir() (Result, Result) { resourcePath := filepath.Join(resourcesDir, rscDir.Name()) - clusterSubDir := filepath.Join(resourcePath, api.ClusterScopedDir) + clusterSubDir := filepath.Join(resourcePath, velerov1api.ClusterScopedDir) clusterSubDirExists, err := ctx.fileSystem.DirExists(clusterSubDir) if err != nil { addVeleroError(&errs, err) @@ -438,7 +454,7 @@ func (ctx *context) restoreFromDir() (Result, Result) { continue } - nsSubDir := filepath.Join(resourcePath, api.NamespaceScopedDir) + nsSubDir := filepath.Join(resourcePath, velerov1api.NamespaceScopedDir) nsSubDirExists, err := ctx.fileSystem.DirExists(nsSubDir) if err != nil { addVeleroError(&errs, err) @@ -461,7 +477,7 @@ func (ctx *context) restoreFromDir() (Result, Result) { nsName := nsDir.Name() nsPath := filepath.Join(nsSubDir, nsName) - if !namespaceFilter.ShouldInclude(nsName) { + if !ctx.namespaceIncludesExcludes.ShouldInclude(nsName) { ctx.log.Infof("Skipping namespace %s", nsName) continue } @@ -513,9 +529,9 @@ func (ctx *context) restoreFromDir() (Result, Result) { func getItemFilePath(rootDir, groupResource, namespace, name string) string { switch namespace { case "": - return filepath.Join(rootDir, api.ResourcesDir, groupResource, api.ClusterScopedDir, name+".json") + return filepath.Join(rootDir, velerov1api.ResourcesDir, groupResource, velerov1api.ClusterScopedDir, name+".json") default: - return filepath.Join(rootDir, api.ResourcesDir, groupResource, api.NamespaceScopedDir, namespace, name+".json") + return filepath.Join(rootDir, velerov1api.ResourcesDir, groupResource, velerov1api.NamespaceScopedDir, namespace, name+".json") } } @@ -588,10 +604,6 @@ func addToResult(r *Result, ns string, e error) { } func (ctx *context) getApplicableActions(groupResource schema.GroupResource, namespace string) []resolvedAction { - if actions, ok := ctx.applicableActions[groupResource]; ok { - return actions - } - var actions []resolvedAction for _, action := range ctx.actions { if !action.resourceIncludesExcludes.ShouldInclude(groupResource.String()) { @@ -602,10 +614,13 @@ func (ctx *context) getApplicableActions(groupResource schema.GroupResource, nam continue } + if namespace == "" && !action.namespaceIncludesExcludes.IncludeEverything() { + continue + } + actions = append(actions, action) } - ctx.applicableActions[groupResource] = actions return actions } @@ -733,7 +748,7 @@ func (ctx *context) restoreResource(resource, namespace, resourcePath string) (R fullPath := filepath.Join(resourcePath, file.Name()) obj, err := ctx.unmarshal(fullPath) if err != nil { - addToResult(&errs, namespace, 
fmt.Errorf("error decoding %q: %v", fullPath, err)) + addToResult(&errs, namespace, fmt.Errorf("error decoding %q: %v", strings.Replace(fullPath, ctx.restoreDir+"/", "", -1), err)) continue } @@ -789,6 +804,42 @@ func (ctx *context) restoreItem(obj *unstructured.Unstructured, groupResource sc warnings, errs := Result{}, Result{} resourceID := getResourceID(groupResource, namespace, obj.GetName()) + // Check if group/resource should be restored. We need to do this here since + // this method may be getting called for an additional item which is a group/resource + // that's excluded. + if !ctx.resourceIncludesExcludes.ShouldInclude(groupResource.String()) { + ctx.log.WithFields(logrus.Fields{ + "namespace": obj.GetNamespace(), + "name": obj.GetName(), + "groupResource": groupResource.String(), + }).Info("Not restoring item because resource is excluded") + return warnings, errs + } + + // Check if namespace/cluster-scoped resource should be restored. We need + // to do this here since this method may be getting called for an additional + // item which is in a namespace that's excluded, or which is cluster-scoped + // and should be excluded. + if namespace != "" { + if !ctx.namespaceIncludesExcludes.ShouldInclude(namespace) { + ctx.log.WithFields(logrus.Fields{ + "namespace": obj.GetNamespace(), + "name": obj.GetName(), + "groupResource": groupResource.String(), + }).Info("Not restoring item because namespace is excluded") + return warnings, errs + } + } else { + if boolptr.IsSetToFalse(ctx.restore.Spec.IncludeClusterResources) { + ctx.log.WithFields(logrus.Fields{ + "namespace": obj.GetNamespace(), + "name": obj.GetName(), + "groupResource": groupResource.String(), + }).Info("Not restoring item because it's cluster-scoped") + return warnings, errs + } + } + // make a copy of object retrieved from backup // to make it available unchanged inside restore actions itemFromBackup := obj.DeepCopy() @@ -830,41 +881,84 @@ func (ctx *context) restoreItem(obj *unstructured.Unstructured, groupResource sc } if groupResource == kuberesource.PersistentVolumes { - var hasSnapshot bool + switch { + case hasSnapshot(name, ctx.volumeSnapshots): + shouldRenamePV, err := shouldRenamePV(ctx, obj, resourceClient) + if err != nil { + addToResult(&errs, namespace, err) + return warnings, errs + } + + var shouldRestoreSnapshot bool + if !shouldRenamePV { + // Check if the PV exists in the cluster before attempting to create + // a volume from the snapshot, in order to avoid orphaned volumes (GH #609) + shouldRestoreSnapshot, err = ctx.shouldRestore(name, resourceClient) + if err != nil { + addToResult(&errs, namespace, errors.Wrapf(err, "error waiting on in-cluster persistentvolume %s", name)) + return warnings, errs + } + } else { + // if we're renaming the PV, we're going to give it a new random name, + // so we can assume it doesn't already exist in the cluster and therefore + // we should proceed with restoring from snapshot. + shouldRestoreSnapshot = true + } - for _, snapshot := range ctx.volumeSnapshots { - if snapshot.Spec.PersistentVolumeName == name { - hasSnapshot = true - break + if shouldRestoreSnapshot { + // even if we're renaming the PV, obj still has the old name here, because the pvRestorer + // uses the original name to look up metadata about the snapshot. 
+ ctx.log.Infof("Restoring persistent volume from snapshot.") + updatedObj, err := ctx.pvRestorer.executePVAction(obj) + if err != nil { + addToResult(&errs, namespace, fmt.Errorf("error executing PVAction for %s: %v", resourceID, err)) + return warnings, errs + } + obj = updatedObj + } + + if shouldRenamePV { + // give obj a new name, and record the mapping between the old and new names + oldName := obj.GetName() + newName := ctx.pvRenamer(oldName) + + ctx.renamedPVs[oldName] = newName + obj.SetName(newName) + + // add the original PV name as an annotation + annotations := obj.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} + } + annotations["velero.io/original-pv-name"] = oldName + obj.SetAnnotations(annotations) } - } - if !hasSnapshot && hasDeleteReclaimPolicy(obj.Object) { - ctx.log.Infof("Not restoring PV because it doesn't have a snapshot and its reclaim policy is Delete.") + case hasResticBackup(obj, ctx): + ctx.log.Infof("Dynamically re-provisioning persistent volume because it has a restic backup to be restored.") ctx.pvsToProvision.Insert(name) + + // return early because we don't want to restore the PV itself, we want to dynamically re-provision it. return warnings, errs - } - // Check if the PV exists in the cluster before attempting to create - // a volume from the snapshot, in order to avoid orphaned volumes (GH #609) - shouldRestoreSnapshot, err := ctx.shouldRestore(name, resourceClient) - if err != nil { - addToResult(&errs, namespace, errors.Wrapf(err, "error waiting on in-cluster persistentvolume %s", name)) + case hasDeleteReclaimPolicy(obj.Object): + ctx.log.Infof("Dynamically re-provisioning persistent volume because it doesn't have a snapshot and its reclaim policy is Delete.") + ctx.pvsToProvision.Insert(name) + + // return early because we don't want to restore the PV itself, we want to dynamically re-provision it. return warnings, errs - } - // PV's existence will be recorded later. Just skip the volume restore logic. - if shouldRestoreSnapshot { - // restore the PV from snapshot (if applicable) + default: + ctx.log.Infof("Restoring persistent volume as-is because it doesn't have a snapshot and its reclaim policy is not Delete.") + + // we call the pvRestorer here to clear out the PV's claimRef, so it can be re-claimed + // when its PVC is restored. 
updatedObj, err := ctx.pvRestorer.executePVAction(obj) if err != nil { addToResult(&errs, namespace, fmt.Errorf("error executing PVAction for %s: %v", resourceID, err)) return warnings, errs } obj = updatedObj - } else if err != nil { - addToResult(&errs, namespace, fmt.Errorf("error checking existence for PV %s: %v", name, err)) - return warnings, errs } } @@ -963,6 +1057,14 @@ func (ctx *context) restoreItem(obj *unstructured.Unstructured, groupResource sc delete(annotations, "pv.kubernetes.io/bound-by-controller") obj.SetAnnotations(annotations) } + + if newName, ok := ctx.renamedPVs[pvc.Spec.VolumeName]; ok { + ctx.log.Infof("Updating persistent volume claim %s/%s to reference renamed persistent volume (%s -> %s)", namespace, name, pvc.Spec.VolumeName, newName) + if err := unstructured.SetNestedField(obj.Object, newName, "spec", "volumeName"); err != nil { + addToResult(&errs, namespace, err) + return warnings, errs + } + } } // necessary because we may have remapped the namespace @@ -997,7 +1099,7 @@ func (ctx *context) restoreItem(obj *unstructured.Unstructured, groupResource sc // We know the object from the cluster won't have the backup/restore name labels, so // copy them from the object we attempted to restore. labels := obj.GetLabels() - addRestoreLabels(fromCluster, labels[api.RestoreNameLabel], labels[api.BackupNameLabel]) + addRestoreLabels(fromCluster, labels[velerov1api.RestoreNameLabel], labels[velerov1api.BackupNameLabel]) if !equality.Semantic.DeepEqual(fromCluster, obj) { switch groupResource { @@ -1045,28 +1147,115 @@ func (ctx *context) restoreItem(obj *unstructured.Unstructured, groupResource sc return warnings, errs } - if groupResource == kuberesource.Pods && len(restic.GetPodSnapshotAnnotations(obj)) > 0 { - if ctx.resticRestorer == nil { - ctx.log.Warn("No restic restorer, not restoring pod's volumes") - } else { - ctx.globalWaitGroup.GoErrorSlice(func() []error { - pod := new(v1.Pod) - if err := runtime.DefaultUnstructuredConverter.FromUnstructured(createdObj.UnstructuredContent(), &pod); err != nil { - ctx.log.WithError(err).Error("error converting unstructured pod") - return []error{err} - } + if groupResource == kuberesource.Pods && len(restic.GetVolumeBackupsForPod(ctx.podVolumeBackups, obj)) > 0 { + restorePodVolumeBackups(ctx, createdObj, originalNamespace) + } - if errs := ctx.resticRestorer.RestorePodVolumes(ctx.restore, pod, originalNamespace, ctx.backup.Spec.StorageLocation, ctx.log); errs != nil { - ctx.log.WithError(kubeerrs.NewAggregate(errs)).Error("unable to successfully complete restic restores of pod's volumes") - return errs - } + return warnings, errs +} + +// shouldRenamePV returns a boolean indicating whether a persistent volume should be given a new name +// before being restored, or an error if this cannot be determined. A persistent volume will be +// given a new name if and only if (a) a PV with the original name already exists in-cluster, and +// (b) in the backup, the PV is claimed by a PVC in a namespace that's being remapped during the +// restore. 
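shouldRenamePV, defined next, decides whether a PV takes the clone path; once a PV has been renamed, the PVC fix-up earlier in this hunk consults renamedPVs and rewrites spec.volumeName on the unstructured claim. A small sketch of that rewrite, with hypothetical PV/PVC names and a fixed stand-in for the generated clone name:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	// A PVC as the restorer sees it (unstructured); the names are hypothetical.
	pvc := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "PersistentVolumeClaim",
		"metadata":   map[string]interface{}{"namespace": "ns-1", "name": "pvc-1"},
		"spec":       map[string]interface{}{"volumeName": "pv-1"},
	}}

	// Maps a PV's original name to the name it was restored under.
	renamedPVs := map[string]string{"pv-1": "velero-clone-1234"}

	volumeName, _, _ := unstructured.NestedString(pvc.Object, "spec", "volumeName")
	if newName, ok := renamedPVs[volumeName]; ok {
		if err := unstructured.SetNestedField(pvc.Object, newName, "spec", "volumeName"); err != nil {
			panic(err)
		}
	}

	fmt.Println(pvc.Object["spec"]) // map[volumeName:velero-clone-1234]
}

In the real code the replacement name comes from pvRenamer ("velero-clone-" plus a random UUID) and the original name is kept in the velero.io/original-pv-name annotation.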
+func shouldRenamePV(ctx *context, obj *unstructured.Unstructured, client client.Dynamic) (bool, error) { + if len(ctx.restore.Spec.NamespaceMapping) == 0 { + ctx.log.Debugf("Persistent volume does not need to be renamed because restore is not remapping any namespaces") + return false, nil + } + + pv := new(v1.PersistentVolume) + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, pv); err != nil { + return false, errors.Wrapf(err, "error converting persistent volume to structured") + } + + if pv.Spec.ClaimRef == nil { + ctx.log.Debugf("Persistent volume does not need to be renamed because it's not claimed") + return false, nil + } + + if _, ok := ctx.restore.Spec.NamespaceMapping[pv.Spec.ClaimRef.Namespace]; !ok { + ctx.log.Debugf("Persistent volume does not need to be renamed because it's not claimed by a PVC in a namespace that's being remapped") + return false, nil + } + + _, err := client.Get(pv.Name, metav1.GetOptions{}) + switch { + case apierrors.IsNotFound(err): + ctx.log.Debugf("Persistent volume does not need to be renamed because it does not exist in the cluster") + return false, nil + case err != nil: + return false, errors.Wrapf(err, "error checking if persistent volume exists in the cluster") + } + + // no error returned: the PV was found in-cluster, so we need to rename it + return true, nil +} + +// restorePodVolumeBackups restores the PodVolumeBackups for the given restored pod +func restorePodVolumeBackups(ctx *context, createdObj *unstructured.Unstructured, originalNamespace string) { + if ctx.resticRestorer == nil { + ctx.log.Warn("No restic restorer, not restoring pod's volumes") + } else { + ctx.globalWaitGroup.GoErrorSlice(func() []error { + pod := new(v1.Pod) + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(createdObj.UnstructuredContent(), &pod); err != nil { + ctx.log.WithError(err).Error("error converting unstructured pod") + return []error{err} + } + + data := restic.RestoreData{ + Restore: ctx.restore, + Pod: pod, + PodVolumeBackups: ctx.podVolumeBackups, + SourceNamespace: originalNamespace, + BackupLocation: ctx.backup.Spec.StorageLocation, + } + if errs := ctx.resticRestorer.RestorePodVolumes(data); errs != nil { + ctx.log.WithError(kubeerrs.NewAggregate(errs)).Error("unable to successfully complete restic restores of pod's volumes") + return errs + } - return nil - }) + return nil + }) + } +} + +func hasSnapshot(pvName string, snapshots []*volume.Snapshot) bool { + for _, snapshot := range snapshots { + if snapshot.Spec.PersistentVolumeName == pvName { + return true } } - return warnings, errs + return false +} + +func hasResticBackup(unstructuredPV *unstructured.Unstructured, ctx *context) bool { + if len(ctx.podVolumeBackups) == 0 { + return false + } + + pv := new(v1.PersistentVolume) + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstructuredPV.Object, pv); err != nil { + ctx.log.WithError(err).Warnf("Unable to convert PV from unstructured to structured") + return false + } + + if pv.Spec.ClaimRef == nil { + return false + } + + var found bool + for _, pvb := range ctx.podVolumeBackups { + if pvb.Spec.Pod.Namespace == pv.Spec.ClaimRef.Namespace && pvb.GetAnnotations()[restic.PVCNameAnnotation] == pv.Spec.ClaimRef.Name { + found = true + break + } + } + + return found } func hasDeleteReclaimPolicy(obj map[string]interface{}) bool { @@ -1107,24 +1296,12 @@ func addRestoreLabels(obj metav1.Object, restoreName, backupName string) { labels = make(map[string]string) } - labels[api.BackupNameLabel] = 
label.GetValidName(backupName) - labels[api.RestoreNameLabel] = label.GetValidName(restoreName) + labels[velerov1api.BackupNameLabel] = label.GetValidName(backupName) + labels[velerov1api.RestoreNameLabel] = label.GetValidName(restoreName) obj.SetLabels(labels) } -// hasControllerOwner returns whether or not an object has a controller -// owner ref. Used to identify whether or not an object should be explicitly -// recreated during a restore. -func hasControllerOwner(refs []metav1.OwnerReference) bool { - for _, ref := range refs { - if ref.Controller != nil && *ref.Controller { - return true - } - } - return false -} - // isCompleted returns whether or not an object is considered completed. // Used to identify whether or not an object should be restored. Only Jobs or Pods are considered func isCompleted(obj *unstructured.Unstructured, groupResource schema.GroupResource) (bool, error) { diff --git a/pkg/restore/restore_test.go b/pkg/restore/restore_test.go index c7ff983465b..83f0a675669 100644 --- a/pkg/restore/restore_test.go +++ b/pkg/restore/restore_test.go @@ -1,5 +1,5 @@ /* -Copyright 2017, 2019 the Velero contributors. +Copyright 2019 the Velero contributors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,1270 +17,2426 @@ limitations under the License. package restore import ( + "archive/tar" + "bytes" + "compress/gzip" + ctx "context" "encoding/json" + "fmt" + "io" + "sort" "testing" "time" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - v1 "k8s.io/api/core/v1" - k8serrors "k8s.io/apimachinery/pkg/api/errors" + corev1api "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/kubernetes/scheme" - corev1 "k8s.io/client-go/kubernetes/typed/core/v1" - - api "github.com/heptio/velero/pkg/apis/velero/v1" - pkgclient "github.com/heptio/velero/pkg/client" - "github.com/heptio/velero/pkg/generated/clientset/versioned/fake" - informers "github.com/heptio/velero/pkg/generated/informers/externalversions" + discoveryfake "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/dynamic" + kubefake "k8s.io/client-go/kubernetes/fake" + kubetesting "k8s.io/client-go/testing" + + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/builder" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/discovery" + velerov1informers "github.com/heptio/velero/pkg/generated/informers/externalversions" "github.com/heptio/velero/pkg/kuberesource" "github.com/heptio/velero/pkg/plugin/velero" + "github.com/heptio/velero/pkg/restic" + resticmocks "github.com/heptio/velero/pkg/restic/mocks" + "github.com/heptio/velero/pkg/test" + testutil "github.com/heptio/velero/pkg/test" "github.com/heptio/velero/pkg/util/collections" - "github.com/heptio/velero/pkg/util/logging" - velerotest "github.com/heptio/velero/pkg/util/test" + "github.com/heptio/velero/pkg/util/encode" + kubeutil "github.com/heptio/velero/pkg/util/kube" "github.com/heptio/velero/pkg/volume" ) -func TestPrioritizeResources(t *testing.T) { +// TestRestoreResourceFiltering runs restores with different 
combinations +// of resource filters (included/excluded resources, included/excluded +// namespaces, label selectors, "include cluster resources" flag), and +// verifies that the set of items created in the API are correct. +// Validation is done by looking at the namespaces/names of the items in +// the API; contents are not checked. +func TestRestoreResourceFiltering(t *testing.T) { tests := []struct { name string - apiResources map[string][]string - priorities []string - includes []string - excludes []string - expected []string + restore *velerov1api.Restore + backup *velerov1api.Backup + apiResources []*test.APIResource + tarball io.Reader + want map[*test.APIResource][]string }{ { - name: "priorities & ordering are correctly applied", - apiResources: map[string][]string{ - "v1": {"aaa", "bbb", "configmaps", "ddd", "namespaces", "ooo", "pods", "sss"}, + name: "no filters restores everything", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems("pods", + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + ). + addItems("persistentvolumes", + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ). + done(), + apiResources: []*test.APIResource{ + test.Pods(), + test.PVs(), + }, + want: map[*test.APIResource][]string{ + test.Pods(): {"ns-1/pod-1", "ns-2/pod-2"}, + test.PVs(): {"/pv-1", "/pv-2"}, }, - priorities: []string{"namespaces", "configmaps", "pods"}, - includes: []string{"*"}, - expected: []string{"namespaces", "configmaps", "pods", "aaa", "bbb", "ddd", "ooo", "sss"}, }, { - name: "includes are correctly applied", - apiResources: map[string][]string{ - "v1": {"aaa", "bbb", "configmaps", "ddd", "namespaces", "ooo", "pods", "sss"}, + name: "included resources filter only restores resources of those types", + restore: defaultRestore().IncludedResources("pods").Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems("pods", + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + ). + addItems("persistentvolumes", + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ). + done(), + apiResources: []*test.APIResource{ + test.Pods(), + test.PVs(), + }, + want: map[*test.APIResource][]string{ + test.Pods(): {"ns-1/pod-1", "ns-2/pod-2"}, }, - priorities: []string{"namespaces", "configmaps", "pods"}, - includes: []string{"namespaces", "aaa", "sss"}, - expected: []string{"namespaces", "aaa", "sss"}, }, { - name: "excludes are correctly applied", - apiResources: map[string][]string{ - "v1": {"aaa", "bbb", "configmaps", "ddd", "namespaces", "ooo", "pods", "sss"}, + name: "excluded resources filter only restores resources not of those types", + restore: defaultRestore().ExcludedResources("pvs").Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems("pods", + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + ). + addItems("persistentvolumes", + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ). 
+ done(), + apiResources: []*test.APIResource{ + test.Pods(), + test.PVs(), }, - priorities: []string{"namespaces", "configmaps", "pods"}, - includes: []string{"*"}, - excludes: []string{"ooo", "pods"}, - expected: []string{"namespaces", "configmaps", "aaa", "bbb", "ddd", "sss"}, + want: map[*test.APIResource][]string{ + test.Pods(): {"ns-1/pod-1", "ns-2/pod-2"}, + }, + }, + { + name: "included namespaces filter only restores resources in those namespaces", + restore: defaultRestore().IncludedNamespaces("ns-1").Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems("pods", + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + ). + addItems("deployments.apps", + builder.ForDeployment("ns-1", "deploy-1").Result(), + builder.ForDeployment("ns-2", "deploy-2").Result(), + ). + addItems("persistentvolumes", + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ). + done(), + apiResources: []*test.APIResource{ + test.Pods(), + test.Deployments(), + test.PVs(), + }, + want: map[*test.APIResource][]string{ + test.Pods(): {"ns-1/pod-1"}, + test.Deployments(): {"ns-1/deploy-1"}, + }, + }, + { + name: "excluded namespaces filter only restores resources not in those namespaces", + restore: defaultRestore().ExcludedNamespaces("ns-2").Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems("pods", + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + ). + addItems("deployments.apps", + builder.ForDeployment("ns-1", "deploy-1").Result(), + builder.ForDeployment("ns-2", "deploy-2").Result(), + ). + addItems("persistentvolumes", + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ). + done(), + apiResources: []*test.APIResource{ + test.Pods(), + test.Deployments(), + test.PVs(), + }, + want: map[*test.APIResource][]string{ + test.Pods(): {"ns-1/pod-1"}, + test.Deployments(): {"ns-1/deploy-1"}, + }, + }, + { + name: "IncludeClusterResources=false only restores namespaced resources", + restore: defaultRestore().IncludeClusterResources(false).Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems("pods", + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + ). + addItems("deployments.apps", + builder.ForDeployment("ns-1", "deploy-1").Result(), + builder.ForDeployment("ns-2", "deploy-2").Result(), + ). + addItems("persistentvolumes", + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ). + done(), + apiResources: []*test.APIResource{ + test.Pods(), + test.Deployments(), + test.PVs(), + }, + want: map[*test.APIResource][]string{ + test.Pods(): {"ns-1/pod-1", "ns-2/pod-2"}, + test.Deployments(): {"ns-1/deploy-1", "ns-2/deploy-2"}, + }, + }, + { + name: "label selector only restores matching resources", + restore: defaultRestore().LabelSelector(&metav1.LabelSelector{MatchLabels: map[string]string{"a": "b"}}).Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems("pods", + builder.ForPod("ns-1", "pod-1").ObjectMeta(builder.WithLabels("a", "b")).Result(), + builder.ForPod("ns-2", "pod-2").Result(), + ). + addItems("deployments.apps", + builder.ForDeployment("ns-1", "deploy-1").Result(), + builder.ForDeployment("ns-2", "deploy-2").ObjectMeta(builder.WithLabels("a", "b")).Result(), + ). 
+ addItems("persistentvolumes", + builder.ForPersistentVolume("pv-1").ObjectMeta(builder.WithLabels("a", "b")).Result(), + builder.ForPersistentVolume("pv-2").ObjectMeta(builder.WithLabels("a", "c")).Result(), + ). + done(), + apiResources: []*test.APIResource{ + test.Pods(), + test.Deployments(), + test.PVs(), + }, + want: map[*test.APIResource][]string{ + test.Pods(): {"ns-1/pod-1"}, + test.Deployments(): {"ns-2/deploy-2"}, + test.PVs(): {"/pv-1"}, + }, + }, + { + name: "should include cluster-scoped resources if restoring subset of namespaces and IncludeClusterResources=true", + restore: defaultRestore().IncludedNamespaces("ns-1").IncludeClusterResources(true).Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems("pods", + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + ). + addItems("deployments.apps", + builder.ForDeployment("ns-1", "deploy-1").Result(), + builder.ForDeployment("ns-2", "deploy-2").Result(), + ). + addItems("persistentvolumes", + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ). + done(), + apiResources: []*test.APIResource{ + test.Pods(), + test.Deployments(), + test.PVs(), + }, + want: map[*test.APIResource][]string{ + test.Pods(): {"ns-1/pod-1"}, + test.Deployments(): {"ns-1/deploy-1"}, + test.PVs(): {"/pv-1", "/pv-2"}, + }, + }, + { + name: "should not include cluster-scoped resources if restoring subset of namespaces and IncludeClusterResources=false", + restore: defaultRestore().IncludedNamespaces("ns-1").IncludeClusterResources(false).Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems("pods", + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + ). + addItems("deployments.apps", + builder.ForDeployment("ns-1", "deploy-1").Result(), + builder.ForDeployment("ns-2", "deploy-2").Result(), + ). + addItems("persistentvolumes", + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ). + done(), + apiResources: []*test.APIResource{ + test.Pods(), + test.Deployments(), + test.PVs(), + }, + want: map[*test.APIResource][]string{ + test.Pods(): {"ns-1/pod-1"}, + test.Deployments(): {"ns-1/deploy-1"}, + }, + }, + { + name: "should include cluster-scoped resources if restoring all namespaces and IncludeClusterResources=true", + restore: defaultRestore().IncludeClusterResources(true).Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems("pods", + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + ). + addItems("deployments.apps", + builder.ForDeployment("ns-1", "deploy-1").Result(), + builder.ForDeployment("ns-2", "deploy-2").Result(), + ). + addItems("persistentvolumes", + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ). + done(), + apiResources: []*test.APIResource{ + test.Pods(), + test.Deployments(), + test.PVs(), + }, + want: map[*test.APIResource][]string{ + test.Pods(): {"ns-1/pod-1", "ns-2/pod-2"}, + test.Deployments(): {"ns-1/deploy-1", "ns-2/deploy-2"}, + test.PVs(): {"/pv-1", "/pv-2"}, + }, + }, + { + name: "should not include cluster-scoped resources if restoring all namespaces and IncludeClusterResources=false", + restore: defaultRestore().IncludeClusterResources(false).Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). 
+ addItems("pods", + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + ). + addItems("deployments.apps", + builder.ForDeployment("ns-1", "deploy-1").Result(), + builder.ForDeployment("ns-2", "deploy-2").Result(), + ). + addItems("persistentvolumes", + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ). + done(), + apiResources: []*test.APIResource{ + test.Pods(), + test.Deployments(), + test.PVs(), + }, + want: map[*test.APIResource][]string{ + test.Pods(): {"ns-1/pod-1", "ns-2/pod-2"}, + test.Deployments(): {"ns-1/deploy-1", "ns-2/deploy-2"}, + }, + }, + { + name: "when a wildcard and a specific resource are included, the wildcard takes precedence", + restore: defaultRestore().IncludedResources("*", "pods").Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems("pods", + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + ). + addItems("deployments.apps", + builder.ForDeployment("ns-1", "deploy-1").Result(), + builder.ForDeployment("ns-2", "deploy-2").Result(), + ). + addItems("persistentvolumes", + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ). + done(), + apiResources: []*test.APIResource{ + test.Pods(), + test.Deployments(), + test.PVs(), + }, + want: map[*test.APIResource][]string{ + test.Pods(): {"ns-1/pod-1", "ns-2/pod-2"}, + test.Deployments(): {"ns-1/deploy-1", "ns-2/deploy-2"}, + test.PVs(): {"/pv-1", "/pv-2"}, + }, + }, + { + name: "wildcard excludes are ignored", + restore: defaultRestore().ExcludedResources("*").Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems("pods", + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + ). + addItems("deployments.apps", + builder.ForDeployment("ns-1", "deploy-1").Result(), + builder.ForDeployment("ns-2", "deploy-2").Result(), + ). + addItems("persistentvolumes", + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ). + done(), + apiResources: []*test.APIResource{ + test.Pods(), + test.Deployments(), + test.PVs(), + }, + want: map[*test.APIResource][]string{ + test.Pods(): {"ns-1/pod-1", "ns-2/pod-2"}, + test.Deployments(): {"ns-1/deploy-1", "ns-2/deploy-2"}, + test.PVs(): {"/pv-1", "/pv-2"}, + }, + }, + { + name: "unresolvable included resources are ignored", + restore: defaultRestore().IncludedResources("pods", "unresolvable").Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems("pods", + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + ). + addItems("deployments.apps", + builder.ForDeployment("ns-1", "deploy-1").Result(), + builder.ForDeployment("ns-2", "deploy-2").Result(), + ). + addItems("persistentvolumes", + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ). + done(), + apiResources: []*test.APIResource{ + test.Pods(), + test.Deployments(), + test.PVs(), + }, + want: map[*test.APIResource][]string{ + test.Pods(): {"ns-1/pod-1", "ns-2/pod-2"}, + }, + }, + { + name: "unresolvable excluded resources are ignored", + restore: defaultRestore().ExcludedResources("deployments", "unresolvable").Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems("pods", + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + ). 
+ addItems("deployments.apps", + builder.ForDeployment("ns-1", "deploy-1").Result(), + builder.ForDeployment("ns-2", "deploy-2").Result(), + ). + addItems("persistentvolumes", + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ). + done(), + apiResources: []*test.APIResource{ + test.Pods(), + test.Deployments(), + test.PVs(), + }, + want: map[*test.APIResource][]string{ + test.Pods(): {"ns-1/pod-1", "ns-2/pod-2"}, + test.PVs(): {"/pv-1", "/pv-2"}, + }, + }, + { + name: "mirror pods are not restored", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t).addItems("pods", builder.ForPod("ns-1", "pod-1").ObjectMeta(builder.WithAnnotations(corev1api.MirrorPodAnnotationKey, "foo")).Result()).done(), + apiResources: []*test.APIResource{test.Pods()}, + want: map[*test.APIResource][]string{test.Pods(): {}}, + }, + { + name: "service accounts are restored", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t).addItems("serviceaccounts", builder.ForServiceAccount("ns-1", "sa-1").Result()).done(), + apiResources: []*test.APIResource{test.ServiceAccounts()}, + want: map[*test.APIResource][]string{test.ServiceAccounts(): {"ns-1/sa-1"}}, }, } - logger := velerotest.NewLogger() - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - var helperResourceList []*metav1.APIResourceList + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + h := newHarness(t) - for gv, resources := range test.apiResources { - resourceList := &metav1.APIResourceList{GroupVersion: gv} - for _, resource := range resources { - resourceList.APIResources = append(resourceList.APIResources, metav1.APIResource{Name: resource}) - } - helperResourceList = append(helperResourceList, resourceList) + for _, r := range tc.apiResources { + h.DiscoveryClient.WithAPIResource(r) + } + require.NoError(t, h.restorer.discoveryHelper.Refresh()) + + data := Request{ + Log: h.log, + Restore: tc.restore, + Backup: tc.backup, + PodVolumeBackups: nil, + VolumeSnapshots: nil, + BackupReader: tc.tarball, } + warnings, errs := h.restorer.Restore( + data, + nil, // actions + nil, // snapshot location lister + nil, // volume snapshotter getter + ) + + assertEmptyResults(t, warnings, errs) + assertAPIContents(t, h, tc.want) + }) + } +} - helper := velerotest.NewFakeDiscoveryHelper(true, nil) - helper.ResourceList = helperResourceList +// TestRestoreNamespaceMapping runs restores with namespace mappings specified, +// and verifies that the set of items created in the API are in the correct +// namespaces. Validation is done by looking at the namespaces/names of the items +// in the API; contents are not checked. +func TestRestoreNamespaceMapping(t *testing.T) { + tests := []struct { + name string + restore *velerov1api.Restore + backup *velerov1api.Backup + apiResources []*test.APIResource + tarball io.Reader + want map[*test.APIResource][]string + }{ + { + name: "namespace mappings are applied", + restore: defaultRestore().NamespaceMappings("ns-1", "mapped-ns-1", "ns-2", "mapped-ns-2").Result(), + backup: defaultBackup().Result(), + apiResources: []*test.APIResource{ + test.Pods(), + }, + tarball: newTarWriter(t). + addItems("pods", + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + builder.ForPod("ns-3", "pod-3").Result(), + ). 
+ done(), + want: map[*test.APIResource][]string{ + test.Pods(): {"mapped-ns-1/pod-1", "mapped-ns-2/pod-2", "ns-3/pod-3"}, + }, + }, + } - includesExcludes := collections.NewIncludesExcludes().Includes(test.includes...).Excludes(test.excludes...) + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + h := newHarness(t) - result, err := prioritizeResources(helper, test.priorities, includesExcludes, logger) - if err != nil { - t.Fatalf("unexpected error: %v", err) + for _, r := range tc.apiResources { + h.DiscoveryClient.WithAPIResource(r) } - - require.Equal(t, len(test.expected), len(result)) - - for i := range result { - if e, a := test.expected[i], result[i].Resource; e != a { - t.Errorf("index %d, expected %s, got %s", i, e, a) - } + require.NoError(t, h.restorer.discoveryHelper.Refresh()) + + data := Request{ + Log: h.log, + Restore: tc.restore, + Backup: tc.backup, + PodVolumeBackups: nil, + VolumeSnapshots: nil, + BackupReader: tc.tarball, } + warnings, errs := h.restorer.Restore( + data, + nil, // actions + nil, // snapshot location lister + nil, // volume snapshotter getter + ) + + assertEmptyResults(t, warnings, errs) + assertAPIContents(t, h, tc.want) }) } } -func TestRestoreNamespaceFiltering(t *testing.T) { +// TestRestoreResourcePriorities runs restores with resource priorities specified, +// and verifies that the set of items created in the API are created in the expected +// order. Validation is done by adding a Reactor to the fake dynamic client that records +// resource identifiers as they're created, and comparing that to the expected order. +func TestRestoreResourcePriorities(t *testing.T) { tests := []struct { - name string - fileSystem *velerotest.FakeFileSystem - baseDir string - restore *api.Restore - expectedReadDirs []string - prioritizedResources []schema.GroupResource + name string + restore *velerov1api.Restore + backup *velerov1api.Backup + apiResources []*test.APIResource + tarball io.Reader + resourcePriorities []string }{ { - name: "namespacesToRestore having * restores all namespaces", - fileSystem: velerotest.NewFakeFileSystem().WithDirectories("bak/resources/nodes/cluster", "bak/resources/secrets/namespaces/a", "bak/resources/secrets/namespaces/b", "bak/resources/secrets/namespaces/c"), - baseDir: "bak", - restore: &api.Restore{Spec: api.RestoreSpec{IncludedNamespaces: []string{"*"}}}, - expectedReadDirs: []string{"bak/resources", "bak/resources/nodes/cluster", "bak/resources/secrets/namespaces", "bak/resources/secrets/namespaces/a", "bak/resources/secrets/namespaces/b", "bak/resources/secrets/namespaces/c"}, - prioritizedResources: []schema.GroupResource{ - {Resource: "nodes"}, - {Resource: "secrets"}, + name: "resources are restored according to the specified resource priorities", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems("pods", + builder.ForPod("ns-1", "pod-1").Result(), + builder.ForPod("ns-2", "pod-2").Result(), + ). + addItems("persistentvolumes", + builder.ForPersistentVolume("pv-1").Result(), + builder.ForPersistentVolume("pv-2").Result(), + ). + addItems("deployments.apps", + builder.ForDeployment("ns-1", "deploy-1").Result(), + builder.ForDeployment("ns-2", "deploy-2").Result(), + ). + addItems("serviceaccounts", + builder.ForServiceAccount("ns-1", "sa-1").Result(), + builder.ForServiceAccount("ns-2", "sa-2").Result(), + ). 
+ addItems("persistentvolumeclaims", + builder.ForPersistentVolumeClaim("ns-1", "pvc-1").Result(), + builder.ForPersistentVolumeClaim("ns-2", "pvc-2").Result(), + ). + done(), + apiResources: []*test.APIResource{ + test.Pods(), + test.PVs(), + test.Deployments(), + test.ServiceAccounts(), }, + resourcePriorities: []string{"persistentvolumes", "serviceaccounts", "pods", "deployments.apps"}, }, + } + + for _, tc := range tests { + h := newHarness(t) + h.restorer.resourcePriorities = tc.resourcePriorities + + recorder := &createRecorder{t: t} + h.DynamicClient.PrependReactor("create", "*", recorder.reactor()) + + for _, r := range tc.apiResources { + h.DiscoveryClient.WithAPIResource(r) + } + require.NoError(t, h.restorer.discoveryHelper.Refresh()) + + data := Request{ + Log: h.log, + Restore: tc.restore, + Backup: tc.backup, + PodVolumeBackups: nil, + VolumeSnapshots: nil, + BackupReader: tc.tarball, + } + warnings, errs := h.restorer.Restore( + data, + nil, // actions + nil, // snapshot location lister + nil, // volume snapshotter getter + ) + + assertEmptyResults(t, warnings, errs) + assertResourceCreationOrder(t, tc.resourcePriorities, recorder.resources) + } +} + +// TestInvalidTarballContents runs restores for tarballs that are invalid in some way, and +// verifies that the set of items created in the API and the errors returned are correct. +// Validation is done by looking at the namespaces/names of the items in the API and the +// Result objects returned from the restorer. +func TestInvalidTarballContents(t *testing.T) { + tests := []struct { + name string + restore *velerov1api.Restore + backup *velerov1api.Backup + apiResources []*test.APIResource + tarball io.Reader + want map[*test.APIResource][]string + wantErrs Result + }{ { - name: "namespacesToRestore properly filters", - fileSystem: velerotest.NewFakeFileSystem().WithDirectories("bak/resources/nodes/cluster", "bak/resources/secrets/namespaces/a", "bak/resources/secrets/namespaces/b", "bak/resources/secrets/namespaces/c"), - baseDir: "bak", - restore: &api.Restore{Spec: api.RestoreSpec{IncludedNamespaces: []string{"b", "c"}}}, - expectedReadDirs: []string{"bak/resources", "bak/resources/nodes/cluster", "bak/resources/secrets/namespaces", "bak/resources/secrets/namespaces/b", "bak/resources/secrets/namespaces/c"}, - prioritizedResources: []schema.GroupResource{ - {Resource: "nodes"}, - {Resource: "secrets"}, + name: "empty tarball returns an error", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + done(), + wantErrs: Result{ + Velero: []string{"backup does not contain top level resources directory"}, }, }, { - name: "namespacesToRestore properly filters with exclusion filter", - fileSystem: velerotest.NewFakeFileSystem().WithDirectories("bak/resources/nodes/cluster", "bak/resources/secrets/namespaces/a", "bak/resources/secrets/namespaces/b", "bak/resources/secrets/namespaces/c"), - baseDir: "bak", - restore: &api.Restore{Spec: api.RestoreSpec{IncludedNamespaces: []string{"*"}, ExcludedNamespaces: []string{"a"}}}, - expectedReadDirs: []string{"bak/resources", "bak/resources/nodes/cluster", "bak/resources/secrets/namespaces", "bak/resources/secrets/namespaces/b", "bak/resources/secrets/namespaces/c"}, - prioritizedResources: []schema.GroupResource{ - {Resource: "nodes"}, - {Resource: "secrets"}, + name: "invalid JSON is reported as an error and restore continues", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). 
+ add("resources/pods/namespaces/ns-1/pod-1.json", []byte("invalid JSON")). + addItems("pods", + builder.ForPod("ns-1", "pod-2").Result(), + ). + done(), + apiResources: []*test.APIResource{ + test.Pods(), }, - }, - { - name: "namespacesToRestore properly filters with inclusion & exclusion filters", - fileSystem: velerotest.NewFakeFileSystem().WithDirectories("bak/resources/nodes/cluster", "bak/resources/secrets/namespaces/a", "bak/resources/secrets/namespaces/b", "bak/resources/secrets/namespaces/c"), - baseDir: "bak", - restore: &api.Restore{ - Spec: api.RestoreSpec{ - IncludedNamespaces: []string{"a", "b", "c"}, - ExcludedNamespaces: []string{"b"}, - }, + want: map[*test.APIResource][]string{ + test.Pods(): {"ns-1/pod-2"}, }, - expectedReadDirs: []string{"bak/resources", "bak/resources/nodes/cluster", "bak/resources/secrets/namespaces", "bak/resources/secrets/namespaces/a", "bak/resources/secrets/namespaces/c"}, - prioritizedResources: []schema.GroupResource{ - {Resource: "nodes"}, - {Resource: "secrets"}, + wantErrs: Result{ + Namespaces: map[string][]string{ + "ns-1": {"error decoding \"resources/pods/namespaces/ns-1/pod-1.json\": invalid character 'i' looking for beginning of value"}, + }, }, }, } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - log := velerotest.NewLogger() + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + h := newHarness(t) - nsClient := &velerotest.FakeNamespaceClient{} - - ctx := &context{ - restore: test.restore, - namespaceClient: nsClient, - fileSystem: test.fileSystem, - log: log, - prioritizedResources: test.prioritizedResources, - restoreDir: test.baseDir, + for _, r := range tc.apiResources { + h.DiscoveryClient.WithAPIResource(r) } - - nsClient.On("Get", mock.Anything, metav1.GetOptions{}).Return(&v1.Namespace{}, nil) - - warnings, errors := ctx.restoreFromDir() - - assert.Empty(t, warnings.Velero) - assert.Empty(t, warnings.Cluster) - assert.Empty(t, warnings.Namespaces) - assert.Empty(t, errors.Velero) - assert.Empty(t, errors.Cluster) - assert.Empty(t, errors.Namespaces) - assert.Equal(t, test.expectedReadDirs, test.fileSystem.ReadDirCalls) + require.NoError(t, h.restorer.discoveryHelper.Refresh()) + + data := Request{ + Log: h.log, + Restore: tc.restore, + Backup: tc.backup, + PodVolumeBackups: nil, + VolumeSnapshots: nil, + BackupReader: tc.tarball, + } + warnings, errs := h.restorer.Restore( + data, + nil, // actions + nil, // snapshot location lister + nil, // volume snapshotter getter + ) + + assertEmptyResults(t, warnings) + assert.Equal(t, tc.wantErrs, errs) + assertAPIContents(t, h, tc.want) }) } } -func TestRestorePriority(t *testing.T) { +// TestRestoreItems runs restores of specific items and validates that they are created +// with the expected metadata/spec/status in the API. 
+func TestRestoreItems(t *testing.T) { tests := []struct { - name string - fileSystem *velerotest.FakeFileSystem - restore *api.Restore - baseDir string - prioritizedResources []schema.GroupResource - expectedErrors Result - expectedReadDirs []string + name string + restore *velerov1api.Restore + backup *velerov1api.Backup + apiResources []*test.APIResource + tarball io.Reader + want []*test.APIResource }{ { - name: "cluster test", - fileSystem: velerotest.NewFakeFileSystem().WithDirectory("bak/resources/a/cluster").WithDirectory("bak/resources/c/cluster"), - baseDir: "bak", - restore: &api.Restore{Spec: api.RestoreSpec{IncludedNamespaces: []string{"*"}}}, - prioritizedResources: []schema.GroupResource{ - {Resource: "a"}, - {Resource: "b"}, - {Resource: "c"}, - }, - expectedReadDirs: []string{"bak/resources", "bak/resources/a/cluster", "bak/resources/c/cluster"}, - }, - { - name: "resource priorities are applied", - fileSystem: velerotest.NewFakeFileSystem().WithDirectory("bak/resources/a/cluster").WithDirectory("bak/resources/c/cluster"), - restore: &api.Restore{Spec: api.RestoreSpec{IncludedNamespaces: []string{"*"}}}, - baseDir: "bak", - prioritizedResources: []schema.GroupResource{ - {Resource: "c"}, - {Resource: "b"}, - {Resource: "a"}, - }, - expectedReadDirs: []string{"bak/resources", "bak/resources/c/cluster", "bak/resources/a/cluster"}, - }, - { - name: "basic namespace", - fileSystem: velerotest.NewFakeFileSystem().WithDirectory("bak/resources/a/namespaces/ns-1").WithDirectory("bak/resources/c/namespaces/ns-1"), - restore: &api.Restore{Spec: api.RestoreSpec{IncludedNamespaces: []string{"*"}}}, - baseDir: "bak", - prioritizedResources: []schema.GroupResource{ - {Resource: "a"}, - {Resource: "b"}, - {Resource: "c"}, - }, - expectedReadDirs: []string{"bak/resources", "bak/resources/a/namespaces", "bak/resources/a/namespaces/ns-1", "bak/resources/c/namespaces", "bak/resources/c/namespaces/ns-1"}, - }, - { - name: "error in a single resource doesn't terminate restore immediately, but is returned", - fileSystem: velerotest.NewFakeFileSystem(). - WithFile("bak/resources/a/namespaces/ns-1/invalid-json.json", []byte("invalid json")). - WithDirectory("bak/resources/c/namespaces/ns-1"), - restore: &api.Restore{Spec: api.RestoreSpec{IncludedNamespaces: []string{"*"}}}, - baseDir: "bak", - prioritizedResources: []schema.GroupResource{ - {Resource: "a"}, - {Resource: "b"}, - {Resource: "c"}, - }, - expectedErrors: Result{ - Namespaces: map[string][]string{ - "ns-1": {"error decoding \"bak/resources/a/namespaces/ns-1/invalid-json.json\": invalid character 'i' looking for beginning of value"}, - }, + name: "metadata other than namespace/name/labels/annotations gets removed", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems("pods", + builder.ForPod("ns-1", "pod-1"). + ObjectMeta( + builder.WithLabels("key-1", "val-1"), + builder.WithAnnotations("key-1", "val-1"), + builder.WithClusterName("cluster-1"), + builder.WithFinalizers("finalizer-1"), + ). + Result(), + ). + done(), + apiResources: []*test.APIResource{ + test.Pods(), + }, + want: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1"). + ObjectMeta( + builder.WithLabels("key-1", "val-1", "velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1"), + builder.WithAnnotations("key-1", "val-1"), + ). 
+ Result(), + ), + }, + }, + { + name: "status gets removed", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems("pods", + &corev1api.Pod{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Pod", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-1", + Name: "pod-1", + }, + Status: corev1api.PodStatus{ + Message: "a non-empty status", + }, + }, + ). + done(), + apiResources: []*test.APIResource{ + test.Pods(), + }, + want: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").ObjectMeta(builder.WithLabels("velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1")).Result(), + ), + }, + }, + { + name: "object gets labeled with full backup and restore names when they're both shorter than 63 characters", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems("pods", builder.ForPod("ns-1", "pod-1").Result()). + done(), + apiResources: []*test.APIResource{ + test.Pods(), + }, + want: []*test.APIResource{ + test.Pods(builder.ForPod("ns-1", "pod-1").ObjectMeta(builder.WithLabels("velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1")).Result()), + }, + }, + { + name: "object gets labeled with full backup and restore names when they're both equal to 63 characters", + restore: builder.ForRestore(velerov1api.DefaultNamespace, "the-really-long-kube-service-name-that-is-exactly-63-characters"). + Backup("the-really-long-kube-service-name-that-is-exactly-63-characters"). + Result(), + backup: builder.ForBackup(velerov1api.DefaultNamespace, "the-really-long-kube-service-name-that-is-exactly-63-characters").Result(), + tarball: newTarWriter(t). + addItems("pods", builder.ForPod("ns-1", "pod-1").Result()). + done(), + apiResources: []*test.APIResource{ + test.Pods(), + }, + want: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1"). + ObjectMeta( + builder.WithLabels( + "velero.io/backup-name", "the-really-long-kube-service-name-that-is-exactly-63-characters", + "velero.io/restore-name", "the-really-long-kube-service-name-that-is-exactly-63-characters", + ), + ).Result(), + ), + }, + }, + { + name: "object gets labeled with shortened backup and restore names when they're both longer than 63 characters", + restore: builder.ForRestore(velerov1api.DefaultNamespace, "the-really-long-kube-service-name-that-is-much-greater-than-63-characters"). + Backup("the-really-long-kube-service-name-that-is-much-greater-than-63-characters"). + Result(), + backup: builder.ForBackup(velerov1api.DefaultNamespace, "the-really-long-kube-service-name-that-is-much-greater-than-63-characters").Result(), + tarball: newTarWriter(t). + addItems("pods", builder.ForPod("ns-1", "pod-1").Result()). + done(), + apiResources: []*test.APIResource{ + test.Pods(), + }, + want: []*test.APIResource{ + test.Pods(builder.ForPod("ns-1", "pod-1"). + ObjectMeta( + builder.WithLabels( + "velero.io/backup-name", "the-really-long-kube-service-name-that-is-much-greater-th8a11b3", + "velero.io/restore-name", "the-really-long-kube-service-name-that-is-much-greater-th8a11b3", + ), + ). + Result(), + ), + }, + }, + { + name: "no error when service account already exists in cluster and is identical to the backed up one", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems("serviceaccounts", builder.ForServiceAccount("ns-1", "sa-1").Result()). 
+ done(), + apiResources: []*test.APIResource{ + test.ServiceAccounts(builder.ForServiceAccount("ns-1", "sa-1").Result()), + }, + want: []*test.APIResource{ + test.ServiceAccounts(builder.ForServiceAccount("ns-1", "sa-1").Result()), + }, + }, + { + name: "service account secrets and image pull secrets are restored when service account already exists in cluster", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems("serviceaccounts", &corev1api.ServiceAccount{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ServiceAccount", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-1", + Name: "sa-1", + }, + Secrets: []corev1api.ObjectReference{{Name: "secret-1"}}, + ImagePullSecrets: []corev1api.LocalObjectReference{{Name: "pull-secret-1"}}, + }). + done(), + apiResources: []*test.APIResource{ + test.ServiceAccounts(builder.ForServiceAccount("ns-1", "sa-1").Result()), + }, + want: []*test.APIResource{ + test.ServiceAccounts(&corev1api.ServiceAccount{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "ServiceAccount", + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-1", + Name: "sa-1", + }, + Secrets: []corev1api.ObjectReference{{Name: "secret-1"}}, + ImagePullSecrets: []corev1api.LocalObjectReference{{Name: "pull-secret-1"}}, + }), }, - expectedReadDirs: []string{"bak/resources", "bak/resources/a/namespaces", "bak/resources/a/namespaces/ns-1", "bak/resources/c/namespaces", "bak/resources/c/namespaces/ns-1"}, }, } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - log := velerotest.NewLogger() - - nsClient := &velerotest.FakeNamespaceClient{} + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + h := newHarness(t) - ctx := &context{ - restore: test.restore, - namespaceClient: nsClient, - fileSystem: test.fileSystem, - prioritizedResources: test.prioritizedResources, - log: log, - restoreDir: test.baseDir, + for _, r := range tc.apiResources { + h.addItems(t, r) } - nsClient.On("Get", mock.Anything, metav1.GetOptions{}).Return(&v1.Namespace{}, nil) - - warnings, errors := ctx.restoreFromDir() - - assert.Empty(t, warnings.Velero) - assert.Empty(t, warnings.Cluster) - assert.Empty(t, warnings.Namespaces) - assert.Equal(t, test.expectedErrors, errors) - - assert.Equal(t, test.expectedReadDirs, test.fileSystem.ReadDirCalls) + data := Request{ + Log: h.log, + Restore: tc.restore, + Backup: tc.backup, + PodVolumeBackups: nil, + VolumeSnapshots: nil, + BackupReader: tc.tarball, + } + warnings, errs := h.restorer.Restore( + data, + nil, // actions + nil, // snapshot location lister + nil, // volume snapshotter getter + ) + + assertEmptyResults(t, warnings, errs) + assertRestoredItems(t, h, tc.want) }) } } -func TestNamespaceRemapping(t *testing.T) { - var ( - baseDir = "bak" - restore = &api.Restore{Spec: api.RestoreSpec{IncludedNamespaces: []string{"*"}, NamespaceMapping: map[string]string{"ns-1": "ns-2"}}} - prioritizedResources = []schema.GroupResource{{Resource: "namespaces"}, {Resource: "configmaps"}} - labelSelector = labels.NewSelector() - fileSystem = velerotest.NewFakeFileSystem(). - WithFile("bak/resources/configmaps/namespaces/ns-1/cm-1.json", newTestConfigMap().WithNamespace("ns-1").ToJSON()). 
- WithFile("bak/resources/namespaces/cluster/ns-1.json", newTestNamespace("ns-1").ToJSON()) - expectedNS = "ns-2" - expectedObjs = toUnstructured(newTestConfigMap().WithNamespace("ns-2").ConfigMap) - ) - - resourceClient := &velerotest.FakeDynamicClient{} - for i := range expectedObjs { - addRestoreLabels(&expectedObjs[i], "", "") - resourceClient.On("Create", &expectedObjs[i]).Return(&expectedObjs[i], nil) - } - - dynamicFactory := &velerotest.FakeDynamicFactory{} - resource := metav1.APIResource{Name: "configmaps", Namespaced: true} - gv := schema.GroupVersion{Group: "", Version: "v1"} - dynamicFactory.On("ClientForGroupVersionResource", gv, resource, expectedNS).Return(resourceClient, nil) - - nsClient := &velerotest.FakeNamespaceClient{} - - ctx := &context{ - dynamicFactory: dynamicFactory, - fileSystem: fileSystem, - selector: labelSelector, - namespaceClient: nsClient, - prioritizedResources: prioritizedResources, - restore: restore, - backup: &api.Backup{}, - log: velerotest.NewLogger(), - applicableActions: make(map[schema.GroupResource][]resolvedAction), - resourceClients: make(map[resourceClientKey]pkgclient.Dynamic), - restoredItems: make(map[velero.ResourceIdentifier]struct{}), - restoreDir: baseDir, - } +// recordResourcesAction is a restore item action that can be configured +// to run for specific resources/namespaces and simply records the items +// that it is executed for. +type recordResourcesAction struct { + selector velero.ResourceSelector + ids []string + additionalItems []velero.ResourceIdentifier +} - nsClient.On("Get", "ns-2", metav1.GetOptions{}).Return(&v1.Namespace{}, k8serrors.NewNotFound(schema.GroupResource{Resource: "namespaces"}, "ns-2")) - ns := newTestNamespace("ns-2").Namespace - nsClient.On("Create", ns).Return(ns, nil) +func (a *recordResourcesAction) AppliesTo() (velero.ResourceSelector, error) { + return a.selector, nil +} - warnings, errors := ctx.restoreFromDir() +func (a *recordResourcesAction) Execute(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) { + metadata, err := meta.Accessor(input.Item) + if err != nil { + return &velero.RestoreItemActionExecuteOutput{ + UpdatedItem: input.Item, + AdditionalItems: a.additionalItems, + }, err + } + a.ids = append(a.ids, kubeutil.NamespaceAndName(metadata)) - assert.Empty(t, warnings.Velero) - assert.Empty(t, warnings.Cluster) - assert.Empty(t, warnings.Namespaces) - assert.Empty(t, errors.Velero) - assert.Empty(t, errors.Cluster) - assert.Empty(t, errors.Namespaces) + return &velero.RestoreItemActionExecuteOutput{ + UpdatedItem: input.Item, + AdditionalItems: a.additionalItems, + }, nil +} - // ensure the remapped NS (only) was created via the namespaceClient - nsClient.AssertExpectations(t) +func (a *recordResourcesAction) ForResource(resource string) *recordResourcesAction { + a.selector.IncludedResources = append(a.selector.IncludedResources, resource) + return a +} - // ensure that we did not try to create namespaces via dynamic client - dynamicFactory.AssertNotCalled(t, "ClientForGroupVersionResource", gv, metav1.APIResource{Name: "namespaces", Namespaced: true}, "") +func (a *recordResourcesAction) ForNamespace(namespace string) *recordResourcesAction { + a.selector.IncludedNamespaces = append(a.selector.IncludedNamespaces, namespace) + return a +} - dynamicFactory.AssertExpectations(t) - resourceClient.AssertExpectations(t) +func (a *recordResourcesAction) ForLabelSelector(selector string) *recordResourcesAction { + a.selector.LabelSelector = selector + 
return a } -func TestRestoreResourceForNamespace(t *testing.T) { - var ( - trueVal = true - falseVal = false - truePtr = &trueVal - falsePtr = &falseVal - ) +func (a *recordResourcesAction) WithAdditionalItems(items []velero.ResourceIdentifier) *recordResourcesAction { + a.additionalItems = items + return a +} +// TestRestoreActionsRunsForCorrectItems runs restores with restore item actions, and +// verifies that each restore item action is run for the correct set of resources based on its +// AppliesTo() resource selector. Verification is done by using the recordResourcesAction struct, +// which records which resources it's executed for. +func TestRestoreActionsRunForCorrectItems(t *testing.T) { tests := []struct { - name string - namespace string - resourcePath string - labelSelector labels.Selector - includeClusterResources *bool - fileSystem *velerotest.FakeFileSystem - actions []resolvedAction - expectedErrors Result - expectedObjs []unstructured.Unstructured + name string + restore *velerov1api.Restore + backup *velerov1api.Backup + apiResources []*test.APIResource + tarball io.Reader + actions map[*recordResourcesAction][]string }{ { - name: "basic normal case", - namespace: "ns-1", - resourcePath: "configmaps", - labelSelector: labels.NewSelector(), - fileSystem: velerotest.NewFakeFileSystem(). - WithFile("configmaps/cm-1.json", newNamedTestConfigMap("cm-1").ToJSON()). - WithFile("configmaps/cm-2.json", newNamedTestConfigMap("cm-2").ToJSON()), - expectedObjs: toUnstructured( - newNamedTestConfigMap("cm-1").ConfigMap, - newNamedTestConfigMap("cm-2").ConfigMap, - ), - }, - { - name: "no such directory causes error", - namespace: "ns-1", - resourcePath: "configmaps", - fileSystem: velerotest.NewFakeFileSystem(), - expectedErrors: Result{ - Namespaces: map[string][]string{ - "ns-1": {"error reading \"configmaps\" resource directory: open configmaps: file does not exist"}, - }, + name: "single action with no selector runs for all items", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems("pods", builder.ForPod("ns-1", "pod-1").Result(), builder.ForPod("ns-2", "pod-2").Result()). + addItems("persistentvolumes", builder.ForPersistentVolume("pv-1").Result(), builder.ForPersistentVolume("pv-2").Result()). + done(), + apiResources: []*test.APIResource{test.Pods(), test.PVs()}, + actions: map[*recordResourcesAction][]string{ + new(recordResourcesAction): {"ns-1/pod-1", "ns-2/pod-2", "pv-1", "pv-2"}, }, }, { - name: "empty directory is no-op", - namespace: "ns-1", - resourcePath: "configmaps", - fileSystem: velerotest.NewFakeFileSystem().WithDirectory("configmaps"), - }, - { - name: "unmarshall failure does not cause immediate return", - namespace: "ns-1", - resourcePath: "configmaps", - labelSelector: labels.NewSelector(), - fileSystem: velerotest.NewFakeFileSystem(). - WithFile("configmaps/cm-1-invalid.json", []byte("this is not valid json")). - WithFile("configmaps/cm-2.json", newNamedTestConfigMap("cm-2").ToJSON()), - expectedErrors: Result{ - Namespaces: map[string][]string{ - "ns-1": {"error decoding \"configmaps/cm-1-invalid.json\": invalid character 'h' in literal true (expecting 'r')"}, - }, + name: "single action with a resource selector for namespaced resources runs only for matching resources", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems("pods", builder.ForPod("ns-1", "pod-1").Result(), builder.ForPod("ns-2", "pod-2").Result()). 
+ addItems("persistentvolumes", builder.ForPersistentVolume("pv-1").Result(), builder.ForPersistentVolume("pv-2").Result()). + done(), + apiResources: []*test.APIResource{test.Pods(), test.PVs()}, + actions: map[*recordResourcesAction][]string{ + new(recordResourcesAction).ForResource("pods"): {"ns-1/pod-1", "ns-2/pod-2"}, }, - expectedObjs: toUnstructured(newNamedTestConfigMap("cm-2").ConfigMap), }, { - name: "matching label selector correctly includes", - namespace: "ns-1", - resourcePath: "configmaps", - labelSelector: labels.SelectorFromSet(labels.Set(map[string]string{"foo": "bar"})), - fileSystem: velerotest.NewFakeFileSystem().WithFile("configmaps/cm-1.json", newTestConfigMap().WithLabels(map[string]string{"foo": "bar"}).ToJSON()), - expectedObjs: toUnstructured(newTestConfigMap().WithLabels(map[string]string{"foo": "bar"}).ConfigMap), + name: "single action with a resource selector for cluster-scoped resources runs only for matching resources", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems("pods", builder.ForPod("ns-1", "pod-1").Result(), builder.ForPod("ns-2", "pod-2").Result()). + addItems("persistentvolumes", builder.ForPersistentVolume("pv-1").Result(), builder.ForPersistentVolume("pv-2").Result()). + done(), + apiResources: []*test.APIResource{test.Pods(), test.PVs()}, + actions: map[*recordResourcesAction][]string{ + new(recordResourcesAction).ForResource("persistentvolumes"): {"pv-1", "pv-2"}, + }, }, { - name: "non-matching label selector correctly excludes", - namespace: "ns-1", - resourcePath: "configmaps", - labelSelector: labels.SelectorFromSet(labels.Set(map[string]string{"foo": "not-bar"})), - fileSystem: velerotest.NewFakeFileSystem().WithFile("configmaps/cm-1.json", newTestConfigMap().WithLabels(map[string]string{"foo": "bar"}).ToJSON()), + name: "single action with a namespace selector runs only for resources in that namespace", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems("pods", builder.ForPod("ns-1", "pod-1").Result(), builder.ForPod("ns-2", "pod-2").Result()). + addItems("persistentvolumeclaims", builder.ForPersistentVolumeClaim("ns-1", "pvc-1").Result(), builder.ForPersistentVolumeClaim("ns-2", "pvc-2").Result()). + addItems("persistentvolumes", builder.ForPersistentVolume("pv-1").Result(), builder.ForPersistentVolume("pv-2").Result()). + done(), + apiResources: []*test.APIResource{test.Pods(), test.PVCs(), test.PVs()}, + actions: map[*recordResourcesAction][]string{ + new(recordResourcesAction).ForNamespace("ns-1"): {"ns-1/pod-1", "ns-1/pvc-1"}, + }, }, { - name: "namespace is remapped", - namespace: "ns-2", - resourcePath: "configmaps", - labelSelector: labels.NewSelector(), - fileSystem: velerotest.NewFakeFileSystem().WithFile("configmaps/cm-1.json", newTestConfigMap().WithNamespace("ns-1").ToJSON()), - expectedObjs: toUnstructured(newTestConfigMap().WithNamespace("ns-2").ConfigMap), + name: "single action with a resource and namespace selector runs only for matching resources in that namespace", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems("pods", builder.ForPod("ns-1", "pod-1").Result(), builder.ForPod("ns-2", "pod-2").Result()). + addItems("persistentvolumeclaims", builder.ForPersistentVolumeClaim("ns-1", "pvc-1").Result(), builder.ForPersistentVolumeClaim("ns-2", "pvc-2").Result()). 
+ addItems("persistentvolumes", builder.ForPersistentVolume("pv-1").Result(), builder.ForPersistentVolume("pv-2").Result()). + done(), + apiResources: []*test.APIResource{test.Pods(), test.PVCs(), test.PVs()}, + actions: map[*recordResourcesAction][]string{ + new(recordResourcesAction).ForNamespace("ns-1").ForResource("pods"): {"ns-1/pod-1"}, + }, }, { - name: "custom restorer is correctly used", - namespace: "ns-1", - resourcePath: "configmaps", - labelSelector: labels.NewSelector(), - fileSystem: velerotest.NewFakeFileSystem().WithFile("configmaps/cm-1.json", newTestConfigMap().ToJSON()), - actions: []resolvedAction{ - { - RestoreItemAction: newFakeAction("configmaps"), - resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("configmaps"), - namespaceIncludesExcludes: collections.NewIncludesExcludes(), - selector: labels.Everything(), - }, + name: "multiple actions, each with a different resource selector using short name, run for matching resources", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems("pods", builder.ForPod("ns-1", "pod-1").Result(), builder.ForPod("ns-2", "pod-2").Result()). + addItems("persistentvolumeclaims", builder.ForPersistentVolumeClaim("ns-1", "pvc-1").Result(), builder.ForPersistentVolumeClaim("ns-2", "pvc-2").Result()). + addItems("persistentvolumes", builder.ForPersistentVolume("pv-1").Result(), builder.ForPersistentVolume("pv-2").Result()). + done(), + apiResources: []*test.APIResource{test.Pods(), test.PVCs(), test.PVs()}, + actions: map[*recordResourcesAction][]string{ + new(recordResourcesAction).ForResource("po"): {"ns-1/pod-1", "ns-2/pod-2"}, + new(recordResourcesAction).ForResource("pv"): {"pv-1", "pv-2"}, }, - expectedObjs: toUnstructured(newTestConfigMap().WithLabels(map[string]string{"fake-restorer": "foo"}).ConfigMap), }, { - name: "custom restorer for different group/resource is not used", - namespace: "ns-1", - resourcePath: "configmaps", - labelSelector: labels.NewSelector(), - fileSystem: velerotest.NewFakeFileSystem().WithFile("configmaps/cm-1.json", newTestConfigMap().ToJSON()), - actions: []resolvedAction{ - { - RestoreItemAction: newFakeAction("foo-resource"), - resourceIncludesExcludes: collections.NewIncludesExcludes().Includes("foo-resource"), - namespaceIncludesExcludes: collections.NewIncludesExcludes(), - selector: labels.Everything(), - }, + name: "actions with selectors that don't match anything don't run for any resources", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems("pods", builder.ForPod("ns-1", "pod-1").Result()). + addItems("persistentvolumeclaims", builder.ForPersistentVolumeClaim("ns-2", "pvc-2").Result()). 
+ done(), + apiResources: []*test.APIResource{test.Pods(), test.PVCs(), test.PVs()}, + actions: map[*recordResourcesAction][]string{ + new(recordResourcesAction).ForNamespace("ns-1").ForResource("persistentvolumeclaims"): nil, + new(recordResourcesAction).ForNamespace("ns-2").ForResource("pods"): nil, }, - expectedObjs: toUnstructured(newTestConfigMap().ConfigMap), - }, - { - name: "cluster-scoped resources are skipped when IncludeClusterResources=false", - namespace: "", - resourcePath: "persistentvolumes", - labelSelector: labels.NewSelector(), - includeClusterResources: falsePtr, - fileSystem: velerotest.NewFakeFileSystem().WithFile("persistentvolumes/pv-1.json", newTestPV().ToJSON()), - }, - { - name: "namespaced resources are not skipped when IncludeClusterResources=false", - namespace: "ns-1", - resourcePath: "configmaps", - labelSelector: labels.NewSelector(), - includeClusterResources: falsePtr, - fileSystem: velerotest.NewFakeFileSystem().WithFile("configmaps/cm-1.json", newTestConfigMap().ToJSON()), - expectedObjs: toUnstructured(newTestConfigMap().ConfigMap), - }, - { - name: "cluster-scoped resources are not skipped when IncludeClusterResources=true", - namespace: "", - resourcePath: "persistentvolumes", - labelSelector: labels.NewSelector(), - includeClusterResources: truePtr, - fileSystem: velerotest.NewFakeFileSystem().WithFile("persistentvolumes/pv-1.json", newTestPV().ToJSON()), - expectedObjs: toUnstructured(newTestPV().PersistentVolume), - }, - { - name: "namespaced resources are not skipped when IncludeClusterResources=true", - namespace: "ns-1", - resourcePath: "configmaps", - labelSelector: labels.NewSelector(), - includeClusterResources: truePtr, - fileSystem: velerotest.NewFakeFileSystem().WithFile("configmaps/cm-1.json", newTestConfigMap().ToJSON()), - expectedObjs: toUnstructured(newTestConfigMap().ConfigMap), - }, - { - name: "cluster-scoped resources are not skipped when IncludeClusterResources=nil", - namespace: "", - resourcePath: "persistentvolumes", - labelSelector: labels.NewSelector(), - includeClusterResources: nil, - fileSystem: velerotest.NewFakeFileSystem().WithFile("persistentvolumes/pv-1.json", newTestPV().ToJSON()), - expectedObjs: toUnstructured(newTestPV().PersistentVolume), - }, - { - name: "namespaced resources are not skipped when IncludeClusterResources=nil", - namespace: "ns-1", - resourcePath: "configmaps", - labelSelector: labels.NewSelector(), - includeClusterResources: nil, - fileSystem: velerotest.NewFakeFileSystem().WithFile("configmaps/cm-1.json", newTestConfigMap().ToJSON()), - expectedObjs: toUnstructured(newTestConfigMap().ConfigMap), - }, - { - name: "serviceaccounts are restored", - namespace: "ns-1", - resourcePath: "serviceaccounts", - labelSelector: labels.NewSelector(), - includeClusterResources: nil, - fileSystem: velerotest.NewFakeFileSystem().WithFile("serviceaccounts/sa-1.json", newTestServiceAccount().ToJSON()), - expectedObjs: toUnstructured(newTestServiceAccount().ServiceAccount), - }, - { - name: "non-mirror pods are restored", - namespace: "ns-1", - resourcePath: "pods", - labelSelector: labels.NewSelector(), - includeClusterResources: nil, - fileSystem: velerotest.NewFakeFileSystem(). - WithFile( - "pods/pod.json", - NewTestUnstructured(). - WithAPIVersion("v1"). - WithKind("Pod"). - WithNamespace("ns-1"). - WithName("pod1"). - ToJSON(), - ), - expectedObjs: []unstructured.Unstructured{ - *(NewTestUnstructured(). - WithAPIVersion("v1"). - WithKind("Pod"). - WithNamespace("ns-1"). - WithName("pod1"). 
- Unstructured), - }, - }, - { - name: "mirror pods are not restored", - namespace: "ns-1", - resourcePath: "pods", - labelSelector: labels.NewSelector(), - includeClusterResources: nil, - fileSystem: velerotest.NewFakeFileSystem(). - WithFile( - "pods/pod.json", - NewTestUnstructured(). - WithAPIVersion("v1"). - WithKind("Pod"). - WithNamespace("ns-1"). - WithName("pod1"). - WithAnnotations(v1.MirrorPodAnnotationKey). - ToJSON(), - ), }, } - var ( - client = fake.NewSimpleClientset() - sharedInformers = informers.NewSharedInformerFactory(client, 0) - snapshotLocationLister = sharedInformers.Velero().V1().VolumeSnapshotLocations().Lister() - ) + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + h := newHarness(t) - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - resourceClient := &velerotest.FakeDynamicClient{} - for i := range test.expectedObjs { - addRestoreLabels(&test.expectedObjs[i], "my-restore", "my-backup") - resourceClient.On("Create", &test.expectedObjs[i]).Return(&test.expectedObjs[i], nil) + for _, r := range tc.apiResources { + h.addItems(t, r) } - dynamicFactory := &velerotest.FakeDynamicFactory{} - gv := schema.GroupVersion{Group: "", Version: "v1"} - - configMapResource := metav1.APIResource{Name: "configmaps", Namespaced: true} - dynamicFactory.On("ClientForGroupVersionResource", gv, configMapResource, test.namespace).Return(resourceClient, nil) + actions := []velero.RestoreItemAction{} + for action := range tc.actions { + actions = append(actions, action) + } - pvResource := metav1.APIResource{Name: "persistentvolumes", Namespaced: false} - dynamicFactory.On("ClientForGroupVersionResource", gv, pvResource, test.namespace).Return(resourceClient, nil) - resourceClient.On("Watch", metav1.ListOptions{}).Return(&fakeWatch{}, nil) - if test.resourcePath == "persistentvolumes" { - resourceClient.On("Get", mock.Anything, metav1.GetOptions{}).Return(&unstructured.Unstructured{}, k8serrors.NewNotFound(schema.GroupResource{Resource: "persistentvolumes"}, "")) + data := Request{ + Log: h.log, + Restore: tc.restore, + Backup: tc.backup, + PodVolumeBackups: nil, + VolumeSnapshots: nil, + BackupReader: tc.tarball, + } + warnings, errs := h.restorer.Restore( + data, + actions, + nil, // snapshot location lister + nil, // volume snapshotter getter + ) + + assertEmptyResults(t, warnings, errs) + + for action, want := range tc.actions { + sort.Strings(want) + sort.Strings(action.ids) + assert.Equal(t, want, action.ids) } + }) + } +} - // Assume the persistentvolume doesn't already exist in the cluster. - saResource := metav1.APIResource{Name: "serviceaccounts", Namespaced: true} - dynamicFactory.On("ClientForGroupVersionResource", gv, saResource, test.namespace).Return(resourceClient, nil) +// pluggableAction is a restore item action that can be plugged with an Execute +// function body at runtime. 
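+// If no executeFunc is provided, Execute returns the input item unchanged.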
+type pluggableAction struct {
+	selector    velero.ResourceSelector
+	executeFunc func(*velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error)
+}
 
-			podResource := metav1.APIResource{Name: "pods", Namespaced: true}
-			dynamicFactory.On("ClientForGroupVersionResource", gv, podResource, test.namespace).Return(resourceClient, nil)
+func (a *pluggableAction) Execute(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) {
+	if a.executeFunc == nil {
+		return &velero.RestoreItemActionExecuteOutput{
+			UpdatedItem: input.Item,
+		}, nil
+	}
 
-			ctx := &context{
-				dynamicFactory: dynamicFactory,
-				actions:        test.actions,
-				fileSystem:     test.fileSystem,
-				selector:       test.labelSelector,
-				restore: &api.Restore{
-					ObjectMeta: metav1.ObjectMeta{
-						Namespace: api.DefaultNamespace,
-						Name:      "my-restore",
-					},
-					Spec: api.RestoreSpec{
-						IncludeClusterResources: test.includeClusterResources,
-						BackupName:              "my-backup",
-					},
-				},
-				backup: &api.Backup{},
-				log:    velerotest.NewLogger(),
-				pvRestorer: &pvRestorer{
-					logger: logging.DefaultLogger(logrus.DebugLevel),
-					volumeSnapshotterGetter: &fakeVolumeSnapshotterGetter{
-						volumeMap: map[velerotest.VolumeBackupInfo]string{{SnapshotID: "snap-1"}: "volume-1"},
-						volumeID:  "volume-1",
-					},
-					snapshotLocationLister: snapshotLocationLister,
-					backup:                 &api.Backup{},
-				},
-				applicableActions: make(map[schema.GroupResource][]resolvedAction),
-				resourceClients:   make(map[resourceClientKey]pkgclient.Dynamic),
-				restoredItems:     make(map[velero.ResourceIdentifier]struct{}),
-			}
+	return a.executeFunc(input)
+}
 
-			warnings, errors := ctx.restoreResource(test.resourcePath, test.namespace, test.resourcePath)
+func (a *pluggableAction) AppliesTo() (velero.ResourceSelector, error) {
+	return a.selector, nil
+}
 
-			assert.Empty(t, warnings.Velero)
-			assert.Empty(t, warnings.Cluster)
-			assert.Empty(t, warnings.Namespaces)
-			assert.Equal(t, test.expectedErrors, errors)
-		})
+// TestRestoreActionModifications runs restores with restore item actions that modify resources, and
+// verifies that the modified item is correctly created in the API. Verification is done by looking
+// at the full object in the API.
+func TestRestoreActionModifications(t *testing.T) {
+	// modifyingActionGetter is a helper function that returns a *pluggableAction, whose Execute(...)
+	// method modifies the item being passed in by calling the 'modify' function on it.
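+	// The item is deep-copied before 'modify' is applied, so the object read from the backup
+	// tarball is never mutated in place.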
+ modifyingActionGetter := func(modify func(*unstructured.Unstructured)) *pluggableAction { + return &pluggableAction{ + executeFunc: func(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) { + obj, ok := input.Item.(*unstructured.Unstructured) + if !ok { + return nil, errors.Errorf("unexpected type %T", input.Item) + } + + res := obj.DeepCopy() + modify(res) + + return &velero.RestoreItemActionExecuteOutput{ + UpdatedItem: res, + }, nil + }, + } } -} -func TestRestoreLabels(t *testing.T) { tests := []struct { - name string - namespace string - resourcePath string - backupName string - restoreName string - labelSelector labels.Selector - includeClusterResources *bool - fileSystem *velerotest.FakeFileSystem - actions []resolvedAction - expectedErrors Result - expectedObjs []unstructured.Unstructured + name string + restore *velerov1api.Restore + backup *velerov1api.Backup + apiResources []*test.APIResource + tarball io.Reader + actions []velero.RestoreItemAction + want []*test.APIResource }{ { - name: "backup name and restore name less than 63 characters", - namespace: "ns-1", - resourcePath: "configmaps", - backupName: "less-than-63-characters", - restoreName: "less-than-63-characters-12345", - labelSelector: labels.NewSelector(), - fileSystem: velerotest.NewFakeFileSystem(). - WithFile("configmaps/cm-1.json", newNamedTestConfigMap("cm-1").ToJSON()), - expectedObjs: toUnstructured( - newNamedTestConfigMap("cm-1").WithLabels(map[string]string{ - api.BackupNameLabel: "less-than-63-characters", - api.RestoreNameLabel: "less-than-63-characters-12345", - }).ConfigMap, - ), - }, - { - name: "backup name equal to 63 characters", - namespace: "ns-1", - resourcePath: "configmaps", - backupName: "the-really-long-kube-service-name-that-is-exactly-63-characters", - restoreName: "the-really-long-kube-service-name-that-is-exactly-63-characters-12345", - labelSelector: labels.NewSelector(), - fileSystem: velerotest.NewFakeFileSystem(). - WithFile("configmaps/cm-1.json", newNamedTestConfigMap("cm-1").ToJSON()), - expectedObjs: toUnstructured( - newNamedTestConfigMap("cm-1").WithLabels(map[string]string{ - api.BackupNameLabel: "the-really-long-kube-service-name-that-is-exactly-63-characters", - api.RestoreNameLabel: "the-really-long-kube-service-name-that-is-exactly-63-char0871f3", - }).ConfigMap, - ), - }, - { - name: "backup name greter than 63 characters", - namespace: "ns-1", - resourcePath: "configmaps", - backupName: "the-really-long-kube-service-name-that-is-much-greater-than-63-characters", - restoreName: "the-really-long-kube-service-name-that-is-much-greater-than-63-characters-12345", - labelSelector: labels.NewSelector(), - fileSystem: velerotest.NewFakeFileSystem(). 
- WithFile("configmaps/cm-1.json", newNamedTestConfigMap("cm-1").ToJSON()), - expectedObjs: toUnstructured( - newNamedTestConfigMap("cm-1").WithLabels(map[string]string{ - api.BackupNameLabel: "the-really-long-kube-service-name-that-is-much-greater-th8a11b3", - api.RestoreNameLabel: "the-really-long-kube-service-name-that-is-much-greater-th1bf26f", - }).ConfigMap, - ), + name: "action that adds a label to item gets restored", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t).addItems("pods", builder.ForPod("ns-1", "pod-1").Result()).done(), + apiResources: []*test.APIResource{test.Pods()}, + actions: []velero.RestoreItemAction{ + modifyingActionGetter(func(item *unstructured.Unstructured) { + item.SetLabels(map[string]string{"updated": "true"}) + }), + }, + want: []*test.APIResource{ + test.Pods( + builder.ForPod("ns-1", "pod-1").ObjectMeta(builder.WithLabels("updated", "true")).Result(), + ), + }, + }, + { + name: "action that removes a label to item gets restored", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t).addItems("pods", builder.ForPod("ns-1", "pod-1").ObjectMeta(builder.WithLabels("should-be-removed", "true")).Result()).done(), + apiResources: []*test.APIResource{test.Pods()}, + actions: []velero.RestoreItemAction{ + modifyingActionGetter(func(item *unstructured.Unstructured) { + item.SetLabels(nil) + }), + }, + want: []*test.APIResource{ + test.Pods(builder.ForPod("ns-1", "pod-1").Result()), + }, }, + // TODO action that modifies namespace/name - what's the expected behavior? } - var ( - client = fake.NewSimpleClientset() - sharedInformers = informers.NewSharedInformerFactory(client, 0) - snapshotLocationLister = sharedInformers.Velero().V1().VolumeSnapshotLocations().Lister() - ) + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + h := newHarness(t) - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - resourceClient := &velerotest.FakeDynamicClient{} - for i := range test.expectedObjs { - resourceClient.On("Create", &test.expectedObjs[i]).Return(&test.expectedObjs[i], nil) + for _, r := range tc.apiResources { + h.addItems(t, r) } - dynamicFactory := &velerotest.FakeDynamicFactory{} - gv := schema.GroupVersion{Group: "", Version: "v1"} + // every restored item should have the restore and backup name labels, set + // them here so we don't have to do it in every test case definition above. 
+			for _, resource := range tc.want {
+				for _, item := range resource.Items {
+					labels := item.GetLabels()
+					if labels == nil {
+						labels = make(map[string]string)
+					}
 
-			configMapResource := metav1.APIResource{Name: "configmaps", Namespaced: true}
-			dynamicFactory.On("ClientForGroupVersionResource", gv, configMapResource, test.namespace).Return(resourceClient, nil)
+					labels["velero.io/restore-name"] = tc.restore.Name
+					labels["velero.io/backup-name"] = tc.restore.Spec.BackupName
 
-			ctx := &context{
-				dynamicFactory: dynamicFactory,
-				actions:        test.actions,
-				fileSystem:     test.fileSystem,
-				selector:       test.labelSelector,
-				restore: &api.Restore{
-					ObjectMeta: metav1.ObjectMeta{
-						Namespace: api.DefaultNamespace,
-						Name:      test.restoreName,
-					},
-					Spec: api.RestoreSpec{
-						IncludeClusterResources: test.includeClusterResources,
-						BackupName:              test.backupName,
-					},
-				},
-				backup: &api.Backup{},
-				log:    velerotest.NewLogger(),
-				pvRestorer: &pvRestorer{
-					logger: logging.DefaultLogger(logrus.DebugLevel),
-					volumeSnapshotterGetter: &fakeVolumeSnapshotterGetter{
-						volumeMap: map[velerotest.VolumeBackupInfo]string{{SnapshotID: "snap-1"}: "volume-1"},
-						volumeID:  "volume-1",
-					},
-					snapshotLocationLister: snapshotLocationLister,
-					backup:                 &api.Backup{},
-				},
-				applicableActions: make(map[schema.GroupResource][]resolvedAction),
-				resourceClients:   make(map[resourceClientKey]pkgclient.Dynamic),
-				restoredItems:     make(map[velero.ResourceIdentifier]struct{}),
+					item.SetLabels(labels)
+				}
 			}
 
-			warnings, errors := ctx.restoreResource(test.resourcePath, test.namespace, test.resourcePath)
-
-			assert.Empty(t, warnings.Velero)
-			assert.Empty(t, warnings.Cluster)
-			assert.Empty(t, warnings.Namespaces)
-			assert.Equal(t, test.expectedErrors, errors)
+			data := Request{
+				Log:              h.log,
+				Restore:          tc.restore,
+				Backup:           tc.backup,
+				PodVolumeBackups: nil,
+				VolumeSnapshots:  nil,
+				BackupReader:     tc.tarball,
+			}
+			warnings, errs := h.restorer.Restore(
+				data,
+				tc.actions,
+				nil, // snapshot location lister
+				nil, // volume snapshotter getter
+			)
+
+			assertEmptyResults(t, warnings, errs)
+			assertRestoredItems(t, h, tc.want)
 		})
 	}
 }
 
-func TestRestoringExistingServiceAccount(t *testing.T) {
-	fromCluster := newTestServiceAccount()
-	fromClusterUnstructured, err := runtime.DefaultUnstructuredConverter.ToUnstructured(fromCluster.ServiceAccount)
-	require.NoError(t, err)
-
-	different := newTestServiceAccount().WithImagePullSecret("image-secret").WithSecret("secret")
-	differentUnstructured, err := runtime.DefaultUnstructuredConverter.ToUnstructured(different.ServiceAccount)
-	require.NoError(t, err)
-
+// TestRestoreActionAdditionalItems runs restores with restore item actions that return additional items
+// to be restored, and verifies that the correct set of items is created in the API. Verification is
+// done by looking at the namespaces/names of the items in the API; contents are not checked.
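+// Additional items come back on an action's Execute output and remain subject to the restore's
+// namespace, resource, and cluster-resource filters, as the cases below verify.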
+func TestRestoreActionAdditionalItems(t *testing.T) { tests := []struct { - name string - expectedPatch []byte - fromBackup *unstructured.Unstructured + name string + restore *velerov1api.Restore + backup *velerov1api.Backup + tarball io.Reader + apiResources []*test.APIResource + actions []velero.RestoreItemAction + want map[*test.APIResource][]string }{ { - name: "fromCluster and fromBackup are exactly the same", - fromBackup: &unstructured.Unstructured{Object: fromClusterUnstructured}, + name: "additional items that are already being restored are not restored twice", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t).addItems("pods", builder.ForPod("ns-1", "pod-1").Result(), builder.ForPod("ns-2", "pod-2").Result()).done(), + apiResources: []*test.APIResource{test.Pods()}, + actions: []velero.RestoreItemAction{ + &pluggableAction{ + selector: velero.ResourceSelector{IncludedNamespaces: []string{"ns-1"}}, + executeFunc: func(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) { + return &velero.RestoreItemActionExecuteOutput{ + UpdatedItem: input.Item, + AdditionalItems: []velero.ResourceIdentifier{ + {GroupResource: kuberesource.Pods, Namespace: "ns-2", Name: "pod-2"}, + }, + }, nil + }, + }, + }, + want: map[*test.APIResource][]string{ + test.Pods(): {"ns-1/pod-1", "ns-2/pod-2"}, + }, + }, + { + name: "when using a restore namespace filter, additional items that are in a non-included namespace are not restored", + restore: defaultRestore().IncludedNamespaces("ns-1").Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t).addItems("pods", builder.ForPod("ns-1", "pod-1").Result(), builder.ForPod("ns-2", "pod-2").Result()).done(), + apiResources: []*test.APIResource{test.Pods()}, + actions: []velero.RestoreItemAction{ + &pluggableAction{ + executeFunc: func(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) { + return &velero.RestoreItemActionExecuteOutput{ + UpdatedItem: input.Item, + AdditionalItems: []velero.ResourceIdentifier{ + {GroupResource: kuberesource.Pods, Namespace: "ns-2", Name: "pod-2"}, + }, + }, nil + }, + }, + }, + want: map[*test.APIResource][]string{ + test.Pods(): {"ns-1/pod-1"}, + }, + }, + { + name: "when using a restore namespace filter, additional items that are cluster-scoped are restored", + restore: defaultRestore().IncludedNamespaces("ns-1").Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems("pods", builder.ForPod("ns-1", "pod-1").Result()). + addItems("persistentvolumes", builder.ForPersistentVolume("pv-1").Result()). + done(), + apiResources: []*test.APIResource{test.Pods(), test.PVs()}, + actions: []velero.RestoreItemAction{ + &pluggableAction{ + executeFunc: func(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) { + return &velero.RestoreItemActionExecuteOutput{ + UpdatedItem: input.Item, + AdditionalItems: []velero.ResourceIdentifier{ + {GroupResource: kuberesource.PersistentVolumes, Name: "pv-1"}, + }, + }, nil + }, + }, + }, + want: map[*test.APIResource][]string{ + test.Pods(): {"ns-1/pod-1"}, + test.PVs(): {"/pv-1"}, + }, + }, + { + name: "when using a restore resource filter, additional items that are non-included resources are not restored", + restore: defaultRestore().IncludedResources("pods").Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). 
+ addItems("pods", builder.ForPod("ns-1", "pod-1").Result()). + addItems("persistentvolumes", builder.ForPersistentVolume("pv-1").Result()). + done(), + apiResources: []*test.APIResource{test.Pods(), test.PVs()}, + actions: []velero.RestoreItemAction{ + &pluggableAction{ + executeFunc: func(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) { + return &velero.RestoreItemActionExecuteOutput{ + UpdatedItem: input.Item, + AdditionalItems: []velero.ResourceIdentifier{ + {GroupResource: kuberesource.PersistentVolumes, Name: "pv-1"}, + }, + }, nil + }, + }, + }, + want: map[*test.APIResource][]string{ + test.Pods(): {"ns-1/pod-1"}, + test.PVs(): nil, + }, }, { - name: "fromCluster and fromBackup are different", - fromBackup: &unstructured.Unstructured{Object: differentUnstructured}, - expectedPatch: []byte(`{"imagePullSecrets":[{"name":"image-secret"}],"secrets":[{"name":"secret"}]}`), + name: "when IncludeClusterResources=false, additional items that are cluster-scoped are not restored", + restore: defaultRestore().IncludeClusterResources(false).Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems("pods", builder.ForPod("ns-1", "pod-1").Result()). + addItems("persistentvolumes", builder.ForPersistentVolume("pv-1").Result()). + done(), + apiResources: []*test.APIResource{test.Pods(), test.PVs()}, + actions: []velero.RestoreItemAction{ + &pluggableAction{ + executeFunc: func(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) { + return &velero.RestoreItemActionExecuteOutput{ + UpdatedItem: input.Item, + AdditionalItems: []velero.ResourceIdentifier{ + {GroupResource: kuberesource.PersistentVolumes, Name: "pv-1"}, + }, + }, nil + }, + }, + }, + want: map[*test.APIResource][]string{ + test.Pods(): {"ns-1/pod-1"}, + test.PVs(): nil, + }, }, } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - resourceClient := &velerotest.FakeDynamicClient{} - defer resourceClient.AssertExpectations(t) - name := fromCluster.GetName() - - // restoreResource will add the restore label to object provided to create, so we need to make a copy to provide to our expected call - m := make(map[string]interface{}) - for k, v := range test.fromBackup.Object { - m[k] = v - } - fromBackupWithLabel := &unstructured.Unstructured{Object: m} - addRestoreLabels(fromBackupWithLabel, "my-restore", "my-backup") - // resetMetadataAndStatus will strip the creationTimestamp before calling Create - fromBackupWithLabel.SetCreationTimestamp(metav1.Time{Time: time.Time{}}) - - resourceClient.On("Create", fromBackupWithLabel).Return(new(unstructured.Unstructured), k8serrors.NewAlreadyExists(kuberesource.ServiceAccounts, name)) - resourceClient.On("Get", name, metav1.GetOptions{}).Return(&unstructured.Unstructured{Object: fromClusterUnstructured}, nil) + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + h := newHarness(t) - if len(test.expectedPatch) > 0 { - resourceClient.On("Patch", name, test.expectedPatch).Return(test.fromBackup, nil) + for _, r := range tc.apiResources { + h.addItems(t, r) } - dynamicFactory := &velerotest.FakeDynamicFactory{} - gv := schema.GroupVersion{Group: "", Version: "v1"} - - resource := metav1.APIResource{Name: "serviceaccounts", Namespaced: true} - dynamicFactory.On("ClientForGroupVersionResource", gv, resource, "ns-1").Return(resourceClient, nil) - fromBackupJSON, err := json.Marshal(test.fromBackup) - require.NoError(t, err) - ctx := &context{ - dynamicFactory: 
dynamicFactory, - actions: []resolvedAction{}, - fileSystem: velerotest.NewFakeFileSystem(). - WithFile("foo/resources/serviceaccounts/namespaces/ns-1/sa-1.json", fromBackupJSON), - selector: labels.NewSelector(), - restore: &api.Restore{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: api.DefaultNamespace, - Name: "my-restore", - }, - Spec: api.RestoreSpec{ - IncludeClusterResources: nil, - BackupName: "my-backup", - }, - }, - backup: &api.Backup{}, - log: velerotest.NewLogger(), - applicableActions: make(map[schema.GroupResource][]resolvedAction), - resourceClients: make(map[resourceClientKey]pkgclient.Dynamic), - restoredItems: make(map[velero.ResourceIdentifier]struct{}), + data := Request{ + Log: h.log, + Restore: tc.restore, + Backup: tc.backup, + PodVolumeBackups: nil, + VolumeSnapshots: nil, + BackupReader: tc.tarball, } - warnings, errors := ctx.restoreResource("serviceaccounts", "ns-1", "foo/resources/serviceaccounts/namespaces/ns-1/") - - assert.Empty(t, warnings.Velero) - assert.Empty(t, warnings.Cluster) - assert.Empty(t, warnings.Namespaces) - assert.Equal(t, Result{}, errors) + warnings, errs := h.restorer.Restore( + data, + tc.actions, + nil, // snapshot location lister + nil, // volume snapshotter getter + ) + + assertEmptyResults(t, warnings, errs) + assertAPIContents(t, h, tc.want) }) } } -func TestRestoringPVsWithoutSnapshots(t *testing.T) { - pv := `apiVersion: v1 -kind: PersistentVolume -metadata: - annotations: - EXPORT_block: "\nEXPORT\n{\n\tExport_Id = 1;\n\tPath = /export/pvc-6a74b5af-78a5-11e8-a0d8-e2ad1e9734ce;\n\tPseudo - = /export/pvc-6a74b5af-78a5-11e8-a0d8-e2ad1e9734ce;\n\tAccess_Type = RW;\n\tSquash - = no_root_squash;\n\tSecType = sys;\n\tFilesystem_id = 1.1;\n\tFSAL {\n\t\tName - = VFS;\n\t}\n}\n" - Export_Id: "1" - Project_Id: "0" - Project_block: "" - Provisioner_Id: 5fdf4025-78a5-11e8-9ece-0242ac110004 - kubernetes.io/createdby: nfs-dynamic-provisioner - pv.kubernetes.io/provisioned-by: example.com/nfs - volume.beta.kubernetes.io/mount-options: vers=4.1 - creationTimestamp: 2018-06-25T18:27:35Z - finalizers: - - kubernetes.io/pv-protection - name: pvc-6a74b5af-78a5-11e8-a0d8-e2ad1e9734ce - resourceVersion: "2576" - selfLink: /api/v1/persistentvolumes/pvc-6a74b5af-78a5-11e8-a0d8-e2ad1e9734ce - uid: 6ecd24e4-78a5-11e8-a0d8-e2ad1e9734ce -spec: - accessModes: - - ReadWriteMany - capacity: - storage: 1Mi - claimRef: - apiVersion: v1 - kind: PersistentVolumeClaim - name: nfs - namespace: default - resourceVersion: "2565" - uid: 6a74b5af-78a5-11e8-a0d8-e2ad1e9734ce - nfs: - path: /export/pvc-6a74b5af-78a5-11e8-a0d8-e2ad1e9734ce - server: 10.103.235.254 - storageClassName: example-nfs -status: - phase: Bound` - - pvc := `apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - annotations: - control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"5fdf5572-78a5-11e8-9ece-0242ac110004","leaseDurationSeconds":15,"acquireTime":"2018-06-25T18:27:35Z","renewTime":"2018-06-25T18:27:37Z","leaderTransitions":0}' - kubectl.kubernetes.io/last-applied-configuration: | - {"apiVersion":"v1","kind":"PersistentVolumeClaim","metadata":{"annotations":{},"name":"nfs","namespace":"default"},"spec":{"accessModes":["ReadWriteMany"],"resources":{"requests":{"storage":"1Mi"}},"storageClassName":"example-nfs"}} - pv.kubernetes.io/bind-completed: "yes" - pv.kubernetes.io/bound-by-controller: "yes" - volume.beta.kubernetes.io/storage-provisioner: example.com/nfs - creationTimestamp: 2018-06-25T18:27:28Z - finalizers: - - kubernetes.io/pvc-protection - name: nfs - namespace: default 
- resourceVersion: "2578" - selfLink: /api/v1/namespaces/default/persistentvolumeclaims/nfs - uid: 6a74b5af-78a5-11e8-a0d8-e2ad1e9734ce -spec: - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Mi - storageClassName: example-nfs - volumeName: pvc-6a74b5af-78a5-11e8-a0d8-e2ad1e9734ce -status: - accessModes: - - ReadWriteMany - capacity: - storage: 1Mi - phase: Bound` - +// TestShouldRestore runs the ShouldRestore function for various permutations of +// existing/nonexisting/being-deleted PVs, PVCs, and namespaces, and verifies the +// result/error matches expectations. +func TestShouldRestore(t *testing.T) { tests := []struct { - name string - haveSnapshot bool - reclaimPolicy string - expectPVCVolumeName bool - expectedPVCAnnotationsMissing sets.String - expectPVCreation bool - expectPVFound bool + name string + pvName string + apiResources []*test.APIResource + namespaces []*corev1api.Namespace + want bool + wantErr error }{ { - name: "backup has snapshot, reclaim policy delete, no existing PV found", - haveSnapshot: true, - reclaimPolicy: "Delete", - expectPVCVolumeName: true, - expectPVCreation: true, + name: "when PV is not found, result is true", + pvName: "pv-1", + want: true, }, { - name: "backup has snapshot, reclaim policy delete, existing PV found", - haveSnapshot: true, - reclaimPolicy: "Delete", - expectPVCVolumeName: true, - expectPVCreation: false, - expectPVFound: true, + name: "when PV is found and has Phase=Released, result is false", + pvName: "pv-1", + apiResources: []*test.APIResource{ + test.PVs(&corev1api.PersistentVolume{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "PersistentVolume", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "pv-1", + }, + Status: corev1api.PersistentVolumeStatus{ + Phase: corev1api.VolumeReleased, + }, + }), + }, + want: false, }, { - name: "backup has snapshot, reclaim policy retain, no existing PV found", - haveSnapshot: true, - reclaimPolicy: "Retain", - expectPVCVolumeName: true, - expectPVCreation: true, + name: "when PV is found and has associated PVC and namespace that aren't deleting, result is false", + pvName: "pv-1", + apiResources: []*test.APIResource{ + test.PVs( + builder.ForPersistentVolume("pv-1").ClaimRef("ns-1", "pvc-1").Result(), + ), + test.PVCs(builder.ForPersistentVolumeClaim("ns-1", "pvc-1").Result()), + }, + namespaces: []*corev1api.Namespace{builder.ForNamespace("ns-1").Result()}, + want: false, }, { - name: "backup has snapshot, reclaim policy retain, existing PV found", - haveSnapshot: true, - reclaimPolicy: "Retain", - expectPVCVolumeName: true, - expectPVCreation: false, - expectPVFound: true, + name: "when PV is found and has associated PVC that is deleting, result is false + timeout error", + pvName: "pv-1", + apiResources: []*test.APIResource{ + test.PVs( + builder.ForPersistentVolume("pv-1").ClaimRef("ns-1", "pvc-1").Result(), + ), + test.PVCs( + builder.ForPersistentVolumeClaim("ns-1", "pvc-1").ObjectMeta(builder.WithDeletionTimestamp(time.Now())).Result(), + ), + }, + want: false, + wantErr: errors.New("timed out waiting for the condition"), }, { - name: "backup has snapshot, reclaim policy retain, existing PV found", - haveSnapshot: true, - reclaimPolicy: "Retain", - expectPVCVolumeName: true, - expectPVCreation: false, - expectPVFound: true, + name: "when PV is found, has associated PVC that's not deleting, has associated NS that is terminating, result is false + timeout error", + pvName: "pv-1", + apiResources: []*test.APIResource{ + test.PVs( + 
builder.ForPersistentVolume("pv-1").ClaimRef("ns-1", "pvc-1").Result(), + ), + test.PVCs(builder.ForPersistentVolumeClaim("ns-1", "pvc-1").Result()), + }, + namespaces: []*corev1api.Namespace{ + builder.ForNamespace("ns-1").Phase(corev1api.NamespaceTerminating).Result(), + }, + want: false, + wantErr: errors.New("timed out waiting for the condition"), }, { - name: "no snapshot, reclaim policy delete, no existing PV", - haveSnapshot: false, - reclaimPolicy: "Delete", - expectPVCVolumeName: false, - expectedPVCAnnotationsMissing: sets.NewString("pv.kubernetes.io/bind-completed", "pv.kubernetes.io/bound-by-controller"), + name: "when PV is found, has associated PVC that's not deleting, has associated NS that has deletion timestamp, result is false + timeout error", + pvName: "pv-1", + apiResources: []*test.APIResource{ + test.PVs( + builder.ForPersistentVolume("pv-1").ClaimRef("ns-1", "pvc-1").Result(), + ), + test.PVCs(builder.ForPersistentVolumeClaim("ns-1", "pvc-1").Result()), + }, + namespaces: []*corev1api.Namespace{ + builder.ForNamespace("ns-1").ObjectMeta(builder.WithDeletionTimestamp(time.Now())).Result(), + }, + want: false, + wantErr: errors.New("timed out waiting for the condition"), }, { - name: "no snapshot, reclaim policy retain, no existing PV found", - haveSnapshot: false, - reclaimPolicy: "Retain", - expectPVCVolumeName: true, - expectPVCreation: true, + name: "when PV is found, associated PVC is not found, result is false + timeout error", + pvName: "pv-1", + apiResources: []*test.APIResource{ + test.PVs( + builder.ForPersistentVolume("pv-1").ClaimRef("ns-1", "pvc-1").Result(), + ), + }, + want: false, + wantErr: errors.New("timed out waiting for the condition"), }, { - name: "no snapshot, reclaim policy retain, existing PV found", - haveSnapshot: false, - reclaimPolicy: "Retain", - expectPVCVolumeName: true, - expectPVCreation: false, - expectPVFound: true, + name: "when PV is found, has associated PVC, associated namespace not found, result is false + timeout error", + pvName: "pv-1", + apiResources: []*test.APIResource{ + test.PVs( + builder.ForPersistentVolume("pv-1").ClaimRef("ns-1", "pvc-1").Result(), + ), + test.PVCs(builder.ForPersistentVolumeClaim("ns-1", "pvc-1").Result()), + }, + want: false, + wantErr: errors.New("timed out waiting for the condition"), }, } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - dynamicFactory := &velerotest.FakeDynamicFactory{} - gv := schema.GroupVersion{Group: "", Version: "v1"} - pvClient := &velerotest.FakeDynamicClient{} - defer pvClient.AssertExpectations(t) + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + h := newHarness(t) - pvResource := metav1.APIResource{Name: "persistentvolumes", Namespaced: false} - dynamicFactory.On("ClientForGroupVersionResource", gv, pvResource, "").Return(pvClient, nil) + ctx := &context{ + log: h.log, + dynamicFactory: client.NewDynamicFactory(h.DynamicClient), + namespaceClient: h.KubeClient.CoreV1().Namespaces(), + resourceTerminatingTimeout: time.Millisecond, + } - pvcClient := &velerotest.FakeDynamicClient{} - defer pvcClient.AssertExpectations(t) + for _, resource := range tc.apiResources { + h.addItems(t, resource) + } - pvcResource := metav1.APIResource{Name: "persistentvolumeclaims", Namespaced: true} - dynamicFactory.On("ClientForGroupVersionResource", gv, pvcResource, "default").Return(pvcClient, nil) + for _, ns := range tc.namespaces { + _, err := ctx.namespaceClient.Create(ns) + require.NoError(t, err) + } - obj, _, err := 
scheme.Codecs.UniversalDecoder(v1.SchemeGroupVersion).Decode([]byte(pv), nil, nil) - require.NoError(t, err) - pvObj, ok := obj.(*v1.PersistentVolume) - require.True(t, ok) - pvObj.Spec.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimPolicy(test.reclaimPolicy) - pvBytes, err := json.Marshal(pvObj) + pvClient, err := ctx.dynamicFactory.ClientForGroupVersionResource( + schema.GroupVersion{Group: "", Version: "v1"}, + metav1.APIResource{Name: "persistentvolumes"}, + "", + ) require.NoError(t, err) - obj, _, err = scheme.Codecs.UniversalDecoder(v1.SchemeGroupVersion).Decode([]byte(pvc), nil, nil) - require.NoError(t, err) - pvcObj, ok := obj.(*v1.PersistentVolumeClaim) - require.True(t, ok) - pvcBytes, err := json.Marshal(pvcObj) - require.NoError(t, err) + res, err := ctx.shouldRestore(tc.pvName, pvClient) + assert.Equal(t, tc.want, res) + if tc.wantErr != nil { + if assert.NotNil(t, err, "expected a non-nil error") { + assert.EqualError(t, err, tc.wantErr.Error()) + } + } else { + assert.Nil(t, err) + } + }) + } +} - unstructuredPVCMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(pvcObj) - require.NoError(t, err) - unstructuredPVC := &unstructured.Unstructured{Object: unstructuredPVCMap} +func assertRestoredItems(t *testing.T, h *harness, want []*test.APIResource) { + t.Helper() - nsClient := &velerotest.FakeNamespaceClient{} - ns := newTestNamespace(pvcObj.Namespace).Namespace - nsClient.On("Get", pvcObj.Namespace, mock.Anything).Return(ns, nil) + for _, resource := range want { + resourceClient := h.DynamicClient.Resource(resource.GVR()) + for _, item := range resource.Items { + var client dynamic.ResourceInterface + if item.GetNamespace() != "" { + client = resourceClient.Namespace(item.GetNamespace()) + } else { + client = resourceClient + } - backup := &api.Backup{} + res, err := client.Get(item.GetName(), metav1.GetOptions{}) + if !assert.NoError(t, err) { + continue + } - pvRestorer := new(mockPVRestorer) - defer pvRestorer.AssertExpectations(t) + itemJSON, err := json.Marshal(item) + if !assert.NoError(t, err) { + continue + } - ctx := &context{ - dynamicFactory: dynamicFactory, - actions: []resolvedAction{}, - fileSystem: velerotest.NewFakeFileSystem(). - WithFile("foo/resources/persistentvolumes/cluster/pv.json", pvBytes). - WithFile("foo/resources/persistentvolumeclaims/default/pvc.json", pvcBytes), - selector: labels.NewSelector(), - prioritizedResources: []schema.GroupResource{ - kuberesource.PersistentVolumes, - kuberesource.PersistentVolumeClaims, + t.Logf("%v", string(itemJSON)) + + u := make(map[string]interface{}) + if !assert.NoError(t, json.Unmarshal(itemJSON, &u)) { + continue + } + want := &unstructured.Unstructured{Object: u} + + // These fields get non-nil zero values in the unstructured objects if they're + // empty in the structured objects. Remove them to make comparison easier. + unstructured.RemoveNestedField(want.Object, "metadata", "creationTimestamp") + unstructured.RemoveNestedField(want.Object, "status") + + assert.Equal(t, want, res) + } + } +} + +// volumeSnapshotterGetter is a simple implementation of the VolumeSnapshotterGetter +// interface that returns velero.VolumeSnapshotters from a map if they exist. 
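+// Requesting a name that is not present in the map returns an error.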
+type volumeSnapshotterGetter map[string]velero.VolumeSnapshotter + +func (vsg volumeSnapshotterGetter) GetVolumeSnapshotter(name string) (velero.VolumeSnapshotter, error) { + snapshotter, ok := vsg[name] + if !ok { + return nil, errors.New("volume snapshotter not found") + } + + return snapshotter, nil +} + +// volumeSnapshotter is a test fake for the velero.VolumeSnapshotter interface +type volumeSnapshotter struct { + // a map from snapshotID to volumeID + snapshotVolumes map[string]string +} + +// Init is a no-op. +func (vs *volumeSnapshotter) Init(config map[string]string) error { + return nil +} + +// CreateVolumeFromSnapshot looks up the specified snapshotID in the snapshotVolumes +// map and returns the corresponding volumeID if it exists, or an error otherwise. +func (vs *volumeSnapshotter) CreateVolumeFromSnapshot(snapshotID, volumeType, volumeAZ string, iops *int64) (volumeID string, err error) { + volumeID, ok := vs.snapshotVolumes[snapshotID] + if !ok { + return "", errors.New("snapshot not found") + } + + return volumeID, nil +} + +// SetVolumeID sets the persistent volume's spec.awsElasticBlockStore.volumeID field +// with the provided volumeID. +func (*volumeSnapshotter) SetVolumeID(pv runtime.Unstructured, volumeID string) (runtime.Unstructured, error) { + unstructured.SetNestedField(pv.UnstructuredContent(), volumeID, "spec", "awsElasticBlockStore", "volumeID") + return pv, nil +} + +// GetVolumeID panics because it's not expected to be used for restores. +func (*volumeSnapshotter) GetVolumeID(pv runtime.Unstructured) (string, error) { + panic("GetVolumeID should not be used for restores") +} + +// CreateSnapshot panics because it's not expected to be used for restores. +func (*volumeSnapshotter) CreateSnapshot(volumeID, volumeAZ string, tags map[string]string) (snapshotID string, err error) { + panic("CreateSnapshot should not be used for restores") +} + +// GetVolumeInfo panics because it's not expected to be used for restores. +func (*volumeSnapshotter) GetVolumeInfo(volumeID, volumeAZ string) (string, *int64, error) { + panic("GetVolumeInfo should not be used for restores") +} + +// DeleteSnapshot panics because it's not expected to be used for restores. +func (*volumeSnapshotter) DeleteSnapshot(snapshotID string) error { + panic("DeleteSnapshot should not be used for backups") +} + +// TestRestorePersistentVolumes runs restores for persistent volumes and verifies that +// they are restored as expected, including restoring volumes from snapshots when expected. +// Verification is done by looking at the contents of the API and the metadata/spec/status of +// the items in the API. +func TestRestorePersistentVolumes(t *testing.T) { + tests := []struct { + name string + restore *velerov1api.Restore + backup *velerov1api.Backup + tarball io.Reader + apiResources []*test.APIResource + volumeSnapshots []*volume.Snapshot + volumeSnapshotLocations []*velerov1api.VolumeSnapshotLocation + volumeSnapshotterGetter volumeSnapshotterGetter + want []*test.APIResource + }{ + { + name: "when a PV with a reclaim policy of delete has no snapshot and does not exist in-cluster, it does not get restored, and its PVC gets reset for dynamic provisioning", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems("persistentvolumes", + builder.ForPersistentVolume("pv-1").ReclaimPolicy(corev1api.PersistentVolumeReclaimDelete).ClaimRef("ns-1", "pvc-1").Result(), + ). 
+ addItems("persistentvolumeclaims", + builder.ForPersistentVolumeClaim("ns-1", "pvc-1"). + VolumeName("pv-1"). + ObjectMeta( + builder.WithAnnotations("pv.kubernetes.io/bind-completed", "true", "pv.kubernetes.io/bound-by-controller", "true", "foo", "bar"), + ). + Result(), + ). + done(), + apiResources: []*test.APIResource{ + test.PVs(), + test.PVCs(), + }, + want: []*test.APIResource{ + test.PVs(), + test.PVCs( + builder.ForPersistentVolumeClaim("ns-1", "pvc-1"). + ObjectMeta( + builder.WithAnnotations("foo", "bar"), + builder.WithLabels("velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1"), + ). + Result(), + ), + }, + }, + { + name: "when a PV with a reclaim policy of retain has no snapshot and does not exist in-cluster, it gets restored, without its claim ref", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems("persistentvolumes", + builder.ForPersistentVolume("pv-1").ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain).ClaimRef("ns-1", "pvc-1").Result(), + ). + done(), + apiResources: []*test.APIResource{ + test.PVs(), + test.PVCs(), + }, + want: []*test.APIResource{ + test.PVs( + builder.ForPersistentVolume("pv-1"). + ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain). + ObjectMeta( + builder.WithLabels("velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1"), + ). + Result(), + ), + }, + }, + { + name: "when a PV with a reclaim policy of delete has a snapshot and does not exist in-cluster, the snapshot and PV are restored", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems("persistentvolumes", + builder.ForPersistentVolume("pv-1").ReclaimPolicy(corev1api.PersistentVolumeReclaimDelete).AWSEBSVolumeID("old-volume").Result(), + ). + done(), + apiResources: []*test.APIResource{ + test.PVs(), + test.PVCs(), + }, + volumeSnapshots: []*volume.Snapshot{ + { + Spec: volume.SnapshotSpec{ + BackupName: "backup-1", + Location: "default", + PersistentVolumeName: "pv-1", + }, + Status: volume.SnapshotStatus{ + Phase: volume.SnapshotPhaseCompleted, + ProviderSnapshotID: "snapshot-1", + }, }, - restore: &api.Restore{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: api.DefaultNamespace, - Name: "my-restore", + }, + volumeSnapshotLocations: []*velerov1api.VolumeSnapshotLocation{ + builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "default").Provider("provider-1").Result(), + }, + volumeSnapshotterGetter: map[string]velero.VolumeSnapshotter{ + "provider-1": &volumeSnapshotter{ + snapshotVolumes: map[string]string{"snapshot-1": "new-volume"}, + }, + }, + want: []*test.APIResource{ + test.PVs( + builder.ForPersistentVolume("pv-1"). + ReclaimPolicy(corev1api.PersistentVolumeReclaimDelete). + AWSEBSVolumeID("new-volume"). + ObjectMeta( + builder.WithLabels("velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1"), + ). + Result(), + ), + }, + }, + { + name: "when a PV with a reclaim policy of retain has a snapshot and does not exist in-cluster, the snapshot and PV are restored", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems("persistentvolumes", + builder.ForPersistentVolume("pv-1"). + ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain). + AWSEBSVolumeID("old-volume"). + Result(), + ). 
+ done(), + apiResources: []*test.APIResource{ + test.PVs(), + test.PVCs(), + }, + volumeSnapshots: []*volume.Snapshot{ + { + Spec: volume.SnapshotSpec{ + BackupName: "backup-1", + Location: "default", + PersistentVolumeName: "pv-1", + }, + Status: volume.SnapshotStatus{ + Phase: volume.SnapshotPhaseCompleted, + ProviderSnapshotID: "snapshot-1", }, }, - backup: backup, - log: velerotest.NewLogger(), - pvsToProvision: sets.NewString(), - pvRestorer: pvRestorer, - namespaceClient: nsClient, - applicableActions: make(map[schema.GroupResource][]resolvedAction), - resourceClients: make(map[resourceClientKey]pkgclient.Dynamic), - restoredItems: make(map[velero.ResourceIdentifier]struct{}), - } - - if test.haveSnapshot { - ctx.volumeSnapshots = append(ctx.volumeSnapshots, &volume.Snapshot{ + }, + volumeSnapshotLocations: []*velerov1api.VolumeSnapshotLocation{ + builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "default").Provider("provider-1").Result(), + }, + volumeSnapshotterGetter: map[string]velero.VolumeSnapshotter{ + "provider-1": &volumeSnapshotter{ + snapshotVolumes: map[string]string{"snapshot-1": "new-volume"}, + }, + }, + want: []*test.APIResource{ + test.PVs( + builder.ForPersistentVolume("pv-1"). + ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain). + AWSEBSVolumeID("new-volume"). + ObjectMeta( + builder.WithLabels("velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1"), + ). + Result(), + ), + }, + }, + { + name: "when a PV with a reclaim policy of delete has a snapshot and exists in-cluster, neither the snapshot nor the PV are restored", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems("persistentvolumes", + builder.ForPersistentVolume("pv-1"). + ReclaimPolicy(corev1api.PersistentVolumeReclaimDelete). + AWSEBSVolumeID("old-volume"). + Result(), + ). + done(), + apiResources: []*test.APIResource{ + test.PVs( + builder.ForPersistentVolume("pv-1"). + ReclaimPolicy(corev1api.PersistentVolumeReclaimDelete). + AWSEBSVolumeID("old-volume"). + Result(), + ), + test.PVCs(), + }, + volumeSnapshots: []*volume.Snapshot{ + { Spec: volume.SnapshotSpec{ - PersistentVolumeName: "pvc-6a74b5af-78a5-11e8-a0d8-e2ad1e9734ce", + BackupName: "backup-1", + Location: "default", + PersistentVolumeName: "pv-1", }, Status: volume.SnapshotStatus{ - ProviderSnapshotID: "snap", + Phase: volume.SnapshotPhaseCompleted, + ProviderSnapshotID: "snapshot-1", }, - }) - } - - unstructuredPVMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(pvObj) - require.NoError(t, err) - unstructuredPV := &unstructured.Unstructured{Object: unstructuredPVMap} - - if test.expectPVFound { - // Copy the PV so that later modifcations don't affect what's returned by our faked calls. 
- inClusterPV := unstructuredPV.DeepCopy() - pvClient.On("Get", inClusterPV.GetName(), metav1.GetOptions{}).Return(inClusterPV, nil) - pvClient.On("Create", mock.Anything).Return(inClusterPV, k8serrors.NewAlreadyExists(kuberesource.PersistentVolumes, inClusterPV.GetName())) - inClusterPVC := unstructuredPVC.DeepCopy() - pvcClient.On("Get", pvcObj.Name, mock.Anything).Return(inClusterPVC, nil) - } + }, + }, + volumeSnapshotLocations: []*velerov1api.VolumeSnapshotLocation{ + builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "default").Provider("provider-1").Result(), + }, + volumeSnapshotterGetter: map[string]velero.VolumeSnapshotter{ + // the volume snapshotter fake is not configured with any snapshotID -> volumeID + // mappings as a way to verify that the snapshot is not restored, since if it were + // restored, we'd get an error of "snapshot not found". + "provider-1": &volumeSnapshotter{}, + }, + want: []*test.APIResource{ + test.PVs( + builder.ForPersistentVolume("pv-1"). + ReclaimPolicy(corev1api.PersistentVolumeReclaimDelete). + AWSEBSVolumeID("old-volume"). + Result(), + ), + }, + }, + { + name: "when a PV with a reclaim policy of retain has a snapshot and exists in-cluster, neither the snapshot nor the PV are restored", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems("persistentvolumes", + builder.ForPersistentVolume("pv-1"). + ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain). + AWSEBSVolumeID("old-volume"). + Result(), + ). + done(), + apiResources: []*test.APIResource{ + test.PVs( + builder.ForPersistentVolume("pv-1"). + ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain). + AWSEBSVolumeID("old-volume"). + Result(), + ), + test.PVCs(), + }, + volumeSnapshots: []*volume.Snapshot{ + { + Spec: volume.SnapshotSpec{ + BackupName: "backup-1", + Location: "default", + PersistentVolumeName: "pv-1", + }, + Status: volume.SnapshotStatus{ + Phase: volume.SnapshotPhaseCompleted, + ProviderSnapshotID: "snapshot-1", + }, + }, + }, + volumeSnapshotLocations: []*velerov1api.VolumeSnapshotLocation{ + builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "default").Provider("provider-1").Result(), + }, + volumeSnapshotterGetter: map[string]velero.VolumeSnapshotter{ + // the volume snapshotter fake is not configured with any snapshotID -> volumeID + // mappings as a way to verify that the snapshot is not restored, since if it were + // restored, we'd get an error of "snapshot not found". + "provider-1": &volumeSnapshotter{}, + }, + want: []*test.APIResource{ + test.PVs( + builder.ForPersistentVolume("pv-1"). + ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain). + AWSEBSVolumeID("old-volume"). + Result(), + ), + }, + }, + { + name: "when a PV with a snapshot is used by a PVC in a namespace that's being remapped, and the original PV exists in-cluster, the PV is renamed", + restore: defaultRestore().NamespaceMappings("source-ns", "target-ns").Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems( + "persistentvolumes", + builder.ForPersistentVolume("source-pv").AWSEBSVolumeID("source-volume").ClaimRef("source-ns", "pvc-1").Result(), + ). + addItems( + "persistentvolumeclaims", + builder.ForPersistentVolumeClaim("source-ns", "pvc-1").VolumeName("source-pv").Result(), + ). 
+ done(), + apiResources: []*test.APIResource{ + test.PVs( + builder.ForPersistentVolume("source-pv").AWSEBSVolumeID("source-volume").ClaimRef("source-ns", "pvc-1").Result(), + ), + test.PVCs(), + }, + volumeSnapshots: []*volume.Snapshot{ + { + Spec: volume.SnapshotSpec{ + BackupName: "backup-1", + Location: "default", + PersistentVolumeName: "source-pv", + }, + Status: volume.SnapshotStatus{ + Phase: volume.SnapshotPhaseCompleted, + ProviderSnapshotID: "snapshot-1", + }, + }, + }, + volumeSnapshotLocations: []*velerov1api.VolumeSnapshotLocation{ + builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "default").Provider("provider-1").Result(), + }, + volumeSnapshotterGetter: map[string]velero.VolumeSnapshotter{ + "provider-1": &volumeSnapshotter{ + snapshotVolumes: map[string]string{"snapshot-1": "new-volume"}, + }, + }, + want: []*test.APIResource{ + test.PVs( + builder.ForPersistentVolume("source-pv").AWSEBSVolumeID("source-volume").ClaimRef("source-ns", "pvc-1").Result(), + // note that the renamed PV is not expected to have a claimRef in this test; that would be + // added after creation by the Kubernetes PV/PVC controller when it does a bind. + builder.ForPersistentVolume("renamed-source-pv"). + ObjectMeta( + builder.WithAnnotations("velero.io/original-pv-name", "source-pv"), + builder.WithLabels("velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1"), + ). + AWSEBSVolumeID("new-volume"). + Result(), + ), + test.PVCs( + builder.ForPersistentVolumeClaim("target-ns", "pvc-1"). + ObjectMeta( + builder.WithLabels("velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1"), + ). + VolumeName("renamed-source-pv"). + Result(), + ), + }, + }, + { + name: "when a PV with a snapshot is used by a PVC in a namespace that's being remapped, and the original PV does not exist in-cluster, the PV is not renamed", + restore: defaultRestore().NamespaceMappings("source-ns", "target-ns").Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems( + "persistentvolumes", + builder.ForPersistentVolume("source-pv").AWSEBSVolumeID("source-volume").ClaimRef("source-ns", "pvc-1").Result(), + ). + addItems( + "persistentvolumeclaims", + builder.ForPersistentVolumeClaim("source-ns", "pvc-1").VolumeName("source-pv").Result(), + ). + done(), + apiResources: []*test.APIResource{ + test.PVs(), + test.PVCs(), + }, + volumeSnapshots: []*volume.Snapshot{ + { + Spec: volume.SnapshotSpec{ + BackupName: "backup-1", + Location: "default", + PersistentVolumeName: "source-pv", + }, + Status: volume.SnapshotStatus{ + Phase: volume.SnapshotPhaseCompleted, + ProviderSnapshotID: "snapshot-1", + }, + }, + }, + volumeSnapshotLocations: []*velerov1api.VolumeSnapshotLocation{ + builder.ForVolumeSnapshotLocation(velerov1api.DefaultNamespace, "default").Provider("provider-1").Result(), + }, + volumeSnapshotterGetter: map[string]velero.VolumeSnapshotter{ + "provider-1": &volumeSnapshotter{ + snapshotVolumes: map[string]string{"snapshot-1": "new-volume"}, + }, + }, + want: []*test.APIResource{ + test.PVs( + builder.ForPersistentVolume("source-pv"). + ObjectMeta( + builder.WithLabels("velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1"), + ). + AWSEBSVolumeID("new-volume"). + Result(), + ), + test.PVCs( + builder.ForPersistentVolumeClaim("target-ns", "pvc-1"). + ObjectMeta( + builder.WithLabels("velero.io/backup-name", "backup-1", "velero.io/restore-name", "restore-1"), + ). + VolumeName("source-pv"). 
+ Result(), + ), + }, + }, + { + name: "when a PV with a reclaim policy of retain has a snapshot and exists in-cluster, neither the snapshot nor the PV are restored", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + tarball: newTarWriter(t). + addItems("persistentvolumes", + builder.ForPersistentVolume("pv-1"). + ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain). + AWSEBSVolumeID("old-volume"). + Result(), + ). + done(), + apiResources: []*test.APIResource{ + test.PVs( + builder.ForPersistentVolume("pv-1"). + ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain). + AWSEBSVolumeID("old-volume"). + Result(), + ), + test.PVCs(), + }, + volumeSnapshots: []*volume.Snapshot{ + { + Spec: volume.SnapshotSpec{ + BackupName: "backup-1", + Location: "default", + PersistentVolumeName: "pv-1", + }, + Status: volume.SnapshotStatus{ + Phase: volume.SnapshotPhaseCompleted, + ProviderSnapshotID: "snapshot-1", + }, + }, + }, + volumeSnapshotLocations: []*velerov1api.VolumeSnapshotLocation{ + { + ObjectMeta: metav1.ObjectMeta{ + Namespace: velerov1api.DefaultNamespace, + Name: "default", + }, + Spec: velerov1api.VolumeSnapshotLocationSpec{ + Provider: "provider-1", + }, + }, + }, + volumeSnapshotterGetter: map[string]velero.VolumeSnapshotter{ + // the volume snapshotter fake is not configured with any snapshotID -> volumeID + // mappings as a way to verify that the snapshot is not restored, since if it were + // restored, we'd get an error of "snapshot not found". + "provider-1": &volumeSnapshotter{}, + }, - // Only set up the client expectation if the test has the proper prerequisites - if test.haveSnapshot || test.reclaimPolicy != "Delete" { - pvClient.On("Get", unstructuredPV.GetName(), metav1.GetOptions{}).Return(&unstructured.Unstructured{}, k8serrors.NewNotFound(schema.GroupResource{Resource: "persistentvolumes"}, unstructuredPV.GetName())) - } + want: []*test.APIResource{ + test.PVs( + builder.ForPersistentVolume("pv-1"). + ReclaimPolicy(corev1api.PersistentVolumeReclaimRetain). + AWSEBSVolumeID("old-volume"). 
+ Result(), + ), + }, + }, + } - pvToRestore := unstructuredPV.DeepCopy() - restoredPV := unstructuredPV.DeepCopy() + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + h := newHarness(t) + h.restorer.resourcePriorities = []string{"persistentvolumes", "persistentvolumeclaims"} + h.restorer.pvRenamer = func(oldName string) string { return "renamed-" + oldName } - if test.expectPVCreation { - // just to ensure we have the data flowing correctly - restoredPV.Object["foo"] = "bar" - pvRestorer.On("executePVAction", pvToRestore).Return(restoredPV, nil) + // set up the VolumeSnapshotLocation informer/lister and add test data to it + vslInformer := velerov1informers.NewSharedInformerFactory(h.VeleroClient, 0).Velero().V1().VolumeSnapshotLocations() + for _, vsl := range tc.volumeSnapshotLocations { + require.NoError(t, vslInformer.Informer().GetStore().Add(vsl)) } - resetMetadataAndStatus(unstructuredPV) - addRestoreLabels(unstructuredPV, ctx.restore.Name, ctx.restore.Spec.BackupName) - unstructuredPV.Object["foo"] = "bar" - - if test.expectPVCreation { - createdPV := unstructuredPV.DeepCopy() - pvClient.On("Create", unstructuredPV).Return(createdPV, nil) + for _, r := range tc.apiResources { + h.addItems(t, r) } - // Restore PV - warnings, errors := ctx.restoreResource("persistentvolumes", "", "foo/resources/persistentvolumes/cluster/") + // Collect the IDs of all of the wanted resources so we can ensure the + // exact set exists in the API after restore. + wantIDs := make(map[*test.APIResource][]string) + for i, resource := range tc.want { + wantIDs[tc.want[i]] = []string{} - assert.Empty(t, warnings.Velero) - assert.Empty(t, warnings.Namespaces) - assert.Equal(t, Result{}, errors) - assert.Empty(t, warnings.Cluster) - - // Prep PVC restore - // Handle expectations - if !test.expectPVCVolumeName { - pvcObj.Spec.VolumeName = "" - } - for _, key := range test.expectedPVCAnnotationsMissing.List() { - delete(pvcObj.Annotations, key) + for _, item := range resource.Items { + wantIDs[tc.want[i]] = append(wantIDs[tc.want[i]], fmt.Sprintf("%s/%s", item.GetNamespace(), item.GetName())) + } } - // Recreate the unstructured PVC since the object was edited. 
- unstructuredPVCMap, err = runtime.DefaultUnstructuredConverter.ToUnstructured(pvcObj) - require.NoError(t, err) - unstructuredPVC = &unstructured.Unstructured{Object: unstructuredPVCMap} - - resetMetadataAndStatus(unstructuredPVC) - addRestoreLabels(unstructuredPVC, ctx.restore.Name, ctx.restore.Spec.BackupName) - - createdPVC := unstructuredPVC.DeepCopy() - // just to ensure we have the data flowing correctly - createdPVC.Object["foo"] = "bar" - - pvcClient.On("Create", unstructuredPVC).Return(createdPVC, nil) - - // Restore PVC - warnings, errors = ctx.restoreResource("persistentvolumeclaims", "default", "foo/resources/persistentvolumeclaims/default/") - - assert.Empty(t, warnings.Velero) - assert.Empty(t, warnings.Cluster) - assert.Empty(t, warnings.Namespaces) - assert.Equal(t, Result{}, errors) + data := Request{ + Log: h.log, + Restore: tc.restore, + Backup: tc.backup, + VolumeSnapshots: tc.volumeSnapshots, + BackupReader: tc.tarball, + } + warnings, errs := h.restorer.Restore( + data, + nil, // actions + vslInformer.Lister(), + tc.volumeSnapshotterGetter, + ) + + assertEmptyResults(t, warnings, errs) + assertAPIContents(t, h, wantIDs) + assertRestoredItems(t, h, tc.want) }) } } -type mockPVRestorer struct { - mock.Mock +type fakeResticRestorerFactory struct { + restorer *resticmocks.Restorer } -func (r *mockPVRestorer) executePVAction(obj *unstructured.Unstructured) (*unstructured.Unstructured, error) { - args := r.Called(obj) - return args.Get(0).(*unstructured.Unstructured), args.Error(1) +func (f *fakeResticRestorerFactory) NewRestorer(ctx.Context, *velerov1api.Restore) (restic.Restorer, error) { + return f.restorer, nil } -type mockWatch struct { - mock.Mock -} +// TestRestoreWithRestic verifies that a call to RestorePodVolumes was made as and when +// expected for the given pods, by using a mock for the restic restorer. +func TestRestoreWithRestic(t *testing.T) { + tests := []struct { + name string + restore *velerov1api.Restore + backup *velerov1api.Backup + apiResources []*test.APIResource + podVolumeBackups []*velerov1api.PodVolumeBackup + podWithPVBs, podWithoutPVBs []*corev1api.Pod + want map[*test.APIResource][]string + }{ + { + name: "a pod that exists in given backup and contains associated PVBs should have RestorePodVolumes called", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + apiResources: []*test.APIResource{test.Pods()}, + podVolumeBackups: []*velerov1api.PodVolumeBackup{ + builder.ForPodVolumeBackup("velero", "pvb-1").PodName("pod-1").Result(), + builder.ForPodVolumeBackup("velero", "pvb-2").PodName("pod-2").Result(), + builder.ForPodVolumeBackup("velero", "pvb-3").PodName("pod-4").Result(), + }, + podWithPVBs: []*corev1api.Pod{ + builder.ForPod("ns-1", "pod-2"). + Result(), + builder.ForPod("ns-2", "pod-4"). + Result(), + }, + podWithoutPVBs: []*corev1api.Pod{ + builder.ForPod("ns-2", "pod-3").
+ Result(), + }, + want: map[*test.APIResource][]string{ + test.Pods(): {"ns-1/pod-2", "ns-2/pod-3", "ns-2/pod-4"}, + }, + }, + { + name: "a pod that exists in given backup but does not contain associated PVBs should not have RestorePodVolumes called", + restore: defaultRestore().Result(), + backup: defaultBackup().Result(), + apiResources: []*test.APIResource{test.Pods()}, + podVolumeBackups: []*velerov1api.PodVolumeBackup{ + builder.ForPodVolumeBackup("velero", "pvb-1").PodName("pod-1").Result(), + builder.ForPodVolumeBackup("velero", "pvb-2").PodName("pod-2").Result(), + }, + podWithPVBs: []*corev1api.Pod{}, + podWithoutPVBs: []*corev1api.Pod{ + builder.ForPod("ns-1", "pod-3"). + Result(), + builder.ForPod("ns-2", "pod-4"). + Result(), + }, + want: map[*test.APIResource][]string{ + test.Pods(): {"ns-1/pod-3", "ns-2/pod-4"}, + }, + }, + } -func (w *mockWatch) Stop() { - w.Called() -} + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + h := newHarness(t) + restorer := new(resticmocks.Restorer) + defer restorer.AssertExpectations(t) + h.restorer.resticRestorerFactory = &fakeResticRestorerFactory{ + restorer: restorer, + } -func (w *mockWatch) ResultChan() <-chan watch.Event { - args := w.Called() - return args.Get(0).(chan watch.Event) -} + // needed only to indicate resource types that can be restored, in this case, pods + for _, resource := range tc.apiResources { + h.addItems(t, resource) + } + + tarball := newTarWriter(t) + + // these backed up pods don't have any PVBs associated with them, so a call to RestorePodVolumes is not expected to be made for them + for _, pod := range tc.podWithoutPVBs { + tarball.addItems("pods", pod) + } + + // these backed up pods have PVBs associated with them, so a call to RestorePodVolumes will be made for each of them + for _, pod := range tc.podWithPVBs { + tarball.addItems("pods", pod) + + // the restore process adds these labels before restoring, so we must add them here too, otherwise they won't match + pod.Labels = map[string]string{"velero.io/backup-name": tc.backup.Name, "velero.io/restore-name": tc.restore.Name} + expectedArgs := restic.RestoreData{ + Restore: tc.restore, + Pod: pod, + PodVolumeBackups: tc.podVolumeBackups, + SourceNamespace: pod.Namespace, + BackupLocation: "", + } + restorer. + On("RestorePodVolumes", expectedArgs).
+ Return(nil) + } -type fakeWatch struct{} + data := Request{ + Log: h.log, + Restore: tc.restore, + Backup: tc.backup, + PodVolumeBackups: tc.podVolumeBackups, + BackupReader: tarball.done(), + } -func (w *fakeWatch) Stop() {} + warnings, errs := h.restorer.Restore( + data, + nil, // actions + nil, // snapshot location lister + nil, // volume snapshotter getter + ) -func (w *fakeWatch) ResultChan() <-chan watch.Event { - return make(chan watch.Event) + assertEmptyResults(t, warnings, errs) + assertAPIContents(t, h, tc.want) + }) + } } -func TestHasControllerOwner(t *testing.T) { +func TestPrioritizeResources(t *testing.T) { tests := []struct { - name string - object map[string]interface{} - expectOwner bool + name string + apiResources map[string][]string + priorities []string + includes []string + excludes []string + expected []string }{ { - name: "missing metadata", - object: map[string]interface{}{}, - }, - { - name: "missing ownerReferences", - object: map[string]interface{}{ - "metadata": map[string]interface{}{}, - }, - expectOwner: false, - }, - { - name: "have ownerReferences, no controller fields", - object: map[string]interface{}{ - "metadata": map[string]interface{}{ - "ownerReferences": []interface{}{ - map[string]interface{}{"foo": "bar"}, - }, - }, + name: "priorities & ordering are correctly applied", + apiResources: map[string][]string{ + "v1": {"aaa", "bbb", "configmaps", "ddd", "namespaces", "ooo", "pods", "sss"}, }, - expectOwner: false, + priorities: []string{"namespaces", "configmaps", "pods"}, + includes: []string{"*"}, + expected: []string{"namespaces", "configmaps", "pods", "aaa", "bbb", "ddd", "ooo", "sss"}, }, { - name: "have ownerReferences, controller=false", - object: map[string]interface{}{ - "metadata": map[string]interface{}{ - "ownerReferences": []interface{}{ - map[string]interface{}{"controller": false}, - }, - }, + name: "includes are correctly applied", + apiResources: map[string][]string{ + "v1": {"aaa", "bbb", "configmaps", "ddd", "namespaces", "ooo", "pods", "sss"}, }, - expectOwner: false, + priorities: []string{"namespaces", "configmaps", "pods"}, + includes: []string{"namespaces", "aaa", "sss"}, + expected: []string{"namespaces", "aaa", "sss"}, }, { - name: "have ownerReferences, controller=true", - object: map[string]interface{}{ - "metadata": map[string]interface{}{ - "ownerReferences": []interface{}{ - map[string]interface{}{"controller": false}, - map[string]interface{}{"controller": false}, - map[string]interface{}{"controller": true}, - }, - }, + name: "excludes are correctly applied", + apiResources: map[string][]string{ + "v1": {"aaa", "bbb", "configmaps", "ddd", "namespaces", "ooo", "pods", "sss"}, }, - expectOwner: true, + priorities: []string{"namespaces", "configmaps", "pods"}, + includes: []string{"*"}, + excludes: []string{"ooo", "pods"}, + expected: []string{"namespaces", "configmaps", "aaa", "bbb", "ddd", "sss"}, }, } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - u := &unstructured.Unstructured{Object: test.object} - hasOwner := hasControllerOwner(u.GetOwnerReferences()) - assert.Equal(t, test.expectOwner, hasOwner) + logger := testutil.NewLogger() + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + discoveryClient := &test.DiscoveryClient{ + FakeDiscovery: kubefake.NewSimpleClientset().Discovery().(*discoveryfake.FakeDiscovery), + } + + helper, err := discovery.NewHelper(discoveryClient, logger) + require.NoError(t, err) + + // add all the test case's API resources to the discovery 
client + for gvString, resources := range tc.apiResources { + gv, err := schema.ParseGroupVersion(gvString) + require.NoError(t, err) + + for _, resource := range resources { + discoveryClient.WithAPIResource(&test.APIResource{ + Group: gv.Group, + Version: gv.Version, + Name: resource, + }) + } + } + + require.NoError(t, helper.Refresh()) + + includesExcludes := collections.NewIncludesExcludes().Includes(tc.includes...).Excludes(tc.excludes...) + + result, err := prioritizeResources(helper, tc.priorities, includesExcludes, logger) + require.NoError(t, err) + + require.Equal(t, len(tc.expected), len(result)) + + for i := range result { + if e, a := tc.expected[i], result[i].Resource; e != a { + t.Errorf("index %d, expected %s, got %s", i, e, a) + } + } }) } } @@ -1294,7 +2450,7 @@ func TestResetMetadataAndStatus(t *testing.T) { }{ { name: "no metadata causes error", - obj: NewTestUnstructured().Unstructured, + obj: &unstructured.Unstructured{}, expectedErr: true, }, { @@ -1381,7 +2537,7 @@ func TestIsCompleted(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - u := velerotest.UnstructuredOrDie(test.content) + u := testutil.UnstructuredOrDie(test.content) backup, err := isCompleted(u, test.groupResource) if assert.Equal(t, test.expectedErr, err != nil) { @@ -1391,241 +2547,6 @@ func TestIsCompleted(t *testing.T) { } } -func TestShouldRestore(t *testing.T) { - pv := `apiVersion: v1 -kind: PersistentVolume -metadata: - annotations: - EXPORT_block: "\nEXPORT\n{\n\tExport_Id = 1;\n\tPath = /export/pvc-6a74b5af-78a5-11e8-a0d8-e2ad1e9734ce;\n\tPseudo - = /export/pvc-6a74b5af-78a5-11e8-a0d8-e2ad1e9734ce;\n\tAccess_Type = RW;\n\tSquash - = no_root_squash;\n\tSecType = sys;\n\tFilesystem_id = 1.1;\n\tFSAL {\n\t\tName - = VFS;\n\t}\n}\n" - Export_Id: "1" - Project_Id: "0" - Project_block: "" - Provisioner_Id: 5fdf4025-78a5-11e8-9ece-0242ac110004 - kubernetes.io/createdby: nfs-dynamic-provisioner - pv.kubernetes.io/provisioned-by: example.com/nfs - volume.beta.kubernetes.io/mount-options: vers=4.1 - creationTimestamp: 2018-06-25T18:27:35Z - finalizers: - - kubernetes.io/pv-protection - name: pvc-6a74b5af-78a5-11e8-a0d8-e2ad1e9734ce - resourceVersion: "2576" - selfLink: /api/v1/persistentvolumes/pvc-6a74b5af-78a5-11e8-a0d8-e2ad1e9734ce - uid: 6ecd24e4-78a5-11e8-a0d8-e2ad1e9734ce -spec: - accessModes: - - ReadWriteMany - capacity: - storage: 1Mi - claimRef: - apiVersion: v1 - kind: PersistentVolumeClaim - name: nfs - namespace: default - resourceVersion: "2565" - uid: 6a74b5af-78a5-11e8-a0d8-e2ad1e9734ce - nfs: - path: /export/pvc-6a74b5af-78a5-11e8-a0d8-e2ad1e9734ce - server: 10.103.235.254 - storageClassName: example-nfs -status: - phase: Bound` - - pvc := `apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - annotations: - control-plane.alpha.kubernetes.io/leader: '{"holderIdentity":"5fdf5572-78a5-11e8-9ece-0242ac110004","leaseDurationSeconds":15,"acquireTime":"2018-06-25T18:27:35Z","renewTime":"2018-06-25T18:27:37Z","leaderTransitions":0}' - kubectl.kubernetes.io/last-applied-configuration: | - {"apiVersion":"v1","kind":"PersistentVolumeClaim","metadata":{"annotations":{},"name":"nfs","namespace":"default"},"spec":{"accessModes":["ReadWriteMany"],"resources":{"requests":{"storage":"1Mi"}},"storageClassName":"example-nfs"}} - pv.kubernetes.io/bind-completed: "yes" - pv.kubernetes.io/bound-by-controller: "yes" - volume.beta.kubernetes.io/storage-provisioner: example.com/nfs - creationTimestamp: 2018-06-25T18:27:28Z - finalizers: - - kubernetes.io/pvc-protection 
- name: nfs - namespace: default - resourceVersion: "2578" - selfLink: /api/v1/namespaces/default/persistentvolumeclaims/nfs - uid: 6a74b5af-78a5-11e8-a0d8-e2ad1e9734ce -spec: - accessModes: - - ReadWriteMany - resources: - requests: - storage: 1Mi - storageClassName: example-nfs - volumeName: pvc-6a74b5af-78a5-11e8-a0d8-e2ad1e9734ce -status: - accessModes: - - ReadWriteMany - capacity: - storage: 1Mi - phase: Bound` - - tests := []struct { - name string - expectNSFound bool - expectPVFound bool - pvPhase string - expectPVCFound bool - expectPVCGet bool - expectPVCDeleting bool - expectNSGet bool - expectNSDeleting bool - nsPhase v1.NamespacePhase - expectedResult bool - }{ - { - name: "pv not found, no associated pvc or namespace", - expectedResult: true, - }, - { - name: "pv found, phase released", - pvPhase: string(v1.VolumeReleased), - expectPVFound: true, - expectedResult: false, - }, - { - name: "pv found, has associated pvc and namespace that's aren't deleting", - expectPVFound: true, - expectPVCGet: true, - expectNSGet: true, - expectPVCFound: true, - expectedResult: false, - }, - { - name: "pv found, has associated pvc that's deleting, don't look up namespace", - expectPVFound: true, - expectPVCGet: true, - expectPVCFound: true, - expectPVCDeleting: true, - expectedResult: false, - }, - { - name: "pv found, has associated pvc that's not deleting, has associated namespace that's terminating", - expectPVFound: true, - expectPVCGet: true, - expectPVCFound: true, - expectNSGet: true, - expectNSFound: true, - nsPhase: v1.NamespaceTerminating, - expectedResult: false, - }, - { - name: "pv found, has associated pvc that's not deleting, has associated namespace that has deletion timestamp", - expectPVFound: true, - expectPVCGet: true, - expectPVCFound: true, - expectNSGet: true, - expectNSFound: true, - expectNSDeleting: true, - expectedResult: false, - }, - { - name: "pv found, associated pvc not found, namespace not queried", - expectPVFound: true, - expectPVCGet: true, - expectedResult: false, - }, - { - name: "pv found, associated pvc found, namespace not found", - expectPVFound: true, - expectPVCGet: true, - expectPVCFound: true, - expectNSGet: true, - expectedResult: false, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - dynamicFactory := &velerotest.FakeDynamicFactory{} - gv := schema.GroupVersion{Group: "", Version: "v1"} - - pvClient := &velerotest.FakeDynamicClient{} - defer pvClient.AssertExpectations(t) - - pvResource := metav1.APIResource{Name: "persistentvolumes", Namespaced: false} - dynamicFactory.On("ClientForGroupVersionResource", gv, pvResource, "").Return(pvClient, nil) - - pvcClient := &velerotest.FakeDynamicClient{} - defer pvcClient.AssertExpectations(t) - - pvcResource := metav1.APIResource{Name: "persistentvolumeclaims", Namespaced: true} - dynamicFactory.On("ClientForGroupVersionResource", gv, pvcResource, "default").Return(pvcClient, nil) - - obj, _, err := scheme.Codecs.UniversalDecoder(v1.SchemeGroupVersion).Decode([]byte(pv), nil, &unstructured.Unstructured{}) - pvObj := obj.(*unstructured.Unstructured) - require.NoError(t, err) - - obj, _, err = scheme.Codecs.UniversalDecoder(v1.SchemeGroupVersion).Decode([]byte(pvc), nil, &unstructured.Unstructured{}) - pvcObj := obj.(*unstructured.Unstructured) - require.NoError(t, err) - - nsClient := &velerotest.FakeNamespaceClient{} - defer nsClient.AssertExpectations(t) - ns := newTestNamespace(pvcObj.GetNamespace()).Namespace - - // Set up test expectations - if test.pvPhase != "" { 
- require.NoError(t, unstructured.SetNestedField(pvObj.Object, test.pvPhase, "status", "phase")) - } - - if test.expectPVFound { - pvClient.On("Get", pvObj.GetName(), metav1.GetOptions{}).Return(pvObj, nil) - } else { - pvClient.On("Get", pvObj.GetName(), metav1.GetOptions{}).Return(&unstructured.Unstructured{}, k8serrors.NewNotFound(schema.GroupResource{Resource: "persistentvolumes"}, pvObj.GetName())) - } - - if test.expectPVCDeleting { - pvcObj.SetDeletionTimestamp(&metav1.Time{Time: time.Now()}) - } - - // the pv needs to be found before moving on to look for pvc/namespace - // however, even if the pv is found, we may be testing the PV's phase and not expecting - // the pvc/namespace to be looked up - if test.expectPVCGet { - if test.expectPVCFound { - pvcClient.On("Get", pvcObj.GetName(), metav1.GetOptions{}).Return(pvcObj, nil) - } else { - pvcClient.On("Get", pvcObj.GetName(), metav1.GetOptions{}).Return(&unstructured.Unstructured{}, k8serrors.NewNotFound(schema.GroupResource{Resource: "persistentvolumeclaims"}, pvcObj.GetName())) - } - } - - if test.nsPhase != "" { - ns.Status.Phase = test.nsPhase - } - - if test.expectNSDeleting { - ns.SetDeletionTimestamp(&metav1.Time{Time: time.Now()}) - } - - if test.expectNSGet { - if test.expectNSFound { - nsClient.On("Get", pvcObj.GetNamespace(), mock.Anything).Return(ns, nil) - } else { - nsClient.On("Get", pvcObj.GetNamespace(), metav1.GetOptions{}).Return(&v1.Namespace{}, k8serrors.NewNotFound(schema.GroupResource{Resource: "namespaces"}, pvcObj.GetNamespace())) - } - } - - ctx := &context{ - dynamicFactory: dynamicFactory, - log: velerotest.NewLogger(), - namespaceClient: nsClient, - resourceTerminatingTimeout: 1 * time.Millisecond, - } - - result, err := ctx.shouldRestore(pvObj.GetName(), pvClient) - - assert.Equal(t, test.expectedResult, result) - }) - } -} - func TestGetItemFilePath(t *testing.T) { res := getItemFilePath("root", "resource", "", "item") assert.Equal(t, "root/resources/resource/cluster/item.json", res) @@ -1634,342 +2555,233 @@ func TestGetItemFilePath(t *testing.T) { assert.Equal(t, "root/resources/resource/namespaces/namespace/item.json", res) } -type testUnstructured struct { - *unstructured.Unstructured -} - -func NewTestUnstructured() *testUnstructured { - obj := &testUnstructured{ - Unstructured: &unstructured.Unstructured{ - Object: make(map[string]interface{}), - }, - } - - return obj -} - -func (obj *testUnstructured) WithAPIVersion(v string) *testUnstructured { - obj.Object["apiVersion"] = v - return obj -} - -func (obj *testUnstructured) WithKind(k string) *testUnstructured { - obj.Object["kind"] = k - return obj -} - -func (obj *testUnstructured) WithMetadata(fields ...string) *testUnstructured { - return obj.withMap("metadata", fields...) -} - -func (obj *testUnstructured) WithSpec(fields ...string) *testUnstructured { - if _, found := obj.Object["spec"]; found { - panic("spec already set - you probably didn't mean to do this twice!") - } - return obj.withMap("spec", fields...) -} - -func (obj *testUnstructured) WithStatus(fields ...string) *testUnstructured { - return obj.withMap("status", fields...) 
-} - -func (obj *testUnstructured) WithMetadataField(field string, value interface{}) *testUnstructured { - return obj.withMapEntry("metadata", field, value) -} - -func (obj *testUnstructured) WithSpecField(field string, value interface{}) *testUnstructured { - return obj.withMapEntry("spec", field, value) -} - -func (obj *testUnstructured) WithStatusField(field string, value interface{}) *testUnstructured { - return obj.withMapEntry("status", field, value) -} +// assertResourceCreationOrder ensures that resources were created in the expected +// order. Any resources *not* in resourcePriorities are required to come *after* all +// resources in resourcePriorities, in any order. +func assertResourceCreationOrder(t *testing.T, resourcePriorities []string, createdResources []resourceID) { + // lastSeen tracks the index in 'resourcePriorities' of the last resource type + // we saw created. Once we've seen a resource in 'resourcePriorities', we should + // never see another instance of a prior resource. + lastSeen := 0 + + // Find the index in 'resourcePriorities' of the resource type for + // the current item, if it exists. This index ('current') *must* + // be greater than or equal to 'lastSeen', which was the last resource + // we saw, since otherwise the current resource would be out of order. By + // initializing current to len(resourcePriorities), we're saying that if the resource + // is not explicitly in resourcePriorities, then it must come *after* + // all resources in resourcePriorities. + for _, r := range createdResources { + current := len(resourcePriorities) + for i, item := range resourcePriorities { + if item == r.groupResource { + current = i + break + } + } -func (obj *testUnstructured) WithAnnotations(fields ...string) *testUnstructured { - vals := map[string]string{} - for _, field := range fields { - vals[field] = "foo" + // the index of the current resource must be the same as or greater than the index of + // the last resource we saw, for the restored order to be correct. + assert.True(t, current >= lastSeen, "%s was restored out of order", r.groupResource) + lastSeen = current } - - return obj.WithAnnotationValues(vals) } -func (obj *testUnstructured) WithAnnotationValues(fieldVals map[string]string) *testUnstructured { - annotations := make(map[string]interface{}) - for field, val := range fieldVals { - annotations[field] = val - } - - obj = obj.WithMetadataField("annotations", annotations) - - return obj +type resourceID struct { + groupResource string + nsAndName string } -func (obj *testUnstructured) WithNamespace(ns string) *testUnstructured { - return obj.WithMetadataField("namespace", ns) +// createRecorder provides a Reactor that can be used to capture +// resources created in a fake client.
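+// The recorded resourceIDs can then be checked with assertResourceCreationOrder above.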
+type createRecorder struct { + t *testing.T + resources []resourceID } -func (obj *testUnstructured) WithName(name string) *testUnstructured { - return obj.WithMetadataField("name", name) -} +func (cr *createRecorder) reactor() func(kubetesting.Action) (bool, runtime.Object, error) { + return func(action kubetesting.Action) (bool, runtime.Object, error) { + createAction, ok := action.(kubetesting.CreateAction) + if !ok { + return false, nil, nil + } -func (obj *testUnstructured) ToJSON() []byte { - bytes, err := json.Marshal(obj.Object) - if err != nil { - panic(err) - } - return bytes -} + accessor, err := meta.Accessor(createAction.GetObject()) + assert.NoError(cr.t, err) -func (obj *testUnstructured) withMap(name string, fields ...string) *testUnstructured { - m := make(map[string]interface{}) - obj.Object[name] = m + cr.resources = append(cr.resources, resourceID{ + groupResource: action.GetResource().GroupResource().String(), + nsAndName: fmt.Sprintf("%s/%s", action.GetNamespace(), accessor.GetName()), + }) - for _, field := range fields { - m[field] = "foo" + return false, nil, nil } - - return obj } -func (obj *testUnstructured) withMapEntry(mapName, field string, value interface{}) *testUnstructured { - var m map[string]interface{} - - if res, ok := obj.Unstructured.Object[mapName]; !ok { - m = make(map[string]interface{}) - obj.Unstructured.Object[mapName] = m - } else { - m = res.(map[string]interface{}) - } - - m[field] = value - - return obj +func defaultRestore() *builder.RestoreBuilder { + return builder.ForRestore(velerov1api.DefaultNamespace, "restore-1").Backup("backup-1") } -func toUnstructured(objs ...runtime.Object) []unstructured.Unstructured { - res := make([]unstructured.Unstructured, 0, len(objs)) +// assertAPIContents asserts that the dynamic client on the provided harness contains +// all of the items specified in 'want' (a map from an APIResource definition to a slice +// of resource identifiers, formatted as /). 
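+// Each identifier takes the form namespace/name, with an empty namespace for cluster-scoped items.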
+func assertAPIContents(t *testing.T, h *harness, want map[*test.APIResource][]string) { + t.Helper() - for _, obj := range objs { - jsonObj, err := json.Marshal(obj) + for r, want := range want { + res, err := h.DynamicClient.Resource(r.GVR()).List(metav1.ListOptions{}) + assert.NoError(t, err) if err != nil { - panic(err) + continue } - var unstructuredObj unstructured.Unstructured - - if err := json.Unmarshal(jsonObj, &unstructuredObj); err != nil { - panic(err) + got := sets.NewString() + for _, item := range res.Items { + got.Insert(fmt.Sprintf("%s/%s", item.GetNamespace(), item.GetName())) } - metadata := unstructuredObj.Object["metadata"].(map[string]interface{}) - - delete(metadata, "creationTimestamp") - - delete(unstructuredObj.Object, "status") - - res = append(res, unstructuredObj) + assert.Equal(t, sets.NewString(want...), got) } - - return res } -type testServiceAccount struct { - *v1.ServiceAccount -} +func assertEmptyResults(t *testing.T, res ...Result) { + t.Helper() -func newTestServiceAccount() *testServiceAccount { - return &testServiceAccount{ - ServiceAccount: &v1.ServiceAccount{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "ServiceAccount", - }, - ObjectMeta: metav1.ObjectMeta{ - Namespace: "ns-1", - Name: "test-sa", - CreationTimestamp: metav1.Time{Time: time.Now()}, - }, - }, + for _, r := range res { + assert.Empty(t, r.Cluster) + assert.Empty(t, r.Namespaces) + assert.Empty(t, r.Velero) } } -func (sa *testServiceAccount) WithImagePullSecret(name string) *testServiceAccount { - secret := v1.LocalObjectReference{Name: name} - sa.ImagePullSecrets = append(sa.ImagePullSecrets, secret) - return sa -} - -func (sa *testServiceAccount) WithSecret(name string) *testServiceAccount { - secret := v1.ObjectReference{Name: name} - sa.Secrets = append(sa.Secrets, secret) - return sa +type tarWriter struct { + t *testing.T + buf *bytes.Buffer + gzw *gzip.Writer + tw *tar.Writer } -func (sa *testServiceAccount) ToJSON() []byte { - bytes, _ := json.Marshal(sa.ServiceAccount) - return bytes -} +func newTarWriter(t *testing.T) *tarWriter { + tw := new(tarWriter) + tw.t = t + tw.buf = new(bytes.Buffer) + tw.gzw = gzip.NewWriter(tw.buf) + tw.tw = tar.NewWriter(tw.gzw) -type testPersistentVolume struct { - *v1.PersistentVolume + return tw } -func newTestPV() *testPersistentVolume { - return &testPersistentVolume{ - PersistentVolume: &v1.PersistentVolume{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "PersistentVolume", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "test-pv", - }, - Status: v1.PersistentVolumeStatus{}, - }, - } -} +func (tw *tarWriter) addItems(groupResource string, items ...metav1.Object) *tarWriter { + tw.t.Helper() -func (pv *testPersistentVolume) ToJSON() []byte { - bytes, _ := json.Marshal(pv.PersistentVolume) - return bytes -} + for _, obj := range items { -type testNamespace struct { - *v1.Namespace -} + var path string + if obj.GetNamespace() == "" { + path = fmt.Sprintf("resources/%s/cluster/%s.json", groupResource, obj.GetName()) + } else { + path = fmt.Sprintf("resources/%s/namespaces/%s/%s.json", groupResource, obj.GetNamespace(), obj.GetName()) + } -func newTestNamespace(name string) *testNamespace { - return &testNamespace{ - Namespace: &v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - }, + tw.add(path, obj) } -} -func (ns *testNamespace) ToJSON() []byte { - bytes, _ := json.Marshal(ns.Namespace) - return bytes + return tw } -type testConfigMap struct { - *v1.ConfigMap -} +func (tw *tarWriter) add(name string, obj 
interface{}) *tarWriter { + tw.t.Helper() -func newTestConfigMap() *testConfigMap { - return newNamedTestConfigMap("cm-1") -} + var data []byte + var err error -func newNamedTestConfigMap(name string) *testConfigMap { - return &testConfigMap{ - ConfigMap: &v1.ConfigMap{ - TypeMeta: metav1.TypeMeta{ - APIVersion: "v1", - Kind: "ConfigMap", - }, - ObjectMeta: metav1.ObjectMeta{ - Namespace: "ns-1", - Name: name, - }, - Data: map[string]string{ - "foo": "bar", - }, - }, + switch obj.(type) { + case runtime.Object: + data, err = encode.Encode(obj.(runtime.Object), "json") + case []byte: + data = obj.([]byte) + default: + data, err = json.Marshal(obj) } -} + require.NoError(tw.t, err) -func (cm *testConfigMap) WithNamespace(name string) *testConfigMap { - cm.Namespace = name - return cm -} - -func (cm *testConfigMap) WithLabels(labels map[string]string) *testConfigMap { - cm.Labels = labels - return cm -} + require.NoError(tw.t, tw.tw.WriteHeader(&tar.Header{ + Name: name, + Size: int64(len(data)), + Typeflag: tar.TypeReg, + Mode: 0755, + ModTime: time.Now(), + })) -func (cm *testConfigMap) WithControllerOwner() *testConfigMap { - t := true - ownerRef := metav1.OwnerReference{ - Controller: &t, - } - cm.ConfigMap.OwnerReferences = append(cm.ConfigMap.OwnerReferences, ownerRef) - return cm -} + _, err = tw.tw.Write(data) + require.NoError(tw.t, err) -func (cm *testConfigMap) ToJSON() []byte { - bytes, _ := json.Marshal(cm.ConfigMap) - return bytes + return tw } -type fakeAction struct { - resource string -} +func (tw *tarWriter) done() *bytes.Buffer { + require.NoError(tw.t, tw.tw.Close()) + require.NoError(tw.t, tw.gzw.Close()) -type fakeVolumeSnapshotterGetter struct { - fakeVolumeSnapshotter *velerotest.FakeVolumeSnapshotter - volumeMap map[velerotest.VolumeBackupInfo]string - volumeID string + return tw.buf } -func (r *fakeVolumeSnapshotterGetter) GetVolumeSnapshotter(provider string) (velero.VolumeSnapshotter, error) { - if r.fakeVolumeSnapshotter == nil { - r.fakeVolumeSnapshotter = &velerotest.FakeVolumeSnapshotter{ - RestorableVolumes: r.volumeMap, - VolumeID: r.volumeID, - } - } - return r.fakeVolumeSnapshotter, nil -} +type harness struct { + *test.APIServer -func newFakeAction(resource string) *fakeAction { - return &fakeAction{resource} + restorer *kubernetesRestorer + log logrus.FieldLogger } -func (r *fakeAction) AppliesTo() (velero.ResourceSelector, error) { - return velero.ResourceSelector{ - IncludedResources: []string{r.resource}, - }, nil -} +func newHarness(t *testing.T) *harness { + t.Helper() -func (r *fakeAction) Execute(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) { - labels, found, err := unstructured.NestedMap(input.Item.UnstructuredContent(), "metadata", "labels") - if err != nil { - return nil, err - } - if !found { - labels = make(map[string]interface{}) - } + apiServer := test.NewAPIServer(t) + log := logrus.StandardLogger() - labels["fake-restorer"] = "foo" + discoveryHelper, err := discovery.NewHelper(apiServer.DiscoveryClient, log) + require.NoError(t, err) - if err := unstructured.SetNestedField(input.Item.UnstructuredContent(), labels, "metadata", "labels"); err != nil { - return nil, err + return &harness{ + APIServer: apiServer, + restorer: &kubernetesRestorer{ + discoveryHelper: discoveryHelper, + dynamicFactory: client.NewDynamicFactory(apiServer.DynamicClient), + namespaceClient: apiServer.KubeClient.CoreV1().Namespaces(), + resourceTerminatingTimeout: time.Minute, + logger: log, + fileSystem: 
testutil.NewFakeFileSystem(), + + // unsupported + resticRestorerFactory: nil, + resticTimeout: 0, + }, + log: log, } +} - unstructuredObj, ok := input.Item.(*unstructured.Unstructured) - if !ok { - return nil, errors.New("Unexpected type") - } +func (h *harness) addItems(t *testing.T, resource *test.APIResource) { + t.Helper() - // want the baseline functionality too - res, err := resetMetadataAndStatus(unstructuredObj) - if err != nil { - return nil, err - } + h.DiscoveryClient.WithAPIResource(resource) + require.NoError(t, h.restorer.discoveryHelper.Refresh()) - return velero.NewRestoreItemActionExecuteOutput(res), nil -} + for _, item := range resource.Items { + obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(item) + require.NoError(t, err) -type fakeNamespaceClient struct { - createdNamespaces []*v1.Namespace + unstructuredObj := &unstructured.Unstructured{Object: obj} - corev1.NamespaceInterface -} + // These fields have non-nil zero values in the unstructured objects. We remove + // them to make comparison easier in our tests. + unstructured.RemoveNestedField(unstructuredObj.Object, "metadata", "creationTimestamp") + unstructured.RemoveNestedField(unstructuredObj.Object, "status") -func (nsc *fakeNamespaceClient) Create(ns *v1.Namespace) (*v1.Namespace, error) { - nsc.createdNamespaces = append(nsc.createdNamespaces, ns) - return ns, nil + if resource.Namespaced { + _, err = h.DynamicClient.Resource(resource.GVR()).Namespace(item.GetNamespace()).Create(unstructuredObj, metav1.CreateOptions{}) + } else { + _, err = h.DynamicClient.Resource(resource.GVR()).Create(unstructuredObj, metav1.CreateOptions{}) + } + require.NoError(t, err) + } } diff --git a/pkg/restore/service_account_action_test.go b/pkg/restore/service_account_action_test.go index e63230b85bb..83e04841f4d 100644 --- a/pkg/restore/service_account_action_test.go +++ b/pkg/restore/service_account_action_test.go @@ -28,7 +28,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "github.com/heptio/velero/pkg/plugin/velero" - "github.com/heptio/velero/pkg/util/test" + "github.com/heptio/velero/pkg/test" ) func TestServiceAccountActionAppliesTo(t *testing.T) { diff --git a/pkg/restore/service_action_test.go b/pkg/restore/service_action_test.go index d4f338c13b5..ee8c8e1305d 100644 --- a/pkg/restore/service_action_test.go +++ b/pkg/restore/service_action_test.go @@ -28,7 +28,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "github.com/heptio/velero/pkg/plugin/velero" - velerotest "github.com/heptio/velero/pkg/util/test" + velerotest "github.com/heptio/velero/pkg/test" ) func svcJSON(ports ...corev1api.ServicePort) string { diff --git a/pkg/serverstatusrequest/builder.go b/pkg/serverstatusrequest/builder.go deleted file mode 100644 index b7d4bebad4b..00000000000 --- a/pkg/serverstatusrequest/builder.go +++ /dev/null @@ -1,81 +0,0 @@ -/* -Copyright 2018 the Velero contributors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package serverstatusrequest - -import ( - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" -) - -type Builder struct { - serverStatusRequest velerov1api.ServerStatusRequest -} - -// NewBuilder returns a Builder for a ServerStatusRequest. -func NewBuilder() *Builder { - return &Builder{ - serverStatusRequest: velerov1api.ServerStatusRequest{ - TypeMeta: metav1.TypeMeta{ - APIVersion: velerov1api.SchemeGroupVersion.String(), - Kind: "ServerStatusRequest", - }, - }, - } -} - -// ServerStatusRequest returns the built ServerStatusRequest API object. -func (b *Builder) ServerStatusRequest() *velerov1api.ServerStatusRequest { - return &b.serverStatusRequest -} - -func (b *Builder) Namespace(namespace string) *Builder { - b.serverStatusRequest.Namespace = namespace - return b -} - -func (b *Builder) Name(name string) *Builder { - b.serverStatusRequest.Name = name - return b -} - -func (b *Builder) GenerateName(name string) *Builder { - b.serverStatusRequest.GenerateName = name - return b -} - -func (b *Builder) Phase(phase velerov1api.ServerStatusRequestPhase) *Builder { - b.serverStatusRequest.Status.Phase = phase - return b -} - -func (b *Builder) ProcessedTimestamp(time time.Time) *Builder { - b.serverStatusRequest.Status.ProcessedTimestamp.Time = time - return b -} - -func (b *Builder) ServerVersion(version string) *Builder { - b.serverStatusRequest.Status.ServerVersion = version - return b -} - -func (b *Builder) Plugins(plugins []velerov1api.PluginInfo) *Builder { - b.serverStatusRequest.Status.Plugins = plugins - return b -} diff --git a/pkg/serverstatusrequest/process_test.go b/pkg/serverstatusrequest/process_test.go index fc6966655a5..b4de2f46454 100644 --- a/pkg/serverstatusrequest/process_test.go +++ b/pkg/serverstatusrequest/process_test.go @@ -29,13 +29,14 @@ import ( "k8s.io/apimachinery/pkg/util/clock" velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/builder" "github.com/heptio/velero/pkg/buildinfo" "github.com/heptio/velero/pkg/generated/clientset/versioned/fake" "github.com/heptio/velero/pkg/plugin/framework" ) -func statusRequestBuilder() *Builder { - return NewBuilder().Namespace(velerov1api.DefaultNamespace).Name("sr-1") +func statusRequestBuilder() *builder.ServerStatusRequestBuilder { + return builder.ForServerStatusRequest(velerov1api.DefaultNamespace, "sr-1") } func TestProcess(t *testing.T) { @@ -56,7 +57,7 @@ func TestProcess(t *testing.T) { }{ { name: "server status request with empty phase gets processed", - req: statusRequestBuilder().ServerStatusRequest(), + req: statusRequestBuilder().Result(), reqPluginLister: &fakePluginLister{ plugins: []framework.PluginIdentifier{ { @@ -75,13 +76,13 @@ func TestProcess(t *testing.T) { Kind: "VolumeSnapshotter", }, }). - ServerStatusRequest(), + Result(), }, { name: "server status request with phase=New gets processed", req: statusRequestBuilder(). Phase(velerov1api.ServerStatusRequestPhaseNew). - ServerStatusRequest(), + Result(), reqPluginLister: &fakePluginLister{ plugins: []framework.PluginIdentifier{ { @@ -108,14 +109,14 @@ func TestProcess(t *testing.T) { Kind: "VolumeSnapshotter", }, }). - ServerStatusRequest(), + Result(), }, { name: "server status request with phase=Processed gets deleted if expired", req: statusRequestBuilder(). Phase(velerov1api.ServerStatusRequestPhaseProcessed). ProcessedTimestamp(now.Add(-61 * time.Second)). 
- ServerStatusRequest(), + Result(), reqPluginLister: &fakePluginLister{ plugins: []framework.PluginIdentifier{ { @@ -131,20 +132,20 @@ func TestProcess(t *testing.T) { req: statusRequestBuilder(). Phase(velerov1api.ServerStatusRequestPhaseProcessed). ProcessedTimestamp(now.Add(-59 * time.Second)). - ServerStatusRequest(), + Result(), expected: statusRequestBuilder(). Phase(velerov1api.ServerStatusRequestPhaseProcessed). ProcessedTimestamp(now.Add(-59 * time.Second)). - ServerStatusRequest(), + Result(), }, { name: "server status request with invalid phase returns an error", req: statusRequestBuilder(). Phase(velerov1api.ServerStatusRequestPhase("an-invalid-phase")). - ServerStatusRequest(), + Result(), expected: statusRequestBuilder(). Phase(velerov1api.ServerStatusRequestPhase("an-invalid-phase")). - ServerStatusRequest(), + Result(), expectedErrMsg: "unexpected ServerStatusRequest phase \"an-invalid-phase\"", }, } diff --git a/pkg/test/api_server.go b/pkg/test/api_server.go new file mode 100644 index 00000000000..90c3a483b05 --- /dev/null +++ b/pkg/test/api_server.go @@ -0,0 +1,57 @@ +/* +Copyright 2019 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import ( + "testing" + + "k8s.io/apimachinery/pkg/runtime" + discoveryfake "k8s.io/client-go/discovery/fake" + dynamicfake "k8s.io/client-go/dynamic/fake" + kubefake "k8s.io/client-go/kubernetes/fake" + + "github.com/heptio/velero/pkg/generated/clientset/versioned/fake" +) + +// APIServer contains in-memory fakes for all of the relevant +// Kubernetes API server clients. +type APIServer struct { + VeleroClient *fake.Clientset + KubeClient *kubefake.Clientset + DynamicClient *dynamicfake.FakeDynamicClient + DiscoveryClient *DiscoveryClient +} + +// NewAPIServer constructs an APIServer with all of its clients +// initialized. +func NewAPIServer(t *testing.T) *APIServer { + t.Helper() + + var ( + veleroClient = fake.NewSimpleClientset() + kubeClient = kubefake.NewSimpleClientset() + dynamicClient = dynamicfake.NewSimpleDynamicClient(runtime.NewScheme()) + discoveryClient = &DiscoveryClient{FakeDiscovery: kubeClient.Discovery().(*discoveryfake.FakeDiscovery)} + ) + + return &APIServer{ + VeleroClient: veleroClient, + KubeClient: kubeClient, + DynamicClient: dynamicClient, + DiscoveryClient: discoveryClient, + } +} diff --git a/pkg/util/test/comparisons.go b/pkg/test/comparisons.go similarity index 100% rename from pkg/util/test/comparisons.go rename to pkg/test/comparisons.go diff --git a/pkg/test/discovery_client.go b/pkg/test/discovery_client.go index 5a5afbfeca5..fe06af635ea 100644 --- a/pkg/test/discovery_client.go +++ b/pkg/test/discovery_client.go @@ -38,10 +38,11 @@ func (c *DiscoveryClient) ServerPreferredResources() ([]*metav1.APIResourceList, // TEST HELPERS // -func (c *DiscoveryClient) WithResource(group, version, resource string, namespaced bool, shortNames ...string) *DiscoveryClient { +// WithAPIResource adds the API resource to the discovery client. 
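+//
+// Registering a resource that is already present in the group/version's list is
+// a no-op. A minimal illustrative usage in a test (hypothetical, not part of
+// this change):
+//
+//	client.WithAPIResource(Pods())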
+func (c *DiscoveryClient) WithAPIResource(resource *APIResource) *DiscoveryClient { gv := metav1.GroupVersion{ - Group: group, - Version: version, + Group: resource.Group, + Version: resource.Version, } var resourceList *metav1.APIResourceList @@ -61,20 +62,20 @@ func (c *DiscoveryClient) WithResource(group, version, resource string, namespac } for _, itm := range resourceList.APIResources { - if itm.Name == resource { + if itm.Name == resource.Name { return c } } resourceList.APIResources = append(resourceList.APIResources, metav1.APIResource{ - Name: resource, - SingularName: strings.TrimSuffix(resource, "s"), - Namespaced: namespaced, - Group: group, - Version: version, - Kind: strings.Title(strings.TrimSuffix(resource, "s")), + Name: resource.Name, + SingularName: strings.TrimSuffix(resource.Name, "s"), + Namespaced: resource.Namespaced, + Group: resource.Group, + Version: resource.Version, + Kind: strings.Title(strings.TrimSuffix(resource.Name, "s")), Verbs: metav1.Verbs([]string{"list", "create", "get", "delete"}), - ShortNames: shortNames, + ShortNames: []string{resource.ShortName}, }) return c diff --git a/pkg/util/test/fake_discovery_helper.go b/pkg/test/fake_discovery_helper.go similarity index 100% rename from pkg/util/test/fake_discovery_helper.go rename to pkg/test/fake_discovery_helper.go diff --git a/pkg/util/test/fake_dynamic.go b/pkg/test/fake_dynamic.go similarity index 100% rename from pkg/util/test/fake_dynamic.go rename to pkg/test/fake_dynamic.go diff --git a/pkg/util/test/fake_file_system.go b/pkg/test/fake_file_system.go similarity index 100% rename from pkg/util/test/fake_file_system.go rename to pkg/test/fake_file_system.go diff --git a/pkg/util/test/fake_mapper.go b/pkg/test/fake_mapper.go similarity index 100% rename from pkg/util/test/fake_mapper.go rename to pkg/test/fake_mapper.go diff --git a/pkg/util/test/fake_namespace.go b/pkg/test/fake_namespace.go similarity index 100% rename from pkg/util/test/fake_namespace.go rename to pkg/test/fake_namespace.go diff --git a/pkg/util/test/fake_volume_snapshotter.go b/pkg/test/fake_volume_snapshotter.go similarity index 100% rename from pkg/util/test/fake_volume_snapshotter.go rename to pkg/test/fake_volume_snapshotter.go diff --git a/pkg/util/test/helpers.go b/pkg/test/helpers.go similarity index 100% rename from pkg/util/test/helpers.go rename to pkg/test/helpers.go diff --git a/pkg/util/test/mock_pod_command_executor.go b/pkg/test/mock_pod_command_executor.go similarity index 100% rename from pkg/util/test/mock_pod_command_executor.go rename to pkg/test/mock_pod_command_executor.go diff --git a/pkg/test/resources.go b/pkg/test/resources.go new file mode 100644 index 00000000000..153dc83b03b --- /dev/null +++ b/pkg/test/resources.go @@ -0,0 +1,131 @@ +/* +Copyright 2019 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package test + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// APIResource stores information about a specific Kubernetes API +// resource. +type APIResource struct { + Group string + Version string + Name string + ShortName string + Namespaced bool + Items []metav1.Object +} + +// GVR returns a GroupVersionResource representing the resource. +func (r *APIResource) GVR() schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: r.Group, + Version: r.Version, + Resource: r.Name, + } +} + +// Pods returns an APIResource describing core/v1's Pods. +func Pods(items ...metav1.Object) *APIResource { + return &APIResource{ + Group: "", + Version: "v1", + Name: "pods", + ShortName: "po", + Namespaced: true, + Items: items, + } +} + +func PVCs(items ...metav1.Object) *APIResource { + return &APIResource{ + Group: "", + Version: "v1", + Name: "persistentvolumeclaims", + ShortName: "pvc", + Namespaced: true, + Items: items, + } +} + +func PVs(items ...metav1.Object) *APIResource { + return &APIResource{ + Group: "", + Version: "v1", + Name: "persistentvolumes", + ShortName: "pv", + Namespaced: false, + Items: items, + } +} + +func Secrets(items ...metav1.Object) *APIResource { + return &APIResource{ + Group: "", + Version: "v1", + Name: "secrets", + ShortName: "secrets", + Namespaced: true, + Items: items, + } +} + +func Deployments(items ...metav1.Object) *APIResource { + return &APIResource{ + Group: "apps", + Version: "v1", + Name: "deployments", + ShortName: "deploy", + Namespaced: true, + Items: items, + } +} + +func ExtensionsDeployments(items ...metav1.Object) *APIResource { + return &APIResource{ + Group: "extensions", + Version: "v1", + Name: "deployments", + ShortName: "deploy", + Namespaced: true, + Items: items, + } +} + +func Namespaces(items ...metav1.Object) *APIResource { + return &APIResource{ + Group: "", + Version: "v1", + Name: "namespaces", + ShortName: "ns", + Namespaced: false, + Items: items, + } +} + +func ServiceAccounts(items ...metav1.Object) *APIResource { + return &APIResource{ + Group: "", + Version: "v1", + Name: "serviceaccounts", + ShortName: "sa", + Namespaced: true, + Items: items, + } +} diff --git a/pkg/util/test/test_logger.go b/pkg/test/test_logger.go similarity index 100% rename from pkg/util/test/test_logger.go rename to pkg/test/test_logger.go diff --git a/pkg/util/kube/resource_requirements.go b/pkg/util/kube/resource_requirements.go new file mode 100644 index 00000000000..a1974516ca6 --- /dev/null +++ b/pkg/util/kube/resource_requirements.go @@ -0,0 +1,80 @@ +/* +Copyright 2019 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kube + +import ( + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" +) + +// ParseResourceRequirements takes a set of CPU and memory requests and limit string +// values and returns a ResourceRequirements struct to be used in a Container. 
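+// A quantity of "0" is treated as unbounded and leaves the corresponding request
+// or limit unset. As an illustrative sketch (values not part of this change),
+// passing ("100m", "128Mi", "200m", "256Mi") yields requests of 100m CPU / 128Mi
+// memory and limits of 200m CPU / 256Mi memory.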
+// An error is returned if we cannot parse the request/limit. +func ParseResourceRequirements(cpuRequest, memRequest, cpuLimit, memLimit string) (corev1.ResourceRequirements, error) { + resources := corev1.ResourceRequirements{ + Requests: corev1.ResourceList{}, + Limits: corev1.ResourceList{}, + } + + parsedCPURequest, err := resource.ParseQuantity(cpuRequest) + if err != nil { + return resources, errors.Wrapf(err, `couldn't parse CPU request "%s"`, cpuRequest) + } + + parsedMemRequest, err := resource.ParseQuantity(memRequest) + if err != nil { + return resources, errors.Wrapf(err, `couldn't parse memory request "%s"`, memRequest) + } + + parsedCPULimit, err := resource.ParseQuantity(cpuLimit) + if err != nil { + return resources, errors.Wrapf(err, `couldn't parse CPU limit "%s"`, cpuLimit) + } + + parsedMemLimit, err := resource.ParseQuantity(memLimit) + if err != nil { + return resources, errors.Wrapf(err, `couldn't parse memory limit "%s"`, memLimit) + } + + // A quantity of 0 is treated as unbounded + unbounded := resource.MustParse("0") + + if parsedCPULimit != unbounded && parsedCPURequest.Cmp(parsedCPULimit) > 0 { + return resources, errors.WithStack(errors.Errorf(`CPU request "%s" must be less than or equal to CPU limit "%s"`, cpuRequest, cpuLimit)) + } + + if parsedMemLimit != unbounded && parsedMemRequest.Cmp(parsedMemLimit) > 0 { + return resources, errors.WithStack(errors.Errorf(`Memory request "%s" must be less than or equal to Memory limit "%s"`, memRequest, memLimit)) + } + + // Only set resources if they are not unbounded + if parsedCPURequest != unbounded { + resources.Requests[corev1.ResourceCPU] = parsedCPURequest + } + if parsedMemRequest != unbounded { + resources.Requests[corev1.ResourceMemory] = parsedMemRequest + } + if parsedCPULimit != unbounded { + resources.Limits[corev1.ResourceCPU] = parsedCPULimit + } + if parsedMemLimit != unbounded { + resources.Limits[corev1.ResourceMemory] = parsedMemLimit + } + + return resources, nil +} diff --git a/pkg/util/kube/resource_requirements_test.go b/pkg/util/kube/resource_requirements_test.go new file mode 100644 index 00000000000..68bdfc49707 --- /dev/null +++ b/pkg/util/kube/resource_requirements_test.go @@ -0,0 +1,102 @@ +/* +Copyright 2019 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kube + +import ( + "testing" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" +) + +func TestParseResourceRequirements(t *testing.T) { + type args struct { + cpuRequest string + memRequest string + cpuLimit string + memLimit string + } + tests := []struct { + name string + args args + wantErr bool + expected *corev1.ResourceRequirements + }{ + {"unbounded quantities", args{"0", "0", "0", "0"}, false, &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{}, + Limits: corev1.ResourceList{}, + }}, + {"valid quantities", args{"100m", "128Mi", "200m", "256Mi"}, false, nil}, + {"CPU request with unbounded limit", args{"100m", "128Mi", "0", "256Mi"}, false, &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("128Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceMemory: resource.MustParse("256Mi"), + }, + }}, + {"Mem request with unbounded limit", args{"100m", "128Mi", "200m", "0"}, false, &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("128Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("200m"), + }, + }}, + {"CPU/Mem requests with unbounded limits", args{"100m", "128Mi", "0", "0"}, false, &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("128Mi"), + }, + Limits: corev1.ResourceList{}, + }}, + {"invalid quantity", args{"100m", "invalid", "200m", "256Mi"}, true, nil}, + {"CPU request greater than limit", args{"300m", "128Mi", "200m", "256Mi"}, true, nil}, + {"memory request greater than limit", args{"100m", "512Mi", "200m", "256Mi"}, true, nil}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := ParseResourceRequirements(tt.args.cpuRequest, tt.args.memRequest, tt.args.cpuLimit, tt.args.memLimit) + if tt.wantErr { + assert.Error(t, err) + return + } + assert.NoError(t, err) + + var expected corev1.ResourceRequirements + if tt.expected == nil { + expected = corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse(tt.args.cpuRequest), + corev1.ResourceMemory: resource.MustParse(tt.args.memRequest), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse(tt.args.cpuLimit), + corev1.ResourceMemory: resource.MustParse(tt.args.memLimit), + }, + } + } else { + expected = *tt.expected + } + + assert.Equal(t, expected, got) + }) + } +} diff --git a/pkg/util/kube/utils.go b/pkg/util/kube/utils.go index 93eb34cf110..d20037e78ac 100644 --- a/pkg/util/kube/utils.go +++ b/pkg/util/kube/utils.go @@ -93,7 +93,8 @@ func EnsureNamespaceExistsAndIsReady(namespace *corev1api.Namespace, client core // GetVolumeDirectory gets the name of the directory on the host, under /var/lib/kubelet/pods//volumes/, // where the specified volume lives. -func GetVolumeDirectory(pod *corev1api.Pod, volumeName string, pvcLister corev1listers.PersistentVolumeClaimLister) (string, error) { +// For volumes with a CSIVolumeSource, append "/mount" to the directory name. 
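+// For example (illustrative values only), a pod volume bound through a PVC to a
+// CSI-provisioned PersistentVolume named "pvc-123" resolves to "pvc-123/mount",
+// while the same volume on a non-CSI PersistentVolume resolves to "pvc-123".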
+func GetVolumeDirectory(pod *corev1api.Pod, volumeName string, pvcLister corev1listers.PersistentVolumeClaimLister, pvLister corev1listers.PersistentVolumeLister) (string, error) { var volume *corev1api.Volume for _, item := range pod.Spec.Volumes { @@ -107,14 +108,30 @@ func GetVolumeDirectory(pod *corev1api.Pod, volumeName string, pvcLister corev1l return "", errors.New("volume not found in pod") } + // This case implies the administrator created the PV and attached it directly, without PVC. + // Note that only one VolumeSource can be populated per Volume on a pod if volume.VolumeSource.PersistentVolumeClaim == nil { + if volume.VolumeSource.CSI != nil { + return volume.Name + "/mount", nil + } return volume.Name, nil } + // Most common case is that we have a PVC VolumeSource, and we need to check the PV it points to for a CSI source. pvc, err := pvcLister.PersistentVolumeClaims(pod.Namespace).Get(volume.VolumeSource.PersistentVolumeClaim.ClaimName) if err != nil { return "", errors.WithStack(err) } + pv, err := pvLister.Get(pvc.Spec.VolumeName) + if err != nil { + return "", errors.WithStack(err) + } + + // PV's been created with a CSI source. + if pv.Spec.CSI != nil { + return pvc.Spec.VolumeName + "/mount", nil + } + return pvc.Spec.VolumeName, nil } diff --git a/pkg/util/kube/utils_test.go b/pkg/util/kube/utils_test.go index 6b2b018f931..1554f33aaa0 100644 --- a/pkg/util/kube/utils_test.go +++ b/pkg/util/kube/utils_test.go @@ -20,13 +20,18 @@ import ( "testing" "time" + "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" - "k8s.io/api/core/v1" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" + kubeinformers "k8s.io/client-go/informers" - velerotest "github.com/heptio/velero/pkg/util/test" + "github.com/heptio/velero/pkg/builder" + "github.com/heptio/velero/pkg/test" + velerotest "github.com/heptio/velero/pkg/test" ) func TestNamespaceAndName(t *testing.T) { @@ -37,7 +42,7 @@ func TestEnsureNamespaceExistsAndIsReady(t *testing.T) { tests := []struct { name string expectNSFound bool - nsPhase v1.NamespacePhase + nsPhase corev1.NamespacePhase nsDeleting bool expectCreate bool alreadyExists bool @@ -51,7 +56,7 @@ func TestEnsureNamespaceExistsAndIsReady(t *testing.T) { { name: "namespace found, terminating phase", expectNSFound: true, - nsPhase: v1.NamespaceTerminating, + nsPhase: corev1.NamespaceTerminating, expectedResult: false, }, { @@ -73,14 +78,14 @@ func TestEnsureNamespaceExistsAndIsReady(t *testing.T) { { name: "namespace not found initially, create returns already exists error, returned namespace is terminating", alreadyExists: true, - nsPhase: v1.NamespaceTerminating, + nsPhase: corev1.NamespaceTerminating, expectedResult: false, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - namespace := &v1.Namespace{ + namespace := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: "test", }, @@ -102,7 +107,7 @@ func TestEnsureNamespaceExistsAndIsReady(t *testing.T) { if test.expectNSFound { nsClient.On("Get", "test", metav1.GetOptions{}).Return(namespace, nil) } else { - nsClient.On("Get", "test", metav1.GetOptions{}).Return(&v1.Namespace{}, k8serrors.NewNotFound(schema.GroupResource{Resource: "namespaces"}, "test")) + nsClient.On("Get", "test", metav1.GetOptions{}).Return(&corev1.Namespace{}, k8serrors.NewNotFound(schema.GroupResource{Resource: "namespaces"}, "test")) } if 
test.alreadyExists { @@ -120,3 +125,75 @@ func TestEnsureNamespaceExistsAndIsReady(t *testing.T) { } } + +type harness struct { + *test.APIServer + + log logrus.FieldLogger +} + +func newHarness(t *testing.T) *harness { + t.Helper() + + return &harness{ + APIServer: test.NewAPIServer(t), + log: logrus.StandardLogger(), + } +} + +// TestGetVolumeDirectorySuccess tests that the GetVolumeDirectory function +// returns a volume's name or a volume's name plus '/mount' when a PVC is present. +func TestGetVolumeDirectorySuccess(t *testing.T) { + tests := []struct { + name string + pod *corev1.Pod + pvc *corev1.PersistentVolumeClaim + pv *corev1.PersistentVolume + want string + }{ + { + name: "Non-CSI volume with a PVC/PV returns the volume's name", + pod: builder.ForPod("ns-1", "my-pod").Volumes(builder.ForVolume("my-vol").PersistentVolumeClaimSource("my-pvc").Result()).Result(), + pvc: builder.ForPersistentVolumeClaim("ns-1", "my-pvc").VolumeName("a-pv").Result(), + pv: builder.ForPersistentVolume("a-pv").Result(), + want: "a-pv", + }, + { + name: "CSI volume with a PVC/PV appends '/mount' to the volume name", + pod: builder.ForPod("ns-1", "my-pod").Volumes(builder.ForVolume("my-vol").PersistentVolumeClaimSource("my-pvc").Result()).Result(), + pvc: builder.ForPersistentVolumeClaim("ns-1", "my-pvc").VolumeName("a-pv").Result(), + pv: builder.ForPersistentVolume("a-pv").CSI("csi.test.com", "provider-volume-id").Result(), + want: "a-pv/mount", + }, + { + name: "CSI volume mounted without a PVC appends '/mount' to the volume name", + pod: builder.ForPod("ns-1", "my-pod").Volumes(builder.ForVolume("my-vol").CSISource("csi.test.com").Result()).Result(), + want: "my-vol/mount", + }, + { + name: "Non-CSI volume without a PVC returns the volume name", + pod: builder.ForPod("ns-1", "my-pod").Volumes(builder.ForVolume("my-vol").Result()).Result(), + want: "my-vol", + }, + } + + for _, tc := range tests { + h := newHarness(t) + + pvcInformer := kubeinformers.NewSharedInformerFactoryWithOptions(h.KubeClient, 0, kubeinformers.WithNamespace("ns-1")).Core().V1().PersistentVolumeClaims() + pvInformer := kubeinformers.NewSharedInformerFactory(h.KubeClient, 0).Core().V1().PersistentVolumes() + + if tc.pvc != nil { + require.NoError(t, pvcInformer.Informer().GetStore().Add(tc.pvc)) + } + if tc.pv != nil { + require.NoError(t, pvInformer.Informer().GetStore().Add(tc.pv)) + } + + // Function under test + dir, err := GetVolumeDirectory(tc.pod, tc.pod.Spec.Volumes[0].Name, pvcInformer.Lister(), pvInformer.Lister()) + + require.NoError(t, err) + assert.Equal(t, tc.want, dir) + } +} diff --git a/pkg/util/logging/default_logger.go b/pkg/util/logging/default_logger.go index 0b98666b968..5586ebac5a8 100644 --- a/pkg/util/logging/default_logger.go +++ b/pkg/util/logging/default_logger.go @@ -1,5 +1,5 @@ /* -Copyright 2018 the Velero contributors. +Copyright 2019 the Velero contributors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -32,10 +32,14 @@ func DefaultHooks() []logrus.Hook { } // DefaultLogger returns a Logger with the default properties -// and hooks. -func DefaultLogger(level logrus.Level) *logrus.Logger { +// and hooks. The desired output format is passed as a LogFormat Enum. 
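+//
+// As an illustrative sketch (not part of this change),
+// DefaultLogger(logrus.InfoLevel, FormatJSON) returns a logger that writes
+// JSON-formatted entries to stdout.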
+func DefaultLogger(level logrus.Level, format Format) *logrus.Logger { logger := logrus.New() + if format == FormatJSON { + logger.Formatter = new(logrus.JSONFormatter) + } + // Make sure the output is set to stdout so log messages don't show up as errors in cloud log dashboards. logger.Out = os.Stdout diff --git a/pkg/util/logging/default_logger_test.go b/pkg/util/logging/default_logger_test.go index 2c5f4fa1c12..7aab5049622 100644 --- a/pkg/util/logging/default_logger_test.go +++ b/pkg/util/logging/default_logger_test.go @@ -25,11 +25,16 @@ import ( ) func TestDefaultLogger(t *testing.T) { - logger := DefaultLogger(logrus.InfoLevel) - assert.Equal(t, logrus.InfoLevel, logger.Level) - assert.Equal(t, os.Stdout, logger.Out) + formatFlag := NewFormatFlag() - for _, level := range logrus.AllLevels { - assert.Equal(t, DefaultHooks(), logger.Hooks[level]) + for _, testFormat := range formatFlag.AllowedValues() { + formatFlag.Set(testFormat) + logger := DefaultLogger(logrus.InfoLevel, formatFlag.Parse()) + assert.Equal(t, logrus.InfoLevel, logger.Level) + assert.Equal(t, os.Stdout, logger.Out) + + for _, level := range logrus.AllLevels { + assert.Equal(t, DefaultHooks(), logger.Hooks[level]) + } } } diff --git a/pkg/util/logging/format_flag.go b/pkg/util/logging/format_flag.go new file mode 100644 index 00000000000..82915e19e3d --- /dev/null +++ b/pkg/util/logging/format_flag.go @@ -0,0 +1,51 @@ +/* +Copyright 2018 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package logging + +import "github.com/heptio/velero/pkg/cmd/util/flag" + +// Format is a string representation of the desired output format for logs +type Format string + +const ( + FormatText Format = "text" + FormatJSON Format = "json" + defaultValue Format = FormatText +) + +// FormatFlag is a command-line flag for setting the logrus +// log format. +type FormatFlag struct { + *flag.Enum + defaultValue Format +} + +// NewFormatFlag constructs a new log level flag. +func NewFormatFlag() *FormatFlag { + return &FormatFlag{ + Enum: flag.NewEnum( + string(defaultValue), + string(FormatText), + string(FormatJSON), + ), + defaultValue: defaultValue, + } +} + +// Parse returns the flag's value as a Format. +func (f *FormatFlag) Parse() Format { + return Format(f.String()) +} diff --git a/pkg/util/test/test_backup.go b/pkg/util/test/test_backup.go deleted file mode 100644 index be371c95926..00000000000 --- a/pkg/util/test/test_backup.go +++ /dev/null @@ -1,139 +0,0 @@ -/* -Copyright 2017 the Velero contributors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package test - -import ( - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - v1 "github.com/heptio/velero/pkg/apis/velero/v1" -) - -type TestBackup struct { - *v1.Backup -} - -func NewTestBackup() *TestBackup { - return &TestBackup{ - Backup: &v1.Backup{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: v1.DefaultNamespace, - }, - }, - } -} - -func (b *TestBackup) WithNamespace(namespace string) *TestBackup { - b.Namespace = namespace - return b -} - -func (b *TestBackup) WithName(name string) *TestBackup { - b.Name = name - return b -} - -func (b *TestBackup) WithLabel(key, value string) *TestBackup { - if b.Labels == nil { - b.Labels = make(map[string]string) - } - b.Labels[key] = value - - return b -} - -func (b *TestBackup) WithPhase(phase v1.BackupPhase) *TestBackup { - b.Status.Phase = phase - return b -} - -func (b *TestBackup) WithIncludedResources(r ...string) *TestBackup { - b.Spec.IncludedResources = r - return b -} - -func (b *TestBackup) WithExcludedResources(r ...string) *TestBackup { - b.Spec.ExcludedResources = r - return b -} - -func (b *TestBackup) WithIncludedNamespaces(ns ...string) *TestBackup { - b.Spec.IncludedNamespaces = ns - return b -} - -func (b *TestBackup) WithExcludedNamespaces(ns ...string) *TestBackup { - b.Spec.ExcludedNamespaces = ns - return b -} - -func (b *TestBackup) WithTTL(ttl time.Duration) *TestBackup { - b.Spec.TTL = metav1.Duration{Duration: ttl} - return b -} - -func (b *TestBackup) WithExpiration(expiration time.Time) *TestBackup { - b.Status.Expiration = metav1.Time{Time: expiration} - return b -} - -func (b *TestBackup) WithVersion(version int) *TestBackup { - b.Status.Version = version - return b -} - -func (b *TestBackup) WithSnapshotVolumes(value bool) *TestBackup { - b.Spec.SnapshotVolumes = &value - return b -} - -func (b *TestBackup) WithSnapshotVolumesPointer(value *bool) *TestBackup { - b.Spec.SnapshotVolumes = value - return b -} - -func (b *TestBackup) WithDeletionTimestamp(time time.Time) *TestBackup { - b.DeletionTimestamp = &metav1.Time{Time: time} - return b -} - -func (b *TestBackup) WithResourceVersion(version string) *TestBackup { - b.ResourceVersion = version - return b -} - -func (b *TestBackup) WithFinalizers(finalizers ...string) *TestBackup { - b.ObjectMeta.Finalizers = append(b.ObjectMeta.Finalizers, finalizers...) - - return b -} - -func (b *TestBackup) WithStartTimestamp(startTime time.Time) *TestBackup { - b.Status.StartTimestamp = metav1.Time{Time: startTime} - return b -} - -func (b *TestBackup) WithStorageLocation(location string) *TestBackup { - b.Spec.StorageLocation = location - return b -} - -func (b *TestBackup) WithVolumeSnapshotLocations(locations ...string) *TestBackup { - b.Spec.VolumeSnapshotLocations = locations - return b -} diff --git a/pkg/util/test/test_backup_storage_location.go b/pkg/util/test/test_backup_storage_location.go deleted file mode 100644 index d26ccce1267..00000000000 --- a/pkg/util/test/test_backup_storage_location.go +++ /dev/null @@ -1,73 +0,0 @@ -/* -Copyright 2017, 2019 the Velero contributors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package test - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - v1 "github.com/heptio/velero/pkg/apis/velero/v1" -) - -type TestBackupStorageLocation struct { - *v1.BackupStorageLocation -} - -func NewTestBackupStorageLocation() *TestBackupStorageLocation { - return &TestBackupStorageLocation{ - BackupStorageLocation: &v1.BackupStorageLocation{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: v1.DefaultNamespace, - }, - }, - } -} - -func (b *TestBackupStorageLocation) WithNamespace(namespace string) *TestBackupStorageLocation { - b.Namespace = namespace - return b -} - -func (b *TestBackupStorageLocation) WithName(name string) *TestBackupStorageLocation { - b.Name = name - return b -} - -func (b *TestBackupStorageLocation) WithLabel(key, value string) *TestBackupStorageLocation { - if b.Labels == nil { - b.Labels = make(map[string]string) - } - b.Labels[key] = value - return b -} - -func (b *TestBackupStorageLocation) WithProvider(name string) *TestBackupStorageLocation { - b.Spec.Provider = name - return b -} - -func (b *TestBackupStorageLocation) WithObjectStorage(bucketName string) *TestBackupStorageLocation { - if b.Spec.StorageType.ObjectStorage == nil { - b.Spec.StorageType.ObjectStorage = &v1.ObjectStorageLocation{} - } - b.Spec.ObjectStorage.Bucket = bucketName - return b -} - -func (b *TestBackupStorageLocation) WithAccessMode(accessMode v1.BackupStorageLocationAccessMode) *TestBackupStorageLocation { - b.Spec.AccessMode = accessMode - return b -} diff --git a/pkg/util/test/test_restore.go b/pkg/util/test/test_restore.go deleted file mode 100644 index 5598c7ff779..00000000000 --- a/pkg/util/test/test_restore.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright 2017 the Velero contributors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package test - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - api "github.com/heptio/velero/pkg/apis/velero/v1" -) - -type TestRestore struct { - *api.Restore -} - -func NewTestRestore(ns, name string, phase api.RestorePhase) *TestRestore { - return &TestRestore{ - Restore: &api.Restore{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: ns, - Name: name, - }, - Spec: api.RestoreSpec{}, - Status: api.RestoreStatus{ - Phase: phase, - }, - }, - } -} - -func NewDefaultTestRestore() *TestRestore { - return NewTestRestore(api.DefaultNamespace, "", api.RestorePhase("")) -} - -func (r *TestRestore) WithIncludedNamespace(name string) *TestRestore { - r.Spec.IncludedNamespaces = append(r.Spec.IncludedNamespaces, name) - return r -} - -func (r *TestRestore) WithExcludedNamespace(name string) *TestRestore { - r.Spec.ExcludedNamespaces = append(r.Spec.ExcludedNamespaces, name) - return r -} - -func (r *TestRestore) WithValidationError(err string) *TestRestore { - r.Status.ValidationErrors = append(r.Status.ValidationErrors, err) - return r -} - -func (r *TestRestore) WithBackup(name string) *TestRestore { - r.Spec.BackupName = name - return r -} - -func (r *TestRestore) WithSchedule(name string) *TestRestore { - r.Spec.ScheduleName = name - return r -} - -func (r *TestRestore) WithErrors(i int) *TestRestore { - r.Status.Errors = i - return r -} - -func (r *TestRestore) WithRestorePVs(value bool) *TestRestore { - r.Spec.RestorePVs = &value - return r -} - -func (r *TestRestore) WithMappedNamespace(from string, to string) *TestRestore { - if r.Spec.NamespaceMapping == nil { - r.Spec.NamespaceMapping = make(map[string]string) - } - r.Spec.NamespaceMapping[from] = to - return r -} - -func (r *TestRestore) WithIncludedResource(resource string) *TestRestore { - r.Spec.IncludedResources = append(r.Spec.IncludedResources, resource) - return r -} - -func (r *TestRestore) WithExcludedResource(resource string) *TestRestore { - r.Spec.ExcludedResources = append(r.Spec.ExcludedResources, resource) - return r -} diff --git a/pkg/util/test/test_schedule.go b/pkg/util/test/test_schedule.go deleted file mode 100644 index 84ff61badfc..00000000000 --- a/pkg/util/test/test_schedule.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright 2017 the Velero contributors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package test - -import ( - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - api "github.com/heptio/velero/pkg/apis/velero/v1" -) - -type TestSchedule struct { - *api.Schedule -} - -func NewTestSchedule(namespace, name string) *TestSchedule { - return &TestSchedule{ - Schedule: &api.Schedule{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: namespace, - Name: name, - }, - }, - } -} - -func (s *TestSchedule) WithPhase(phase api.SchedulePhase) *TestSchedule { - s.Status.Phase = phase - return s -} - -func (s *TestSchedule) WithValidationError(msg string) *TestSchedule { - s.Status.ValidationErrors = append(s.Status.ValidationErrors, msg) - return s -} - -func (s *TestSchedule) WithCronSchedule(cronExpression string) *TestSchedule { - s.Spec.Schedule = cronExpression - return s -} - -func (s *TestSchedule) WithLastBackupTime(timeString string) *TestSchedule { - t, _ := time.Parse("2006-01-02 15:04:05", timeString) - s.Status.LastBackup = metav1.Time{Time: t} - return s -} diff --git a/pkg/util/test/test_volume_snapshot_location.go b/pkg/util/test/test_volume_snapshot_location.go deleted file mode 100644 index d46649a8f02..00000000000 --- a/pkg/util/test/test_volume_snapshot_location.go +++ /dev/null @@ -1,77 +0,0 @@ -/* -Copyright 2018 the Velero contributors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package test - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - v1 "github.com/heptio/velero/pkg/apis/velero/v1" -) - -type TestVolumeSnapshotLocation struct { - *v1.VolumeSnapshotLocation -} - -func NewTestVolumeSnapshotLocation() *TestVolumeSnapshotLocation { - return &TestVolumeSnapshotLocation{ - VolumeSnapshotLocation: &v1.VolumeSnapshotLocation{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: v1.DefaultNamespace, - }, - Spec: v1.VolumeSnapshotLocationSpec{ - Provider: "aws", - Config: map[string]string{"region": "us-west-1"}, - }, - }, - } -} - -func (location *TestVolumeSnapshotLocation) WithName(name string) *TestVolumeSnapshotLocation { - location.Name = name - return location -} - -func (location *TestVolumeSnapshotLocation) WithProvider(name string) *TestVolumeSnapshotLocation { - location.Spec.Provider = name - return location -} - -func (location *TestVolumeSnapshotLocation) WithProviderConfig(info []LocationInfo) []*TestVolumeSnapshotLocation { - var locations []*TestVolumeSnapshotLocation - - for _, v := range info { - location := &TestVolumeSnapshotLocation{ - VolumeSnapshotLocation: &v1.VolumeSnapshotLocation{ - ObjectMeta: metav1.ObjectMeta{ - Name: v.Name, - Namespace: v1.DefaultNamespace, - }, - Spec: v1.VolumeSnapshotLocationSpec{ - Provider: v.Provider, - Config: v.Config, - }, - }, - } - locations = append(locations, location) - } - return locations -} - -type LocationInfo struct { - Name, Provider string - Config map[string]string -} diff --git a/site/Gemfile b/site/Gemfile index 9932b638ef6..eac36243f0a 100644 --- a/site/Gemfile +++ b/site/Gemfile @@ -1,3 +1,4 @@ source 'https://rubygems.org' gem 'github-pages' gem 'redcarpet' +gem 'jekyll-seo-tag' diff --git a/site/Gemfile.lock b/site/Gemfile.lock index 6f3ea50694e..b36ce1e4fd5 100644 --- a/site/Gemfile.lock +++ b/site/Gemfile.lock @@ -205,7 +205,7 @@ GEM jekyll-seo-tag (~> 2.1) minitest (5.11.3) multipart-post (2.1.1) - nokogiri (1.10.3) + nokogiri (1.10.4) mini_portile2 (~> 2.4.0) octokit (4.14.0) sawyer (~> 0.8.0, >= 0.5.3) @@ -244,6 +244,7 @@ PLATFORMS DEPENDENCIES github-pages + jekyll-seo-tag redcarpet BUNDLED WITH diff --git a/site/README-JEKYLL.md b/site/README-JEKYLL.md index 9d5d2205cbf..aa0f679e00f 100644 --- a/site/README-JEKYLL.md +++ b/site/README-JEKYLL.md @@ -1,3 +1,7 @@ +# Running in Docker + +To run this site in a Docker container, you can use `make serve-docs` from the root directory. + # Dependencies for MacOS Install the following for an easy to use dev environment: @@ -23,7 +27,21 @@ This mirrors the plug-ins used by GitHub Pages on your local machine including J 3. `cd velero/site` 4. `rbenv local 2.6.3` 5. `bundle install` -6. Serve the site and watch for markup/sass changes `jekyll serve --livereload`. You may need to run `bundle exec jekyll serve --livereload`. +6. Serve the site and watch for markup/sass changes `jekyll serve --livereload --incremental`. You may need to run `bundle exec jekyll serve --livereload --incremental`. 7. View your website at http://127.0.0.1:4000/ 8. Commit any changes and push everything to your fork. 9. Once you're ready, submit a PR of your changes. Netlify will automatically generate a preview of your changes. + + +# Adding a New Docs Version + +To add a new set of versioned docs to go with a new Velero release: + +1. In the root of the repository, run: + + ```bash + # set to the appropriate version number + NEW_DOCS_VERSION=vX.Y.Z make gen-docs + ``` + +1. 
In `site/_config.yml`, under the `defaults` field, add an entry for the new version just under `master` by copying the most recent version's entry and updating the version numbers. diff --git a/site/_config.yml b/site/_config.yml index 6fd6bec8f31..5c702c2f08b 100644 --- a/site/_config.yml +++ b/site/_config.yml @@ -2,9 +2,12 @@ title: Velero email: author: Velero Authors -description: -url: +description: Backup and migrate Kubernetes resources and persistent volumes +url: velero.io logo: Velero.svg +twitter: + username: projectvelero + card: summary vm_logo: vm-logo.png gh_repo: https://github.com/heptio/velero markdown: redcarpet @@ -14,7 +17,7 @@ footer: title: Getting Started content: To help you get started, see the documentation. cta_title: '' - cta_url: /docs/master + cta_url: /docs cta_text: Documentation vm-link: http://vmware.github.io/ @@ -31,6 +34,9 @@ footer_social_links: RSS: fa_icon: fa fa-rss url: feed.xml + GitHub: + fa_icon: fab fa-github + url: https://github.com/heptio/velero defaults: - scope: @@ -49,6 +55,12 @@ defaults: version: master gh: https://github.com/heptio/velero/tree/master layout: "docs" + - scope: + path: docs/v1.1.0 + values: + version: v1.1.0 + gh: https://github.com/heptio/velero/tree/v1.1.0 + layout: "docs" - scope: path: docs/v1.0.0 values: @@ -122,14 +134,21 @@ defaults: gh: https://github.com/heptio/velero/tree/v0.3.0 layout: "docs" +page_gen: + - data: shortlinks + template: redirect + name: key + dir: docs + collections: - contributors - casestudies versioning: true -latest: v1.0.0 +latest: v1.1.0 versions: - master +- v1.1.0 - v1.0.0 - v0.11.0 - v0.10.0 @@ -158,7 +177,8 @@ plugins: - jekyll-optional-front-matter # Parse Markdown files that do not have front-matter callouts - jekyll-titles-from-headings # pull the page title from the first Markdown heading when none is specified. - jekyll-paginate # pagination object for collections (e.g. 
posts) - + - jekyll-redirect-from + - jekyll-seo-tag # Include these subdirectories include: diff --git a/site/_data/master-toc.yml b/site/_data/master-toc.yml index 88c632ec4ab..5fec8fa024a 100644 --- a/site/_data/master-toc.yml +++ b/site/_data/master-toc.yml @@ -1,7 +1,3 @@ -# historically we haven't provided a new TOC for the master branch docs -# but a doc reorg suggests that it would be useful at this point -# for future docs builds: use this file as the basis for new version TOCs - toc: - title: Introduction subfolderitems: @@ -17,9 +13,9 @@ toc: subfolderitems: - page: Overview url: /install-overview - - page: Upgrade to 1.0 - url: /upgrade-to-1.0 - - page: Quick Start with In-Cluster Minio + - page: Upgrade to 1.1 + url: /upgrade-to-1.1 + - page: Quick start with in-cluster MinIO url: /get-started - page: Run on AWS url: /aws-config @@ -27,14 +23,18 @@ toc: url: /azure-config - page: Run on GCP url: /gcp-config - - page: Restic Setup + - page: Restic setup url: /restic - title: Use subfolderitems: - - page: Disaster Recovery + - page: Disaster recovery url: /disaster-case - page: Cluster migration url: /migration-case + - page: Backup reference + url: /backup-reference + - page: Restore reference + url: /restore-reference - title: Troubleshoot subfolderitems: - page: Troubleshooting @@ -45,7 +45,7 @@ toc: url: /debugging-restores - page: Troubleshoot Restic url: /restic#troubleshooting - - title: Customize Velero + - title: Customize subfolderitems: - page: Build from source url: /build-from-source diff --git a/site/_data/shortlinks.yml b/site/_data/shortlinks.yml new file mode 100644 index 00000000000..ffa78fe333e --- /dev/null +++ b/site/_data/shortlinks.yml @@ -0,0 +1,12 @@ +- title: Troubleshooting + key: troubleshooting + destination: troubleshooting +- title: Support Matrix + key: support-matrix + destination: support-matrix +- title: ZenHub + key: zenhub + destination: zenhub +- title: Install Overview + key: install-overview + destination: install-overview diff --git a/site/_data/toc-mapping.yml b/site/_data/toc-mapping.yml index ab2dffcdca9..6e742c49f72 100644 --- a/site/_data/toc-mapping.yml +++ b/site/_data/toc-mapping.yml @@ -3,7 +3,8 @@ # that the navigation for older versions still work. 
master: master-toc -v1.0.0: v1-0-toc +v1.1.0: v1-1-0-toc +v1.0.0: v1-0-0-toc v0.11.0: v011-toc v0.10.0: v010-toc v0.9.0: v9-toc diff --git a/site/_data/v1-0-toc.yml b/site/_data/v1-0-0-toc.yml similarity index 100% rename from site/_data/v1-0-toc.yml rename to site/_data/v1-0-0-toc.yml diff --git a/site/_data/v1-1-0-toc.yml b/site/_data/v1-1-0-toc.yml new file mode 100644 index 00000000000..5fec8fa024a --- /dev/null +++ b/site/_data/v1-1-0-toc.yml @@ -0,0 +1,69 @@ +toc: + - title: Introduction + subfolderitems: + - page: About Velero + url: /index.html + - page: How Velero works + url: /about + - page: About locations + url: /locations + - page: Supported platforms + url: /support-matrix + - title: Install + subfolderitems: + - page: Overview + url: /install-overview + - page: Upgrade to 1.1 + url: /upgrade-to-1.1 + - page: Quick start with in-cluster MinIO + url: /get-started + - page: Run on AWS + url: /aws-config + - page: Run on Azure + url: /azure-config + - page: Run on GCP + url: /gcp-config + - page: Restic setup + url: /restic + - title: Use + subfolderitems: + - page: Disaster recovery + url: /disaster-case + - page: Cluster migration + url: /migration-case + - page: Backup reference + url: /backup-reference + - page: Restore reference + url: /restore-reference + - title: Troubleshoot + subfolderitems: + - page: Troubleshooting + url: /troubleshooting + - page: Troubleshoot an install or setup + url: /debugging-install + - page: Troubleshoot a restore + url: /debugging-restores + - page: Troubleshoot Restic + url: /restic#troubleshooting + - title: Customize + subfolderitems: + - page: Build from source + url: /build-from-source + - page: Run in any namespace + url: /namespace + - page: Extend + url: /extend + - page: Extend with plugins + url: /plugins + - page: Extend with hooks + url: /hooks + - title: More information + subfolderitems: + - page: Backup file format + url: /output-file-format + - page: API types + url: /api-types + - page: FAQ + url: /faq + - page: ZenHub + url: /zenhub diff --git a/site/_includes/footer.html b/site/_includes/footer.html index 2c22d9cbe2f..cabdd200449 100644 --- a/site/_includes/footer.html +++ b/site/_includes/footer.html @@ -16,7 +16,7 @@
{{ site.footer.title }}