diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 268902d4d5..2d398fbee0 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -14,11 +14,11 @@ about: Tell us about a problem you are experiencing **The output of the following commands will help us better understand what's going on**: (Pasting long output into a [GitHub gist](https://gist.github.com) or other pastebin is fine.) -* `kubectl logs deployment/ark -n heptio-ark` -* `ark backup describe ` or `kubectl get backup/ -n heptio-ark -o yaml` -* `ark backup logs ` -* `ark restore describe ` or `kubectl get restore/ -n heptio-ark -o yaml` -* `ark restore logs ` +* `kubectl logs deployment/velero -n velero` +* `velero backup describe ` or `kubectl get backup/ -n velero -o yaml` +* `velero backup logs ` +* `velero restore describe ` or `kubectl get restore/ -n velero -o yaml` +* `velero restore logs ` **Anything else you would like to add:** @@ -27,7 +27,7 @@ about: Tell us about a problem you are experiencing **Environment:** -- Ark version (use `ark version`): +- Velero version (use `velero version`): - Kubernetes version (use `kubectl version`): - Kubernetes installer & version: - Cloud provider or hardware configuration: diff --git a/.github/ISSUE_TEMPLATE/feature-enhancement-request.md b/.github/ISSUE_TEMPLATE/feature-enhancement-request.md index d3ec31dce8..378ff0763b 100644 --- a/.github/ISSUE_TEMPLATE/feature-enhancement-request.md +++ b/.github/ISSUE_TEMPLATE/feature-enhancement-request.md @@ -14,7 +14,7 @@ about: Suggest an idea for this project **Environment:** -- Ark version (use `ark version`): +- Velero version (use `velero version`): - Kubernetes version (use `kubectl version`): - Kubernetes installer & version: - Cloud provider or hardware configuration: diff --git a/.gitignore b/.gitignore index fdd7a5821b..8c9eb8707f 100644 --- a/.gitignore +++ b/.gitignore @@ -27,7 +27,7 @@ _testmain.go debug -/ark +/velero .idea/ .container-* diff --git a/.goreleaser.yml b/.goreleaser.yml index 66c8ce2309..f20965c81f 100644 --- a/.goreleaser.yml +++ b/.goreleaser.yml @@ -17,7 +17,7 @@ before: hooks: - ./hack/set-example-tags.sh builds: - - main: ./cmd/ark/main.go + - main: ./cmd/velero/main.go env: - CGO_ENABLED=0 goos: @@ -39,7 +39,7 @@ builds: - goos: windows goarch: arm64 ldflags: - - -X "github.com/heptio/ark/pkg/buildinfo.Version={{ .Tag }}" -X "github.com/heptio/ark/pkg/buildinfo.GitSHA={{ .FullCommit }}" -X "github.com/heptio/ark/pkg/buildinfo.GitTreeState={{ .Env.GIT_TREE_STATE }}" + - -X "github.com/heptio/velero/pkg/buildinfo.Version={{ .Tag }}" -X "github.com/heptio/velero/pkg/buildinfo.GitSHA={{ .FullCommit }}" -X "github.com/heptio/velero/pkg/buildinfo.GitTreeState={{ .Env.GIT_TREE_STATE }}" archive: name_template: "{{ .ProjectName }}-{{ .Tag }}-{{ .Os }}-{{ .Arch }}" files: @@ -50,5 +50,5 @@ checksum: release: github: owner: heptio - name: ark + name: velero draft: true diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 5dfffe86ad..e35e26210e 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1,4 +1,4 @@ -# Heptio Ark Community Code of Conduct +# Velero Community Code of Conduct ## Contributor Code of Conduct diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d935b518d8..61905daa2b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -7,7 +7,7 @@ should be a new file created in the `changelogs/unreleased` folder. 
The file sho naming convention of `pr-username` and the contents of the file should be your text for the changelog. - ark/changelogs/unreleased <- folder + velero/changelogs/unreleased <- folder 000-username <- file diff --git a/Dockerfile-ark-restic-restore-helper.alpine b/Dockerfile-velero-restic-restore-helper.alpine similarity index 87% rename from Dockerfile-ark-restic-restore-helper.alpine rename to Dockerfile-velero-restic-restore-helper.alpine index a7f231cf37..c3edbcb2ab 100644 --- a/Dockerfile-ark-restic-restore-helper.alpine +++ b/Dockerfile-velero-restic-restore-helper.alpine @@ -16,8 +16,8 @@ FROM alpine:3.8 MAINTAINER Steve Kriss -ADD /bin/linux/amd64/ark-restic-restore-helper . +ADD /bin/linux/amd64/velero-restic-restore-helper . USER nobody:nobody -ENTRYPOINT [ "/ark-restic-restore-helper" ] +ENTRYPOINT [ "/velero-restic-restore-helper" ] diff --git a/Dockerfile-ark.alpine b/Dockerfile-velero.alpine similarity index 94% rename from Dockerfile-ark.alpine rename to Dockerfile-velero.alpine index 2fb1a34929..909e7ef52b 100644 --- a/Dockerfile-ark.alpine +++ b/Dockerfile-velero.alpine @@ -24,8 +24,8 @@ RUN apk add --update --no-cache bzip2 && \ mv restic_0.9.3_linux_amd64 /usr/bin/restic && \ chmod +x /usr/bin/restic -ADD /bin/linux/amd64/ark /ark +ADD /bin/linux/amd64/velero /velero USER nobody:nobody -ENTRYPOINT ["/ark"] +ENTRYPOINT ["/velero"] diff --git a/Makefile b/Makefile index 296ba5b810..a79a87691a 100644 --- a/Makefile +++ b/Makefile @@ -15,10 +15,10 @@ # limitations under the License. # The binary to build (just the basename). -BIN ?= ark +BIN ?= velero # This repo's root import path (under GOPATH). -PKG := github.com/heptio/ark +PKG := github.com/heptio/velero # Where to push the docker image. REGISTRY ?= gcr.io/heptio-images @@ -63,7 +63,7 @@ IMAGE = $(REGISTRY)/$(BIN) # If you want to build AND push all containers, see the 'all-push' rule. all: @$(MAKE) build - @$(MAKE) build BIN=ark-restic-restore-helper + @$(MAKE) build BIN=velero-restic-restore-helper build-%: @$(MAKE) --no-print-directory ARCH=$* build @@ -104,7 +104,7 @@ _output/bin/$(GOOS)/$(GOARCH)/$(BIN): build-dirs TTY := $(shell tty -s && echo "-t") -BUILDER_IMAGE := ark-builder +BUILDER_IMAGE := velero-builder # Example: make shell CMD="date > datefile" shell: build-dirs build-image @@ -146,7 +146,7 @@ endif all-containers: $(MAKE) container - $(MAKE) container BIN=ark-restic-restore-helper + $(MAKE) container BIN=velero-restic-restore-helper $(MAKE) build-fsfreeze container: verify test .container-$(DOTFILE_IMAGE) container-name @@ -160,7 +160,7 @@ container-name: all-push: $(MAKE) push - $(MAKE) push BIN=ark-restic-restore-helper + $(MAKE) push BIN=velero-restic-restore-helper $(MAKE) push-fsfreeze diff --git a/README.md b/README.md index 8ca5aacabd..005079d847 100644 --- a/README.md +++ b/README.md @@ -1,35 +1,36 @@ -# Heptio Ark +# Velero -**Maintainers:** [Heptio][0] +![](velero.png) [![Build Status][1]][2] +## Heptio Ark is now Velero! + +#### We're working on our first Velero release and instructions for migrating your Ark deployments to Velero. Stay tuned! + ## Overview -Ark gives you tools to back up and restore your Kubernetes cluster resources and persistent volumes. Ark lets you: +Velero gives you tools to back up and restore your Kubernetes cluster resources and persistent volumes. Velero lets you: * Take backups of your cluster and restore in case of loss. * Copy cluster resources to other clusters. * Replicate your production environment for development and testing environments. 
-Ark consists of: +Velero consists of: * A server that runs on your cluster * A command-line client that runs locally -You can run Ark in clusters on a cloud provider or on-premises. For detailed information, see [Compatible Storage Providers][99]. +You can run Velero in clusters on a cloud provider or on-premises. For detailed information, see [Compatible Storage Providers][99]. -## Breaking changes - -Ark version 0.10.0 introduces a number of breaking changes. Before you upgrade to version 0.10.0, make sure to read [the documentation on upgrading][98]. ## More information -[The documentation][29] provides a getting started guide, plus information about building from source, architecture, extending Ark, and more. +[The documentation][29] provides a getting started guide, plus information about building from source, architecture, extending Velero, and more. ## Troubleshooting -If you encounter issues, review the [troubleshooting docs][30], [file an issue][4], or talk to us on the [#ark-dr channel][25] on the Kubernetes Slack server. +If you encounter issues, review the [troubleshooting docs][30], [file an issue][4], or talk to us on the [#velero channel][25] on the Kubernetes Slack server. ## Contributing @@ -51,29 +52,26 @@ Feedback and discussion are available on [the mailing list][24]. See [the list of releases][6] to find out about feature changes. -[0]: https://github.com/heptio -[1]: https://travis-ci.org/heptio/ark.svg?branch=master -[2]: https://travis-ci.org/heptio/ark +[1]: https://travis-ci.org/heptio/velero.svg?branch=master +[2]: https://travis-ci.org/heptio/velero -[4]: https://github.com/heptio/ark/issues -[5]: https://github.com/heptio/ark/blob/master/CONTRIBUTING.md -[6]: https://github.com/heptio/ark/releases +[4]: https://github.com/heptio/velero/issues +[5]: https://github.com/heptio/velero/blob/master/CONTRIBUTING.md +[6]: https://github.com/heptio/velero/releases -[8]: https://github.com/heptio/ark/blob/master/CODE_OF_CONDUCT.md +[8]: https://github.com/heptio/velero/blob/master/CODE_OF_CONDUCT.md [9]: https://kubernetes.io/docs/setup/ [10]: https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-with-homebrew-on-macos [11]: https://kubernetes.io/docs/tasks/tools/install-kubectl/#tabset-1 [12]: https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/dns/README.md [14]: https://github.com/kubernetes/kubernetes - -[24]: http://j.hept.io/ark-list -[25]: https://kubernetes.slack.com/messages/ark-dr -[26]: https://github.com/heptio/ark/blob/master/docs/zenhub.md +[24]: https://groups.google.com/forum/#!forum/projectvelero +[25]: https://kubernetes.slack.com/messages/velero +[26]: https://github.com/heptio/velero/blob/master/docs/zenhub.md -[29]: https://heptio.github.io/ark/ +[29]: https://heptio.github.io/velero/ [30]: /docs/troubleshooting.md -[98]: /docs/upgrading-to-v0.10.md [99]: /docs/support-matrix.md diff --git a/SUPPORT.md b/SUPPORT.md index 48a657fecf..292958e4d0 100644 --- a/SUPPORT.md +++ b/SUPPORT.md @@ -1,5 +1,5 @@ -# Ark Support +# Velero Support -Thanks for trying out Ark! We welcome all feedback, please consider joining our mailing list: +Thanks for trying out Velero! 
We welcome all feedback, please consider joining our mailing list: -- [Mailing List](http://j.hept.io/ark-list) \ No newline at end of file +- [Mailing List](http://j.hept.io/ark-list) diff --git a/changelogs/CHANGELOG-0.10.md b/changelogs/CHANGELOG-0.10.md index 059e3049b4..7614802b5c 100644 --- a/changelogs/CHANGELOG-0.10.md +++ b/changelogs/CHANGELOG-0.10.md @@ -245,5 +245,5 @@ need to be updated for v0.10. - [eabef085](https://github.com/heptio/ark/commit/eabef085) Update generated Ark code based on the 1.11 k8s.io/code-generator script - [f5eac0b4](https://github.com/heptio/ark/commit/f5eac0b4) Update vendored library code for Kubernetes 1.11 -[1]: https://github.com/heptio/ark/blob/master/docs/upgrading-to-v0.10.md +[1]: https://heptio.github.io/velero/v0.10.0/upgrading-to-v0.10 [2]: locations.md diff --git a/changelogs/CHANGELOG-0.8.md b/changelogs/CHANGELOG-0.8.md index 00bff8055d..2a13676255 100644 --- a/changelogs/CHANGELOG-0.8.md +++ b/changelogs/CHANGELOG-0.8.md @@ -77,9 +77,9 @@ here are the steps you can take to upgrade: 1. Execute the steps from the **Credentials and configuration** section for your cloud: - * [AWS](https://heptio.github.io/ark/v0.8.0/aws-config#credentials-and-configuration) - * [Azure](https://heptio.github.io/ark/v0.8.0/azure-config#credentials-and-configuration) - * [GCP](https://heptio.github.io/ark/v0.8.0/gcp-config#credentials-and-configuration) + * [AWS](https://heptio.github.io/velero/v0.8.0/aws-config#credentials-and-configuration) + * [Azure](https://heptio.github.io/velero/v0.8.0/azure-config#credentials-and-configuration) + * [GCP](https://heptio.github.io/velero/v0.8.0/gcp-config#credentials-and-configuration) When you get to the secret creation step, if you don't have your `credentials-ark` file handy, you can copy the existing secret from your `heptio-ark-server` namespace into the `heptio-ark` namespace: @@ -95,6 +95,6 @@ ``` 3. Execute the commands from the **Start the server** section for your cloud: - * [AWS](https://heptio.github.io/ark/v0.8.0/aws-config#start-the-server) - * [Azure](https://heptio.github.io/ark/v0.8.0/azure-config#start-the-server) - * [GCP](https://heptio.github.io/ark/v0.8.0/gcp-config#start-the-server) + * [AWS](https://heptio.github.io/velero/v0.8.0/aws-config#start-the-server) + * [Azure](https://heptio.github.io/velero/v0.8.0/azure-config#start-the-server) + * [GCP](https://heptio.github.io/velero/v0.8.0/gcp-config#start-the-server) diff --git a/changelogs/unreleased/rename-nrb b/changelogs/unreleased/rename-nrb new file mode 100644 index 0000000000..fcc01dae7d --- /dev/null +++ b/changelogs/unreleased/rename-nrb @@ -0,0 +1 @@ +Renamed Heptio Ark to Velero. Changed internal imports, environment variables, and binary name. 
diff --git a/cmd/ark-restic-restore-helper/main.go b/cmd/velero-restic-restore-helper/main.go similarity index 91% rename from cmd/ark-restic-restore-helper/main.go rename to cmd/velero-restic-restore-helper/main.go index 46755aba4e..0fe0b1eccd 100644 --- a/cmd/ark-restic-restore-helper/main.go +++ b/cmd/velero-restic-restore-helper/main.go @@ -45,7 +45,7 @@ func main() { } // done returns true if for each directory under /restores, a file exists -// within the .ark/ subdirectory whose name is equal to os.Args[1], or +// within the .velero/ subdirectory whose name is equal to os.Args[1], or // false otherwise func done() bool { children, err := ioutil.ReadDir("/restores") @@ -60,7 +60,7 @@ func done() bool { continue } - doneFile := filepath.Join("/restores", child.Name(), ".ark", os.Args[1]) + doneFile := filepath.Join("/restores", child.Name(), ".velero", os.Args[1]) if _, err := os.Stat(doneFile); os.IsNotExist(err) { fmt.Printf("Not found: %s\n", doneFile) diff --git a/cmd/ark/main.go b/cmd/velero/main.go similarity index 85% rename from cmd/ark/main.go rename to cmd/velero/main.go index 33678a6eac..0bc6720107 100644 --- a/cmd/ark/main.go +++ b/cmd/velero/main.go @@ -22,8 +22,8 @@ import ( "github.com/golang/glog" - "github.com/heptio/ark/pkg/cmd" - "github.com/heptio/ark/pkg/cmd/ark" + "github.com/heptio/velero/pkg/cmd" + "github.com/heptio/velero/pkg/cmd/velero" ) func main() { @@ -31,6 +31,6 @@ func main() { baseName := filepath.Base(os.Args[0]) - err := ark.NewCommand(baseName).Execute() + err := velero.NewCommand(baseName).Execute() cmd.CheckError(err) } diff --git a/docs/about.md b/docs/about.md index 35d1051dae..cb39b8ba9e 100644 --- a/docs/about.md +++ b/docs/about.md @@ -1,10 +1,10 @@ -# How Ark Works +# How Velero Works -Each Ark operation -- on-demand backup, scheduled backup, restore -- is a custom resource, defined with a Kubernetes [Custom Resource Definition (CRD)][20] and stored in [etcd][22]. Ark also includes controllers that process the custom resources to perform backups, restores, and all related operations. +Each Velero operation -- on-demand backup, scheduled backup, restore -- is a custom resource, defined with a Kubernetes [Custom Resource Definition (CRD)][20] and stored in [etcd][22]. Velero also includes controllers that process the custom resources to perform backups, restores, and all related operations. You can back up or restore all objects in your cluster, or you can filter objects by type, namespace, and/or label. -Ark is ideal for the disaster recovery use case, as well as for snapshotting your application state, prior to performing system operations on your cluster (e.g. upgrades). +Velero is ideal for the disaster recovery use case, as well as for snapshotting your application state, prior to performing system operations on your cluster (e.g. upgrades). ## On-demand backups @@ -27,17 +27,17 @@ Scheduled backups are saved with the name `-`, where ` ## Restores -The **restore** operation allows you to restore all of the objects and persistent volumes from a previously created backup. You can also restore only a filtered subset of objects and persistent volumes. Ark supports multiple namespace remapping--for example, in a single restore, objects in namespace "abc" can be recreated under namespace "def", and the objects in namespace "123" under "456". +The **restore** operation allows you to restore all of the objects and persistent volumes from a previously created backup. You can also restore only a filtered subset of objects and persistent volumes. 
Velero supports multiple namespace remapping--for example, in a single restore, objects in namespace "abc" can be recreated under namespace "def", and the objects in namespace "123" under "456". -The default name of a restore is `-`, where `` is formatted as *YYYYMMDDhhmmss*. You can also specify a custom name. A restored object also includes a label with key `ark.heptio.com/restore-name` and value ``. +The default name of a restore is `-`, where `` is formatted as *YYYYMMDDhhmmss*. You can also specify a custom name. A restored object also includes a label with key `velero.io/restore-name` and value ``. -You can also run the Ark server in restore-only mode, which disables backup, schedule, and garbage collection functionality during disaster recovery. +You can also run the Velero server in restore-only mode, which disables backup, schedule, and garbage collection functionality during disaster recovery. ## Backup workflow -When you run `ark backup create test-backup`: +When you run `velero backup create test-backup`: -1. The Ark client makes a call to the Kubernetes API server to create a `Backup` object. +1. The Velero client makes a call to the Kubernetes API server to create a `Backup` object. 1. The `BackupController` notices the new `Backup` object and performs validation. @@ -45,19 +45,19 @@ When you run `ark backup create test-backup`: 1. The `BackupController` makes a call to the object storage service -- for example, AWS S3 -- to upload the backup file. -By default, `ark backup create` makes disk snapshots of any persistent volumes. You can adjust the snapshots by specifying additional flags. Run `ark backup create --help` to see available flags. Snapshots can be disabled with the option `--snapshot-volumes=false`. +By default, `velero backup create` makes disk snapshots of any persistent volumes. You can adjust the snapshots by specifying additional flags. Run `velero backup create --help` to see available flags. Snapshots can be disabled with the option `--snapshot-volumes=false`. ![19] ## Backed-up API versions -Ark backs up resources using the Kubernetes API server's *preferred version* for each group/resource. When restoring a resource, this same API group/version must exist in the target cluster in order for the restore to be successful. +Velero backs up resources using the Kubernetes API server's *preferred version* for each group/resource. When restoring a resource, this same API group/version must exist in the target cluster in order for the restore to be successful. For example, if the cluster being backed up has a `gizmos` resource in the `things` API group, with group/versions `things/v1alpha1`, `things/v1beta1`, and `things/v1`, and the server's preferred group/version is `things/v1`, then all `gizmos` will be backed up from the `things/v1` API endpoint. When backups from this cluster are restored, the target cluster **must** have the `things/v1` endpoint in order for `gizmos` to be restored. Note that `things/v1` **does not** need to be the preferred version in the target cluster; it just needs to exist. ## Set a backup to expire -When you create a backup, you can specify a TTL by adding the flag `--ttl `. If Ark sees that an existing backup resource is expired, it removes: +When you create a backup, you can specify a TTL by adding the flag `--ttl `. 
If Velero sees that an existing backup resource is expired, it removes: * The backup resource * The backup file from cloud object storage @@ -66,7 +66,7 @@ When you create a backup, you can specify a TTL by adding the flag `--ttl *NOTE*: `BackupStorageLocation` takes the place of the `Config.backupStorageProvider` key as of v0.10.0 A sample YAML `BackupStorageLocation` looks like the following: ```yaml -apiVersion: ark.heptio.com/v1 +apiVersion: velero.io/v1 kind: BackupStorageLocation metadata: name: default - namespace: heptio-ark + namespace: velero spec: provider: aws objectStorage: @@ -32,7 +32,7 @@ The configurable parameters are as follows: | Key | Type | Default | Meaning | | --- | --- | --- | --- | -| `provider` | String (Ark natively supports `aws`, `gcp`, and `azure`. Other providers may be available via external plugins.)| Required Field | The name for whichever cloud provider will be used to actually store the backups. | +| `provider` | String (Velero natively supports `aws`, `gcp`, and `azure`. Other providers may be available via external plugins.)| Required Field | The name for whichever cloud provider will be used to actually store the backups. | | `objectStorage` | ObjectStorageLocation | Specification of the object storage for the given provider. | | `objectStorage/bucket` | String | Required Field | The storage bucket where backups are to be uploaded. | | `objectStorage/prefix` | String | Optional Field | The directory inside a storage bucket where backups are to be uploaded. | @@ -48,10 +48,10 @@ The configurable parameters are as follows: | --- | --- | --- | --- | | `region` | string | Empty | *Example*: "us-east-1"

See [AWS documentation][3] for the full list.<br><br>Queried from the AWS S3 API if not provided. |
| `s3ForcePathStyle` | bool | `false` | Set this to `true` if you are using a local storage service like Minio. |
-| `s3Url` | string | Required field for non-AWS-hosted storage| *Example*: http://minio:9000<br><br>You can specify the AWS S3 URL here for explicitness, but Ark can already generate it from `region`, and `bucket`. This field is primarily for local storage services like Minio.|
+| `s3Url` | string | Required field for non-AWS-hosted storage| *Example*: http://minio:9000<br><br>You can specify the AWS S3 URL here for explicitness, but Velero can already generate it from `region`, and `bucket`. This field is primarily for local storage services like Minio.|
| `publicUrl` | string | Empty | *Example*: https://minio.mycluster.com<br><br>If specified, use this instead of `s3Url` when generating download URLs (e.g., for logs). This field is primarily for local storage services like Minio.|
| `kmsKeyId` | string | Empty | *Example*: "502b409c-4da1-419f-a16e-eif453b3i49f" or "alias/``"<br><br>Specify an [AWS KMS key][10] id or alias to enable encryption of the backups stored in S3. Only works with AWS S3 and may require explicitly granting key usage rights.|
-| `signatureVersion` | string | `"4"` | Version of the signature algorithm used to create signed URLs that are used by ark cli to download backups or fetch logs. Possible versions are "1" and "4". Usually the default version 4 is correct, but some S3-compatible providers like Quobyte only support version 1.|
+| `signatureVersion` | string | `"4"` | Version of the signature algorithm used to create signed URLs that are used by velero cli to download backups or fetch logs. Possible versions are "1" and "4". Usually the default version 4 is correct, but some S3-compatible providers like Quobyte only support version 1.|

#### Azure

diff --git a/docs/api-types/volumesnapshotlocation.md b/docs/api-types/volumesnapshotlocation.md
index 95744c4c64..734297f3d7 100644
--- a/docs/api-types/volumesnapshotlocation.md
+++ b/docs/api-types/volumesnapshotlocation.md
@@ -1,21 +1,21 @@
-# Ark Volume Snapshot Location
+# Velero Volume Snapshot Location

## Volume Snapshot Location

A volume snapshot location is the location in which to store the volume snapshots created for a backup.

-Ark can be configured to take snapshots of volumes from multiple providers. Ark also allows you to configure multiple possible `VolumeSnapshotLocation` per provider, although you can only select one location per provider at backup time.
+Velero can be configured to take snapshots of volumes from multiple providers. Velero also allows you to configure multiple possible `VolumeSnapshotLocation` per provider, although you can only select one location per provider at backup time.

-Each VolumeSnapshotLocation describes a provider + location. These are represented in the cluster via the `VolumeSnapshotLocation` CRD. Ark must have at least one `VolumeSnapshotLocation` per cloud provider.
+Each VolumeSnapshotLocation describes a provider + location. These are represented in the cluster via the `VolumeSnapshotLocation` CRD. Velero must have at least one `VolumeSnapshotLocation` per cloud provider.

A sample YAML `VolumeSnapshotLocation` looks like the following:

```yaml
-apiVersion: ark.heptio.com/v1
+apiVersion: velero.io/v1
kind: VolumeSnapshotLocation
metadata:
  name: aws-default
-  namespace: heptio-ark
+  namespace: velero
spec:
  provider: aws
  config:
@@ -30,7 +30,7 @@ The configurable parameters are as follows:
| Key | Type | Default | Meaning |
| --- | --- | --- | --- |
-| `provider` | String (Ark natively supports `aws`, `gcp`, and `azure`. Other providers may be available via external plugins.)| Required Field | The name for whichever cloud provider will be used to actually store the volume. |
+| `provider` | String (Velero natively supports `aws`, `gcp`, and `azure`. Other providers may be available via external plugins.)| Required Field | The name for whichever cloud provider will be used to actually store the volume. |
| `config` | See the corresponding [AWS][0], [GCP][1], and [Azure][2]-specific configs or your provider's documentation. 
#### AWS diff --git a/docs/aws-config.md b/docs/aws-config.md index 57e006588b..ca4e447703 100644 --- a/docs/aws-config.md +++ b/docs/aws-config.md @@ -1,9 +1,9 @@ -# Run Ark on AWS +# Run Velero on AWS -To set up Ark on AWS, you: +To set up Velero on AWS, you: * Create your S3 bucket -* Create an AWS IAM user for Ark +* Create an AWS IAM user for Velero * Configure the server * Create a Secret for your credentials @@ -11,7 +11,7 @@ If you do not have the `aws` CLI locally installed, follow the [user guide][5] t ## Create S3 bucket -Heptio Ark requires an object storage bucket to store backups in, preferrably unique to a single Kubernetes cluster (see the [FAQ][20] for more details). Create an S3 bucket, replacing placeholders appropriately: +Velero requires an object storage bucket to store backups in, preferrably unique to a single Kubernetes cluster (see the [FAQ][20] for more details). Create an S3 bucket, replacing placeholders appropriately: ```bash aws s3api create-bucket \ @@ -34,16 +34,16 @@ For more information, see [the AWS documentation on IAM users][14]. 1. Create the IAM user: ```bash - aws iam create-user --user-name heptio-ark + aws iam create-user --user-name velero ``` - > If you'll be using Ark to backup multiple clusters with multiple S3 buckets, it may be desirable to create a unique username per cluster rather than the default `heptio-ark`. + > If you'll be using Velero to backup multiple clusters with multiple S3 buckets, it may be desirable to create a unique username per cluster rather than the default `velero`. -2. Attach policies to give `heptio-ark` the necessary permissions: +2. Attach policies to give `velero` the necessary permissions: ```bash BUCKET= - cat > heptio-ark-policy.json < velero-policy.json <, @@ -111,7 +111,7 @@ For more information, see [the AWS documentation on IAM users][14]. } ``` -4. Create an Ark-specific credentials file (`credentials-ark`) in your local directory: +4. Create a Velero-specific credentials file (`credentials-velero`) in your local directory: ``` [default] @@ -123,7 +123,7 @@ For more information, see [the AWS documentation on IAM users][14]. ## Credentials and configuration -In the Ark directory (i.e. where you extracted the release tarball), run the following to first set up namespaces, RBAC, and other scaffolding. To run in a custom namespace, make sure that you have edited the YAML files to specify the namespace. See [Run in custom namespace][0]. +In the Velero directory (i.e. where you extracted the release tarball), run the following to first set up namespaces, RBAC, and other scaffolding. To run in a custom namespace, make sure that you have edited the YAML files to specify the namespace. See [Run in custom namespace][0]. ```bash kubectl apply -f config/common/00-prereqs.yaml @@ -133,17 +133,17 @@ Create a Secret. In the directory of the credentials file you just created, run: ```bash kubectl create secret generic cloud-credentials \ - --namespace \ - --from-file cloud=credentials-ark + --namespace \ + --from-file cloud=credentials-velero ``` Specify the following values in the example files: -* In `config/aws/05-ark-backupstoragelocation.yaml`: +* In `config/aws/05-backupstoragelocation.yaml`: * Replace `` and `` (for S3 backup storage, region is optional and will be queried from the AWS S3 API if not provided). See the [BackupStorageLocation definition][21] for details. -* In `config/aws/06-ark-volumesnapshotlocation.yaml`: +* In `config/aws/06-volumesnapshotlocation.yaml`: * Replace ``. 
See the [VolumeSnapshotLocation definition][6] for details. @@ -157,7 +157,7 @@ Specify the following values in the example files: * (Optional) If you have multiple clusters and you want to support migration of resources between them, in file `config/aws/10-deployment.yaml`: - * Uncomment the environment variable `AWS_CLUSTER_NAME` and replace `` with the current cluster's name. When restoring backup, it will make Ark (and cluster it's running on) claim ownership of AWS volumes created from snapshots taken on different cluster. + * Uncomment the environment variable `AWS_CLUSTER_NAME` and replace `` with the current cluster's name. When restoring backup, it will make Velero (and cluster it's running on) claim ownership of AWS volumes created from snapshots taken on different cluster. The best way to get the current cluster's name is to either check it with used deployment tool or to read it directly from the EC2 instances tags. The following listing shows how to get the cluster's nodes EC2 Tags. First, get the nodes external IDs (EC2 IDs): @@ -182,11 +182,11 @@ Specify the following values in the example files: ## Start the server -In the root of your Ark directory, run: +In the root of your Velero directory, run: ```bash - kubectl apply -f config/aws/05-ark-backupstoragelocation.yaml - kubectl apply -f config/aws/06-ark-volumesnapshotlocation.yaml + kubectl apply -f config/aws/05-backupstoragelocation.yaml + kubectl apply -f config/aws/06-volumesnapshotlocation.yaml kubectl apply -f config/aws/10-deployment.yaml ``` @@ -196,12 +196,12 @@ In the root of your Ark directory, run: > This path assumes you have `kube2iam` already running in your Kubernetes cluster. If that is not the case, please install it first, following the docs here: [https://github.com/jtblin/kube2iam](https://github.com/jtblin/kube2iam) -It can be set up for Ark by creating a role that will have required permissions, and later by adding the permissions annotation on the ark deployment to define which role it should use internally. +It can be set up for Velero by creating a role that will have required permissions, and later by adding the permissions annotation on the velero deployment to define which role it should use internally. 1. Create a Trust Policy document to allow the role being used for EC2 management & assume kube2iam role: ```bash - cat > heptio-ark-trust-policy.json < velero-trust-policy.json < - cat > heptio-ark-policy.json < velero-policy.json <:role/ + iam.amazonaws.com/role: arn:aws:iam:::role/ ... ``` -5. Run Ark deployment using the file `config/aws/10-deployment-kube2iam.yaml`. +5. Run Velero deployment using the file `config/aws/10-deployment-kube2iam.yaml`. [0]: namespace.md [5]: https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-welcome.html diff --git a/docs/azure-config.md b/docs/azure-config.md index d72ae48f61..2e5bfbbb20 100644 --- a/docs/azure-config.md +++ b/docs/azure-config.md @@ -1,9 +1,9 @@ -# Run Ark on Azure +# Run Velero on Azure -To configure Ark on Azure, you: +To configure Velero on Azure, you: * Create your Azure storage account and blob container -* Create Azure service principal for Ark +* Create Azure service principal for Velero * Configure the server * Create a Secret for your credentials @@ -22,11 +22,11 @@ consider using Premium Managed Disks, which are SSD backed. ## Create Azure storage account and blob container -Heptio Ark requires a storage account and blob container in which to store backups. 
+Velero requires a storage account and blob container in which to store backups. The storage account can be created in the same Resource Group as your Kubernetes cluster or separated into its own Resource Group. The example below shows the storage account created in a -separate `Ark_Backups` Resource Group. +separate `Velero_Backups` Resource Group. The storage account needs to be created with a globally unique id since this is used for dns. In the sample script below, we're generating a random name using `uuidgen`, but you can come up with @@ -36,11 +36,11 @@ configured to only allow access via https. ```bash # Create a resource group for the backups storage account. Change the location as needed. -AZURE_BACKUP_RESOURCE_GROUP=Ark_Backups +AZURE_BACKUP_RESOURCE_GROUP=Velero_Backups az group create -n $AZURE_BACKUP_RESOURCE_GROUP --location WestUS # Create the storage account -AZURE_STORAGE_ACCOUNT_ID="ark$(uuidgen | cut -d '-' -f5 | tr '[A-Z]' '[a-z]')" +AZURE_STORAGE_ACCOUNT_ID="velero$(uuidgen | cut -d '-' -f5 | tr '[A-Z]' '[a-z]')" az storage account create \ --name $AZURE_STORAGE_ACCOUNT_ID \ --resource-group $AZURE_BACKUP_RESOURCE_GROUP \ @@ -51,10 +51,10 @@ az storage account create \ --access-tier Hot ``` -Create the blob container named `ark`. Feel free to use a different name, preferably unique to a single Kubernetes cluster. See the [FAQ][20] for more details. +Create the blob container named `velero`. Feel free to use a different name, preferably unique to a single Kubernetes cluster. See the [FAQ][20] for more details. ```bash -az storage container create -n ark --public-access off --account-name $AZURE_STORAGE_ACCOUNT_ID +az storage container create -n velero --public-access off --account-name $AZURE_STORAGE_ACCOUNT_ID ``` ## Get resource group for persistent volume snapshots @@ -78,7 +78,7 @@ az storage container create -n ark --public-access off --account-name $AZURE_STO ## Create service principal -To integrate Ark with Azure, you must create an Ark-specific [service principal][17]. +To integrate Velero with Azure, you must create an Velero-specific [service principal][17]. 1. Obtain your Azure Account Subscription ID and Tenant ID: @@ -89,23 +89,23 @@ To integrate Ark with Azure, you must create an Ark-specific [service principal] 1. Create a service principal with `Contributor` role. This will have subscription-wide access, so protect this credential. You can specify a password or let the `az ad sp create-for-rbac` command create one for you. - > If you'll be using Ark to backup multiple clusters with multiple blob containers, it may be desirable to create a unique username per cluster rather than the default `heptio-ark`. + > If you'll be using Velero to backup multiple clusters with multiple blob containers, it may be desirable to create a unique username per cluster rather than the default `velero`. ```bash # Create service principal and specify your own password AZURE_CLIENT_SECRET=super_secret_and_high_entropy_password_replace_me_with_your_own - az ad sp create-for-rbac --name "heptio-ark" --role "Contributor" --password $AZURE_CLIENT_SECRET + az ad sp create-for-rbac --name "velero" --role "Contributor" --password $AZURE_CLIENT_SECRET # Or create service principal and let the CLI generate a password for you. Make sure to capture the password. 
- AZURE_CLIENT_SECRET=`az ad sp create-for-rbac --name "heptio-ark" --role "Contributor" --query 'password' -o tsv` + AZURE_CLIENT_SECRET=`az ad sp create-for-rbac --name "velero" --role "Contributor" --query 'password' -o tsv` # After creating the service principal, obtain the client id - AZURE_CLIENT_ID=`az ad sp list --display-name "heptio-ark" --query '[0].appId' -o tsv` + AZURE_CLIENT_ID=`az ad sp list --display-name "velero" --query '[0].appId' -o tsv` ``` ## Credentials and configuration -In the Ark directory (i.e. where you extracted the release tarball), run the following to first set up namespaces, RBAC, and other scaffolding. To run in a custom namespace, make sure that you have edited the YAML file to specify the namespace. See [Run in custom namespace][0]. +In the Velero directory (i.e. where you extracted the release tarball), run the following to first set up namespaces, RBAC, and other scaffolding. To run in a custom namespace, make sure that you have edited the YAML file to specify the namespace. See [Run in custom namespace][0]. ```bash kubectl apply -f config/common/00-prereqs.yaml @@ -115,7 +115,7 @@ Now you need to create a Secret that contains all the environment variables you ```bash kubectl create secret generic cloud-credentials \ - --namespace \ + --namespace \ --from-literal AZURE_SUBSCRIPTION_ID=${AZURE_SUBSCRIPTION_ID} \ --from-literal AZURE_TENANT_ID=${AZURE_TENANT_ID} \ --from-literal AZURE_CLIENT_ID=${AZURE_CLIENT_ID} \ @@ -125,21 +125,21 @@ kubectl create secret generic cloud-credentials \ Now that you have your Azure credentials stored in a Secret, you need to replace some placeholder values in the template files. Specifically, you need to change the following: -* In file `config/azure/05-ark-backupstoragelocation.yaml`: +* In file `config/azure/05-backupstoragelocation.yaml`: * Replace ``, ``, and ``. See the [BackupStorageLocation definition][21] for details. -* In file `config/azure/06-ark-volumesnapshotlocation.yaml`: +* In file `config/azure/06-volumesnapshotlocation.yaml`: * Replace ``. See the [VolumeSnapshotLocation definition][8] for details. -* (Optional, use only if you need to specify multiple volume snapshot locations) In `config/azure/00-ark-deployment.yaml`: +* (Optional, use only if you need to specify multiple volume snapshot locations) In `config/azure/00-deployment.yaml`: * Uncomment the `--default-volume-snapshot-locations` and replace provider locations with the values for your environment. ## Start the server -In the root of your Ark directory, run: +In the root of your Velero directory, run: ```bash kubectl apply -f config/azure/ diff --git a/docs/build-from-scratch.md b/docs/build-from-scratch.md index 96532fddad..d17e471831 100644 --- a/docs/build-from-scratch.md +++ b/docs/build-from-scratch.md @@ -9,7 +9,7 @@ ## Prerequisites -* Access to a Kubernetes cluster, version 1.7 or later. Version 1.7.5 or later is required to run `ark backup delete`. +* Access to a Kubernetes cluster, version 1.7 or later. Version 1.7.5 or later is required to run `velero backup delete`. * A DNS server on the cluster * `kubectl` installed * [Go][5] installed (minimum version 1.8) @@ -19,7 +19,7 @@ ```bash mkdir $HOME/go export GOPATH=$HOME/go -go get github.com/heptio/ark +go get github.com/heptio/velero ``` Where `go` is your [import path][4] for Go. 
@@ -29,11 +29,11 @@ For Go development, it is recommended to add the Go import path (`$HOME/go` in t ## Build -You can build your Ark image locally on the machine where you run your cluster, or you can push it to a private registry. This section covers both workflows. +You can build your Velero image locally on the machine where you run your cluster, or you can push it to a private registry. This section covers both workflows. -Set the `$REGISTRY` environment variable (used in the `Makefile`) to push the Heptio Ark images to your own registry. This allows any node in your cluster to pull your locally built image. +Set the `$REGISTRY` environment variable (used in the `Makefile`) to push the Velero images to your own registry. This allows any node in your cluster to pull your locally built image. -In the Ark root directory, to build your container with the tag `$REGISTRY/ark:$VERSION`, run: +In the Velero root directory, to build your container with the tag `$REGISTRY/velero:$VERSION`, run: ``` make container @@ -63,12 +63,12 @@ Run [generate-proto.sh][13] to regenerate files if you make the following change ### Cross compiling -By default, `make build` builds an `ark` binary for `linux-amd64`. +By default, `make build` builds an `velero` binary for `linux-amd64`. To build for another platform, run `make build--`. For example, to build for the Mac, run `make build-darwin-amd64`. -All binaries are placed in `_output/bin//`-- for example, `_output/bin/darwin/amd64/ark`. +All binaries are placed in `_output/bin//`-- for example, `_output/bin/darwin/amd64/velero`. -Ark's `Makefile` has a convenience target, `all-build`, that builds the following platforms: +Velero's `Makefile` has a convenience target, `all-build`, that builds the following platforms: * linux-amd64 * linux-arm @@ -85,7 +85,7 @@ files (clientset, listers, shared informers, docs) are up to date. ### Prerequisites -When running Heptio Ark, you will need to account for the following (all of which are handled in the [`/examples`][6] manifests): +When running Velero, you will need to account for the following (all of which are handled in the [`/examples`][6] manifests): * Appropriate RBAC permissions in the cluster * Read access for all data from the source cluster and namespaces @@ -93,8 +93,8 @@ When running Heptio Ark, you will need to account for the following (all of whic * Cloud provider credentials * Read/write access to volumes * Read/write access to object storage for backup data -* A [BackupStorageLocation][20] object definition for the Ark server -* (Optional) A [VolumeSnapshotLocation][21] object definition for the Ark server, to take PV snapshots +* A [BackupStorageLocation][20] object definition for the Velero server +* (Optional) A [VolumeSnapshotLocation][21] object definition for the Velero server, to take PV snapshots ### Create a cluster @@ -104,9 +104,9 @@ To provision a cluster on AWS using Amazon’s official CloudFormation templates * eksctl - [a CLI for Amazon EKS][18] -### Option 1: Run your Ark server locally +### Option 1: Run your Velero server locally -Running the Ark server locally can speed up iterative development. This eliminates the need to rebuild the Ark server +Running the Velero server locally can speed up iterative development. This eliminates the need to rebuild the Velero server image and redeploy it to the cluster with each change. #### 1. Set enviroment variables @@ -139,64 +139,64 @@ You may create resources on a cluster using our [example configurations][19]. 
##### Example -Here is how to setup using an existing cluster in AWS: At the root of the Ark repo: +Here is how to setup using an existing cluster in AWS: At the root of the Velero repo: -- Edit `examples/aws/05-ark-backupstoragelocation.yaml` to point to your AWS S3 bucket and region. Note: you can run `aws s3api list-buckets` to get the name of all your buckets. +- Edit `examples/aws/05-backupstoragelocation.yaml` to point to your AWS S3 bucket and region. Note: you can run `aws s3api list-buckets` to get the name of all your buckets. -- (Optional) Edit `examples/aws/06-ark-volumesnapshotlocation.yaml` to point to your AWS region. +- (Optional) Edit `examples/aws/06-volumesnapshotlocation.yaml` to point to your AWS region. Then run the commands below. -`00-prereqs.yaml` contains all our CustomResourceDefinitions (CRDs) that allow us to perform CRUD operations on backups, restores, schedules, etc. it also contains the `heptio-ark` namespace, the `ark` ServiceAccount, and a cluster role binding to grant the `ark` ServiceAccount the cluster-admin role: +`00-prereqs.yaml` contains all our CustomResourceDefinitions (CRDs) that allow us to perform CRUD operations on backups, restores, schedules, etc. it also contains the `velero` namespace, the `velero` ServiceAccount, and a cluster role binding to grant the `velero` ServiceAccount the cluster-admin role: ```bash kubectl apply -f examples/common/00-prereqs.yaml ``` -`10-deployment.yaml` is a sample Ark config resource for AWS: +`10-deployment.yaml` is a sample Velero config resource for AWS: ```bash kubectl apply -f examples/aws/10-deployment.yaml ``` -And `05-ark-backupstoragelocation.yaml` specifies the location of your backup storage, together with the optional `06-ark-volumesnapshotlocation.yaml`: +And `05-backupstoragelocation.yaml` specifies the location of your backup storage, together with the optional `06-volumesnapshotlocation.yaml`: ```bash -kubectl apply -f examples/aws/05-ark-backupstoragelocation.yaml +kubectl apply -f examples/aws/05-backupstoragelocation.yaml ``` or ```bash -kubectl apply -f examples/aws/05-ark-backupstoragelocation.yaml examples/aws/06-ark-volumesnapshotlocation.yaml +kubectl apply -f examples/aws/05-backupstoragelocation.yaml examples/aws/06-volumesnapshotlocation.yaml ``` -### 3. Start the Ark server +### 3. Start the Velero server -* Make sure `ark` is in your `PATH` or specify the full path. +* Make sure `velero` is in your `PATH` or specify the full path. -* Set variable for Ark as needed. The variables below can be exported as environment variables or passed as CLI cmd flags: - * `--kubeconfig`: set the path to the kubeconfig file the Ark server uses to talk to the Kubernetes apiserver - * `--namespace`: the set namespace where the Ark server should look for backups, schedules, restores - * `--log-level`: set the Ark server's log level - * `--plugin-dir`: set the directory where the Ark server looks for plugins +* Set variable for Velero as needed. 
The variables below can be exported as environment variables or passed as CLI cmd flags: + * `--kubeconfig`: set the path to the kubeconfig file the Velero server uses to talk to the Kubernetes apiserver + * `--namespace`: the set namespace where the Velero server should look for backups, schedules, restores + * `--log-level`: set the Velero server's log level + * `--plugin-dir`: set the directory where the Velero server looks for plugins * `--metrics-address`: set the bind address and port where Prometheus metrics are exposed -* Start the server: `ark server` +* Start the server: `velero server` -### Option 2: Run your Ark server in a deployment +### Option 2: Run your Velero server in a deployment -1. Install Ark using a deployment: +1. Install Velero using a deployment: We have examples of deployments for different cloud providers in `examples//10-deployment.yaml`. -2. Replace the deployment's default Ark image with the image that you built. Run: +2. Replace the deployment's default Velero image with the image that you built. Run: ``` -kubectl --namespace=heptio-ark set image deployment/ark ark=$REGISTRY/ark:$VERSION +kubectl --namespace=velero set image deployment/velero velero=$REGISTRY/velero:$VERSION ``` -where `$REGISTRY` and `$VERSION` are the values that you built Ark with. +where `$REGISTRY` and `$VERSION` are the values that you built Velero with. ## 5. Vendoring dependencies @@ -208,13 +208,13 @@ If you need to add or update the vendored dependencies, see [Vendoring dependenc [3]: #build [4]: https://blog.golang.org/organizing-go-code [5]: https://golang.org/doc/install -[6]: https://github.com/heptio/ark/tree/master/examples +[6]: https://github.com/heptio/velero/tree/master/examples [7]: #run [8]: config-definition.md [10]: #vendoring-dependencies [11]: vendoring-dependencies.md [12]: #test -[13]: https://github.com/heptio/ark/blob/master/hack/generate-proto.sh +[13]: https://github.com/heptio/velero/blob/master/hack/generate-proto.sh [14]: https://grpc.io/docs/quickstart/go.html#install-protocol-buffers-v3 [15]: https://docs.aws.amazon.com/cli/latest/topic/config-vars.html#the-shared-credentials-file [16]: https://cloud.google.com/docs/authentication/getting-started#setting_the_environment_variable diff --git a/docs/debugging-install.md b/docs/debugging-install.md index bf8d8716a1..aaf25986d5 100644 --- a/docs/debugging-install.md +++ b/docs/debugging-install.md @@ -3,17 +3,17 @@ ## General ### `invalid configuration: no configuration has been provided` -This typically means that no `kubeconfig` file can be found for the Ark client to use. Ark looks for a kubeconfig in the +This typically means that no `kubeconfig` file can be found for the Velero client to use. Velero looks for a kubeconfig in the following locations: * the path specified by the `--kubeconfig` flag, if any * the path specified by the `$KUBECONFIG` environment variable, if any * `~/.kube/config` ### Backups or restores stuck in `New` phase -This means that the Ark controllers are not processing the backups/restores, which usually happens because the Ark server is not running. Check the pod description and logs for errors: +This means that the Velero controllers are not processing the backups/restores, which usually happens because the Velero server is not running. 
Check the pod description and logs for errors: ``` -kubectl -n heptio-ark describe pods -kubectl -n heptio-ark logs deployment/ark +kubectl -n velero describe pods +kubectl -n velero logs deployment/velero ``` @@ -22,19 +22,19 @@ kubectl -n heptio-ark logs deployment/ark ### `NoCredentialProviders: no valid providers in chain` #### Using credentials -This means that the secret containing the AWS IAM user credentials for Ark has not been created/mounted properly -into the Ark server pod. Ensure the following: -* The `cloud-credentials` secret exists in the Ark server's namespace -* The `cloud-credentials` secret has a single key, `cloud`, whose value is the contents of the `credentials-ark` file -* The `credentials-ark` file is formatted properly and has the correct values: +This means that the secret containing the AWS IAM user credentials for Velero has not been created/mounted properly +into the Velero server pod. Ensure the following: +* The `cloud-credentials` secret exists in the Velero server's namespace +* The `cloud-credentials` secret has a single key, `cloud`, whose value is the contents of the `credentials-velero` file +* The `credentials-velero` file is formatted properly and has the correct values: ``` [default] aws_access_key_id= aws_secret_access_key= ``` -* The `cloud-credentials` secret is defined as a volume for the Ark deployment -* The `cloud-credentials` secret is being mounted into the Ark server pod at `/credentials` +* The `cloud-credentials` secret is defined as a volume for the Velero deployment +* The `cloud-credentials` secret is being mounted into the Velero server pod at `/credentials` #### Using kube2iam This means that Ark can't read the content of the S3 bucket. Ensure the following: @@ -45,22 +45,22 @@ This means that Ark can't read the content of the S3 bucket. Ensure the followin ## Azure ### `Failed to refresh the Token` or `adal: Refresh request failed` -This means that the secrets containing the Azure service principal credentials for Ark has not been created/mounted -properly into the Ark server pod. Ensure the following: -* The `cloud-credentials` secret exists in the Ark server's namespace +This means that the secrets containing the Azure service principal credentials for Velero has not been created/mounted +properly into the Velero server pod. Ensure the following: +* The `cloud-credentials` secret exists in the Velero server's namespace * The `cloud-credentials` secret has all of the expected keys and each one has the correct value (see [setup instructions](0)) -* The `cloud-credentials` secret is defined as a volume for the Ark deployment -* The `cloud-credentials` secret is being mounted into the Ark server pod at `/credentials` +* The `cloud-credentials` secret is defined as a volume for the Velero deployment +* The `cloud-credentials` secret is being mounted into the Velero server pod at `/credentials` ## GCE/GKE ### `open credentials/cloud: no such file or directory` -This means that the secret containing the GCE service account credentials for Ark has not been created/mounted properly -into the Ark server pod. 
Ensure the following: -* The `cloud-credentials` secret exists in the Ark server's namespace -* The `cloud-credentials` secret has a single key, `cloud`, whose value is the contents of the `credentials-ark` file -* The `cloud-credentials` secret is defined as a volume for the Ark deployment -* The `cloud-credentials` secret is being mounted into the Ark server pod at `/credentials` +This means that the secret containing the GCE service account credentials for Velero has not been created/mounted properly +into the Velero server pod. Ensure the following: +* The `cloud-credentials` secret exists in the Velero server's namespace +* The `cloud-credentials` secret has a single key, `cloud`, whose value is the contents of the `credentials-velero` file +* The `cloud-credentials` secret is defined as a volume for the Velero deployment +* The `cloud-credentials` secret is being mounted into the Velero server pod at `/credentials` [0]: azure-config#credentials-and-configuration diff --git a/docs/debugging-restores.md b/docs/debugging-restores.md index bac4de5010..7ff6153be3 100644 --- a/docs/debugging-restores.md +++ b/docs/debugging-restores.md @@ -5,7 +5,7 @@ ## Example -When Heptio Ark finishes a Restore, its status changes to "Completed" regardless of whether or not there are issues during the process. The number of warnings and errors are indicated in the output columns from `ark restore get`: +When Velero finishes a Restore, its status changes to "Completed" regardless of whether or not there are issues during the process. The number of warnings and errors are indicated in the output columns from `velero restore get`: ``` NAME BACKUP STATUS WARNINGS ERRORS CREATED SELECTOR @@ -15,14 +15,14 @@ backup-test-2-20170726180514 backup-test-2 Completed 0 0 2 backup-test-2-20170726180515 backup-test-2 Completed 0 1 2017-07-26 13:32:59 -0400 EDT ``` -To delve into the warnings and errors into more detail, you can use `ark restore describe`: +To delve into the warnings and errors into more detail, you can use `velero restore describe`: ``` -ark restore describe backup-test-20170726180512 +velero restore describe backup-test-20170726180512 ``` The output looks like this: ``` Name: backup-test-20170726180512 -Namespace: heptio-ark +Namespace: velero Labels: Annotations: @@ -48,10 +48,10 @@ Phase: Completed Validation errors: Warnings: - Ark: + Velero: Cluster: Namespaces: - heptio-ark: serviceaccounts "ark" already exists + velero: serviceaccounts "velero" already exists serviceaccounts "default" already exists kube-public: serviceaccounts "default" already exists kube-system: serviceaccounts "attachdetach-controller" already exists @@ -80,7 +80,7 @@ Warnings: default: serviceaccounts "default" already exists Errors: - Ark: + Velero: Cluster: Namespaces: ``` @@ -93,7 +93,7 @@ of them may have been pre-existing). Both errors and warnings are structured in the same way: -* `Ark`: A list of system-related issues encountered by the Ark server (e.g. couldn't read directory). +* `Velero`: A list of system-related issues encountered by the Velero server (e.g. couldn't read directory). * `Cluster`: A list of issues related to the restore of cluster-scoped resources. 
diff --git a/docs/disaster-case.md b/docs/disaster-case.md index 73d569fba5..5958119089 100644 --- a/docs/disaster-case.md +++ b/docs/disaster-case.md @@ -2,22 +2,22 @@ *Using Schedules and Restore-Only Mode* -If you periodically back up your cluster's resources, you are able to return to a previous state in case of some unexpected mishap, such as a service outage. Doing so with Heptio Ark looks like the following: +If you periodically back up your cluster's resources, you are able to return to a previous state in case of some unexpected mishap, such as a service outage. Doing so with Velero looks like the following: -1. After you first run the Ark server on your cluster, set up a daily backup (replacing `` in the command as desired): +1. After you first run the Velero server on your cluster, set up a daily backup (replacing `` in the command as desired): ``` - ark schedule create --schedule "0 7 * * *" + velero schedule create --schedule "0 7 * * *" ``` This creates a Backup object with the name `-`. 1. A disaster happens and you need to recreate your resources. -1. Update the Ark server deployment, adding the argument for the `server` command flag `restore-only` set to `true`. This prevents Backup objects from being created or deleted during your Restore process. +1. Update the Velero server deployment, adding the argument for the `server` command flag `restore-only` set to `true`. This prevents Backup objects from being created or deleted during your Restore process. -1. Create a restore with your most recent Ark Backup: +1. Create a restore with your most recent Velero Backup: ``` - ark restore create --from-backup - + velero restore create --from-backup - ``` diff --git a/docs/expose-minio.md b/docs/expose-minio.md index 1f4f0f8d9a..236d038096 100644 --- a/docs/expose-minio.md +++ b/docs/expose-minio.md @@ -1,17 +1,17 @@ # Expose Minio outside your cluster -When you run commands to get logs or describe a backup, the Ark server generates a pre-signed URL to download the requested items. To access these URLs from outside the cluster -- that is, from your Ark client -- you need to make Minio available outside the cluster. You can: +When you run commands to get logs or describe a backup, the Velero server generates a pre-signed URL to download the requested items. To access these URLs from outside the cluster -- that is, from your Velero client -- you need to make Minio available outside the cluster. You can: - Change the Minio Service type from `ClusterIP` to `NodePort`. - Set up Ingress for your cluster, keeping Minio Service type `ClusterIP`. -In Ark 0.10, you can also specify the value of a new `publicUrl` field for the pre-signed URL in your backup storage config. +In Velero 0.10, you can also specify the value of a new `publicUrl` field for the pre-signed URL in your backup storage config. -For basic instructions on how to install the Ark server and client, see [the getting started example][1]. +For basic instructions on how to install the Velero server and client, see [the getting started example][1]. ## Expose Minio with Service of type NodePort -The Minio deployment by default specifies a Service of type `ClusterIP`. You can change this to `NodePort` to easily expose a cluster service externally if you can reach the node from your Ark client. +The Minio deployment by default specifies a Service of type `ClusterIP`. You can change this to `NodePort` to easily expose a cluster service externally if you can reach the node from your Velero client. 
You must also get the Minio URL, which you can then specify as the value of the new `publicUrl` field in your backup storage config. @@ -22,29 +22,29 @@ You must also get the Minio URL, which you can then specify as the value of the - if you're running Minikube: ```shell - minikube service minio --namespace=heptio-ark --url + minikube service minio --namespace=velero --url ``` - in any other environment: - 1. Get the value of an external IP address or DNS name of any node in your cluster. You must be able to reach this address from the Ark client. + 1. Get the value of an external IP address or DNS name of any node in your cluster. You must be able to reach this address from the Velero client. 1. Append the value of the NodePort to get a complete URL. You can get this value by running: ```shell - kubectl -n heptio-ark get svc/minio -o jsonpath='{.spec.ports[0].nodePort}' + kubectl -n velero get svc/minio -o jsonpath='{.spec.ports[0].nodePort}' ``` -1. In `examples/minio/05-ark-backupstoragelocation.yaml`, uncomment the `publicUrl` line and provide this Minio URL as the value of the `publicUrl` field. You must include the `http://` or `https://` prefix. +1. In `examples/minio/05-backupstoragelocation.yaml`, uncomment the `publicUrl` line and provide this Minio URL as the value of the `publicUrl` field. You must include the `http://` or `https://` prefix. ## Work with Ingress -Configuring Ingress for your cluster is out of scope for the Ark documentation. If you have already set up Ingress, however, it makes sense to continue with it while you run the example Ark configuration with Minio. +Configuring Ingress for your cluster is out of scope for the Velero documentation. If you have already set up Ingress, however, it makes sense to continue with it while you run the example Velero configuration with Minio. In this case: 1. Keep the Service type as `ClusterIP`. -1. In `examples/minio/05-ark-backupstoragelocation.yaml`, uncomment the `publicUrl` line and provide the URL and port of your Ingress as the value of the `publicUrl` field. +1. In `examples/minio/05-backupstoragelocation.yaml`, uncomment the `publicUrl` line and provide the URL and port of your Ingress as the value of the `publicUrl` field. -[1]: get-started.md \ No newline at end of file +[1]: get-started.md diff --git a/docs/extend.md b/docs/extend.md index 0a96b8e3e6..5a1c685333 100644 --- a/docs/extend.md +++ b/docs/extend.md @@ -1,9 +1,9 @@ -# Extend Ark +# Extend Velero -Ark includes mechanisms for extending the core functionality to meet your individual backup/restore needs: +Velero includes mechanisms for extending the core functionality to meet your individual backup/restore needs: * [Hooks][27] allow you to specify commands to be executed within running pods during a backup. This is useful if you need to run a workload-specific command prior to taking a backup (for example, to flush disk buffers or to freeze a database). -* [Plugins][28] allow you to develop custom object/block storage back-ends or per-item backup/restore actions that can execute arbitrary logic, including modifying the items being backed up/restored. Plugins can be used by Ark without needing to be compiled into the core Ark binary. +* [Plugins][28] allow you to develop custom object/block storage back-ends or per-item backup/restore actions that can execute arbitrary logic, including modifying the items being backed up/restored. Plugins can be used by Velero without needing to be compiled into the core Velero binary. 
[27]: hooks.md [28]: plugins.md diff --git a/docs/faq.md b/docs/faq.md index b25b4f4399..2cd9025c7a 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -1,15 +1,15 @@ # FAQ -## When is it appropriate to use Ark instead of etcd's built in backup/restore? +## When is it appropriate to use Velero instead of etcd's built in backup/restore? Etcd's backup/restore tooling is good for recovering from data loss in a single etcd cluster. For example, it is a good idea to take a backup of etcd prior to upgrading etcd itself. For more -sophisticated management of your Kubernetes cluster backups and restores, we feel that Ark is +sophisticated management of your Kubernetes cluster backups and restores, we feel that Velero is generally a better approach. It gives you the ability to throw away an unstable cluster and restore your Kubernetes resources and data into a new cluster, which you can't do easily just by backing up and restoring etcd. -Examples of cases where Ark is useful: +Examples of cases where Velero is useful: * you don't have access to etcd (e.g. you're running on GKE) * backing up both Kubernetes resources and persistent volume state @@ -18,20 +18,20 @@ Examples of cases where Ark is useful: * backing up Kubernetes resources that are stored across multiple etcd clusters (for example if you run a custom apiserver) -## Will Ark restore my Kubernetes resources exactly the way they were before? +## Will Velero restore my Kubernetes resources exactly the way they were before? -Yes, with some exceptions. For example, when Ark restores pods it deletes the `nodeName` from the +Yes, with some exceptions. For example, when Velero restores pods it deletes the `nodeName` from the pod so that it can be scheduled onto a new node. You can see some more examples of the differences -in [pod_action.go](https://github.com/heptio/ark/blob/master/pkg/restore/pod_action.go) +in [pod_action.go](https://github.com/heptio/velero/blob/master/pkg/restore/pod_action.go) -## I'm using Ark in multiple clusters. Should I use the same bucket to store all of my backups? +## I'm using Velero in multiple clusters. Should I use the same bucket to store all of my backups? We **strongly** recommend that you use a separate bucket per cluster to store backups. Sharing a bucket -across multiple Ark instances can lead to numerous problems - failed backups, overwritten backups, -inadvertently deleted backups, etc., all of which can be avoided by using a separate bucket per Ark +across multiple Velero instances can lead to numerous problems - failed backups, overwritten backups, +inadvertently deleted backups, etc., all of which can be avoided by using a separate bucket per Velero instance. Related to this, if you need to restore a backup from cluster A into cluster B, please use restore-only -mode in cluster B's Ark instance (via the `--restore-only` flag on the `ark server` command specified -in your Ark deployment) while it's configured to use cluster A's bucket. This will ensure no +mode in cluster B's Velero instance (via the `--restore-only` flag on the `velero server` command specified +in your Velero deployment) while it's configured to use cluster A's bucket. This will ensure no new backups are created, and no existing backups are deleted or overwritten. 
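As a rough sketch of enabling that mode (assuming the server runs as the `velero` Deployment in the `velero` namespace, as in the example configs), you can edit the Deployment and add the flag to the server container's arguments:

```shell
kubectl -n velero edit deployment/velero
# then append the flag to the container's args, e.g.:
#   args: ["server", "--restore-only"]
```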
diff --git a/docs/gcp-config.md b/docs/gcp-config.md index 7b0b13e921..f0531e7750 100644 --- a/docs/gcp-config.md +++ b/docs/gcp-config.md @@ -1,4 +1,4 @@ -# Run Ark on GCP +# Run Velero on GCP You can run Kubernetes on Google Cloud Platform in either: @@ -9,7 +9,7 @@ If you do not have the `gcloud` and `gsutil` CLIs locally installed, follow the ## Create GCS bucket -Heptio Ark requires an object storage bucket in which to store backups, preferably unique to a single Kubernetes cluster (see the [FAQ][20] for more details). Create a GCS bucket, replacing the placeholder with the name of your bucket: +Velero requires an object storage bucket in which to store backups, preferably unique to a single Kubernetes cluster (see the [FAQ][20] for more details). Create a GCS bucket, replacing the placeholder with the name of your bucket: ```bash BUCKET= @@ -19,7 +19,7 @@ gsutil mb gs://$BUCKET/ ## Create service account -To integrate Heptio Ark with GCP, create an Ark-specific [Service Account][15]: +To integrate Velero with GCP, create a Velero-specific [Service Account][15]: 1. View your current config settings: @@ -36,13 +36,13 @@ To integrate Heptio Ark with GCP, create an Ark-specific [Service Account][15]: 2. Create a service account: ```bash - gcloud iam service-accounts create heptio-ark \ - --display-name "Heptio Ark service account" + gcloud iam service-accounts create velero \ + --display-name "Velero service account" ``` - > If you'll be using Ark to backup multiple clusters with multiple GCS buckets, it may be desirable to create a unique username per cluster rather than the default `heptio-ark`. + > If you'll be using Velero to back up multiple clusters with multiple GCS buckets, it may be desirable to create a unique username per cluster rather than the default `velero`. - Then list all accounts and find the `heptio-ark` account you just created: + Then list all accounts and find the `velero` account you just created: ```bash gcloud iam service-accounts list ``` @@ -51,11 +51,11 @@ To integrate Heptio Ark with GCP, create an Ark-specific [Service Account][15]: ```bash SERVICE_ACCOUNT_EMAIL=$(gcloud iam service-accounts list \ - --filter="displayName:Heptio Ark service account" \ + --filter="displayName:Velero service account" \ --format 'value(email)') ``` -3. Attach policies to give `heptio-ark` the necessary permissions to function: +3. Attach policies to give `velero` the necessary permissions to function: ```bash @@ -69,22 +69,22 @@ To integrate Heptio Ark with GCP, create an Ark-specific [Service Account][15]: compute.snapshots.delete ) - gcloud iam roles create heptio_ark.server \ + gcloud iam roles create velero.server \ --project $PROJECT_ID \ - --title "Heptio Ark Server" \ + --title "Velero Server" \ --permissions "$(IFS=","; echo "${ROLE_PERMISSIONS[*]}")" gcloud projects add-iam-policy-binding $PROJECT_ID \ --member serviceAccount:$SERVICE_ACCOUNT_EMAIL \ - --role projects/$PROJECT_ID/roles/heptio_ark.server + --role projects/$PROJECT_ID/roles/velero.server gsutil iam ch serviceAccount:$SERVICE_ACCOUNT_EMAIL:objectAdmin gs://${BUCKET} ``` -4. Create a service account key, specifying an output file (`credentials-ark`) in your local directory: +4.
Create a service account key, specifying an output file (`credentials-velero`) in your local directory: ```bash - gcloud iam service-accounts keys create credentials-ark \ + gcloud iam service-accounts keys create credentials-velero \ --iam-account $SERVICE_ACCOUNT_EMAIL ``` @@ -93,7 +93,7 @@ To integrate Heptio Ark with GCP, create an Ark-specific [Service Account][15]: If you run Google Kubernetes Engine (GKE), make sure that your current IAM user is a cluster-admin. This role is required to create RBAC objects. See [the GKE documentation][22] for more information. -In the Ark directory (i.e. where you extracted the release tarball), run the following to first set up namespaces, RBAC, and other scaffolding. To run in a custom namespace, make sure that you have edited the YAML files to specify the namespace. See [Run in custom namespace][0]. +In the Velero directory (i.e. where you extracted the release tarball), run the following to first set up namespaces, RBAC, and other scaffolding. To run in a custom namespace, make sure that you have edited the YAML files to specify the namespace. See [Run in custom namespace][0]. ```bash kubectl apply -f config/common/00-prereqs.yaml @@ -103,15 +103,15 @@ Create a Secret. In the directory of the credentials file you just created, run: ```bash kubectl create secret generic cloud-credentials \ - --namespace heptio-ark \ - --from-file cloud=credentials-ark + --namespace velero \ + --from-file cloud=credentials-velero ``` -**Note: If you use a custom namespace, replace `heptio-ark` with the name of the custom namespace** +**Note: If you use a custom namespace, replace `velero` with the name of the custom namespace** Specify the following values in the example files: -* In file `config/gcp/05-ark-backupstoragelocation.yaml`: +* In file `config/gcp/05-backupstoragelocation.yaml`: * Replace ``. See the [BackupStorageLocation definition][7] for details. @@ -125,11 +125,11 @@ Specify the following values in the example files: ## Start the server -In the root of your Ark directory, run: +In the root of your Velero directory, run: ```bash - kubectl apply -f config/gcp/05-ark-backupstoragelocation.yaml - kubectl apply -f config/gcp/06-ark-volumesnapshotlocation.yaml + kubectl apply -f config/gcp/05-backupstoragelocation.yaml + kubectl apply -f config/gcp/06-volumesnapshotlocation.yaml kubectl apply -f config/gcp/10-deployment.yaml ``` diff --git a/docs/get-started.md b/docs/get-started.md index c11cb23e08..d910a374c5 100644 --- a/docs/get-started.md +++ b/docs/get-started.md @@ -1,19 +1,19 @@ ## Getting started -The following example sets up the Ark server and client, then backs up and restores a sample application. +The following example sets up the Velero server and client, then backs up and restores a sample application. For simplicity, the example uses Minio, an S3-compatible storage service that runs locally on your cluster. For additional functionality with this setup, see the docs on how to [expose Minio outside your cluster][31]. -**NOTE** The example lets you explore basic Ark functionality. Configuring Minio for production is out of scope. +**NOTE** The example lets you explore basic Velero functionality. Configuring Minio for production is out of scope. -See [Set up Ark on your platform][3] for how to configure Ark for a production environment. +See [Set up Velero on your platform][3] for how to configure Velero for a production environment. 
If you encounter issues with installing or configuring, see [Debugging Installation Issues](debugging-install.md). ### Prerequisites -* Access to a Kubernetes cluster, version 1.7 or later. Version 1.7.5 or later is required to run `ark backup delete`. +* Access to a Kubernetes cluster, version 1.7 or later. Version 1.7.5 or later is required to run `velero backup delete`. * A DNS server on the cluster * `kubectl` installed @@ -25,22 +25,22 @@ If you encounter issues with installing or configuring, see [Debugging Installat ```bash tar -xzf .tar.gz -C /dir/to/extract/to ``` - We'll refer to the directory you extracted to as the "Ark directory" in subsequent steps. + We'll refer to the directory you extracted to as the "Velero directory" in subsequent steps. -1. Move the `ark` binary from the Ark directory to somewhere in your PATH. +1. Move the `velero` binary from the Velero directory to somewhere in your PATH. #### MacOS Installation -On Mac, you can use [HomeBrew](https://brew.sh) to install the `ark` client: +On Mac, you can use [HomeBrew](https://brew.sh) to install the `velero` client: ```bash -brew install ark +brew install velero ``` ### Set up server -These instructions start the Ark server and a Minio instance that is accessible from within the cluster only. See [Expose Minio outside your cluster][31] for information about configuring your cluster for outside access to Minio. Outside access is required to access logs and run `ark describe` commands. +These instructions start the Velero server and a Minio instance that is accessible from within the cluster only. See [Expose Minio outside your cluster][31] for information about configuring your cluster for outside access to Minio. Outside access is required to access logs and run `velero describe` commands. -1. Start the server and the local storage service. In the Ark directory, run: +1. Start the server and the local storage service. In the Velero directory, run: ```bash kubectl apply -f config/common/00-prereqs.yaml @@ -53,10 +53,10 @@ These instructions start the Ark server and a Minio instance that is accessible kubectl apply -f config/nginx-app/base.yaml ``` -1. Check to see that both the Ark and nginx deployments are successfully created: +1. Check to see that both the Velero and nginx deployments are successfully created: ``` - kubectl get deployments -l component=ark --namespace=heptio-ark + kubectl get deployments -l component=velero --namespace=velero kubectl get deployments --namespace=nginx-example ``` @@ -65,25 +65,25 @@ These instructions start the Ark server and a Minio instance that is accessible 1. Create a backup for any object that matches the `app=nginx` label selector: ``` - ark backup create nginx-backup --selector app=nginx + velero backup create nginx-backup --selector app=nginx ``` Alternatively if you want to backup all objects *except* those matching the label `backup=ignore`: ``` - ark backup create nginx-backup --selector 'backup notin (ignore)' + velero backup create nginx-backup --selector 'backup notin (ignore)' ``` 1. 
(Optional) Create regularly scheduled backups based on a cron expression using the `app=nginx` label selector: ``` - ark schedule create nginx-daily --schedule="0 1 * * *" --selector app=nginx + velero schedule create nginx-daily --schedule="0 1 * * *" --selector app=nginx ``` Alternatively, you can use some non-standard shorthand cron expressions: ``` - ark schedule create nginx-daily --schedule="@daily" --selector app=nginx + velero schedule create nginx-daily --schedule="@daily" --selector app=nginx ``` See the [cron package's documentation][30] for more usage examples. @@ -111,13 +111,13 @@ These instructions start the Ark server and a Minio instance that is accessible 1. Run: ``` - ark restore create --from-backup nginx-backup + velero restore create --from-backup nginx-backup ``` 1. Run: ``` - ark restore get + velero restore get ``` After the restore finishes, the output looks like the following: @@ -134,7 +134,7 @@ After a successful restore, the `STATUS` column is `Completed`, and `WARNINGS` a If there are errors or warnings, you can look at them in detail: ``` -ark restore describe +velero restore describe ``` For more information, see [the debugging information][18]. @@ -145,21 +145,21 @@ If you want to delete any backups you created, including data in object storage volume snapshots, you can run: ``` -ark backup delete BACKUP_NAME +velero backup delete BACKUP_NAME ``` -This asks the Ark server to delete all backup data associated with `BACKUP_NAME`. You need to do -this for each backup you want to permanently delete. A future version of Ark will allow you to +This asks the Velero server to delete all backup data associated with `BACKUP_NAME`. You need to do +this for each backup you want to permanently delete. A future version of Velero will allow you to delete multiple backups by name or label selector. Once fully removed, the backup is no longer visible when you run: ``` -ark backup get BACKUP_NAME +velero backup get BACKUP_NAME ``` -If you want to uninstall Ark but preserve the backup data in object storage and persistent volume -snapshots, it is safe to remove the `heptio-ark` namespace and everything else created for this +If you want to uninstall Velero but preserve the backup data in object storage and persistent volume +snapshots, it is safe to remove the `velero` namespace and everything else created for this example: ``` @@ -171,5 +171,5 @@ kubectl delete -f config/nginx-app/base.yaml [31]: expose-minio.md [3]: install-overview.md [18]: debugging-restores.md -[26]: https://github.com/heptio/ark/releases +[26]: https://github.com/heptio/velero/releases [30]: https://godoc.org/github.com/robfig/cron diff --git a/docs/hooks.md b/docs/hooks.md index 5db78e6432..dffac53857 100644 --- a/docs/hooks.md +++ b/docs/hooks.md @@ -1,16 +1,16 @@ # Hooks -Heptio Ark currently supports executing commands in containers in pods during a backup. +Velero currently supports executing commands in containers in pods during a backup. ## Backup Hooks When performing a backup, you can specify one or more commands to execute in a container in a pod when that pod is being backed up. -Ark versions prior to v0.7.0 only support hooks that execute prior to any custom action processing +Velero versions prior to v0.7.0 only support hooks that execute prior to any custom action processing ("pre" hooks). 
-As of version v0.7.0, Ark also supports "post" hooks - these execute after all custom actions have +As of version v0.7.0, Velero also supports "post" hooks - these execute after all custom actions have completed, as well as after all the additional items specified by custom actions have been backed up. @@ -18,28 +18,26 @@ There are two ways to specify hooks: annotations on the pod itself, and in the B ### Specifying Hooks As Pod Annotations -You can use the following annotations on a pod to make Ark execute a hook when backing up the pod: +You can use the following annotations on a pod to make Velero execute a hook when backing up the pod: #### Pre hooks | Annotation Name | Description | | --- | --- | -| `pre.hook.backup.ark.heptio.com/container` | The container where the command should be executed. Defaults to the first container in the pod. Optional. | -| `pre.hook.backup.ark.heptio.com/command` | The command to execute. If you need multiple arguments, specify the command as a JSON array, such as `["/usr/bin/uname", "-a"]` | -| `pre.hook.backup.ark.heptio.com/on-error` | What to do if the command returns a non-zero exit code. Defaults to Fail. Valid values are Fail and Continue. Optional. | -| `pre.hook.backup.ark.heptio.com/timeout` | How long to wait for the command to execute. The hook is considered in error if the command exceeds the timeout. Defaults to 30s. Optional. | +| `pre.hook.backup.velero.io/container` | The container where the command should be executed. Defaults to the first container in the pod. Optional. | +| `pre.hook.backup.velero.io/command` | The command to execute. If you need multiple arguments, specify the command as a JSON array, such as `["/usr/bin/uname", "-a"]` | +| `pre.hook.backup.velero.io/on-error` | What to do if the command returns a non-zero exit code. Defaults to Fail. Valid values are Fail and Continue. Optional. | +| `pre.hook.backup.velero.io/timeout` | How long to wait for the command to execute. The hook is considered in error if the command exceeds the timeout. Defaults to 30s. Optional. | -Ark v0.7.0+ continues to support the original (deprecated) way to specify pre hooks - without the -`pre.` prefix in the annotation names (e.g. `hook.backup.ark.heptio.com/container`). #### Post hooks (v0.7.0+) | Annotation Name | Description | | --- | --- | -| `post.hook.backup.ark.heptio.com/container` | The container where the command should be executed. Defaults to the first container in the pod. Optional. | -| `post.hook.backup.ark.heptio.com/command` | The command to execute. If you need multiple arguments, specify the command as a JSON array, such as `["/usr/bin/uname", "-a"]` | -| `post.hook.backup.ark.heptio.com/on-error` | What to do if the command returns a non-zero exit code. Defaults to Fail. Valid values are Fail and Continue. Optional. | -| `post.hook.backup.ark.heptio.com/timeout` | How long to wait for the command to execute. The hook is considered in error if the command exceeds the timeout. Defaults to 30s. Optional. | +| `post.hook.backup.velero.io/container` | The container where the command should be executed. Defaults to the first container in the pod. Optional. | +| `post.hook.backup.velero.io/command` | The command to execute. If you need multiple arguments, specify the command as a JSON array, such as `["/usr/bin/uname", "-a"]` | +| `post.hook.backup.velero.io/on-error` | What to do if the command returns a non-zero exit code. Defaults to Fail. Valid values are Fail and Continue. Optional. 
| +| `post.hook.backup.velero.io/timeout` | How long to wait for the command to execute. The hook is considered in error if the command exceeds the timeout. Defaults to 30s. Optional. | ### Specifying Hooks in the Backup Spec @@ -56,25 +54,25 @@ setup this example. ### Annotations -The Ark [example/nginx-app/with-pv.yaml][2] serves as an example of adding the pre and post hook annotations directly +The Velero [example/nginx-app/with-pv.yaml][2] serves as an example of adding the pre and post hook annotations directly to your declarative deployment. Below is an example of what updating an object in place might look like. ```shell kubectl annotate pod -n nginx-example -l app=nginx \ - pre.hook.backup.ark.heptio.com/command='["/sbin/fsfreeze", "--freeze", "/var/log/nginx"]' \ - pre.hook.backup.ark.heptio.com/container=fsfreeze \ - post.hook.backup.ark.heptio.com/command='["/sbin/fsfreeze", "--unfreeze", "/var/log/nginx"]' \ - post.hook.backup.ark.heptio.com/container=fsfreeze + pre.hook.backup.velero.io/command='["/sbin/fsfreeze", "--freeze", "/var/log/nginx"]' \ + pre.hook.backup.velero.io/container=fsfreeze \ + post.hook.backup.velero.io/command='["/sbin/fsfreeze", "--unfreeze", "/var/log/nginx"]' \ + post.hook.backup.velero.io/container=fsfreeze ``` -Now test the pre and post hooks by creating a backup. You can use the Ark logs to verify that the pre and post +Now test the pre and post hooks by creating a backup. You can use the Velero logs to verify that the pre and post hooks are running and exiting without error. ```shell -ark backup create nginx-hook-test +velero backup create nginx-hook-test -ark backup get nginx-hook-test -ark backup logs nginx-hook-test | grep hookCommand +velero backup get nginx-hook-test +velero backup logs nginx-hook-test | grep hookCommand ``` diff --git a/docs/ibm-config.md b/docs/ibm-config.md index b25cc0f2c7..39e3d01bdc 100644 --- a/docs/ibm-config.md +++ b/docs/ibm-config.md @@ -1,31 +1,31 @@ -# Use IBM Cloud Object Storage as Ark's storage destination. -You can deploy Ark on IBM [Public][5] or [Private][4] clouds, or even on any other Kubernetes cluster, but anyway you can use IBM Cloud Object Store as a destination for Ark's backups. +# Use IBM Cloud Object Storage as Velero's storage destination. +You can deploy Velero on IBM [Public][5] or [Private][4] clouds, or even on any other Kubernetes cluster, but anyway you can use IBM Cloud Object Store as a destination for Velero's backups. -To set up IBM Cloud Object Storage (COS) as Ark's destination, you: +To set up IBM Cloud Object Storage (COS) as Velero's destination, you: * Create your COS instance * Create an S3 bucket * Define a service that can store data in the bucket -* Configure and start the Ark server +* Configure and start the Velero server ## Create COS instance If you don’t have a COS instance, you can create a new one, according to the detailed instructions in [Creating a new resource instance][1]. ## Create an S3 bucket -Heptio Ark requires an object storage bucket to store backups in. See instructions in [Create some buckets to store your data][2]. +Velero requires an object storage bucket to store backups in. See instructions in [Create some buckets to store your data][2]. ## Define a service that can store data in the bucket. The process of creating service credentials is described in [Service credentials][3]. Several comments: -1. The Ark service will write its backup into the bucket, so it requires the “Writer” access role. +1. 
The Velero service will write its backup into the bucket, so it requires the “Writer” access role. -2. Ark uses an AWS S3 compatible API. Which means it authenticates using a signature created from a pair of access and secret keys — a set of HMAC credentials. You can create these HMAC credentials by specifying `{“HMAC”:true}` as an optional inline parameter. See step 3 in the [Service credentials][3] guide. +2. Velero uses an AWS S3-compatible API, which means it authenticates using a signature created from a pair of access and secret keys — a set of HMAC credentials. You can create these HMAC credentials by specifying `{“HMAC”:true}` as an optional inline parameter. See step 3 in the [Service credentials][3] guide. 3. After successfully creating a Service credential, you can view the JSON definition of the credential. Under the `cos_hmac_keys` entry there are `access_key_id` and `secret_access_key`. We will use them in the next step. -4. Create an Ark-specific credentials file (`credentials-ark`) in your local directory: +4. Create a Velero-specific credentials file (`credentials-velero`) in your local directory: ``` [default] @@ -37,7 +37,7 @@ Several comments: ## Credentials and configuration -In the Ark directory (i.e. where you extracted the release tarball), run the following to first set up namespaces, RBAC, and other scaffolding. To run in a custom namespace, make sure that you have edited the YAML files to specify the namespace. See [Run in custom namespace][0]. +In the Velero directory (i.e. where you extracted the release tarball), run the following to first set up namespaces, RBAC, and other scaffolding. To run in a custom namespace, make sure that you have edited the YAML files to specify the namespace. See [Run in custom namespace][0]. ```bash kubectl apply -f config/common/00-prereqs.yaml @@ -47,13 +47,13 @@ Create a Secret. In the directory of the credentials file you just created, run: ```bash kubectl create secret generic cloud-credentials \ - --namespace \ - --from-file cloud=credentials-ark + --namespace \ + --from-file cloud=credentials-velero ``` Specify the following values in the example files: -* In `config/ibm/05-ark-backupstoragelocation.yaml`: +* In `config/ibm/05-backupstoragelocation.yaml`: * Replace ``, `` and ``. See the [BackupStorageLocation definition][6] for details. @@ -61,12 +61,12 @@ Specify the following values in the example files: * Replace `` with your `StorageClass` name. -## Start the Ark server +## Start the Velero server -In the root of your Ark directory, run: +In the root of your Velero directory, run: ```bash - kubectl apply -f config/ibm/05-ark-backupstoragelocation.yaml + kubectl apply -f config/ibm/05-backupstoragelocation.yaml kubectl apply -f config/ibm/10-deployment.yaml ``` diff --git a/docs/image-tagging.md b/docs/image-tagging.md index 33460d8c3d..43e61834a6 100644 --- a/docs/image-tagging.md +++ b/docs/image-tagging.md @@ -1,21 +1,21 @@ # Image tagging policy -This document describes Ark's image tagging policy. +This document describes Velero's image tagging policy. ## Released versions -`gcr.io/heptio-images/ark:` +`gcr.io/heptio-images/velero:` -Ark follows the [Semantic Versioning](http://semver.org/) standard for releases. Each tag in the `github.com/heptio/ark` repository has a matching image, e.g. `gcr.io/heptio-images/ark:v0.8.0`. +Velero follows the [Semantic Versioning](http://semver.org/) standard for releases. Each tag in the `github.com/heptio/velero` repository has a matching image, e.g.
`gcr.io/heptio-images/velero:v0.11.0`. ### Latest -`gcr.io/heptio-images/ark:latest` +`gcr.io/heptio-images/velero:latest` -The `latest` tag follows the most recently released version of Ark. +The `latest` tag follows the most recently released version of Velero. ## Development -`gcr.io/heptio-images/ark:master` +`gcr.io/heptio-images/velero:master` -The `master` tag follows the latest commit to land on the `master` branch. \ No newline at end of file +The `master` tag follows the latest commit to land on the `master` branch. diff --git a/docs/install-overview.md b/docs/install-overview.md index e13d0bb578..a1f10c83a8 100644 --- a/docs/install-overview.md +++ b/docs/install-overview.md @@ -1,42 +1,42 @@ -# Set up Ark on your platform +# Set up Velero on your platform -You can run Ark with a cloud provider or on-premises. For detailed information about the platforms that Ark supports, see [Compatible Storage Providers][99]. +You can run Velero with a cloud provider or on-premises. For detailed information about the platforms that Velero supports, see [Compatible Storage Providers][99]. -In version 0.7.0 and later, you can run Ark in any namespace, which requires additional customization. See [Run in custom namespace][3]. +In version 0.7.0 and later, you can run Velero in any namespace, which requires additional customization. See [Run in custom namespace][3]. -In version 0.9.0 and later, you can use Ark's integration with restic, which requires additional setup. See [restic instructions][20]. +In version 0.9.0 and later, you can use Velero's integration with restic, which requires additional setup. See [restic instructions][20]. ## Customize configuration -Whether you run Ark on a cloud provider or on-premises, if you have more than one volume snapshot location for a given volume provider, you can specify its default location for backups by setting a server flag in your Ark deployment YAML. +Whether you run Velero on a cloud provider or on-premises, if you have more than one volume snapshot location for a given volume provider, you can specify its default location for backups by setting a server flag in your Velero deployment YAML. For details, see the documentation topics for individual cloud providers. ## Cloud provider -The Ark repository includes a set of example YAML files that specify the settings for each supported cloud provider. For provider-specific instructions, see: +The Velero repository includes a set of example YAML files that specify the settings for each supported cloud provider. For provider-specific instructions, see: -* [Run Ark on AWS][0] -* [Run Ark on GCP][1] -* [Run Ark on Azure][2] -* [Use IBM Cloud Object Store as Ark's storage destination][4] +* [Run Velero on AWS][0] +* [Run Velero on GCP][1] +* [Run Velero on Azure][2] +* [Use IBM Cloud Object Store as Velero's storage destination][4] ## On-premises -You can run Ark in an on-premises cluster in different ways depending on your requirements. +You can run Velero in an on-premises cluster in different ways depending on your requirements. -First, you must select an object storage backend that Ark can use to store backup data. [Compatible Storage Providers][99] contains information on various +First, you must select an object storage backend that Velero can use to store backup data. [Compatible Storage Providers][99] contains information on various options that are supported or have been reported to work by users. 
[Minio][101] is an option if you want to keep your backup data on-premises and you are not using another storage platform that offers an S3-compatible object storage API. Second, if you need to back up persistent volume data, you must select a volume backup solution. [Volume Snapshot Providers][100] contains information on -the supported options. For example, if you use [Portworx][102] for persistent storage, you can install their Ark plugin to get native Portworx snapshots as part -of your Ark backups. If there is no native snapshot plugin available for your storage platform, you can use Ark's [restic integration][20], which provides a +the supported options. For example, if you use [Portworx][102] for persistent storage, you can install their Velero plugin to get native Portworx snapshots as part +of your Velero backups. If there is no native snapshot plugin available for your storage platform, you can use Velero's [restic integration][20], which provides a platform-agnostic backup solution for volume data. ## Examples -After you set up the Ark server, try these examples: +After you set up the Velero server, try these examples: ### Basic example (without PersistentVolumes) @@ -49,7 +49,7 @@ After you set up the Ark server, try these examples: 1. Create a backup: ```bash - ark backup create nginx-backup --include-namespaces nginx-example + velero backup create nginx-backup --include-namespaces nginx-example ``` 1. Simulate a disaster: @@ -63,7 +63,7 @@ After you set up the Ark server, try these examples: 1. Restore your lost resources: ```bash - ark restore create --from-backup nginx-backup + velero restore create --from-backup nginx-backup ``` ### Snapshot example (with PersistentVolumes) @@ -79,7 +79,7 @@ After you set up the Ark server, try these examples: 1. Create a backup with PV snapshotting: ```bash - ark backup create nginx-backup --include-namespaces nginx-example + velero backup create nginx-backup --include-namespaces nginx-example ``` 1. Simulate a disaster: @@ -93,7 +93,7 @@ After you set up the Ark server, try these examples: 1. Restore your lost resources: ```bash - ark restore create --from-backup nginx-backup + velero restore create --from-backup nginx-backup ``` [0]: aws-config.md diff --git a/docs/issue-template-gen/main.go b/docs/issue-template-gen/main.go index 19ef0ab850..57e19daae4 100644 --- a/docs/issue-template-gen/main.go +++ b/docs/issue-template-gen/main.go @@ -24,7 +24,7 @@ import ( "os" "text/template" - "github.com/heptio/ark/pkg/cmd/cli/bug" + "github.com/heptio/velero/pkg/cmd/cli/bug" ) func main() { @@ -38,7 +38,7 @@ func main() { if err != nil { log.Fatal(err) } - err = tmpl.Execute(outFile, bug.ArkBugInfo{}) + err = tmpl.Execute(outFile, bug.VeleroBugInfo{}) if err != nil { log.Fatal(err) } diff --git a/docs/locations.md b/docs/locations.md index 9479828b2f..f1af86ea52 100644 --- a/docs/locations.md +++ b/docs/locations.md @@ -1,55 +1,55 @@ # Backup Storage Locations and Volume Snapshot Locations -Ark v0.10 introduces a new way of configuring where Ark backups and their associated persistent volume snapshots are stored. +Velero v0.10 introduces a new way of configuring where Velero backups and their associated persistent volume snapshots are stored. ## Motivations -In Ark versions prior to v0.10, the configuration for where to store backups & volume snapshots is specified in a `Config` custom resource. The `backupStorageProvider` section captures the place where all Ark backups should be stored. This is defined by a **provider** (e.g. 
`aws`, `azure`, `gcp`, `minio`, etc.), a **bucket**, and possibly some additional provider-specific settings (e.g. `region`). Similarly, the `persistentVolumeProvider` section captures the place where all persistent volume snapshots taken as part of Ark backups should be stored, and is defined by a **provider** and additional provider-specific settings (e.g. `region`). +In Velero versions prior to v0.10, the configuration for where to store backups & volume snapshots is specified in a `Config` custom resource. The `backupStorageProvider` section captures the place where all Velero backups should be stored. This is defined by a **provider** (e.g. `aws`, `azure`, `gcp`, `minio`, etc.), a **bucket**, and possibly some additional provider-specific settings (e.g. `region`). Similarly, the `persistentVolumeProvider` section captures the place where all persistent volume snapshots taken as part of Velero backups should be stored, and is defined by a **provider** and additional provider-specific settings (e.g. `region`). There are a number of use cases that this basic design does not support, such as: -- Take snapshots of more than one kind of persistent volume in a single Ark backup (e.g. in a cluster with both EBS volumes and Portworx volumes) -- Have some Ark backups go to a bucket in an eastern USA region, and others go to a bucket in a western USA region +- Take snapshots of more than one kind of persistent volume in a single Velero backup (e.g. in a cluster with both EBS volumes and Portworx volumes) +- Have some Velero backups go to a bucket in an eastern USA region, and others go to a bucket in a western USA region - For volume providers that support it (e.g. Portworx), have some snapshots be stored locally on the cluster and have others be stored in the cloud -Additionally, as we look ahead to backup replication, a major feature on our roadmap, we know that we'll need Ark to be able to support multiple possible storage locations. +Additionally, as we look ahead to backup replication, a major feature on our roadmap, we know that we'll need Velero to be able to support multiple possible storage locations. ## Overview -In Ark v0.10 we got rid of the `Config` custom resource, and replaced it with two new custom resources, `BackupStorageLocation` and `VolumeSnapshotLocation`. The new resources directly replace the legacy `backupStorageProvider` and `persistentVolumeProvider` sections of the `Config` resource, respectively. +In Velero v0.10 we got rid of the `Config` custom resource, and replaced it with two new custom resources, `BackupStorageLocation` and `VolumeSnapshotLocation`. The new resources directly replace the legacy `backupStorageProvider` and `persistentVolumeProvider` sections of the `Config` resource, respectively. Now, the user can pre-define more than one possible `BackupStorageLocation` and more than one `VolumeSnapshotLocation`, and can select *at backup creation time* the location in which the backup and associated snapshots should be stored. -A `BackupStorageLocation` is defined as a bucket, a prefix within that bucket under which all Ark data should be stored, and a set of additional provider-specific fields (e.g. AWS region, Azure storage account, etc.) The [API documentation][1] captures the configurable parameters for each in-tree provider. +A `BackupStorageLocation` is defined as a bucket, a prefix within that bucket under which all Velero data should be stored, and a set of additional provider-specific fields (e.g. AWS region, Azure storage account, etc.) 
The [API documentation][1] captures the configurable parameters for each in-tree provider. A `VolumeSnapshotLocation` is defined entirely by provider-specific fields (e.g. AWS region, Azure resource group, Portworx snapshot type, etc.) The [API documentation][2] captures the configurable parameters for each in-tree provider. -Additionally, since multiple `VolumeSnapshotLocations` can be created, the user can now configure locations for more than one volume provider, and if the cluster has volumes from multiple providers (e.g. AWS EBS and Portworx), all of them can be snapshotted in a single Ark backup. +Additionally, since multiple `VolumeSnapshotLocations` can be created, the user can now configure locations for more than one volume provider, and if the cluster has volumes from multiple providers (e.g. AWS EBS and Portworx), all of them can be snapshotted in a single Velero backup. ## Limitations / Caveats -- Volume snapshots are still limited by where your provider allows you to create snapshots. For example, AWS and Azure do not allow you to create a volume snapshot in a different region than where the volume is. If you try to take an Ark backup using a volume snapshot location with a different region than where your cluster's volumes are, the backup will fail. +- Volume snapshots are still limited by where your provider allows you to create snapshots. For example, AWS and Azure do not allow you to create a volume snapshot in a different region than where the volume is. If you try to take a Velero backup using a volume snapshot location with a different region than where your cluster's volumes are, the backup will fail. -- Each Ark backup has one `BackupStorageLocation`, and one `VolumeSnapshotLocation` per volume provider. It is not possible (yet) to send a single Ark backup to multiple backup storage locations simultaneously, or a single volume snapshot to multiple locations simultaneously. However, you can always set up multiple scheduled backups that differ only in the storage locations used if redundancy of backups across locations is important. +- Each Velero backup has one `BackupStorageLocation`, and one `VolumeSnapshotLocation` per volume provider. It is not possible (yet) to send a single Velero backup to multiple backup storage locations simultaneously, or a single volume snapshot to multiple locations simultaneously. However, you can always set up multiple scheduled backups that differ only in the storage locations used if redundancy of backups across locations is important (see the sketch after this list). -- Cross-provider snapshots are not supported. If you have a cluster with more than one type of volume (e.g. EBS and Portworx), but you only have a `VolumeSnapshotLocation` configured for EBS, then Ark will **only** snapshot the EBS volumes. +- Cross-provider snapshots are not supported. If you have a cluster with more than one type of volume (e.g. EBS and Portworx), but you only have a `VolumeSnapshotLocation` configured for EBS, then Velero will **only** snapshot the EBS volumes. -- Restic data is now stored under a prefix/subdirectory of the main Ark bucket, and will go into the bucket corresponding to the `BackupStorageLocation` selected by the user at backup creation time. +- Restic data is now stored under a prefix/subdirectory of the main Velero bucket, and will go into the bucket corresponding to the `BackupStorageLocation` selected by the user at backup creation time.
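A sketch of that redundancy pattern, assuming the `default` and `s3-alt-region` backup storage locations configured in the examples below, that `schedule create` accepts the same flags as `backup create`, and illustrative schedule names:

```shell
velero schedule create daily-us-east --schedule "0 7 * * *" --storage-location default
velero schedule create daily-us-west --schedule "0 7 * * *" --storage-location s3-alt-region
```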
## Examples Let's look at some examples of how we can use this new mechanism to address each of our previously unsupported use cases: -#### Take snapshots of more than one kind of persistent volume in a single Ark backup (e.g. in a cluster with both EBS volumes and Portworx volumes) +#### Take snapshots of more than one kind of persistent volume in a single Velero backup (e.g. in a cluster with both EBS volumes and Portworx volumes) During server configuration: ```shell -ark snapshot-location create ebs-us-east-1 \ +velero snapshot-location create ebs-us-east-1 \ --provider aws \ --config region=us-east-1 -ark snapshot-location create portworx-cloud \ +velero snapshot-location create portworx-cloud \ --provider portworx \ --config type=cloud ``` @@ -57,43 +57,43 @@ ark snapshot-location create portworx-cloud \ During backup creation: ```shell -ark backup create full-cluster-backup \ +velero backup create full-cluster-backup \ --volume-snapshot-locations ebs-us-east-1,portworx-cloud ``` -Alternately, since in this example there's only one possible volume snapshot location configured for each of our two providers (`ebs-us-east-1` for `aws`, and `portworx-cloud` for `portworx`), Ark doesn't require them to be explicitly specified when creating the backup: +Alternately, since in this example there's only one possible volume snapshot location configured for each of our two providers (`ebs-us-east-1` for `aws`, and `portworx-cloud` for `portworx`), Velero doesn't require them to be explicitly specified when creating the backup: ```shell -ark backup create full-cluster-backup +velero backup create full-cluster-backup ``` -#### Have some Ark backups go to a bucket in an eastern USA region, and others go to a bucket in a western USA region +#### Have some Velero backups go to a bucket in an eastern USA region, and others go to a bucket in a western USA region During server configuration: ```shell -ark backup-location create default \ +velero backup-location create default \ --provider aws \ - --bucket ark-backups \ + --bucket velero-backups \ --config region=us-east-1 -ark backup-location create s3-alt-region \ +velero backup-location create s3-alt-region \ --provider aws \ - --bucket ark-backups-alt \ + --bucket velero-backups-alt \ --config region=us-west-1 ``` During backup creation: ```shell -# The Ark server will automatically store backups in the backup storage location named "default" if +# The Velero server will automatically store backups in the backup storage location named "default" if # one is not specified when creating the backup. You can alter which backup storage location is used -# by default by setting the --default-backup-storage-location flag on the `ark server` command (run -# by the Ark deployment) to the name of a different backup storage location. -ark backup create full-cluster-backup +# by default by setting the --default-backup-storage-location flag on the `velero server` command (run +# by the Velero deployment) to the name of a different backup storage location. 
+velero backup create full-cluster-backup ``` Or: ```shell -ark backup create full-cluster-alternate-location-backup \ +velero backup create full-cluster-alternate-location-backup \ --storage-location s3-alt-region ``` @@ -102,11 +102,11 @@ ark backup create full-cluster-alternate-location-backup \ During server configuration: ```shell -ark snapshot-location create portworx-local \ +velero snapshot-location create portworx-local \ --provider portworx \ --config type=local -ark snapshot-location create portworx-cloud \ +velero snapshot-location create portworx-cloud \ --provider portworx \ --config type=cloud ``` @@ -116,49 +116,49 @@ During backup creation: ```shell # Note that since in this example we have two possible volume snapshot locations for the Portworx # provider, we need to explicitly specify which one to use when creating a backup. Alternately, -# you can set the --default-volume-snapshot-locations flag on the `ark server` command (run by -# the Ark deployment) to specify which location should be used for each provider by default, in +# you can set the --default-volume-snapshot-locations flag on the `velero server` command (run by +# the Velero deployment) to specify which location should be used for each provider by default, in # which case you don't need to specify it when creating a backup. -ark backup create local-snapshot-backup \ +velero backup create local-snapshot-backup \ --volume-snapshot-locations portworx-local ``` Or: ```shell -ark backup create cloud-snapshot-backup \ +velero backup create cloud-snapshot-backup \ --volume-snapshot-locations portworx-cloud ``` #### One location is still easy -If you don't have a use case for more than one location, it's still just as easy to use Ark. Let's assume you're running on AWS, in the `us-west-1` region: +If you don't have a use case for more than one location, it's still just as easy to use Velero. Let's assume you're running on AWS, in the `us-west-1` region: During server configuration: ```shell -ark backup-location create default \ +velero backup-location create default \ --provider aws \ - --bucket ark-backups \ + --bucket velero-backups \ --config region=us-west-1 -ark snapshot-location create ebs-us-west-1 \ +velero snapshot-location create ebs-us-west-1 \ --provider aws \ --config region=us-west-1 ``` During backup creation: ```shell -# Ark's will automatically use your configured backup storage location and volume snapshot location. +# Velero will automatically use your configured backup storage location and volume snapshot location. # Nothing new needs to be specified when creating a backup. -ark backup create full-cluster-backup +velero backup create full-cluster-backup ``` ## Additional Use Cases 1. If you're using Azure's AKS, you may want to store your volume snapshots outside of the "infrastructure" resource group that is automatically created when you create your AKS cluster. This is now possible using a `VolumeSnapshotLocation`, by specifying a `resourceGroup` under the `config` section of the snapshot location. See the [Azure volume snapshot location documentation][3] for details. -1. If you're using Azure, you may want to store your Ark backups across multiple storage accounts and/or resource groups. This is now possible using a `BackupStorageLocation`, by specifying a `storageAccount` and/or `resourceGroup`, respectively, under the `config` section of the backup location. See the [Azure backup storage location documentation][4] for details. +1.
If you're using Azure, you may want to store your Velero backups across multiple storage accounts and/or resource groups. This is now possible using a `BackupStorageLocation`, by specifying a `storageAccount` and/or `resourceGroup`, respectively, under the `config` section of the backup location. See the [Azure backup storage location documentation][4] for details. diff --git a/docs/migration-case.md b/docs/migration-case.md index f3c6a4f077..9d3a659acc 100644 --- a/docs/migration-case.md +++ b/docs/migration-case.md @@ -2,31 +2,31 @@ *Using Backups and Restores* -Heptio Ark can help you port your resources from one cluster to another, as long as you point each Ark instance to the same cloud object storage location. In this scenario, we are also assuming that your clusters are hosted by the same cloud provider. **Note that Heptio Ark does not support the migration of persistent volumes across cloud providers.** +Velero can help you port your resources from one cluster to another, as long as you point each Velero instance to the same cloud object storage location. In this scenario, we are also assuming that your clusters are hosted by the same cloud provider. **Note that Velero does not support the migration of persistent volumes across cloud providers.** -1. *(Cluster 1)* Assuming you haven't already been checkpointing your data with the Ark `schedule` operation, you need to first back up your entire cluster (replacing `` as desired): +1. *(Cluster 1)* Assuming you haven't already been checkpointing your data with the Velero `schedule` operation, you need to first back up your entire cluster (replacing `` as desired): ``` - ark backup create + velero backup create ``` The default TTL is 30 days (720 hours); you can use the `--ttl` flag to change this as necessary. -1. *(Cluster 2)* Add the `--restore-only` flag to the server spec in the Ark deployment YAML. +1. *(Cluster 2)* Add the `--restore-only` flag to the server spec in the Velero deployment YAML. -1. *(Cluster 2)* Make sure that the `BackupStorageLocation` and `VolumeSnapshotLocation` CRDs match the ones from *Cluster 1*, so that your new Ark server instance points to the same bucket. +1. *(Cluster 2)* Make sure that the `BackupStorageLocation` and `VolumeSnapshotLocation` CRDs match the ones from *Cluster 1*, so that your new Velero server instance points to the same bucket. -1. *(Cluster 2)* Make sure that the Ark Backup object is created. Ark resources are synchronized with the backup files in cloud storage. +1. *(Cluster 2)* Make sure that the Velero Backup object is created. Velero resources are synchronized with the backup files in cloud storage. ``` - ark backup describe + velero backup describe ``` - **Note:** As of version 0.10, the default sync interval is 1 minute, so make sure to wait before checking. You can configure this interval with the `--backup-sync-period` flag to the Ark server. + **Note:** As of version 0.10, the default sync interval is 1 minute, so make sure to wait before checking. You can configure this interval with the `--backup-sync-period` flag to the Velero server. 1. *(Cluster 2)* Once you have confirmed that the right Backup (``) is now present, you can restore everything with: ``` - ark restore create --from-backup + velero restore create --from-backup ``` ## Verify both clusters @@ -36,13 +36,13 @@ Check that the second cluster is behaving as expected: 1. *(Cluster 2)* Run: ``` - ark restore get + velero restore get ``` 1. 
Then run: ``` - ark restore describe + velero restore describe ``` -If you encounter issues, make sure that Ark is running in the same namespace in both clusters. \ No newline at end of file +If you encounter issues, make sure that Velero is running in the same namespace in both clusters. \ No newline at end of file diff --git a/docs/namespace.md b/docs/namespace.md index ca82857749..b77615b8cd 100644 --- a/docs/namespace.md +++ b/docs/namespace.md @@ -1,38 +1,38 @@ # Run in custom namespace -In Ark version 0.7.0 and later, you can run Ark in any namespace. To do so, you specify the -namespace in the YAML files that configure the Ark server. You then also specify the namespace when -you run Ark client commands. +In Velero version 0.7.0 and later, you can run Velero in any namespace. To do so, you specify the +namespace in the YAML files that configure the Velero server. You then also specify the namespace when +you run Velero client commands. ## Edit the example files -The Ark release tarballs include a set of example configs that you can use to set up your Ark server. The -examples place the server and backup/schedule/restore/etc. data in the `heptio-ark` namespace. +The Velero release tarballs include a set of example configs that you can use to set up your Velero server. The +examples place the server and backup/schedule/restore/etc. data in the `velero` namespace. -To run the server in another namespace, you edit the relevant files, changing `heptio-ark` to +To run the server in another namespace, you edit the relevant files, changing `velero` to your desired namespace. To store your backups, schedules, restores, and config in another namespace, you edit the relevant -files, changing `heptio-ark` to your desired namespace. You also need to create the +files, changing `velero` to your desired namespace. You also need to create the `cloud-credentials` secret in your desired namespace. First, ensure you've [downloaded & extracted the latest release][0]. For all cloud providers, edit `config/common/00-prereqs.yaml`. This file defines: -* CustomResourceDefinitions for the Ark objects (backups, schedules, restores, downloadrequests, etc.) -* The namespace where the Ark server runs +* CustomResourceDefinitions for the Velero objects (backups, schedules, restores, downloadrequests, etc.) +* The namespace where the Velero server runs * The namespace where backups, schedules, restores, etc. 
are stored -* The Ark service account -* The RBAC rules to grant permissions to the Ark service account +* The Velero service account +* The RBAC rules to grant permissions to the Velero service account ### AWS For AWS, edit: -* `config/aws/05-ark-backupstoragelocation.yaml` -* `config/aws/06-ark-volumesnapshotlocation.yaml` +* `config/aws/05-backupstoragelocation.yaml` +* `config/aws/06-volumesnapshotlocation.yaml` * `config/aws/10-deployment.yaml` @@ -40,16 +40,16 @@ For AWS, edit: For Azure, edit: -* `config/azure/00-ark-deployment.yaml` -* `config/azure/05-ark-backupstoragelocation.yaml` -* `config/azure/06-ark-volumesnapshotlocation.yaml` +* `config/azure/00-deployment.yaml` +* `config/azure/05-backupstoragelocation.yaml` +* `config/azure/06-volumesnapshotlocation.yaml` ### GCP For GCP, edit: -* `config/gcp/05-ark-backupstoragelocation.yaml` -* `config/gcp/06-ark-volumesnapshotlocation.yaml` +* `config/gcp/05-backupstoragelocation.yaml` +* `config/gcp/06-volumesnapshotlocation.yaml` * `config/gcp/10-deployment.yaml` @@ -57,16 +57,16 @@ For GCP, edit: For IBM, edit: -* `config/ibm/05-ark-backupstoragelocation.yaml` +* `config/ibm/05-backupstoragelocation.yaml` * `config/ibm/10-deployment.yaml` ## Specify the namespace in client commands -To specify the namespace for all Ark client commands, run: +To specify the namespace for all Velero client commands, run: ``` -ark client config set namespace= +velero client config set namespace= ``` diff --git a/docs/output-file-format.md b/docs/output-file-format.md index abdc0166b4..04420be919 100644 --- a/docs/output-file-format.md +++ b/docs/output-file-format.md @@ -1,15 +1,15 @@ # Output file format -A backup is a gzip-compressed tar file whose name matches the Backup API resource's `metadata.name` (what is specified during `ark backup create `). +A backup is a gzip-compressed tar file whose name matches the Backup API resource's `metadata.name` (what is specified during `velero backup create `). -In cloud object storage, each backup file is stored in its own subdirectory in the bucket specified in the Ark server configuration. This subdirectory includes an additional file called `ark-backup.json`. The JSON file lists all information about your associated Backup resource, including any default values. This gives you a complete historical record of the backup configuration. The JSON file also specifies `status.version`, which corresponds to the output file format. +In cloud object storage, each backup file is stored in its own subdirectory in the bucket specified in the Velero server configuration. This subdirectory includes an additional file called `velero-backup.json`. The JSON file lists all information about your associated Backup resource, including any default values. This gives you a complete historical record of the backup configuration. The JSON file also specifies `status.version`, which corresponds to the output file format. 
The directory structure in your cloud storage looks something like: ``` rootBucket/ backup1234/ - ark-backup.json + velero-backup.json backup1234.tar.gz ``` @@ -18,11 +18,11 @@ rootBucket/ ```json { "kind": "Backup", - "apiVersion": "ark.heptio.com/v1", + "apiVersion": "velero.io/v1", "metadata": { "name": "test-backup", - "namespace": "heptio-ark", - "selfLink": "/apis/ark.heptio.com/v1/namespaces/heptio-ark/backups/testtest", + "namespace": "velero", + "selfLink": "/apis/velero.io/v1/namespaces/velero/backups/testtest", "uid": "a12345cb-75f5-11e7-b4c2-abcdef123456", "resourceVersion": "337075", "creationTimestamp": "2017-07-31T13:39:15Z" diff --git a/docs/plugins.md b/docs/plugins.md index 4f3bc04fd0..dceb2f2bef 100644 --- a/docs/plugins.md +++ b/docs/plugins.md @@ -1,10 +1,10 @@ # Plugins -Heptio Ark has a plugin architecture that allows users to add their own custom functionality to Ark backups & restores -without having to modify/recompile the core Ark binary. To add custom functionality, users simply create their own binary -containing implementations of Ark's plugin kinds (described below), plus a small amount of boilerplate code to -expose the plugin implementations to Ark. This binary is added to a container image that serves as an init container for -the Ark server pod and copies the binary into a shared emptyDir volume for the Ark server to access. +Velero has a plugin architecture that allows users to add their own custom functionality to Velero backups & restores +without having to modify/recompile the core Velero binary. To add custom functionality, users simply create their own binary +containing implementations of Velero's plugin kinds (described below), plus a small amount of boilerplate code to +expose the plugin implementations to Velero. This binary is added to a container image that serves as an init container for +the Velero server pod and copies the binary into a shared emptyDir volume for the Velero server to access. Multiple plugins, of any type, can be implemented in this binary. @@ -12,7 +12,7 @@ A fully-functional [sample plugin repository][1] is provided to serve as a conve ## Plugin Kinds -Ark currently supports the following kinds of plugins: +Velero currently supports the following kinds of plugins: - **Object Store** - persists and retrieves backups, backup logs and restore logs - **Block Store** - creates volume snapshots (during backup) and restores volumes from snapshots (during restore) @@ -21,11 +21,11 @@ Ark currently supports the following kinds of plugins: ## Plugin Logging -Ark provides a [logger][2] that can be used by plugins to log structured information to the main Ark server log or +Velero provides a [logger][2] that can be used by plugins to log structured information to the main Velero server log or per-backup/restore logs. See the [sample repository][1] for an example of how to instantiate and use the logger within your plugin. -[1]: https://github.com/heptio/ark-plugin-example -[2]: https://github.com/heptio/ark/blob/master/pkg/plugin/logger.go +[1]: https://github.com/heptio/velero-plugin-example +[2]: https://github.com/heptio/velero/blob/master/pkg/plugin/logger.go diff --git a/docs/rbac.md b/docs/rbac.md index ebc0316716..d8686bf6ee 100644 --- a/docs/rbac.md +++ b/docs/rbac.md @@ -1,6 +1,6 @@ -# Run Ark more securely with restrictive RBAC settings +# Run Velero more securely with restrictive RBAC settings -By default Ark runs with an RBAC policy of ClusterRole `cluster-admin`. 
This is to make sure that Ark can back up or restore anything in your cluster. But `cluster-admin` access is wide open -- it gives Ark components access to everything in your cluster. Depending on your environment and your security needs, you should consider whether to configure additional RBAC policies with more restrictive access. +By default Velero runs with an RBAC policy of ClusterRole `cluster-admin`. This is to make sure that Velero can back up or restore anything in your cluster. But `cluster-admin` access is wide open -- it gives Velero components access to everything in your cluster. Depending on your environment and your security needs, you should consider whether to configure additional RBAC policies with more restrictive access. **Note:** Roles and RoleBindings are associated with a single namespaces, not with an entire cluster. PersistentVolume backups are associated only with an entire cluster. This means that any backups or restores that use a restrictive Role and RoleBinding pair can manage only the resources that belong to the namespace. You do not need a wide open RBAC policy to manage PersistentVolumes, however. You can configure a ClusterRole and ClusterRoleBinding that allow backups and restores only of PersistentVolumes, not of all objects in the cluster. @@ -17,10 +17,10 @@ metadata: namespace: YOUR_NAMESPACE_HERE name: ROLE_NAME_HERE labels: - component: ark + component: velero rules: - apiGroups: - - ark.heptio.com + - velero.io verbs: - "*" resources: @@ -44,4 +44,4 @@ roleRef: [1]: https://kubernetes.io/docs/reference/access-authn-authz/controlling-access/ [2]: https://kubernetes.io/docs/reference/access-authn-authz/service-accounts-admin/ [3]: https://kubernetes.io/docs/reference/access-authn-authz/rbac/ -[4]: namespace.md \ No newline at end of file +[4]: namespace.md diff --git a/docs/restic.md b/docs/restic.md index ebb63cdb66..690b4c3734 100644 --- a/docs/restic.md +++ b/docs/restic.md @@ -1,16 +1,16 @@ # Restic Integration -As of version 0.9.0, Ark has support for backing up and restoring Kubernetes volumes using a free open-source backup tool called +As of version 0.9.0, Velero has support for backing up and restoring Kubernetes volumes using a free open-source backup tool called [restic][1]. -Ark has always allowed you to take snapshots of persistent volumes as part of your backups if you’re using one of +Velero has always allowed you to take snapshots of persistent volumes as part of your backups if you’re using one of the supported cloud providers’ block storage offerings (Amazon EBS Volumes, Azure Managed Disks, Google Persistent Disks). Starting with version 0.6.0, we provide a plugin model that enables anyone to implement additional object and block storage -backends, outside the main Ark repository. +backends, outside the main Velero repository. -We integrated restic with Ark so that users have an out-of-the-box solution for backing up and restoring almost any type of Kubernetes -volume*. This is a new capability for Ark, not a replacement for existing functionality. If you're running on AWS, and -taking EBS snapshots as part of your regular Ark backups, there's no need to switch to using restic. However, if you've +We integrated restic with Velero so that users have an out-of-the-box solution for backing up and restoring almost any type of Kubernetes +volume*. This is a new capability for Velero, not a replacement for existing functionality. 
If you're running on AWS, and +taking EBS snapshots as part of your regular Velero backups, there's no need to switch to using restic. However, if you've been waiting for a snapshot plugin for your storage platform, or if you're using EFS, AzureFile, NFS, emptyDir, local, or any other volume type that doesn't have a native snapshot concept, restic might be for you. @@ -23,16 +23,16 @@ cross-volume-type data migrations. Stay tuned as this evolves! ### Prerequisites -- A working install of Ark version 0.10.0 or later. See [Set up Ark][2] -- A local clone of [the latest release tag of the Ark repository][3] -- Ark's restic integration requires the Kubernetes [MountPropagation feature][6], which is enabled by default in Kubernetes v1.10.0 and later. +- A working install of Velero version 0.10.0 or later. See [Set up Velero][2] +- A local clone of [the latest release tag of the Velero repository][3] +- Velero's restic integration requires the Kubernetes [MountPropagation feature][6], which is enabled by default in Kubernetes v1.10.0 and later. ### Instructions 1. Ensure you've [downloaded & extracted the latest release][3]. -1. In the Ark directory (i.e. where you extracted the release tarball), run the following to create new custom resource definitions: +1. In the Velero directory (i.e. where you extracted the release tarball), run the following to create new custom resource definitions: ```bash kubectl apply -f config/common/00-prereqs.yaml @@ -45,14 +45,14 @@ cross-volume-type data migrations. Stay tuned as this evolves! - GCP: `kubectl apply -f config/gcp/20-restic-daemonset.yaml` - Minio: `kubectl apply -f config/minio/30-restic-daemonset.yaml` -You're now ready to use Ark with restic. +You're now ready to use Velero with restic. ## Back up 1. Run the following for each pod that contains a volume to back up: ```bash - kubectl -n YOUR_POD_NAMESPACE annotate pod/YOUR_POD_NAME backup.ark.heptio.com/backup-volumes=YOUR_VOLUME_NAME_1,YOUR_VOLUME_NAME_2,... + kubectl -n YOUR_POD_NAMESPACE annotate pod/YOUR_POD_NAME backup.velero.io/backup-volumes=YOUR_VOLUME_NAME_1,YOUR_VOLUME_NAME_2,... ``` where the volume names are the names of the volumes in the pod spec. @@ -84,91 +84,91 @@ You're now ready to use Ark with restic. You'd run: ```bash - kubectl -n foo annotate pod/sample backup.ark.heptio.com/backup-volumes=pvc-volume,emptydir-volume + kubectl -n foo annotate pod/sample backup.velero.io/backup-volumes=pvc-volume,emptydir-volume ``` This annotation can also be provided in a pod template spec if you use a controller to manage your pods. -1. Take an Ark backup: +1. Take a Velero backup: ```bash - ark backup create NAME OPTIONS... + velero backup create NAME OPTIONS... ``` 1. When the backup completes, view information about the backups: ```bash - ark backup describe YOUR_BACKUP_NAME + velero backup describe YOUR_BACKUP_NAME - kubectl -n heptio-ark get podvolumebackups -l ark.heptio.com/backup-name=YOUR_BACKUP_NAME -o yaml + kubectl -n velero get podvolumebackups -l velero.io/backup-name=YOUR_BACKUP_NAME -o yaml ``` ## Restore -1. Restore from your Ark backup: +1. Restore from your Velero backup: ```bash - ark restore create --from-backup BACKUP_NAME OPTIONS... + velero restore create --from-backup BACKUP_NAME OPTIONS... ``` 1.
When the restore completes, view information about your pod volume restores: ```bash - ark restore describe YOUR_RESTORE_NAME + velero restore describe YOUR_RESTORE_NAME - kubectl -n heptio-ark get podvolumerestores -l ark.heptio.com/restore-name=YOUR_RESTORE_NAME -o yaml + kubectl -n velero get podvolumerestores -l velero.io/restore-name=YOUR_RESTORE_NAME -o yaml ``` ## Limitations - `hostPath` volumes are not supported. [Local persistent volumes][4] are supported. - Those of you familiar with [restic][1] may know that it encrypts all of its data. We've decided to use a static, -common encryption key for all restic repositories created by Ark. **This means that anyone who has access to your +common encryption key for all restic repositories created by Velero. **This means that anyone who has access to your bucket can decrypt your restic backup data**. Make sure that you limit access to the restic bucket -appropriately. We plan to implement full Ark backup encryption, including securing the restic encryption keys, in +appropriately. We plan to implement full Velero backup encryption, including securing the restic encryption keys, in a future release. ## Troubleshooting Run the following checks: -Are your Ark server and daemonset pods running? +Are your Velero server and daemonset pods running? ```bash -kubectl get pods -n heptio-ark +kubectl get pods -n velero ``` Does your restic repository exist, and is it ready? ```bash -ark restic repo get +velero restic repo get -ark restic repo get REPO_NAME -o yaml +velero restic repo get REPO_NAME -o yaml ``` -Are there any errors in your Ark backup/restore? +Are there any errors in your Velero backup/restore? ```bash -ark backup describe BACKUP_NAME -ark backup logs BACKUP_NAME +velero backup describe BACKUP_NAME +velero backup logs BACKUP_NAME -ark restore describe RESTORE_NAME -ark restore logs RESTORE_NAME +velero restore describe RESTORE_NAME +velero restore logs RESTORE_NAME ``` What is the status of your pod volume backups/restores? ```bash -kubectl -n heptio-ark get podvolumebackups -l ark.heptio.com/backup-name=BACKUP_NAME -o yaml +kubectl -n velero get podvolumebackups -l velero.io/backup-name=BACKUP_NAME -o yaml -kubectl -n heptio-ark get podvolumerestores -l ark.heptio.com/restore-name=RESTORE_NAME -o yaml +kubectl -n velero get podvolumerestores -l velero.io/restore-name=RESTORE_NAME -o yaml ``` -Is there any useful information in the Ark server or daemon pod logs? +Is there any useful information in the Velero server or daemon pod logs? ```bash -kubectl -n heptio-ark logs deploy/ark -kubectl -n heptio-ark logs DAEMON_POD_NAME +kubectl -n velero logs deploy/velero +kubectl -n velero logs DAEMON_POD_NAME ``` **NOTE**: You can increase the verbosity of the pod logs by adding `--log-level=debug` as an argument @@ -178,71 +178,71 @@ to the container command in the deployment/daemonset pod template spec. We introduced three custom resource definitions and associated controllers: -- `ResticRepository` - represents/manages the lifecycle of Ark's [restic repositories][5]. Ark creates +- `ResticRepository` - represents/manages the lifecycle of Velero's [restic repositories][5]. Velero creates a restic repository per namespace when the first restic backup for a namespace is requested. The controller for this custom resource executes restic repository lifecycle commands -- `restic init`, `restic check`, and `restic prune`. - You can see information about your Ark restic repositories by running `ark restic repo get`. 
+ You can see information about your Velero restic repositories by running `velero restic repo get`. -- `PodVolumeBackup` - represents a restic backup of a volume in a pod. The main Ark backup process creates +- `PodVolumeBackup` - represents a restic backup of a volume in a pod. The main Velero backup process creates one or more of these when it finds an annotated pod. Each node in the cluster runs a controller for this resource (in a daemonset) that handles the `PodVolumeBackups` for pods on that node. The controller executes `restic backup` commands to backup pod volume data. -- `PodVolumeRestore` - represents a restic restore of a pod volume. The main Ark restore process creates one +- `PodVolumeRestore` - represents a restic restore of a pod volume. The main Velero restore process creates one or more of these when it encounters a pod that has associated restic backups. Each node in the cluster runs a controller for this resource (in the same daemonset as above) that handles the `PodVolumeRestores` for pods on that node. The controller executes `restic restore` commands to restore pod volume data. ### Backup -1. The main Ark backup process checks each pod that it's backing up for the annotation specifying a restic backup -should be taken (`backup.ark.heptio.com/backup-volumes`) -1. When found, Ark first ensures a restic repository exists for the pod's namespace, by: +1. The main Velero backup process checks each pod that it's backing up for the annotation specifying a restic backup +should be taken (`backup.velero.io/backup-volumes`) +1. When found, Velero first ensures a restic repository exists for the pod's namespace, by: - checking if a `ResticRepository` custom resource already exists - if not, creating a new one, and waiting for the `ResticRepository` controller to init/check it -1. Ark then creates a `PodVolumeBackup` custom resource per volume listed in the pod annotation -1. The main Ark process now waits for the `PodVolumeBackup` resources to complete or fail +1. Velero then creates a `PodVolumeBackup` custom resource per volume listed in the pod annotation +1. The main Velero process now waits for the `PodVolumeBackup` resources to complete or fail 1. Meanwhile, each `PodVolumeBackup` is handled by the controller on the appropriate node, which: - has a hostPath volume mount of `/var/lib/kubelet/pods` to access the pod volume data - finds the pod volume's subdirectory within the above volume - runs `restic backup` - updates the status of the custom resource to `Completed` or `Failed` -1. As each `PodVolumeBackup` finishes, the main Ark process captures its restic snapshot ID and adds it as an annotation -to the copy of the pod JSON that's stored in the Ark backup. This will be used for restores, as seen in the next section. +1. As each `PodVolumeBackup` finishes, the main Velero process captures its restic snapshot ID and adds it as an annotation +to the copy of the pod JSON that's stored in the Velero backup. This will be used for restores, as seen in the next section. ### Restore -1. The main Ark restore process checks each pod that it's restoring for annotations specifying a restic backup -exists for a volume in the pod (`snapshot.ark.heptio.com/`) -1. When found, Ark first ensures a restic repository exists for the pod's namespace, by: +1. The main Velero restore process checks each pod that it's restoring for annotations specifying a restic backup +exists for a volume in the pod (`snapshot.velero.io/`) +1. 
When found, Velero first ensures a restic repository exists for the pod's namespace, by: - checking if a `ResticRepository` custom resource already exists - if not, creating a new one, and waiting for the `ResticRepository` controller to init/check it (note that - in this case, the actual repository should already exist in object storage, so the Ark controller will simply + in this case, the actual repository should already exist in object storage, so the Velero controller will simply check it for integrity) -1. Ark adds an init container to the pod, whose job is to wait for all restic restores for the pod to complete (more +1. Velero adds an init container to the pod, whose job is to wait for all restic restores for the pod to complete (more on this shortly) -1. Ark creates the pod, with the added init container, by submitting it to the Kubernetes API -1. Ark creates a `PodVolumeRestore` custom resource for each volume to be restored in the pod -1. The main Ark process now waits for each `PodVolumeRestore` resource to complete or fail +1. Velero creates the pod, with the added init container, by submitting it to the Kubernetes API +1. Velero creates a `PodVolumeRestore` custom resource for each volume to be restored in the pod +1. The main Velero process now waits for each `PodVolumeRestore` resource to complete or fail 1. Meanwhile, each `PodVolumeRestore` is handled by the controller on the appropriate node, which: - has a hostPath volume mount of `/var/lib/kubelet/pods` to access the pod volume data - waits for the pod to be running the init container - finds the pod volume's subdirectory within the above volume - runs `restic restore` - - on success, writes a file into the pod volume, in an `.ark` subdirectory, whose name is the UID of the Ark restore + - on success, writes a file into the pod volume, in a `.velero` subdirectory, whose name is the UID of the Velero restore that this pod volume restore is for - updates the status of the custom resource to `Completed` or `Failed` 1. The init container that was added to the pod is running a process that waits until it finds a file -within each restored volume, under `.ark`, whose name is the UID of the Ark restore being run +within each restored volume, under `.velero`, whose name is the UID of the Velero restore being run 1. Once all such files are found, the init container's process terminates successfully and the pod moves on to running other init containers/the main containers. [1]: https://github.com/restic/restic [2]: install-overview.md -[3]: https://github.com/heptio/ark/releases/ +[3]: https://github.com/heptio/velero/releases/ [4]: https://kubernetes.io/docs/concepts/storage/volumes/#local [5]: http://restic.readthedocs.io/en/latest/100_references.html#terminology [6]: https://kubernetes.io/docs/concepts/storage/volumes/#mount-propagation diff --git a/docs/storage-layout-reorg-v0.10.md b/docs/storage-layout-reorg-v0.10.md deleted file mode 100644 index dab57a9364..0000000000 --- a/docs/storage-layout-reorg-v0.10.md +++ /dev/null @@ -1,160 +0,0 @@ -# Object Storage Layout Changes in v0.10 - -## Overview - -Ark v0.10 includes breaking changes to where data is stored in your object storage bucket. You'll need to run a [one-time migration procedure](#upgrading-to-v010) -if you're upgrading from prior versions of Ark. 
- -## Details - -Prior to v0.10, Ark stored data in an object storage bucket using the following structure: - -``` -/ - backup-1/ - ark-backup.json - backup-1.tar.gz - backup-1-logs.gz - restore-of-backup-1-logs.gz - restore-of-backup-1-results.gz - backup-2/ - ark-backup.json - backup-2.tar.gz - backup-2-logs.gz - restore-of-backup-2-logs.gz - restore-of-backup-2-results.gz - ... -``` - -Ark also stored restic data, if applicable, in a separate object storage bucket, structured as: - -``` -/[/] - namespace-1/ - data/ - index/ - keys/ - snapshots/ - config - namespace-2/ - data/ - index/ - keys/ - snapshots/ - config - ... -``` - -As of v0.10, we've reorganized this layout to provide a cleaner and more extensible directory structure. The new layout looks like: - -``` -[/]/ - backups/ - backup-1/ - ark-backup.json - backup-1.tar.gz - backup-1-logs.gz - backup-2/ - ark-backup.json - backup-2.tar.gz - backup-2-logs.gz - ... - restores/ - restore-of-backup-1/ - restore-of-backup-1-logs.gz - restore-of-backup-1-results.gz - restore-of-backup-2/ - restore-of-backup-2-logs.gz - restore-of-backup-2-results.gz - ... - restic/ - namespace-1/ - data/ - index/ - keys/ - snapshots/ - config - namespace-2/ - data/ - index/ - keys/ - snapshots/ - config - ... - ... -``` - -## Upgrading to v0.10 - -Before upgrading to v0.10, you'll need to run a one-time upgrade script to rearrange the contents of your existing Ark bucket(s) to be compatible with -the new layout. - -Please note that the following scripts **will not** migrate existing restore logs/results into the new `restores/` subdirectory. This means that they -will not be accessible using `ark restore describe` or `ark restore logs`. They *will* remain in the relevant backup's subdirectory so they are manually -accessible, and will eventually be garbage-collected along with the backup. We've taken this approach in order to keep the migration scripts simple -and less error-prone. - -### rclone-Based Script - -This script uses [rclone][1], which you can download and install following the instructions [here][2]. -Please read through the script carefully before starting and execute it step-by-step. - -```bash -ARK_BUCKET= -ARK_TEMP_MIGRATION_BUCKET= - -# 1. This is an interactive step that configures rclone to be -# able to access your storage provider. Follow the instructions, -# and keep track of the "remote name" for the next step: -rclone config - -# 2. Store the name of the rclone remote that you just set up -# in Step #1: -RCLONE_REMOTE_NAME= - -# 3. Create a temporary bucket to be used as a backup of your -# current Ark bucket's contents: -rclone mkdir ${RCLONE_REMOTE_NAME}:${ARK_TEMP_MIGRATION_BUCKET} - -# 4. Do a full copy of the contents of your Ark bucket into the -# temporary bucket: -rclone copy ${RCLONE_REMOTE_NAME}:${ARK_BUCKET} ${RCLONE_REMOTE_NAME}:${ARK_TEMP_MIGRATION_BUCKET} - -# 5. Verify that the temporary bucket contains an exact copy of -# your Ark bucket's contents. You should see a short block -# of output stating "0 differences found": -rclone check ${RCLONE_REMOTE_NAME}:${ARK_BUCKET} ${RCLONE_REMOTE_NAME}:${ARK_TEMP_MIGRATION_BUCKET} - -# 6. Delete your Ark bucket's contents (this command does not -# delete the bucket itself, only the contents): -rclone delete ${RCLONE_REMOTE_NAME}:${ARK_BUCKET} - -# 7. Copy the contents of the temporary bucket into your Ark bucket, -# under the 'backups/' directory/prefix: -rclone copy ${RCLONE_REMOTE_NAME}:${ARK_TEMP_MIGRATION_BUCKET} ${RCLONE_REMOTE_NAME}:${ARK_BUCKET}/backups - -# 8. 
Verify that the 'backups/' directory in your Ark bucket now -# contains an exact copy of the temporary bucket's contents: -rclone check ${RCLONE_REMOTE_NAME}:${ARK_BUCKET}/backups ${RCLONE_REMOTE_NAME}:${ARK_TEMP_MIGRATION_BUCKET} - -# 9. OPTIONAL: If you have restic data to migrate: - -# a. Copy the contents of your Ark restic location into your -# Ark bucket, under the 'restic/' directory/prefix: - ARK_RESTIC_LOCATION= - rclone copy ${RCLONE_REMOTE_NAME}:${ARK_RESTIC_LOCATION} ${RCLONE_REMOTE_NAME}:${ARK_BUCKET}/restic - -# b. Check that the 'restic/' directory in your Ark bucket now -# contains an exact copy of your restic location: - rclone check ${RCLONE_REMOTE_NAME}:${ARK_BUCKET}/restic ${RCLONE_REMOTE_NAME}:${ARK_RESTIC_LOCATION} - -# c. Delete your ResticRepository custom resources to allow Ark -# to find them in the new location: - kubectl -n heptio-ark delete resticrepositories --all - -# 10. Once you've confirmed that Ark v0.10 works with your revised Ark -# bucket, you can delete the temporary migration bucket. -``` - -[1]: https://rclone.org/ -[2]: https://rclone.org/downloads/ diff --git a/docs/support-matrix.md b/docs/support-matrix.md index 4e2bdfae8d..faf85dffc2 100644 --- a/docs/support-matrix.md +++ b/docs/support-matrix.md @@ -1,20 +1,20 @@ # Compatible Storage Providers -Ark supports a variety of storage providers for different backup and snapshot operations. As of version 0.6.0, a plugin system allows anyone to add compatibility for additional backup and volume storage platforms without modifying the Ark codebase. +Velero supports a variety of storage providers for different backup and snapshot operations. As of version 0.6.0, a plugin system allows anyone to add compatibility for additional backup and volume storage platforms without modifying the Velero codebase. ## Backup Storage Providers | Provider | Owner | Contact | |---------------------------|----------|---------------------------------| -| [AWS S3][2] | Ark Team | [Slack][10], [GitHub Issue][11] | -| [Azure Blob Storage][3] | Ark Team | [Slack][10], [GitHub Issue][11] | -| [Google Cloud Storage][4] | Ark Team | [Slack][10], [GitHub Issue][11] | +| [AWS S3][2] | Velero Team | [Slack][10], [GitHub Issue][11] | +| [Azure Blob Storage][3] | Velero Team | [Slack][10], [GitHub Issue][11] | +| [Google Cloud Storage][4] | Velero Team | [Slack][10], [GitHub Issue][11] | ## S3-Compatible Backup Storage Providers -Ark uses [Amazon's Go SDK][12] to connect to the S3 API. Some third-party storage providers also support the S3 API, and users have reported the following providers work with Ark: +Velero uses [Amazon's Go SDK][12] to connect to the S3 API. 
Some third-party storage providers also support the S3 API, and users have reported the following providers work with Velero: -_Note that these providers are not regularly tested by the Ark team._ +_Note that these providers are not regularly tested by the Velero team._ * [IBM Cloud][5] * [Minio][9] @@ -28,10 +28,10 @@ _Some storage providers, like Quobyte, may need a different [signature algorithm | Provider | Owner | Contact | |----------------------------------|-----------------|---------------------------------| -| [AWS EBS][2] | Ark Team | [Slack][10], [GitHub Issue][11] | -| [Azure Managed Disks][3] | Ark Team | [Slack][10], [GitHub Issue][11] | -| [Google Compute Engine Disks][4] | Ark Team | [Slack][10], [GitHub Issue][11] | -| [Restic][1] | Ark Team | [Slack][10], [GitHub Issue][11] | +| [AWS EBS][2] | Velero Team | [Slack][10], [GitHub Issue][11] | +| [Azure Managed Disks][3] | Velero Team | [Slack][10], [GitHub Issue][11] | +| [Google Compute Engine Disks][4] | Velero Team | [Slack][10], [GitHub Issue][11] | +| [Restic][1] | Velero Team | [Slack][10], [GitHub Issue][11] | | [Portworx][6] | Portworx | [Slack][13], [GitHub Issue][14] | | [DigitalOcean][7] | StackPointCloud | | @@ -48,10 +48,10 @@ After you publish your plugin, open a PR that adds your plugin to the appropriat [5]: ibm-config.md [6]: https://docs.portworx.com/scheduler/kubernetes/ark.html [7]: https://github.com/StackPointCloud/ark-plugin-digitalocean -[8]: https://github.com/heptio/ark-plugin-example/ +[8]: https://github.com/heptio/velero-plugin-example/ [9]: get-started.md -[10]: https://kubernetes.slack.com/messages/ark-dr -[11]: https://github.com/heptio/ark/issues +[10]: https://kubernetes.slack.com/messages/velero +[11]: https://github.com/heptio/velero/issues [12]: https://github.com/aws/aws-sdk-go/aws [13]: https://portworx.slack.com/messages/px-k8s [14]: https://github.com/portworx/ark-plugin/issues diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index 2b861a11bd..0c70b876d0 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -1,6 +1,6 @@ # Troubleshooting -These tips can help you troubleshoot known issues. If they don't help, you can [file an issue][4], or talk to us on the [#ark-dr channel][25] on the Kubernetes Slack server. +These tips can help you troubleshoot known issues. If they don't help, you can [file an issue][4], or talk to us on the [#velero channel][25] on the Kubernetes Slack server. See also: @@ -9,29 +9,29 @@ See also: ## General troubleshooting information -In `ark` version >= `0.1.0`, you can use the `ark bug` command to open a [Github issue][4] by launching a browser window with some prepopulated values. Values included are OS, CPU architecture, `kubectl` client and server versions (if available) and the `ark` client version. This information isn't submitted to Github until you click the `Submit new issue` button in the Github UI, so feel free to add, remove or update whatever information you like. +In `velero` version >= `0.10.0`, you can use the `velero bug` command to open a [Github issue][4] by launching a browser window with some prepopulated values. Values included are OS, CPU architecture, `kubectl` client and server versions (if available) and the `velero` client version. This information isn't submitted to Github until you click the `Submit new issue` button in the Github UI, so feel free to add, remove or update whatever information you like. 
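As a quick illustration (a sketch, assuming only that the `velero` client binary is installed and on your PATH), a minimal invocation looks like:

```bash
# Opens a browser window with a prepopulated GitHub issue form; nothing is filed
# until you click "Submit new issue" in the GitHub UI.
velero bug
```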
Some general commands for troubleshooting that may be helpful: -* `ark backup describe ` - describe the details of a backup -* `ark backup logs ` - fetch the logs for this specific backup. Useful for viewing failures and warnings, including resources that could not be backed up. -* `ark restore describe ` - describe the details of a restore -* `ark restore logs ` - fetch the logs for this specific restore. Useful for viewing failures and warnings, including resources that could not be restored. -* `kubectl logs deployment/ark -n heptio-ark` - fetch the logs of the Ark server pod. This provides the output of the Ark server processes. +* `velero backup describe ` - describe the details of a backup +* `velero backup logs ` - fetch the logs for this specific backup. Useful for viewing failures and warnings, including resources that could not be backed up. +* `velero restore describe ` - describe the details of a restore +* `velero restore logs ` - fetch the logs for this specific restore. Useful for viewing failures and warnings, including resources that could not be restored. +* `kubectl logs deployment/velero -n velero` - fetch the logs of the Velero server pod. This provides the output of the Velero server processes. -### Getting ark debug logs +### Getting velero debug logs -You can increase the verbosity of the Ark server by editing your Ark deployment to look like this: +You can increase the verbosity of the Velero server by editing your Velero deployment to look like this: ``` -kubectl edit deployment/ark -n heptio-ark +kubectl edit deployment/velero -n velero ... containers: - - name: ark - image: gcr.io/heptio-images/ark:latest + - name: velero + image: gcr.io/heptio-images/velero:latest command: - - /ark + - /velero args: - server - --log-level # Add this line @@ -41,18 +41,18 @@ kubectl edit deployment/ark -n heptio-ark ## Known issue with restoring LoadBalancer Service -Because of how Kubernetes handles Service objects of `type=LoadBalancer`, when you restore these objects you might encounter an issue with changed values for Service UIDs. Kubernetes automatically generates the name of the cloud resource based on the Service UID, which is different when restored, resulting in a different name for the cloud load balancer. If the DNS CNAME for your application points to the DNS name of your cloud load balancer, you'll need to update the CNAME pointer when you perform an Ark restore. +Because of how Kubernetes handles Service objects of `type=LoadBalancer`, when you restore these objects you might encounter an issue with changed values for Service UIDs. Kubernetes automatically generates the name of the cloud resource based on the Service UID, which is different when restored, resulting in a different name for the cloud load balancer. If the DNS CNAME for your application points to the DNS name of your cloud load balancer, you'll need to update the CNAME pointer when you perform a Velero restore. Alternatively, you might be able to use the Service's `spec.loadBalancerIP` field to keep connections valid, if your cloud provider supports this value. See [the Kubernetes documentation about Services of Type LoadBalancer](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer). ## Miscellaneous issues -### Ark reports `custom resource not found` errors when starting up. +### Velero reports `custom resource not found` errors when starting up. -Ark's server will not start if the required Custom Resource Definitions are not found in Kubernetes.
Apply -the `config/common/00-prereqs.yaml` file to create these definitions, then restart Ark. +Velero's server will not start if the required Custom Resource Definitions are not found in Kubernetes. Apply +the `config/common/00-prereqs.yaml` file to create these definitions, then restart Velero. -### `ark backup logs` returns a `SignatureDoesNotMatch` error +### `velero backup logs` returns a `SignatureDoesNotMatch` error Downloading artifacts from object storage utilizes temporary, signed URLs. In the case of S3-compatible providers, such as Ceph, there may be differences between their implementation and the official S3 @@ -66,6 +66,6 @@ Here are some things to verify if you receive `SignatureDoesNotMatch` errors: [1]: debugging-restores.md [2]: debugging-install.md -[4]: https://github.com/heptio/ark/issues +[4]: https://github.com/heptio/velero/issues [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html -[25]: https://kubernetes.slack.com/messages/ark-dr +[25]: https://kubernetes.slack.com/messages/velero diff --git a/docs/upgrading-to-v0.10.md b/docs/upgrading-to-v0.10.md deleted file mode 100644 index 0d6ff7dbd6..0000000000 --- a/docs/upgrading-to-v0.10.md +++ /dev/null @@ -1,89 +0,0 @@ -# Upgrading to Ark v0.10 - -## Overview - -Ark v0.10 includes a number of breaking changes. Below, we outline what those changes are, and what steps you should take to ensure -a successful upgrade from prior versions of Ark. - -## Breaking Changes - -### Switch from Config to BackupStorageLocation and VolumeSnapshotLocation CRDs, and new server flags - -Prior to v0.10, Ark used a `Config` CRD to capture information about your backup storage and persistent volume providers, as well -some miscellaneous Ark settings. In v0.10, we've eliminated this CRD and replaced it with: - -- A [BackupStorageLocation][1] CRD to capture information about where to store your backups -- A [VolumeSnapshotLocation][2] CRD to capture information about where to store your persistent volume snapshots -- Command-line flags for the `ark server` command (run by your Ark deployment) to capture miscellaneous Ark settings - -When upgrading to v0.10, you'll need to transfer the configuration information that you currently have in the `Config` CRD -into the above. We'll cover exactly how to do this below. - -For a general overview of this change, see the [Locations documentation][4]. - -### Reorganization of data in object storage - -We've made [changes to the layout of data stored in object storage][3] for simplicity and extensibility. You'll need to -rearrange any pre-v0.10 data as part of the upgrade. We've provided a script to help with this. - -## Step-by-Step Upgrade Instructions - -1. Ensure you've [downloaded & extracted the latest release][5]. - -1. Scale down your existing Ark deployment: - ```bash - kubectl scale -n heptio-ark deploy/ark --replicas 0 - ``` - -1. In the Ark directory (i.e. where you extracted the release tarball), re-apply the `00-prereqs.yaml` file to create new CRDs: - ```bash - kubectl apply -f config/common/00-prereqs.yaml - ``` - -1. Create one or more [BackupStorageLocation][1] resources based on the examples provided in the `config/` directory for your platform, using information from the existing `Config` resource as necessary. - -1. 
If you're using Ark to take PV snapshots, create one or more [VolumeSnapshotLocation][2] resources based on the examples provided in the `config/` directory for your platform, using information from the existing `Config` resource as necessary. - -1. Perform the one-time object storage migration detailed [here][3]. - -1. In your Ark deployment YAML (see the `config/` directory for samples), specify flags to the `ark server` command under the container's `args`: - - a. The names of the `BackupStorageLocation` and `VolumeSnapshotLocation(s)` that should be used by default for backups. If defaults are set here, - users won't need to explicitly specify location names when creating backups (though they still can, if they want to store backups/snapshots in - alternate locations). If no value is specified for `--default-backup-storage-location`, the Ark server looks for a `BackupStorageLocation` - named `default` to use. - - Flag | Default Value | Description | Example - ---- | ------------- | ----------- | ------- - `--default-backup-storage-location` | "default" | name of the backup storage location that should be used by default for backups | aws-us-east-1-bucket - `--default-volume-snapshot-locations` | [none] | name of the volume snapshot location(s) that should be used by default for PV snapshots, for each PV provider | aws:us-east-1,portworx:local - - **NOTE:** the values of these flags should correspond to the names of a `BackupStorageLocation` and `VolumeSnapshotLocation(s)` custom resources - in the cluster. - - b. Any non-default Ark server settings: - - Flag | Default Value | Description - ---- | ------------- | ----------- - `--backup-sync-period` | 1m | how often to ensure all Ark backups in object storage exist as Backup API objects in the cluster - `--restic-timeout` | 1h | how long backups/restores of pod volumes should be allowed to run before timing out (previously `podVolumeOperationTimeout` in the `Config` resource in pre-v0.10 versions) - `--restore-only` | false | run in a mode where only restores are allowed; backups, schedules, and garbage-collection are all disabled - -1. If you are using any plugins, update the Ark deployment YAML to reference the latest image tag for your plugins. This can be found under the `initContainers` section of your deployment YAML. - -1. Apply your updated Ark deployment YAML to your cluster and ensure the pod(s) starts up successfully. - -1. If you're using Ark's restic integration, ensure the daemon set pods have been re-created with the latest Ark image (if your daemon set YAML is using the `:latest` tag, you can delete the pods so they're recreated with an updated image). - -1. Once you've confirmed all of your settings have been migrated over correctly, delete the Config CRD: - ```bash - kubectl delete -n heptio-ark config --all - kubectl delete crd configs.ark.heptio.com - ``` - - -[1]: api-types/backupstoragelocation.md -[2]: api-types/volumesnapshotlocation.md -[3]: storage-layout-reorg-v0.10.md -[4]: locations.md -[5]: get-started.md#download diff --git a/docs/versions.md b/docs/versions.md index 10eef7b337..bac2bb9464 100644 --- a/docs/versions.md +++ b/docs/versions.md @@ -1,6 +1,6 @@ -# Upgrading Ark versions +# Upgrading Velero versions -Ark supports multiple concurrent versions. Whether you're setting up Ark for the first time or upgrading to a new version, you need to pay careful attention to versioning. This doc page is new as of version 0.10.0, and will be updated with information about subsequent releases. 
+Velero supports multiple concurrent versions. Whether you're setting up Velero for the first time or upgrading to a new version, you need to pay careful attention to versioning. This doc page is new as of version 0.10.0, and will be updated with information about subsequent releases. ## Minor versions, patch versions @@ -14,13 +14,13 @@ Breaking changes are documented in the release notes and in the documentation. - See [Upgrading to version 0.10.0][2] -## Ark versions and Kubernetes versions +## Velero versions and Kubernetes versions -Not all Ark versions support all versions of Kubernetes. You should be aware of the following known limitations: +Not all Velero versions support all versions of Kubernetes. You should be aware of the following known limitations: -- Ark version 0.9.0 requires Kubernetes version 1.8 or later. In version 0.9.1, Ark was updated to support earlier versions. +- Velero version 0.9.0 requires Kubernetes version 1.8 or later. In version 0.9.1, Velero was updated to support earlier versions. - Restic support requires Kubernetes version 1.10 or later, or an earlier version with the mount propagation feature enabled. See [Restic Integration][3]. -[1]: https://github.com/heptio/ark/releases -[2]: upgrading-to-v0.10.md +[1]: https://github.com/heptio/velero/releases +[2]: https://heptio.github.io/velero/v0.10.0/upgrading-to-v0.10 [3]: restic.md diff --git a/docs/zenhub.md b/docs/zenhub.md index e34b7678bb..7bb32a387d 100644 --- a/docs/zenhub.md +++ b/docs/zenhub.md @@ -3,13 +3,13 @@ As an Open Source community, it is necessary for our work, communication, and collaboration to be done in the open. GitHub provides a central repository for code, pull requests, issues, and documentation. When applicable, we will use Google Docs for design reviews, proposals, and other working documents. -While GitHub issues, milestones, and labels generally work pretty well, the Heptio team has found that product planning requires some additional tooling that GitHub projects do not offer. +While GitHub issues, milestones, and labels generally work pretty well, the Velero team has found that product planning requires some additional tooling that GitHub projects do not offer. In our effort to minimize tooling while enabling product management insights, we have decided to use [ZenHub Open-Source](https://www.zenhub.com/blog/open-source/) to overlay product and project tracking on top of GitHub. ZenHub is a GitHub application that provides Kanban visualization, Epic tracking, fine-grained prioritization, and more. It's primary backing storage system is existing GitHub issues along with additional metadata stored in ZenHub's database. -If you are an Ark user or Ark Developer, you do not _need_ to use ZenHub for your regular workflow (e.g to see open bug reports or feature requests, work on pull requests). However, if you'd like to be able to visualize the high-level project goals and roadmap, you will need to use the free version of ZenHub. +If you are a Velero user or Velero Developer, you do not _need_ to use ZenHub for your regular workflow (e.g. to see open bug reports or feature requests, work on pull requests). However, if you'd like to be able to visualize the high-level project goals and roadmap, you will need to use the free version of ZenHub. ## Using ZenHub -ZenHub can be integrated within the GitHub interface using their [Chrome or FireFox extensions](https://www.zenhub.com/extension).
In addition, you can use their dedicated [web application](https://app.zenhub.com/workspace/o/heptio/ark/boards?filterLogic=all&repos=99143276). +ZenHub can be integrated within the GitHub interface using their [Chrome or FireFox extensions](https://www.zenhub.com/extension). In addition, you can use their dedicated [web application](https://app.zenhub.com/workspace/o/heptio/velero/boards?filterLogic=all&repos=99143276). diff --git a/examples/README.md b/examples/README.md index dd623bf8a9..17d074b2ab 100644 --- a/examples/README.md +++ b/examples/README.md @@ -1,13 +1,13 @@ # Examples -This directory contains sample YAML config files for running Ark on each core provider. Starting with v0.10, these files are packaged into [the Ark release tarballs][2], and we highly recommend that you use the packaged versions of these files to ensure compatibility with the released code. +This directory contains sample YAML config files for running Velero on each core provider. Starting with v0.10, these files are packaged into [the Velero release tarballs][2], and we highly recommend that you use the packaged versions of these files to ensure compatibility with the released code. -* `common/`: Contains manifests to set up Ark. Can be used across cloud provider platforms. (Note that Azure requires its own deployment file due to its unique way of loading credentials). +* `common/`: Contains manifests to set up Velero. Can be used across cloud provider platforms. (Note that Azure requires its own deployment file due to its unique way of loading credentials). -* `minio/`: Used in the [Quickstart][1] to set up [Minio][0], a local S3-compatible object storage service. It provides a convenient way to test Ark without tying you to a specific cloud provider. +* `minio/`: Used in the [Quickstart][1] to set up [Minio][0], a local S3-compatible object storage service. It provides a convenient way to test Velero without tying you to a specific cloud provider. * `aws/`, `azure/`, `gcp/`, `ibm/`: Contains manifests specific to the given cloud provider's setup. [0]: https://github.com/minio/minio [1]: /README.md#quickstart -[2]: https://github.com/heptio/ark/releases +[2]: https://github.com/heptio/velero/releases diff --git a/examples/aws/05-ark-backupstoragelocation.yaml b/examples/aws/05-backupstoragelocation.yaml similarity index 93% rename from examples/aws/05-ark-backupstoragelocation.yaml rename to examples/aws/05-backupstoragelocation.yaml index 4f0c4f89e0..96f50492d8 100644 --- a/examples/aws/05-ark-backupstoragelocation.yaml +++ b/examples/aws/05-backupstoragelocation.yaml @@ -13,11 +13,11 @@ # limitations under the License. --- -apiVersion: ark.heptio.com/v1 +apiVersion: velero.io/v1 kind: BackupStorageLocation metadata: name: default - namespace: heptio-ark + namespace: velero spec: provider: aws objectStorage: diff --git a/examples/aws/06-ark-volumesnapshotlocation.yaml b/examples/aws/06-volumesnapshotlocation.yaml similarity index 89% rename from examples/aws/06-ark-volumesnapshotlocation.yaml rename to examples/aws/06-volumesnapshotlocation.yaml index b93ebabfea..4dea1bdb02 100644 --- a/examples/aws/06-ark-volumesnapshotlocation.yaml +++ b/examples/aws/06-volumesnapshotlocation.yaml @@ -13,12 +13,12 @@ # limitations under the License. 
--- -apiVersion: ark.heptio.com/v1 +apiVersion: velero.io/v1 kind: VolumeSnapshotLocation metadata: name: aws-default - namespace: heptio-ark + namespace: velero spec: provider: aws config: - region: \ No newline at end of file + region: diff --git a/examples/aws/10-deployment-kube2iam.yaml b/examples/aws/10-deployment-kube2iam.yaml index 902514fed0..37056ef2ea 100644 --- a/examples/aws/10-deployment-kube2iam.yaml +++ b/examples/aws/10-deployment-kube2iam.yaml @@ -16,30 +16,30 @@ apiVersion: apps/v1beta1 kind: Deployment metadata: - namespace: heptio-ark - name: ark + namespace: velero + name: velero spec: replicas: 1 template: metadata: labels: - component: ark + component: velero annotations: - iam.amazonaws.com/role: arn:aws:iam:::role/ + iam.amazonaws.com/role: arn:aws:iam:::role/ prometheus.io/scrape: "true" prometheus.io/port: "8085" prometheus.io/path: "/metrics" spec: restartPolicy: Always - serviceAccountName: ark + serviceAccountName: velero containers: - - name: ark - image: gcr.io/heptio-images/ark:latest + - name: velero + image: gcr.io/heptio-images/velero:latest ports: - name: metrics containerPort: 8085 command: - - /ark + - /velero args: - server volumeMounts: diff --git a/examples/aws/10-deployment.yaml b/examples/aws/10-deployment.yaml index daba129986..9cc126b80e 100644 --- a/examples/aws/10-deployment.yaml +++ b/examples/aws/10-deployment.yaml @@ -16,26 +16,26 @@ apiVersion: apps/v1beta1 kind: Deployment metadata: - namespace: heptio-ark - name: ark + namespace: velero + name: velero spec: replicas: 1 template: metadata: labels: - component: ark + component: velero annotations: prometheus.io/scrape: "true" prometheus.io/port: "8085" prometheus.io/path: "/metrics" spec: restartPolicy: Always - serviceAccountName: ark + serviceAccountName: velero containers: - - name: ark - image: gcr.io/heptio-images/ark:latest + - name: velero + image: gcr.io/heptio-images/velero:latest command: - - /ark + - /velero args: - server ## uncomment following line and specify values if needed for multiple provider snapshot locations @@ -50,7 +50,7 @@ spec: env: - name: AWS_SHARED_CREDENTIALS_FILE value: /credentials/cloud - - name: ARK_SCRATCH_DIR + - name: VELERO_SCRATCH_DIR value: /scratch #- name: AWS_CLUSTER_NAME # value: diff --git a/examples/aws/20-restic-daemonset.yaml b/examples/aws/20-restic-daemonset.yaml index c8fa577212..6a9cda8609 100644 --- a/examples/aws/20-restic-daemonset.yaml +++ b/examples/aws/20-restic-daemonset.yaml @@ -16,7 +16,7 @@ apiVersion: apps/v1 kind: DaemonSet metadata: name: restic - namespace: heptio-ark + namespace: velero spec: selector: matchLabels: @@ -26,7 +26,7 @@ spec: labels: name: restic spec: - serviceAccountName: ark + serviceAccountName: velero securityContext: runAsUser: 0 volumes: @@ -39,10 +39,10 @@ spec: - name: scratch emptyDir: {} containers: - - name: ark - image: gcr.io/heptio-images/ark:latest + - name: velero + image: gcr.io/heptio-images/velero:latest command: - - /ark + - /velero args: - restic - server @@ -59,11 +59,11 @@ spec: valueFrom: fieldRef: fieldPath: spec.nodeName - - name: HEPTIO_ARK_NAMESPACE + - name: VELERO_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace - name: AWS_SHARED_CREDENTIALS_FILE value: /credentials/cloud - - name: ARK_SCRATCH_DIR - value: /scratch \ No newline at end of file + - name: VELERO_SCRATCH_DIR + value: /scratch diff --git a/examples/azure/00-ark-deployment.yaml b/examples/azure/00-deployment.yaml similarity index 87% rename from examples/azure/00-ark-deployment.yaml rename to 
examples/azure/00-deployment.yaml index 178e857150..23234c5e5d 100644 --- a/examples/azure/00-ark-deployment.yaml +++ b/examples/azure/00-deployment.yaml @@ -16,29 +16,29 @@ apiVersion: apps/v1beta1 kind: Deployment metadata: - namespace: heptio-ark - name: ark + namespace: velero + name: velero spec: replicas: 1 template: metadata: labels: - component: ark + component: velero annotations: prometheus.io/scrape: "true" prometheus.io/port: "8085" prometheus.io/path: "/metrics" spec: restartPolicy: Always - serviceAccountName: ark + serviceAccountName: velero containers: - - name: ark - image: gcr.io/heptio-images/ark:latest + - name: velero + image: gcr.io/heptio-images/velero:latest ports: - name: metrics containerPort: 8085 command: - - /ark + - /velero args: - server ## uncomment following line and specify values if needed for multiple provider snapshot locations @@ -47,7 +47,7 @@ spec: - secretRef: name: cloud-credentials env: - - name: ARK_SCRATCH_DIR + - name: VELERO_SCRATCH_DIR value: /scratch volumeMounts: - name: plugins diff --git a/examples/azure/05-ark-backupstoragelocation.yaml b/examples/azure/05-backupstoragelocation.yaml similarity index 93% rename from examples/azure/05-ark-backupstoragelocation.yaml rename to examples/azure/05-backupstoragelocation.yaml index 255cc2fac6..cb33b1376e 100644 --- a/examples/azure/05-ark-backupstoragelocation.yaml +++ b/examples/azure/05-backupstoragelocation.yaml @@ -13,11 +13,11 @@ # limitations under the License. --- -apiVersion: ark.heptio.com/v1 +apiVersion: velero.io/v1 kind: BackupStorageLocation metadata: name: default - namespace: heptio-ark + namespace: velero spec: provider: azure objectStorage: diff --git a/examples/azure/06-ark-volumesnapshotlocation.yaml b/examples/azure/06-volumesnapshotlocation.yaml similarity index 93% rename from examples/azure/06-ark-volumesnapshotlocation.yaml rename to examples/azure/06-volumesnapshotlocation.yaml index 7c73cd2482..13a87a5755 100644 --- a/examples/azure/06-ark-volumesnapshotlocation.yaml +++ b/examples/azure/06-volumesnapshotlocation.yaml @@ -13,11 +13,11 @@ # limitations under the License. 
--- -apiVersion: ark.heptio.com/v1 +apiVersion: velero.io/v1 kind: VolumeSnapshotLocation metadata: name: azure-default - namespace: heptio-ark + namespace: velero spec: provider: azure config: diff --git a/examples/azure/20-restic-daemonset.yaml b/examples/azure/20-restic-daemonset.yaml index 8d6137a6b9..b92974ef32 100644 --- a/examples/azure/20-restic-daemonset.yaml +++ b/examples/azure/20-restic-daemonset.yaml @@ -16,7 +16,7 @@ apiVersion: apps/v1 kind: DaemonSet metadata: name: restic - namespace: heptio-ark + namespace: velero spec: selector: matchLabels: @@ -26,7 +26,7 @@ spec: labels: name: restic spec: - serviceAccountName: ark + serviceAccountName: velero securityContext: runAsUser: 0 volumes: @@ -36,10 +36,10 @@ spec: - name: scratch emptyDir: {} containers: - - name: ark - image: gcr.io/heptio-images/ark:latest + - name: velero + image: gcr.io/heptio-images/velero:latest command: - - /ark + - /velero args: - restic - server @@ -57,9 +57,9 @@ spec: valueFrom: fieldRef: fieldPath: spec.nodeName - - name: HEPTIO_ARK_NAMESPACE + - name: VELERO_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace - - name: ARK_SCRATCH_DIR - value: /scratch \ No newline at end of file + - name: VELERO_SCRATCH_DIR + value: /scratch diff --git a/examples/common/00-prereqs.yaml b/examples/common/00-prereqs.yaml index 2ec75f7ed8..438c6258a6 100644 --- a/examples/common/00-prereqs.yaml +++ b/examples/common/00-prereqs.yaml @@ -16,11 +16,11 @@ apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: - name: backups.ark.heptio.com + name: backups.velero.io labels: - component: ark + component: velero spec: - group: ark.heptio.com + group: velero.io version: v1 scope: Namespaced names: @@ -31,11 +31,11 @@ spec: apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: - name: schedules.ark.heptio.com + name: schedules.velero.io labels: - component: ark + component: velero spec: - group: ark.heptio.com + group: velero.io version: v1 scope: Namespaced names: @@ -46,11 +46,11 @@ spec: apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: - name: restores.ark.heptio.com + name: restores.velero.io labels: - component: ark + component: velero spec: - group: ark.heptio.com + group: velero.io version: v1 scope: Namespaced names: @@ -61,11 +61,11 @@ spec: apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: - name: downloadrequests.ark.heptio.com + name: downloadrequests.velero.io labels: - component: ark + component: velero spec: - group: ark.heptio.com + group: velero.io version: v1 scope: Namespaced names: @@ -76,11 +76,11 @@ spec: apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: - name: deletebackuprequests.ark.heptio.com + name: deletebackuprequests.velero.io labels: - component: ark + component: velero spec: - group: ark.heptio.com + group: velero.io version: v1 scope: Namespaced names: @@ -91,11 +91,11 @@ spec: apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: - name: podvolumebackups.ark.heptio.com + name: podvolumebackups.velero.io labels: - component: ark + component: velero spec: - group: ark.heptio.com + group: velero.io version: v1 scope: Namespaced names: @@ -106,11 +106,11 @@ spec: apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: - name: podvolumerestores.ark.heptio.com + name: podvolumerestores.velero.io labels: - component: ark + component: velero spec: - group: ark.heptio.com + group: 
velero.io version: v1 scope: Namespaced names: @@ -121,11 +121,11 @@ spec: apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: - name: resticrepositories.ark.heptio.com + name: resticrepositories.velero.io labels: - component: ark + component: velero spec: - group: ark.heptio.com + group: velero.io version: v1 scope: Namespaced names: @@ -136,11 +136,11 @@ spec: apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: - name: backupstoragelocations.ark.heptio.com + name: backupstoragelocations.velero.io labels: - component: ark + component: velero spec: - group: ark.heptio.com + group: velero.io version: v1 scope: Namespaced names: @@ -151,11 +151,11 @@ spec: apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: - name: volumesnapshotlocations.ark.heptio.com + name: volumesnapshotlocations.velero.io labels: - component: ark + component: velero spec: - group: ark.heptio.com + group: velero.io version: v1 scope: Namespaced names: @@ -166,11 +166,11 @@ spec: apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: - name: serverstatusrequests.ark.heptio.com + name: serverstatusrequests.velero.io labels: - component: ark + component: velero spec: - group: ark.heptio.com + group: velero.io version: v1 scope: Namespaced names: @@ -180,28 +180,28 @@ spec: apiVersion: v1 kind: Namespace metadata: - name: heptio-ark + name: velero --- apiVersion: v1 kind: ServiceAccount metadata: - name: ark - namespace: heptio-ark + name: velero + namespace: velero labels: - component: ark + component: velero --- apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRoleBinding metadata: - name: ark + name: velero labels: - component: ark + component: velero subjects: - kind: ServiceAccount - namespace: heptio-ark - name: ark + namespace: velero + name: velero roleRef: kind: ClusterRole name: cluster-admin diff --git a/examples/common/README.md b/examples/common/README.md index ea56ea63eb..8cb41883e5 100644 --- a/examples/common/README.md +++ b/examples/common/README.md @@ -2,9 +2,9 @@ ## 00-prereqs.yaml -This file contains the prerequisites necessary to run the Ark server: +This file contains the prerequisites necessary to run the Velero server: -- `heptio-ark` namespace -- `ark` service account -- RBAC rules to grant permissions to the `ark` service account -- CRDs for the Ark-specific resources (Backup, Schedule, Restore, etc.) +- `velero` namespace +- `velero` service account +- RBAC rules to grant permissions to the `velero` service account +- CRDs for the Velero-specific resources (Backup, Schedule, Restore, etc.) diff --git a/examples/gcp/05-ark-backupstoragelocation.yaml b/examples/gcp/05-backupstoragelocation.yaml similarity index 92% rename from examples/gcp/05-ark-backupstoragelocation.yaml rename to examples/gcp/05-backupstoragelocation.yaml index 34c83b98c9..65352b72e4 100644 --- a/examples/gcp/05-ark-backupstoragelocation.yaml +++ b/examples/gcp/05-backupstoragelocation.yaml @@ -13,11 +13,11 @@ # limitations under the License. 
--- -apiVersion: ark.heptio.com/v1 +apiVersion: velero.io/v1 kind: BackupStorageLocation metadata: name: default - namespace: heptio-ark + namespace: velero spec: provider: gcp objectStorage: diff --git a/examples/gcp/06-ark-volumesnapshotlocation.yaml b/examples/gcp/06-volumesnapshotlocation.yaml similarity index 90% rename from examples/gcp/06-ark-volumesnapshotlocation.yaml rename to examples/gcp/06-volumesnapshotlocation.yaml index c42c3cfdc6..f4cc566938 100644 --- a/examples/gcp/06-ark-volumesnapshotlocation.yaml +++ b/examples/gcp/06-volumesnapshotlocation.yaml @@ -13,10 +13,10 @@ # limitations under the License. --- -apiVersion: ark.heptio.com/v1 +apiVersion: velero.io/v1 kind: VolumeSnapshotLocation metadata: name: gcp-default - namespace: heptio-ark + namespace: velero spec: - provider: gcp \ No newline at end of file + provider: gcp diff --git a/examples/gcp/10-deployment.yaml b/examples/gcp/10-deployment.yaml index b31135965a..3748f5abba 100644 --- a/examples/gcp/10-deployment.yaml +++ b/examples/gcp/10-deployment.yaml @@ -16,29 +16,29 @@ apiVersion: apps/v1beta1 kind: Deployment metadata: - namespace: heptio-ark - name: ark + namespace: velero + name: velero spec: replicas: 1 template: metadata: labels: - component: ark + component: velero annotations: prometheus.io/scrape: "true" prometheus.io/port: "8085" prometheus.io/path: "/metrics" spec: restartPolicy: Always - serviceAccountName: ark + serviceAccountName: velero containers: - - name: ark - image: gcr.io/heptio-images/ark:latest + - name: velero + image: gcr.io/heptio-images/velero:latest ports: - name: metrics containerPort: 8085 command: - - /ark + - /velero args: - server ## uncomment following line and specify values if needed for multiple provider snapshot locations @@ -53,7 +53,7 @@ spec: env: - name: GOOGLE_APPLICATION_CREDENTIALS value: /credentials/cloud - - name: ARK_SCRATCH_DIR + - name: VELERO_SCRATCH_DIR value: /scratch volumes: - name: cloud-credentials diff --git a/examples/gcp/20-restic-daemonset.yaml b/examples/gcp/20-restic-daemonset.yaml index 62aafb8e36..9f48a0fb84 100644 --- a/examples/gcp/20-restic-daemonset.yaml +++ b/examples/gcp/20-restic-daemonset.yaml @@ -16,7 +16,7 @@ apiVersion: apps/v1 kind: DaemonSet metadata: name: restic - namespace: heptio-ark + namespace: velero spec: selector: matchLabels: @@ -26,7 +26,7 @@ spec: labels: name: restic spec: - serviceAccountName: ark + serviceAccountName: velero securityContext: runAsUser: 0 volumes: @@ -39,10 +39,10 @@ spec: - name: scratch emptyDir: {} containers: - - name: ark - image: gcr.io/heptio-images/ark:latest + - name: velero + image: gcr.io/heptio-images/velero:latest command: - - /ark + - /velero args: - restic - server @@ -59,11 +59,11 @@ spec: valueFrom: fieldRef: fieldPath: spec.nodeName - - name: HEPTIO_ARK_NAMESPACE + - name: VELERO_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace - name: GOOGLE_APPLICATION_CREDENTIALS value: /credentials/cloud - - name: ARK_SCRATCH_DIR - value: /scratch \ No newline at end of file + - name: VELERO_SCRATCH_DIR + value: /scratch diff --git a/examples/ibm/05-ark-backupstoragelocation.yaml b/examples/ibm/05-backupstoragelocation.yaml similarity index 93% rename from examples/ibm/05-ark-backupstoragelocation.yaml rename to examples/ibm/05-backupstoragelocation.yaml index 57cbeea752..458aa5ce4c 100644 --- a/examples/ibm/05-ark-backupstoragelocation.yaml +++ b/examples/ibm/05-backupstoragelocation.yaml @@ -13,11 +13,11 @@ # limitations under the License. 
--- -apiVersion: ark.heptio.com/v1 +apiVersion: velero.io/v1 kind: BackupStorageLocation metadata: name: default - namespace: heptio-ark + namespace: velero spec: provider: aws objectStorage: diff --git a/examples/ibm/10-deployment.yaml b/examples/ibm/10-deployment.yaml index 8d54af35b0..0fe622b652 100644 --- a/examples/ibm/10-deployment.yaml +++ b/examples/ibm/10-deployment.yaml @@ -16,29 +16,29 @@ apiVersion: apps/v1beta1 kind: Deployment metadata: - namespace: heptio-ark - name: ark + namespace: velero + name: velero spec: replicas: 1 template: metadata: labels: - component: ark + component: velero annotations: prometheus.io/scrape: "true" prometheus.io/port: "8085" prometheus.io/path: "/metrics" spec: restartPolicy: Always - serviceAccountName: ark + serviceAccountName: velero containers: - - name: ark - image: gcr.io/heptio-images/ark:latest + - name: velero + image: gcr.io/heptio-images/velero:latest ports: - name: metrics containerPort: 8085 command: - - /ark + - /velero args: - server volumeMounts: @@ -51,7 +51,7 @@ spec: env: - name: AWS_SHARED_CREDENTIALS_FILE value: /credentials/cloud - - name: ARK_SCRATCH_DIR + - name: VELERO_SCRATCH_DIR value: /scratch volumes: - name: cloud-credentials diff --git a/examples/minio/00-minio-deployment.yaml b/examples/minio/00-minio-deployment.yaml index 37c28739b5..bd262b79b8 100644 --- a/examples/minio/00-minio-deployment.yaml +++ b/examples/minio/00-minio-deployment.yaml @@ -16,7 +16,7 @@ apiVersion: apps/v1beta1 kind: Deployment metadata: - namespace: heptio-ark + namespace: velero name: minio labels: component: minio @@ -58,7 +58,7 @@ spec: apiVersion: v1 kind: Service metadata: - namespace: heptio-ark + namespace: velero name: minio labels: component: minio @@ -78,7 +78,7 @@ spec: apiVersion: v1 kind: Secret metadata: - namespace: heptio-ark + namespace: velero name: cloud-credentials labels: component: minio @@ -92,7 +92,7 @@ stringData: apiVersion: batch/v1 kind: Job metadata: - namespace: heptio-ark + namespace: velero name: minio-setup labels: component: minio @@ -112,7 +112,7 @@ spec: command: - /bin/sh - -c - - "mc --config-dir=/config config host add ark http://minio:9000 minio minio123 && mc --config-dir=/config mb -p ark/ark" + - "mc --config-dir=/config config host add velero http://minio:9000 minio minio123 && mc --config-dir=/config mb -p velero/velero" volumeMounts: - name: config mountPath: "/config" diff --git a/examples/minio/05-ark-backupstoragelocation.yaml b/examples/minio/05-backupstoragelocation.yaml similarity index 82% rename from examples/minio/05-ark-backupstoragelocation.yaml rename to examples/minio/05-backupstoragelocation.yaml index 0dbf7fda3f..3c7c4899ca 100644 --- a/examples/minio/05-ark-backupstoragelocation.yaml +++ b/examples/minio/05-backupstoragelocation.yaml @@ -13,21 +13,21 @@ # limitations under the License. --- -apiVersion: ark.heptio.com/v1 +apiVersion: velero.io/v1 kind: BackupStorageLocation metadata: name: default - namespace: heptio-ark + namespace: velero spec: provider: aws objectStorage: - bucket: ark + bucket: velero config: region: minio s3ForcePathStyle: "true" - s3Url: http://minio.heptio-ark.svc:9000 + s3Url: http://minio.velero.svc:9000 # Uncomment the following line and provide the value of an externally - # available URL for downloading logs, running Ark describe, and more. + # available URL for downloading logs, running Velero describe, and more. 
# publicUrl: https://minio.mycluster.com diff --git a/examples/minio/20-ark-deployment.yaml b/examples/minio/20-deployment.yaml similarity index 87% rename from examples/minio/20-ark-deployment.yaml rename to examples/minio/20-deployment.yaml index 33a16f7e4d..f8e237d4a1 100644 --- a/examples/minio/20-ark-deployment.yaml +++ b/examples/minio/20-deployment.yaml @@ -16,29 +16,29 @@ apiVersion: apps/v1beta1 kind: Deployment metadata: - namespace: heptio-ark - name: ark + namespace: velero + name: velero spec: replicas: 1 template: metadata: labels: - component: ark + component: velero annotations: prometheus.io/scrape: "true" prometheus.io/port: "8085" prometheus.io/path: "/metrics" spec: restartPolicy: Always - serviceAccountName: ark + serviceAccountName: velero containers: - - name: ark - image: gcr.io/heptio-images/ark:latest + - name: velero + image: gcr.io/heptio-images/velero:latest ports: - name: metrics containerPort: 8085 command: - - /ark + - /velero args: - server volumeMounts: @@ -51,7 +51,7 @@ spec: env: - name: AWS_SHARED_CREDENTIALS_FILE value: /credentials/cloud - - name: ARK_SCRATCH_DIR + - name: VELERO_SCRATCH_DIR value: /scratch volumes: - name: cloud-credentials diff --git a/examples/minio/30-restic-daemonset.yaml b/examples/minio/30-restic-daemonset.yaml index b39b55ad84..8c79a6dbda 100644 --- a/examples/minio/30-restic-daemonset.yaml +++ b/examples/minio/30-restic-daemonset.yaml @@ -16,7 +16,7 @@ apiVersion: apps/v1 kind: DaemonSet metadata: name: restic - namespace: heptio-ark + namespace: velero spec: selector: matchLabels: @@ -26,7 +26,7 @@ spec: labels: name: restic spec: - serviceAccountName: ark + serviceAccountName: velero securityContext: runAsUser: 0 volumes: @@ -39,10 +39,10 @@ spec: - name: scratch emptyDir: {} containers: - - name: ark - image: gcr.io/heptio-images/ark:latest + - name: velero + image: gcr.io/heptio-images/velero:latest command: - - /ark + - /velero args: - restic - server @@ -59,11 +59,11 @@ spec: valueFrom: fieldRef: fieldPath: spec.nodeName - - name: HEPTIO_ARK_NAMESPACE + - name: VELERO_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace - name: AWS_SHARED_CREDENTIALS_FILE value: /credentials/cloud - - name: ARK_SCRATCH_DIR - value: /scratch \ No newline at end of file + - name: VELERO_SCRATCH_DIR + value: /scratch diff --git a/examples/nginx-app/README.md b/examples/nginx-app/README.md index 5583e420ac..76cec37e6a 100644 --- a/examples/nginx-app/README.md +++ b/examples/nginx-app/README.md @@ -4,12 +4,12 @@ This directory contains manifests for two versions of a sample Nginx app under t ## `base.yaml` -This is the most basic version of the Nginx app, which can be used to test Ark's backup and restore functionality. +This is the most basic version of the Nginx app, which can be used to test Velero's backup and restore functionality. *This can be deployed as is.* ## `with-pv.yaml` -This sets up an Nginx app that logs to a persistent volume, so that Ark's PV snapshotting functionality can also be tested. +This sets up an Nginx app that logs to a persistent volume, so that Velero's PV snapshotting functionality can also be tested. 
*This requires you to first replace the placeholder value ``.* diff --git a/examples/nginx-app/with-pv.yaml b/examples/nginx-app/with-pv.yaml index daf333a666..7f91d3fa45 100644 --- a/examples/nginx-app/with-pv.yaml +++ b/examples/nginx-app/with-pv.yaml @@ -49,10 +49,10 @@ spec: labels: app: nginx annotations: - pre.hook.backup.ark.heptio.com/container: fsfreeze - pre.hook.backup.ark.heptio.com/command: '["/sbin/fsfreeze", "--freeze", "/var/log/nginx"]' - post.hook.backup.ark.heptio.com/container: fsfreeze - post.hook.backup.ark.heptio.com/command: '["/sbin/fsfreeze", "--unfreeze", "/var/log/nginx"]' + pre.hook.backup.velero.io/container: fsfreeze + pre.hook.backup.velero.io/command: '["/sbin/fsfreeze", "--freeze", "/var/log/nginx"]' + post.hook.backup.velero.io/container: fsfreeze + post.hook.backup.velero.io/command: '["/sbin/fsfreeze", "--unfreeze", "/var/log/nginx"]' spec: volumes: - name: nginx-logs diff --git a/hack/set-example-tags.sh b/hack/set-example-tags.sh index 21ea2b6eda..44c24323a3 100755 --- a/hack/set-example-tags.sh +++ b/hack/set-example-tags.sh @@ -30,7 +30,7 @@ rm -rf config/ && cp -r examples/ config/ # the "-i'.bak'" flag to sed is necessary, with no space between the flag # and the value, for this to be compatible across BSD/OSX sed and GNU sed. # remove the ".bak" files afterwards (they're copies of the originals). -find config/ -type f -name "*.yaml" | xargs sed -i'.bak' "s|gcr.io/heptio-images/ark:latest|gcr.io/heptio-images/ark:$GIT_TAG|g" +find config/ -type f -name "*.yaml" | xargs sed -i'.bak' "s|gcr.io/heptio-images/velero:latest|gcr.io/heptio-images/velero:$GIT_TAG|g" find config/ -type f -name "*.bak" | xargs rm find config/ -type f -name "*.yaml" | xargs sed -i'.bak' "s|gcr.io/heptio-images/fsfreeze-pause:latest|gcr.io/heptio-images/fsfreeze-pause:$GIT_TAG|g" diff --git a/hack/update-fmt.sh b/hack/update-fmt.sh index bf32a983fc..39dfe0f6fa 100755 --- a/hack/update-fmt.sh +++ b/hack/update-fmt.sh @@ -50,7 +50,7 @@ fi echo "${ACTION} goimports" for file in ${files}; do - output=$(goimports "${MODE}" -local github.com/heptio/ark "${file}") + output=$(goimports "${MODE}" -local github.com/heptio/velero "${file}") if [[ -n "${output}" ]]; then VERIFY_IMPORTS_FAILED=1 echo "${output}" diff --git a/hack/update-generated-crd-code.sh b/hack/update-generated-crd-code.sh index ad9ddc4ae3..ebbb8f1ca6 100755 --- a/hack/update-generated-crd-code.sh +++ b/hack/update-generated-crd-code.sh @@ -32,8 +32,8 @@ cd ${GOPATH}/src/k8s.io/code-generator ./generate-groups.sh \ all \ - github.com/heptio/ark/pkg/generated \ - github.com/heptio/ark/pkg/apis \ - ark:v1 \ - --go-header-file ${GOPATH}/src/github.com/heptio/ark/hack/boilerplate.go.txt \ + github.com/heptio/velero/pkg/generated \ + github.com/heptio/velero/pkg/apis \ + "ark:v1 velero:v1" \ + --go-header-file ${GOPATH}/src/github.com/heptio/velero/hack/boilerplate.go.txt \ $@ diff --git a/hack/update-generated-issue-template.sh b/hack/update-generated-issue-template.sh index 05ad23bc45..79ddaf1088 100755 --- a/hack/update-generated-issue-template.sh +++ b/hack/update-generated-issue-template.sh @@ -14,8 +14,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -ARK_ROOT=$(dirname ${BASH_SOURCE})/.. -BIN=${ARK_ROOT}/_output/bin +VELERO_ROOT=$(dirname ${BASH_SOURCE})/.. 
+BIN=${VELERO_ROOT}/_output/bin mkdir -p ${BIN} @@ -29,7 +29,7 @@ fi OUTPUT_ISSUE_FILE="$1" if [[ -z "${OUTPUT_ISSUE_FILE}" ]]; then - OUTPUT_ISSUE_FILE=${ARK_ROOT}/.github/ISSUE_TEMPLATE/bug_report.md + OUTPUT_ISSUE_FILE=${VELERO_ROOT}/.github/ISSUE_TEMPLATE/bug_report.md fi ${BIN}/issue-tmpl-gen ${OUTPUT_ISSUE_FILE} diff --git a/hack/verify-generated-issue-template.sh b/hack/verify-generated-issue-template.sh index 43832c6ac0..7f4aa4d8b2 100755 --- a/hack/verify-generated-issue-template.sh +++ b/hack/verify-generated-issue-template.sh @@ -14,9 +14,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -ARK_ROOT=$(dirname ${BASH_SOURCE})/.. +VELERO_ROOT=$(dirname ${BASH_SOURCE})/.. HACK_DIR=$(dirname "${BASH_SOURCE}") -ISSUE_TEMPLATE_FILE=${ARK_ROOT}/.github/ISSUE_TEMPLATE/bug_report.md +ISSUE_TEMPLATE_FILE=${VELERO_ROOT}/.github/ISSUE_TEMPLATE/bug_report.md OUT_TMP_FILE="$(mktemp -d)"/bug_report.md diff --git a/pkg/apis/velero/v1/backup.go b/pkg/apis/velero/v1/backup.go new file mode 100644 index 0000000000..a069e45283 --- /dev/null +++ b/pkg/apis/velero/v1/backup.go @@ -0,0 +1,249 @@ +/* +Copyright 2017 the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// BackupSpec defines the specification for a Velero backup. +type BackupSpec struct { + // IncludedNamespaces is a slice of namespace names to include objects + // from. If empty, all namespaces are included. + IncludedNamespaces []string `json:"includedNamespaces"` + + // ExcludedNamespaces contains a list of namespaces that are not + // included in the backup. + ExcludedNamespaces []string `json:"excludedNamespaces"` + + // IncludedResources is a slice of resource names to include + // in the backup. If empty, all resources are included. + IncludedResources []string `json:"includedResources"` + + // ExcludedResources is a slice of resource names that are not + // included in the backup. + ExcludedResources []string `json:"excludedResources"` + + // LabelSelector is a metav1.LabelSelector to filter with + // when adding individual objects to the backup. If empty + // or nil, all objects are included. Optional. + LabelSelector *metav1.LabelSelector `json:"labelSelector"` + + // SnapshotVolumes specifies whether to take cloud snapshots + // of any PV's referenced in the set of objects included + // in the Backup. + SnapshotVolumes *bool `json:"snapshotVolumes,omitempty"` + + // TTL is a time.Duration-parseable string describing how long + // the Backup should be retained for. + TTL metav1.Duration `json:"ttl"` + + // IncludeClusterResources specifies whether cluster-scoped resources + // should be included for consideration in the backup. + IncludeClusterResources *bool `json:"includeClusterResources"` + + // Hooks represent custom behaviors that should be executed at different phases of the backup. 
+ Hooks BackupHooks `json:"hooks"` + + // StorageLocation is a string containing the name of a BackupStorageLocation where the backup should be stored. + StorageLocation string `json:"storageLocation"` + + // VolumeSnapshotLocations is a list containing names of VolumeSnapshotLocations associated with this backup. + VolumeSnapshotLocations []string `json:"volumeSnapshotLocations"` +} + +// BackupHooks contains custom behaviors that should be executed at different phases of the backup. +type BackupHooks struct { + // Resources are hooks that should be executed when backing up individual instances of a resource. + Resources []BackupResourceHookSpec `json:"resources"` +} + +// BackupResourceHookSpec defines one or more BackupResourceHooks that should be executed based on +// the rules defined for namespaces, resources, and label selector. +type BackupResourceHookSpec struct { + // Name is the name of this hook. + Name string `json:"name"` + // IncludedNamespaces specifies the namespaces to which this hook spec applies. If empty, it applies + // to all namespaces. + IncludedNamespaces []string `json:"includedNamespaces"` + // ExcludedNamespaces specifies the namespaces to which this hook spec does not apply. + ExcludedNamespaces []string `json:"excludedNamespaces"` + // IncludedResources specifies the resources to which this hook spec applies. If empty, it applies + // to all resources. + IncludedResources []string `json:"includedResources"` + // ExcludedResources specifies the resources to which this hook spec does not apply. + ExcludedResources []string `json:"excludedResources"` + // LabelSelector, if specified, filters the resources to which this hook spec applies. + LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"` + // Hooks is a list of BackupResourceHooks to execute. DEPRECATED. Replaced by PreHooks. + Hooks []BackupResourceHook `json:"hooks"` + // PreHooks is a list of BackupResourceHooks to execute prior to storing the item in the backup. + // These are executed before any "additional items" from item actions are processed. + PreHooks []BackupResourceHook `json:"pre,omitempty"` + // PostHooks is a list of BackupResourceHooks to execute after storing the item in the backup. + // These are executed after all "additional items" from item actions are processed. + PostHooks []BackupResourceHook `json:"post,omitempty"` +} + +// BackupResourceHook defines a hook for a resource. +type BackupResourceHook struct { + // Exec defines an exec hook. + Exec *ExecHook `json:"exec"` +} + +// ExecHook is a hook that uses the pod exec API to execute a command in a container in a pod. +type ExecHook struct { + // Container is the container in the pod where the command should be executed. If not specified, + // the pod's first container is used. + Container string `json:"container"` + // Command is the command and arguments to execute. + Command []string `json:"command"` + // OnError specifies how Velero should behave if it encounters an error executing this hook. + OnError HookErrorMode `json:"onError"` + // Timeout defines the maximum amount of time Velero should wait for the hook to complete before + // considering the execution a failure. + Timeout metav1.Duration `json:"timeout"` +} + +// HookErrorMode defines how Velero should treat an error from a hook. +type HookErrorMode string + +const ( + // HookErrorModeContinue means that an error from a hook is acceptable, and the backup can + // proceed. 
+ HookErrorModeContinue HookErrorMode = "Continue" + // HookErrorModeFail means that an error from a hook is problematic, and the backup should be in + // error. + HookErrorModeFail HookErrorMode = "Fail" +) + +// BackupPhase is a string representation of the lifecycle phase +// of a Velero backup. +type BackupPhase string + +const ( + // BackupPhaseNew means the backup has been created but not + // yet processed by the BackupController. + BackupPhaseNew BackupPhase = "New" + + // BackupPhaseFailedValidation means the backup has failed + // the controller's validations and therefore will not run. + BackupPhaseFailedValidation BackupPhase = "FailedValidation" + + // BackupPhaseInProgress means the backup is currently executing. + BackupPhaseInProgress BackupPhase = "InProgress" + + // BackupPhaseCompleted means the backup has run successfully without + // errors. + BackupPhaseCompleted BackupPhase = "Completed" + + // BackupPhaseFailed means the backup ran but encountered an error that + // prevented it from completing successfully. + BackupPhaseFailed BackupPhase = "Failed" + + // BackupPhaseDeleting means the backup and all its associated data are being deleted. + BackupPhaseDeleting BackupPhase = "Deleting" +) + +// BackupStatus captures the current status of a Velero backup. +type BackupStatus struct { + // Version is the backup format version. + Version int `json:"version"` + + // Expiration is when this Backup is eligible for garbage-collection. + Expiration metav1.Time `json:"expiration"` + + // Phase is the current state of the Backup. + Phase BackupPhase `json:"phase"` + + // VolumeBackups is a map of PersistentVolume names to + // information about the backed-up volume in the cloud + // provider API. + // + // Deprecated: this field is considered read-only as of v0.10 + // and will be removed in a subsequent release. The information + // previously contained here is now stored in a file in backup + // storage. + VolumeBackups map[string]*VolumeBackupInfo `json:"volumeBackups,omitempty"` + + // ValidationErrors is a slice of all validation errors (if + // applicable). + ValidationErrors []string `json:"validationErrors"` + + // StartTimestamp records the time a backup was started. + // Separate from CreationTimestamp, since that value changes + // on restores. + // The server's time is used for StartTimestamps + StartTimestamp metav1.Time `json:"startTimestamp"` + + // CompletionTimestamp records the time a backup was completed. + // Completion time is recorded even on failed backups. + // Completion time is recorded before uploading the backup object. + // The server's time is used for CompletionTimestamps + CompletionTimestamp metav1.Time `json:"completionTimestamp"` + + // VolumeSnapshotsAttempted is the total number of attempted + // volume snapshots for this backup. + VolumeSnapshotsAttempted int `json:"volumeSnapshotsAttempted"` + + // VolumeSnapshotsCompleted is the total number of successfully + // completed volume snapshots for this backup. + VolumeSnapshotsCompleted int `json:"volumeSnapshotsCompleted"` +} + +// VolumeBackupInfo captures the required information about +// a PersistentVolume at backup time to be able to restore +// it later. +type VolumeBackupInfo struct { + // SnapshotID is the ID of the snapshot taken in the cloud + // provider API of this volume. + SnapshotID string `json:"snapshotID"` + + // Type is the type of the disk/volume in the cloud provider + // API. 
+	Type string `json:"type"`
+
+	// AvailabilityZone is where the volume is provisioned
+	// in the cloud provider.
+	AvailabilityZone string `json:"availabilityZone,omitempty"`
+
+	// Iops is the optional value of provisioned IOPS for the
+	// disk/volume in the cloud provider API.
+	Iops *int64 `json:"iops,omitempty"`
+}
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Backup is a Velero resource that represents the capture of Kubernetes
+// cluster state at a point in time (API objects and associated volume state).
+type Backup struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ObjectMeta `json:"metadata"`
+
+	Spec BackupSpec `json:"spec"`
+	Status BackupStatus `json:"status,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BackupList is a list of Backups.
+type BackupList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata"`
+	Items []Backup `json:"items"`
+}
diff --git a/pkg/apis/velero/v1/backup_storage_location.go b/pkg/apis/velero/v1/backup_storage_location.go
new file mode 100644
index 0000000000..2cf6563e9b
--- /dev/null
+++ b/pkg/apis/velero/v1/backup_storage_location.go
@@ -0,0 +1,99 @@
+/*
+Copyright 2018 the Heptio Ark contributors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BackupStorageLocation is a location where Velero stores backup objects.
+type BackupStorageLocation struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ObjectMeta `json:"metadata"`
+
+	Spec BackupStorageLocationSpec `json:"spec"`
+	Status BackupStorageLocationStatus `json:"status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BackupStorageLocationList is a list of BackupStorageLocations.
+type BackupStorageLocationList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata"`
+	Items []BackupStorageLocation `json:"items"`
+}
+
+// StorageType represents the type of storage that a backup location uses.
+// ObjectStorage must be non-nil, since it is currently the only supported StorageType.
+type StorageType struct {
+	ObjectStorage *ObjectStorageLocation `json:"objectStorage,omitempty"`
+}
+
+// ObjectStorageLocation specifies the settings necessary to connect to a provider's object storage.
+type ObjectStorageLocation struct {
+	// Bucket is the bucket to use for object storage.
+	Bucket string `json:"bucket"`
+
+	// Prefix is the path inside a bucket to use for Velero storage. Optional.
+	Prefix string `json:"prefix"`
+}
+
+// BackupStorageLocationSpec defines the specification for a Velero BackupStorageLocation.
+type BackupStorageLocationSpec struct {
+	// Provider is the provider of the backup storage.
+	Provider string `json:"provider"`
+
+	// Config is for provider-specific configuration fields.
+	Config map[string]string `json:"config"`
+
+	StorageType `json:",inline"`
+}
+
+// BackupStorageLocationPhase is the lifecycle phase of a Velero BackupStorageLocation.
+type BackupStorageLocationPhase string
+
+const (
+	// BackupStorageLocationPhaseAvailable means the location is available to read and write from.
+	BackupStorageLocationPhaseAvailable BackupStorageLocationPhase = "Available"
+
+	// BackupStorageLocationPhaseUnavailable means the location is unavailable to read and write from.
+	BackupStorageLocationPhaseUnavailable BackupStorageLocationPhase = "Unavailable"
+)
+
+// BackupStorageLocationAccessMode represents the permissions for a BackupStorageLocation.
+type BackupStorageLocationAccessMode string
+
+const (
+	// BackupStorageLocationAccessModeReadOnly represents read-only access to a BackupStorageLocation.
+	BackupStorageLocationAccessModeReadOnly BackupStorageLocationAccessMode = "ReadOnly"
+
+	// BackupStorageLocationAccessModeReadWrite represents read and write access to a BackupStorageLocation.
+	BackupStorageLocationAccessModeReadWrite BackupStorageLocationAccessMode = "ReadWrite"
+)
+
+// BackupStorageLocationStatus describes the current status of a Velero BackupStorageLocation.
+type BackupStorageLocationStatus struct {
+	Phase BackupStorageLocationPhase `json:"phase,omitempty"`
+	AccessMode BackupStorageLocationAccessMode `json:"accessMode,omitempty"`
+	LastSyncedRevision types.UID `json:"lastSyncedRevision,omitempty"`
+	LastSyncedTime metav1.Time `json:"lastSyncedTime,omitempty"`
+}
diff --git a/pkg/apis/velero/v1/constants.go b/pkg/apis/velero/v1/constants.go
new file mode 100644
index 0000000000..754bacff0f
--- /dev/null
+++ b/pkg/apis/velero/v1/constants.go
@@ -0,0 +1,47 @@
+/*
+Copyright 2017 the Heptio Ark contributors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+const (
+	// DefaultNamespace is the Kubernetes namespace that is used by default for
+	// the Velero server and API objects.
+	DefaultNamespace = "velero"
+
+	// ResourcesDir is a top-level directory expected in backups which contains sub-directories
+	// for each resource type in the backup.
+	ResourcesDir = "resources"
+
+	// MetadataDir is a top-level directory expected in backups which contains
+	// files that store metadata about the backup, such as the backup version.
+	MetadataDir = "metadata"
+
+	// RestoreLabelKey is the label key that's applied to all resources that
+	// are created during a restore. This is applied for ease of identification
+	// of restored resources. The value will be the restore's name.
+	//
+	// This label is DEPRECATED as of v0.10 and will be removed entirely as of
+	// v1.0 and replaced with RestoreNameLabel ("velero.io/restore-name").
+	RestoreLabelKey = "velero-restore"
+
+	// ClusterScopedDir is the name of the directory containing cluster-scoped
+	// resources within a Velero backup.
+	ClusterScopedDir = "cluster"
+
+	// NamespaceScopedDir is the name of the directory containing namespace-scoped
+	// resources within a Velero backup.
+ NamespaceScopedDir = "namespaces" +) diff --git a/pkg/apis/velero/v1/delete_backup_request.go b/pkg/apis/velero/v1/delete_backup_request.go new file mode 100644 index 0000000000..6c7212242a --- /dev/null +++ b/pkg/apis/velero/v1/delete_backup_request.go @@ -0,0 +1,65 @@ +/* +Copyright 2018 the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// DeleteBackupRequestSpec is the specification for which backups to delete. +type DeleteBackupRequestSpec struct { + BackupName string `json:"backupName"` +} + +// DeleteBackupRequestPhase represents the lifecycle phase of a DeleteBackupRequest. +type DeleteBackupRequestPhase string + +const ( + // DeleteBackupRequestPhaseNew means the DeleteBackupRequest has not been processed yet. + DeleteBackupRequestPhaseNew DeleteBackupRequestPhase = "New" + // DeleteBackupRequestPhaseInProgress means the DeleteBackupRequest is being processed. + DeleteBackupRequestPhaseInProgress DeleteBackupRequestPhase = "InProgress" + // DeleteBackupRequestPhaseProcessed means the DeleteBackupRequest has been processed. + DeleteBackupRequestPhaseProcessed DeleteBackupRequestPhase = "Processed" +) + +// DeleteBackupRequestStatus is the current status of a DeleteBackupRequest. +type DeleteBackupRequestStatus struct { + // Phase is the current state of the DeleteBackupRequest. + Phase DeleteBackupRequestPhase `json:"phase"` + // Errors contains any errors that were encountered during the deletion process. + Errors []string `json:"errors"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DeleteBackupRequest is a request to delete one or more backups. +type DeleteBackupRequest struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + Spec DeleteBackupRequestSpec `json:"spec"` + Status DeleteBackupRequestStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DeleteBackupRequestList is a list of DeleteBackupRequests. +type DeleteBackupRequestList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []DeleteBackupRequest `json:"items"` +} diff --git a/pkg/apis/velero/v1/doc.go b/pkg/apis/velero/v1/doc.go new file mode 100644 index 0000000000..3edb0d5f4d --- /dev/null +++ b/pkg/apis/velero/v1/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2017 the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package + +// Package v1 is the v1 version of the API. +// +groupName=velero.io +package v1 diff --git a/pkg/apis/velero/v1/download_request.go b/pkg/apis/velero/v1/download_request.go new file mode 100644 index 0000000000..593e801e83 --- /dev/null +++ b/pkg/apis/velero/v1/download_request.go @@ -0,0 +1,89 @@ +/* +Copyright 2017 the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// DownloadRequestSpec is the specification for a download request. +type DownloadRequestSpec struct { + // Target is what to download (e.g. logs for a backup). + Target DownloadTarget `json:"target"` +} + +// DownloadTargetKind represents what type of file to download. +type DownloadTargetKind string + +const ( + DownloadTargetKindBackupLog DownloadTargetKind = "BackupLog" + DownloadTargetKindBackupContents DownloadTargetKind = "BackupContents" + DownloadTargetKindBackupVolumeSnapshots DownloadTargetKind = "BackupVolumeSnapshots" + DownloadTargetKindRestoreLog DownloadTargetKind = "RestoreLog" + DownloadTargetKindRestoreResults DownloadTargetKind = "RestoreResults" +) + +// DownloadTarget is the specification for what kind of file to download, and the name of the +// resource with which it's associated. +type DownloadTarget struct { + // Kind is the type of file to download. + Kind DownloadTargetKind `json:"kind"` + // Name is the name of the kubernetes resource with which the file is associated. + Name string `json:"name"` +} + +// DownloadRequestPhase represents the lifecycle phase of a DownloadRequest. +type DownloadRequestPhase string + +const ( + // DownloadRequestPhaseNew means the DownloadRequest has not been processed by the + // DownloadRequestController yet. + DownloadRequestPhaseNew DownloadRequestPhase = "New" + // DownloadRequestPhaseProcessed means the DownloadRequest has been processed by the + // DownloadRequestController. + DownloadRequestPhaseProcessed DownloadRequestPhase = "Processed" +) + +// DownloadRequestStatus is the current status of a DownloadRequest. +type DownloadRequestStatus struct { + // Phase is the current state of the DownloadRequest. + Phase DownloadRequestPhase `json:"phase"` + // DownloadURL contains the pre-signed URL for the target file. + DownloadURL string `json:"downloadURL"` + // Expiration is when this DownloadRequest expires and can be deleted by the system. + Expiration metav1.Time `json:"expiration"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DownloadRequest is a request to download an artifact from backup object storage, such as a backup +// log file. +type DownloadRequest struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + Spec DownloadRequestSpec `json:"spec"` + Status DownloadRequestStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DownloadRequestList is a list of DownloadRequests. 
+type DownloadRequestList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []DownloadRequest `json:"items"` +} diff --git a/pkg/apis/velero/v1/labels_annotations.go b/pkg/apis/velero/v1/labels_annotations.go new file mode 100644 index 0000000000..bfe9b09d17 --- /dev/null +++ b/pkg/apis/velero/v1/labels_annotations.go @@ -0,0 +1,50 @@ +/* +Copyright 2018 the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +const ( + // BackupNameLabel is the label key used to identify a backup by name. + BackupNameLabel = "velero.io/backup-name" + + // BackupUIDLabel is the label key used to identify a backup by uid. + BackupUIDLabel = "velero.io/backup-uid" + + // RestoreNameLabel is the label key used to identify a restore by name. + RestoreNameLabel = "velero.io/restore-name" + + // ScheduleNameLabel is the label key used to identify a schedule by name. + ScheduleNameLabel = "velero.io/schedule-name" + + // RestoreUIDLabel is the label key used to identify a restore by uid. + RestoreUIDLabel = "velero.io/restore-uid" + + // PodUIDLabel is the label key used to identify a pod by uid. + PodUIDLabel = "velero.io/pod-uid" + + // PodVolumeOperationTimeoutAnnotation is the annotation key used to apply + // a backup/restore-specific timeout value for pod volume operations (i.e. + // restic backups/restores). + PodVolumeOperationTimeoutAnnotation = "velero.io/pod-volume-timeout" + + // StorageLocationLabel is the label key used to identify the storage + // location of a backup. + StorageLocationLabel = "velero.io/storage-location" + + // ResticVolumeNamespaceLabel is the label key used to identify which + // namespace a restic repository stores pod volume backups for. + ResticVolumeNamespaceLabel = "velero.io/volume-namespace" +) diff --git a/pkg/apis/velero/v1/pod_volume_backup.go b/pkg/apis/velero/v1/pod_volume_backup.go new file mode 100644 index 0000000000..5634f5e200 --- /dev/null +++ b/pkg/apis/velero/v1/pod_volume_backup.go @@ -0,0 +1,91 @@ +/* +Copyright 2018 the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + corev1api "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// PodVolumeBackupSpec is the specification for a PodVolumeBackup. +type PodVolumeBackupSpec struct { + // Node is the name of the node that the Pod is running on. + Node string `json:"node"` + + // Pod is a reference to the pod containing the volume to be backed up. 
+ Pod corev1api.ObjectReference `json:"pod"` + + // Volume is the name of the volume within the Pod to be backed + // up. + Volume string `json:"volume"` + + // BackupStorageLocation is the name of the backup storage location + // where the restic repository is stored. + BackupStorageLocation string `json:"backupStorageLocation"` + + // RepoIdentifier is the restic repository identifier. + RepoIdentifier string `json:"repoIdentifier"` + + // Tags are a map of key-value pairs that should be applied to the + // volume backup as tags. + Tags map[string]string `json:"tags"` +} + +// PodVolumeBackupPhase represents the lifecycle phase of a PodVolumeBackup. +type PodVolumeBackupPhase string + +const ( + PodVolumeBackupPhaseNew PodVolumeBackupPhase = "New" + PodVolumeBackupPhaseInProgress PodVolumeBackupPhase = "InProgress" + PodVolumeBackupPhaseCompleted PodVolumeBackupPhase = "Completed" + PodVolumeBackupPhaseFailed PodVolumeBackupPhase = "Failed" +) + +// PodVolumeBackupStatus is the current status of a PodVolumeBackup. +type PodVolumeBackupStatus struct { + // Phase is the current state of the PodVolumeBackup. + Phase PodVolumeBackupPhase `json:"phase"` + + // Path is the full path within the controller pod being backed up. + Path string `json:"path"` + + // SnapshotID is the identifier for the snapshot of the pod volume. + SnapshotID string `json:"snapshotID"` + + // Message is a message about the pod volume backup's status. + Message string `json:"message"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type PodVolumeBackup struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + Spec PodVolumeBackupSpec `json:"spec"` + Status PodVolumeBackupStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodVolumeBackupList is a list of PodVolumeBackups. +type PodVolumeBackupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []PodVolumeBackup `json:"items"` +} diff --git a/pkg/apis/velero/v1/pod_volume_restore.go b/pkg/apis/velero/v1/pod_volume_restore.go new file mode 100644 index 0000000000..4fbb8f8c2f --- /dev/null +++ b/pkg/apis/velero/v1/pod_volume_restore.go @@ -0,0 +1,80 @@ +/* +Copyright 2018 the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + corev1api "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// PodVolumeRestoreSpec is the specification for a PodVolumeRestore. +type PodVolumeRestoreSpec struct { + // Pod is a reference to the pod containing the volume to be restored. + Pod corev1api.ObjectReference `json:"pod"` + + // Volume is the name of the volume within the Pod to be restored. + Volume string `json:"volume"` + + // BackupStorageLocation is the name of the backup storage location + // where the restic repository is stored. + BackupStorageLocation string `json:"backupStorageLocation"` + + // RepoIdentifier is the restic repository identifier. 
+ RepoIdentifier string `json:"repoIdentifier"` + + // SnapshotID is the ID of the volume snapshot to be restored. + SnapshotID string `json:"snapshotID"` +} + +// PodVolumeRestorePhase represents the lifecycle phase of a PodVolumeRestore. +type PodVolumeRestorePhase string + +const ( + PodVolumeRestorePhaseNew PodVolumeRestorePhase = "New" + PodVolumeRestorePhaseInProgress PodVolumeRestorePhase = "InProgress" + PodVolumeRestorePhaseCompleted PodVolumeRestorePhase = "Completed" + PodVolumeRestorePhaseFailed PodVolumeRestorePhase = "Failed" +) + +// PodVolumeRestoreStatus is the current status of a PodVolumeRestore. +type PodVolumeRestoreStatus struct { + // Phase is the current state of the PodVolumeRestore. + Phase PodVolumeRestorePhase `json:"phase"` + + // Message is a message about the pod volume restore's status. + Message string `json:"message"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type PodVolumeRestore struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + Spec PodVolumeRestoreSpec `json:"spec"` + Status PodVolumeRestoreStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodVolumeRestoreList is a list of PodVolumeRestores. +type PodVolumeRestoreList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []PodVolumeRestore `json:"items"` +} diff --git a/pkg/apis/velero/v1/register.go b/pkg/apis/velero/v1/register.go new file mode 100644 index 0000000000..bd1869eab7 --- /dev/null +++ b/pkg/apis/velero/v1/register.go @@ -0,0 +1,83 @@ +/* +Copyright 2017 the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + // SchemeBuilder collects the scheme builder functions for the Velero API + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + + // AddToScheme applies the SchemeBuilder functions to a specified scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// GroupName is the group name for the Velero API +const GroupName = "velero.io" + +// SchemeGroupVersion is the GroupVersion for the Velero API +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource gets a Velero GroupResource for a specified resource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +type typeInfo struct { + PluralName string + ItemType runtime.Object + ItemListType runtime.Object +} + +func newTypeInfo(pluralName string, itemType, itemListType runtime.Object) typeInfo { + return typeInfo{ + PluralName: pluralName, + ItemType: itemType, + ItemListType: itemListType, + } +} + +// CustomResources returns a map of all custom resources within the Velero +// API group, keyed on Kind. 
+func CustomResources() map[string]typeInfo { + return map[string]typeInfo{ + "Backup": newTypeInfo("backups", &Backup{}, &BackupList{}), + "Restore": newTypeInfo("restores", &Restore{}, &RestoreList{}), + "Schedule": newTypeInfo("schedules", &Schedule{}, &ScheduleList{}), + "DownloadRequest": newTypeInfo("downloadrequests", &DownloadRequest{}, &DownloadRequestList{}), + "DeleteBackupRequest": newTypeInfo("deletebackuprequests", &DeleteBackupRequest{}, &DeleteBackupRequestList{}), + "PodVolumeBackup": newTypeInfo("podvolumebackups", &PodVolumeBackup{}, &PodVolumeBackupList{}), + "PodVolumeRestore": newTypeInfo("podvolumerestores", &PodVolumeRestore{}, &PodVolumeRestoreList{}), + "ResticRepository": newTypeInfo("resticrepositories", &ResticRepository{}, &ResticRepositoryList{}), + "BackupStorageLocation": newTypeInfo("backupstoragelocations", &BackupStorageLocation{}, &BackupStorageLocationList{}), + "VolumeSnapshotLocation": newTypeInfo("volumesnapshotlocations", &VolumeSnapshotLocation{}, &VolumeSnapshotLocationList{}), + "ServerStatusRequest": newTypeInfo("serverstatusrequests", &ServerStatusRequest{}, &ServerStatusRequestList{}), + } +} + +func addKnownTypes(scheme *runtime.Scheme) error { + for _, typeInfo := range CustomResources() { + scheme.AddKnownTypes(SchemeGroupVersion, typeInfo.ItemType, typeInfo.ItemListType) + } + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/pkg/apis/velero/v1/restic_repository.go b/pkg/apis/velero/v1/restic_repository.go new file mode 100644 index 0000000000..38895d59b0 --- /dev/null +++ b/pkg/apis/velero/v1/restic_repository.go @@ -0,0 +1,80 @@ +/* +Copyright 2018 the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ResticRepositorySpec is the specification for a ResticRepository. +type ResticRepositorySpec struct { + // VolumeNamespace is the namespace this restic repository contains + // pod volume backups for. + VolumeNamespace string `json:"volumeNamespace"` + + // BackupStorageLocation is the name of the BackupStorageLocation + // that should contain this repository. + BackupStorageLocation string `json:"backupStorageLocation"` + + // ResticIdentifier is the full restic-compatible string for identifying + // this repository. + ResticIdentifier string `json:"resticIdentifier"` + + // MaintenanceFrequency is how often maintenance should be run. + MaintenanceFrequency metav1.Duration `json:"maintenanceFrequency"` +} + +// ResticRepositoryPhase represents the lifecycle phase of a ResticRepository. +type ResticRepositoryPhase string + +const ( + ResticRepositoryPhaseNew ResticRepositoryPhase = "New" + ResticRepositoryPhaseReady ResticRepositoryPhase = "Ready" + ResticRepositoryPhaseNotReady ResticRepositoryPhase = "NotReady" +) + +// ResticRepositoryStatus is the current status of a ResticRepository. +type ResticRepositoryStatus struct { + // Phase is the current state of the ResticRepository. 
+ Phase ResticRepositoryPhase `json:"phase"` + + // Message is a message about the current status of the ResticRepository. + Message string `json:"message"` + + // LastMaintenanceTime is the last time maintenance was run. + LastMaintenanceTime metav1.Time `json:"lastMaintenanceTime"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type ResticRepository struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + Spec ResticRepositorySpec `json:"spec"` + Status ResticRepositoryStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ResticRepositoryList is a list of ResticRepositories. +type ResticRepositoryList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []ResticRepository `json:"items"` +} diff --git a/pkg/apis/velero/v1/restore.go b/pkg/apis/velero/v1/restore.go new file mode 100644 index 0000000000..772e5ddd45 --- /dev/null +++ b/pkg/apis/velero/v1/restore.go @@ -0,0 +1,159 @@ +/* +Copyright 2017 the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// RestoreSpec defines the specification for an Ark restore. +type RestoreSpec struct { + // BackupName is the unique name of the Ark backup to restore + // from. + BackupName string `json:"backupName"` + + // ScheduleName is the unique name of the Ark schedule to restore + // from. If specified, and BackupName is empty, Ark will restore + // from the most recent successful backup created from this schedule. + ScheduleName string `json:"scheduleName,omitempty"` + + // IncludedNamespaces is a slice of namespace names to include objects + // from. If empty, all namespaces are included. + IncludedNamespaces []string `json:"includedNamespaces"` + + // ExcludedNamespaces contains a list of namespaces that are not + // included in the restore. + ExcludedNamespaces []string `json:"excludedNamespaces"` + + // IncludedResources is a slice of resource names to include + // in the restore. If empty, all resources in the backup are included. + IncludedResources []string `json:"includedResources"` + + // ExcludedResources is a slice of resource names that are not + // included in the restore. + ExcludedResources []string `json:"excludedResources"` + + // NamespaceMapping is a map of source namespace names + // to target namespace names to restore into. Any source + // namespaces not included in the map will be restored into + // namespaces of the same name. + NamespaceMapping map[string]string `json:"namespaceMapping"` + + // LabelSelector is a metav1.LabelSelector to filter with + // when restoring individual objects from the backup. If empty + // or nil, all objects are included. Optional. + LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty"` + + // RestorePVs specifies whether to restore all included + // PVs from snapshot (via the cloudprovider). 
+ RestorePVs *bool `json:"restorePVs,omitempty"` + + // IncludeClusterResources specifies whether cluster-scoped resources + // should be included for consideration in the restore. If null, defaults + // to true. + IncludeClusterResources *bool `json:"includeClusterResources,omitempty"` +} + +// RestorePhase is a string representation of the lifecycle phase +// of an Ark restore +type RestorePhase string + +const ( + // RestorePhaseNew means the restore has been created but not + // yet processed by the RestoreController + RestorePhaseNew RestorePhase = "New" + + // RestorePhaseFailedValidation means the restore has failed + // the controller's validations and therefore will not run. + RestorePhaseFailedValidation RestorePhase = "FailedValidation" + + // RestorePhaseInProgress means the restore is currently executing. + RestorePhaseInProgress RestorePhase = "InProgress" + + // RestorePhaseCompleted means the restore has finished executing. + // Any relevant warnings or errors will be captured in the Status. + RestorePhaseCompleted RestorePhase = "Completed" + + // RestorePhaseFailed means the restore was unable to execute. + // The failing error is recorded in status.FailureReason. + RestorePhaseFailed RestorePhase = "Failed" +) + +// RestoreStatus captures the current status of an Ark restore +type RestoreStatus struct { + // Phase is the current state of the Restore + Phase RestorePhase `json:"phase"` + + // ValidationErrors is a slice of all validation errors (if + // applicable) + ValidationErrors []string `json:"validationErrors"` + + // Warnings is a count of all warning messages that were generated during + // execution of the restore. The actual warnings are stored in object storage. + Warnings int `json:"warnings"` + + // Errors is a count of all error messages that were generated during + // execution of the restore. The actual errors are stored in object storage. + Errors int `json:"errors"` + + // FailureReason is an error that caused the entire restore to fail. + FailureReason string `json:"failureReason"` +} + +// RestoreResult is a collection of messages that were generated +// during execution of a restore. This will typically store either +// warning or error messages. +type RestoreResult struct { + // Ark is a slice of messages related to the operation of Ark + // itself (for example, messages related to connecting to the + // cloud, reading a backup file, etc.) + // TODO(1.0) Remove this field. Currently maintained for backwards compatibility. + Ark []string `json:"ark,omitempty"` + + // Velero is a slice of messages related to the operation of Velero + // itself (for example, messages related to connecting to the + // cloud, reading a backup file, etc.) + Velero []string `json:"velero,omitempty"` + + // Cluster is a slice of messages related to restoring cluster- + // scoped resources. + Cluster []string `json:"cluster,omitempty"` + + // Namespaces is a map of namespace name to slice of messages + // related to restoring namespace-scoped resources. + Namespaces map[string][]string `json:"namespaces,omitempty"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Restore is an Ark resource that represents the application of +// resources from an Ark backup to a target Kubernetes cluster. 
+type Restore struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + Spec RestoreSpec `json:"spec"` + Status RestoreStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RestoreList is a list of Restores. +type RestoreList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []Restore `json:"items"` +} diff --git a/pkg/apis/velero/v1/schedule.go b/pkg/apis/velero/v1/schedule.go new file mode 100644 index 0000000000..bfc69ad436 --- /dev/null +++ b/pkg/apis/velero/v1/schedule.go @@ -0,0 +1,84 @@ +/* +Copyright 2017 the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// ScheduleSpec defines the specification for a Velero schedule +type ScheduleSpec struct { + // Template is the definition of the Backup to be run + // on the provided schedule + Template BackupSpec `json:"template"` + + // Schedule is a Cron expression defining when to run + // the Backup. + Schedule string `json:"schedule"` +} + +// SchedulePhase is a string representation of the lifecycle phase +// of a Velero schedule +type SchedulePhase string + +const ( + // SchedulePhaseNew means the schedule has been created but not + // yet processed by the ScheduleController + SchedulePhaseNew SchedulePhase = "New" + + // SchedulePhaseEnabled means the schedule has been validated and + // will now be triggering backups according to the schedule spec. + SchedulePhaseEnabled SchedulePhase = "Enabled" + + // SchedulePhaseFailedValidation means the schedule has failed + // the controller's validations and therefore will not trigger backups. + SchedulePhaseFailedValidation SchedulePhase = "FailedValidation" +) + +// ScheduleStatus captures the current state of a Velero schedule +type ScheduleStatus struct { + // Phase is the current phase of the Schedule + Phase SchedulePhase `json:"phase"` + + // LastBackup is the last time a Backup was run for this + // Schedule schedule + LastBackup metav1.Time `json:"lastBackup"` + + // ValidationErrors is a slice of all validation errors (if + // applicable) + ValidationErrors []string `json:"validationErrors"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Schedule is a Velero resource that represents a pre-scheduled or +// periodic Backup that should be run. +type Schedule struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + Spec ScheduleSpec `json:"spec"` + Status ScheduleStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ScheduleList is a list of Schedules. 
+type ScheduleList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []Schedule `json:"items"` +} diff --git a/pkg/apis/velero/v1/server_status_request.go b/pkg/apis/velero/v1/server_status_request.go new file mode 100644 index 0000000000..d28a4883f0 --- /dev/null +++ b/pkg/apis/velero/v1/server_status_request.go @@ -0,0 +1,68 @@ +/* +Copyright 2018 the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServerStatusRequest is a request to access current status information about +// the Velero server. +type ServerStatusRequest struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + Spec ServerStatusRequestSpec `json:"spec"` + Status ServerStatusRequestStatus `json:"status,omitempty"` +} + +// ServerStatusRequestSpec is the specification for a ServerStatusRequest. +type ServerStatusRequestSpec struct { +} + +// ServerStatusRequestPhase represents the lifecycle phase of a ServerStatusRequest. +type ServerStatusRequestPhase string + +const ( + // ServerStatusRequestPhaseNew means the ServerStatusRequest has not been processed yet. + ServerStatusRequestPhaseNew ServerStatusRequestPhase = "New" + // ServerStatusRequestPhaseProcessed means the ServerStatusRequest has been processed. + ServerStatusRequestPhaseProcessed ServerStatusRequestPhase = "Processed" +) + +// ServerStatusRequestStatus is the current status of a ServerStatusRequest. +type ServerStatusRequestStatus struct { + // Phase is the current lifecycle phase of the ServerStatusRequest. + Phase ServerStatusRequestPhase `json:"phase"` + + // ProcessedTimestamp is when the ServerStatusRequest was processed + // by the ServerStatusRequestController. + ProcessedTimestamp metav1.Time `json:"processedTimestamp"` + + // ServerVersion is the Velero server version. + ServerVersion string `json:"serverVersion"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServerStatusRequestList is a list of ServerStatusRequests. +type ServerStatusRequestList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []ServerStatusRequest `json:"items"` +} diff --git a/pkg/apis/velero/v1/volume_snapshot_location.go b/pkg/apis/velero/v1/volume_snapshot_location.go new file mode 100644 index 0000000000..81ff10cbb3 --- /dev/null +++ b/pkg/apis/velero/v1/volume_snapshot_location.go @@ -0,0 +1,65 @@ +/* +Copyright 2018 the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeSnapshotLocation is a location where Velero stores volume snapshots. +type VolumeSnapshotLocation struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + Spec VolumeSnapshotLocationSpec `json:"spec"` + Status VolumeSnapshotLocationStatus `json:"status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeSnapshotLocationList is a list of VolumeSnapshotLocations. +type VolumeSnapshotLocationList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []VolumeSnapshotLocation `json:"items"` +} + +// VolumeSnapshotLocationSpec defines the specification for a Velero VolumeSnapshotLocation. +type VolumeSnapshotLocationSpec struct { + // Provider is the provider of the volume storage. + Provider string `json:"provider"` + + // Config is for provider-specific configuration fields. + Config map[string]string `json:"config"` +} + +// VolumeSnapshotLocationPhase is the lifecyle phase of a Velero VolumeSnapshotLocation. +type VolumeSnapshotLocationPhase string + +const ( + // VolumeSnapshotLocationPhaseAvailable means the location is available to read and write from. + VolumeSnapshotLocationPhaseAvailable VolumeSnapshotLocationPhase = "Available" + + // VolumeSnapshotLocationPhaseUnavailable means the location is unavailable to read and write from. + VolumeSnapshotLocationPhaseUnavailable VolumeSnapshotLocationPhase = "Unavailable" +) + +// VolumeSnapshotLocationStatus describes the current status of a Velero VolumeSnapshotLocation. +type VolumeSnapshotLocationStatus struct { + Phase VolumeSnapshotLocationPhase `json:"phase,omitempty"` +} diff --git a/pkg/apis/velero/v1/zz_generated.deepcopy.go b/pkg/apis/velero/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..1aa4aa8ddb --- /dev/null +++ b/pkg/apis/velero/v1/zz_generated.deepcopy.go @@ -0,0 +1,1485 @@ +// +build !ignore_autogenerated + +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Backup) DeepCopyInto(out *Backup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Backup. 
+func (in *Backup) DeepCopy() *Backup { + if in == nil { + return nil + } + out := new(Backup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Backup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupHooks) DeepCopyInto(out *BackupHooks) { + *out = *in + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]BackupResourceHookSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupHooks. +func (in *BackupHooks) DeepCopy() *BackupHooks { + if in == nil { + return nil + } + out := new(BackupHooks) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupList) DeepCopyInto(out *BackupList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Backup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupList. +func (in *BackupList) DeepCopy() *BackupList { + if in == nil { + return nil + } + out := new(BackupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupResourceHook) DeepCopyInto(out *BackupResourceHook) { + *out = *in + if in.Exec != nil { + in, out := &in.Exec, &out.Exec + if *in == nil { + *out = nil + } else { + *out = new(ExecHook) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupResourceHook. +func (in *BackupResourceHook) DeepCopy() *BackupResourceHook { + if in == nil { + return nil + } + out := new(BackupResourceHook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupResourceHookSpec) DeepCopyInto(out *BackupResourceHookSpec) { + *out = *in + if in.IncludedNamespaces != nil { + in, out := &in.IncludedNamespaces, &out.IncludedNamespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludedNamespaces != nil { + in, out := &in.ExcludedNamespaces, &out.ExcludedNamespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IncludedResources != nil { + in, out := &in.IncludedResources, &out.IncludedResources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludedResources != nil { + in, out := &in.ExcludedResources, &out.ExcludedResources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + if *in == nil { + *out = nil + } else { + *out = new(meta_v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + if in.Hooks != nil { + in, out := &in.Hooks, &out.Hooks + *out = make([]BackupResourceHook, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PreHooks != nil { + in, out := &in.PreHooks, &out.PreHooks + *out = make([]BackupResourceHook, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PostHooks != nil { + in, out := &in.PostHooks, &out.PostHooks + *out = make([]BackupResourceHook, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupResourceHookSpec. +func (in *BackupResourceHookSpec) DeepCopy() *BackupResourceHookSpec { + if in == nil { + return nil + } + out := new(BackupResourceHookSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupSpec) DeepCopyInto(out *BackupSpec) { + *out = *in + if in.IncludedNamespaces != nil { + in, out := &in.IncludedNamespaces, &out.IncludedNamespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludedNamespaces != nil { + in, out := &in.ExcludedNamespaces, &out.ExcludedNamespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IncludedResources != nil { + in, out := &in.IncludedResources, &out.IncludedResources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludedResources != nil { + in, out := &in.ExcludedResources, &out.ExcludedResources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + if *in == nil { + *out = nil + } else { + *out = new(meta_v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + if in.SnapshotVolumes != nil { + in, out := &in.SnapshotVolumes, &out.SnapshotVolumes + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + out.TTL = in.TTL + if in.IncludeClusterResources != nil { + in, out := &in.IncludeClusterResources, &out.IncludeClusterResources + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + in.Hooks.DeepCopyInto(&out.Hooks) + if in.VolumeSnapshotLocations != nil { + in, out := &in.VolumeSnapshotLocations, &out.VolumeSnapshotLocations + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupSpec. 
+func (in *BackupSpec) DeepCopy() *BackupSpec { + if in == nil { + return nil + } + out := new(BackupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupStatus) DeepCopyInto(out *BackupStatus) { + *out = *in + in.Expiration.DeepCopyInto(&out.Expiration) + if in.VolumeBackups != nil { + in, out := &in.VolumeBackups, &out.VolumeBackups + *out = make(map[string]*VolumeBackupInfo, len(*in)) + for key, val := range *in { + if val == nil { + (*out)[key] = nil + } else { + (*out)[key] = new(VolumeBackupInfo) + val.DeepCopyInto((*out)[key]) + } + } + } + if in.ValidationErrors != nil { + in, out := &in.ValidationErrors, &out.ValidationErrors + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.StartTimestamp.DeepCopyInto(&out.StartTimestamp) + in.CompletionTimestamp.DeepCopyInto(&out.CompletionTimestamp) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStatus. +func (in *BackupStatus) DeepCopy() *BackupStatus { + if in == nil { + return nil + } + out := new(BackupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupStorageLocation) DeepCopyInto(out *BackupStorageLocation) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStorageLocation. +func (in *BackupStorageLocation) DeepCopy() *BackupStorageLocation { + if in == nil { + return nil + } + out := new(BackupStorageLocation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupStorageLocation) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupStorageLocationList) DeepCopyInto(out *BackupStorageLocationList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BackupStorageLocation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStorageLocationList. +func (in *BackupStorageLocationList) DeepCopy() *BackupStorageLocationList { + if in == nil { + return nil + } + out := new(BackupStorageLocationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BackupStorageLocationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BackupStorageLocationSpec) DeepCopyInto(out *BackupStorageLocationSpec) { + *out = *in + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.StorageType.DeepCopyInto(&out.StorageType) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStorageLocationSpec. +func (in *BackupStorageLocationSpec) DeepCopy() *BackupStorageLocationSpec { + if in == nil { + return nil + } + out := new(BackupStorageLocationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupStorageLocationStatus) DeepCopyInto(out *BackupStorageLocationStatus) { + *out = *in + in.LastSyncedTime.DeepCopyInto(&out.LastSyncedTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStorageLocationStatus. +func (in *BackupStorageLocationStatus) DeepCopy() *BackupStorageLocationStatus { + if in == nil { + return nil + } + out := new(BackupStorageLocationStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeleteBackupRequest) DeepCopyInto(out *DeleteBackupRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteBackupRequest. +func (in *DeleteBackupRequest) DeepCopy() *DeleteBackupRequest { + if in == nil { + return nil + } + out := new(DeleteBackupRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeleteBackupRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeleteBackupRequestList) DeepCopyInto(out *DeleteBackupRequestList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DeleteBackupRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteBackupRequestList. +func (in *DeleteBackupRequestList) DeepCopy() *DeleteBackupRequestList { + if in == nil { + return nil + } + out := new(DeleteBackupRequestList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeleteBackupRequestList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeleteBackupRequestSpec) DeepCopyInto(out *DeleteBackupRequestSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteBackupRequestSpec. 
+func (in *DeleteBackupRequestSpec) DeepCopy() *DeleteBackupRequestSpec { + if in == nil { + return nil + } + out := new(DeleteBackupRequestSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeleteBackupRequestStatus) DeepCopyInto(out *DeleteBackupRequestStatus) { + *out = *in + if in.Errors != nil { + in, out := &in.Errors, &out.Errors + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteBackupRequestStatus. +func (in *DeleteBackupRequestStatus) DeepCopy() *DeleteBackupRequestStatus { + if in == nil { + return nil + } + out := new(DeleteBackupRequestStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DownloadRequest) DeepCopyInto(out *DownloadRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownloadRequest. +func (in *DownloadRequest) DeepCopy() *DownloadRequest { + if in == nil { + return nil + } + out := new(DownloadRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DownloadRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DownloadRequestList) DeepCopyInto(out *DownloadRequestList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DownloadRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownloadRequestList. +func (in *DownloadRequestList) DeepCopy() *DownloadRequestList { + if in == nil { + return nil + } + out := new(DownloadRequestList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DownloadRequestList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DownloadRequestSpec) DeepCopyInto(out *DownloadRequestSpec) { + *out = *in + out.Target = in.Target + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownloadRequestSpec. +func (in *DownloadRequestSpec) DeepCopy() *DownloadRequestSpec { + if in == nil { + return nil + } + out := new(DownloadRequestSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DownloadRequestStatus) DeepCopyInto(out *DownloadRequestStatus) { + *out = *in + in.Expiration.DeepCopyInto(&out.Expiration) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownloadRequestStatus. +func (in *DownloadRequestStatus) DeepCopy() *DownloadRequestStatus { + if in == nil { + return nil + } + out := new(DownloadRequestStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DownloadTarget) DeepCopyInto(out *DownloadTarget) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownloadTarget. +func (in *DownloadTarget) DeepCopy() *DownloadTarget { + if in == nil { + return nil + } + out := new(DownloadTarget) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecHook) DeepCopyInto(out *ExecHook) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.Timeout = in.Timeout + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecHook. +func (in *ExecHook) DeepCopy() *ExecHook { + if in == nil { + return nil + } + out := new(ExecHook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectStorageLocation) DeepCopyInto(out *ObjectStorageLocation) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageLocation. +func (in *ObjectStorageLocation) DeepCopy() *ObjectStorageLocation { + if in == nil { + return nil + } + out := new(ObjectStorageLocation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodVolumeBackup) DeepCopyInto(out *PodVolumeBackup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeBackup. +func (in *PodVolumeBackup) DeepCopy() *PodVolumeBackup { + if in == nil { + return nil + } + out := new(PodVolumeBackup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodVolumeBackup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodVolumeBackupList) DeepCopyInto(out *PodVolumeBackupList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodVolumeBackup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeBackupList. 
+func (in *PodVolumeBackupList) DeepCopy() *PodVolumeBackupList { + if in == nil { + return nil + } + out := new(PodVolumeBackupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodVolumeBackupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodVolumeBackupSpec) DeepCopyInto(out *PodVolumeBackupSpec) { + *out = *in + out.Pod = in.Pod + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeBackupSpec. +func (in *PodVolumeBackupSpec) DeepCopy() *PodVolumeBackupSpec { + if in == nil { + return nil + } + out := new(PodVolumeBackupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodVolumeBackupStatus) DeepCopyInto(out *PodVolumeBackupStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeBackupStatus. +func (in *PodVolumeBackupStatus) DeepCopy() *PodVolumeBackupStatus { + if in == nil { + return nil + } + out := new(PodVolumeBackupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodVolumeRestore) DeepCopyInto(out *PodVolumeRestore) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeRestore. +func (in *PodVolumeRestore) DeepCopy() *PodVolumeRestore { + if in == nil { + return nil + } + out := new(PodVolumeRestore) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodVolumeRestore) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodVolumeRestoreList) DeepCopyInto(out *PodVolumeRestoreList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodVolumeRestore, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeRestoreList. +func (in *PodVolumeRestoreList) DeepCopy() *PodVolumeRestoreList { + if in == nil { + return nil + } + out := new(PodVolumeRestoreList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodVolumeRestoreList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodVolumeRestoreSpec) DeepCopyInto(out *PodVolumeRestoreSpec) { + *out = *in + out.Pod = in.Pod + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeRestoreSpec. +func (in *PodVolumeRestoreSpec) DeepCopy() *PodVolumeRestoreSpec { + if in == nil { + return nil + } + out := new(PodVolumeRestoreSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodVolumeRestoreStatus) DeepCopyInto(out *PodVolumeRestoreStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodVolumeRestoreStatus. +func (in *PodVolumeRestoreStatus) DeepCopy() *PodVolumeRestoreStatus { + if in == nil { + return nil + } + out := new(PodVolumeRestoreStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResticRepository) DeepCopyInto(out *ResticRepository) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResticRepository. +func (in *ResticRepository) DeepCopy() *ResticRepository { + if in == nil { + return nil + } + out := new(ResticRepository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResticRepository) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResticRepositoryList) DeepCopyInto(out *ResticRepositoryList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResticRepository, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResticRepositoryList. +func (in *ResticRepositoryList) DeepCopy() *ResticRepositoryList { + if in == nil { + return nil + } + out := new(ResticRepositoryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResticRepositoryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResticRepositorySpec) DeepCopyInto(out *ResticRepositorySpec) { + *out = *in + out.MaintenanceFrequency = in.MaintenanceFrequency + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResticRepositorySpec. +func (in *ResticRepositorySpec) DeepCopy() *ResticRepositorySpec { + if in == nil { + return nil + } + out := new(ResticRepositorySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResticRepositoryStatus) DeepCopyInto(out *ResticRepositoryStatus) { + *out = *in + in.LastMaintenanceTime.DeepCopyInto(&out.LastMaintenanceTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResticRepositoryStatus. +func (in *ResticRepositoryStatus) DeepCopy() *ResticRepositoryStatus { + if in == nil { + return nil + } + out := new(ResticRepositoryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Restore) DeepCopyInto(out *Restore) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Restore. +func (in *Restore) DeepCopy() *Restore { + if in == nil { + return nil + } + out := new(Restore) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Restore) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestoreList) DeepCopyInto(out *RestoreList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Restore, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreList. +func (in *RestoreList) DeepCopy() *RestoreList { + if in == nil { + return nil + } + out := new(RestoreList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RestoreList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestoreResult) DeepCopyInto(out *RestoreResult) { + *out = *in + if in.Ark != nil { + in, out := &in.Ark, &out.Ark + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Velero != nil { + in, out := &in.Velero, &out.Velero + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Cluster != nil { + in, out := &in.Cluster, &out.Cluster + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + if val == nil { + (*out)[key] = nil + } else { + (*out)[key] = make([]string, len(val)) + copy((*out)[key], val) + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreResult. +func (in *RestoreResult) DeepCopy() *RestoreResult { + if in == nil { + return nil + } + out := new(RestoreResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RestoreSpec) DeepCopyInto(out *RestoreSpec) { + *out = *in + if in.IncludedNamespaces != nil { + in, out := &in.IncludedNamespaces, &out.IncludedNamespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludedNamespaces != nil { + in, out := &in.ExcludedNamespaces, &out.ExcludedNamespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IncludedResources != nil { + in, out := &in.IncludedResources, &out.IncludedResources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExcludedResources != nil { + in, out := &in.ExcludedResources, &out.ExcludedResources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NamespaceMapping != nil { + in, out := &in.NamespaceMapping, &out.NamespaceMapping + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + if *in == nil { + *out = nil + } else { + *out = new(meta_v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + if in.RestorePVs != nil { + in, out := &in.RestorePVs, &out.RestorePVs + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + if in.IncludeClusterResources != nil { + in, out := &in.IncludeClusterResources, &out.IncludeClusterResources + if *in == nil { + *out = nil + } else { + *out = new(bool) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreSpec. +func (in *RestoreSpec) DeepCopy() *RestoreSpec { + if in == nil { + return nil + } + out := new(RestoreSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RestoreStatus) DeepCopyInto(out *RestoreStatus) { + *out = *in + if in.ValidationErrors != nil { + in, out := &in.ValidationErrors, &out.ValidationErrors + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RestoreStatus. +func (in *RestoreStatus) DeepCopy() *RestoreStatus { + if in == nil { + return nil + } + out := new(RestoreStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Schedule) DeepCopyInto(out *Schedule) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Schedule. +func (in *Schedule) DeepCopy() *Schedule { + if in == nil { + return nil + } + out := new(Schedule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Schedule) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScheduleList) DeepCopyInto(out *ScheduleList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Schedule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleList. +func (in *ScheduleList) DeepCopy() *ScheduleList { + if in == nil { + return nil + } + out := new(ScheduleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ScheduleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduleSpec) DeepCopyInto(out *ScheduleSpec) { + *out = *in + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleSpec. +func (in *ScheduleSpec) DeepCopy() *ScheduleSpec { + if in == nil { + return nil + } + out := new(ScheduleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScheduleStatus) DeepCopyInto(out *ScheduleStatus) { + *out = *in + in.LastBackup.DeepCopyInto(&out.LastBackup) + if in.ValidationErrors != nil { + in, out := &in.ValidationErrors, &out.ValidationErrors + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScheduleStatus. +func (in *ScheduleStatus) DeepCopy() *ScheduleStatus { + if in == nil { + return nil + } + out := new(ScheduleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerStatusRequest) DeepCopyInto(out *ServerStatusRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerStatusRequest. +func (in *ServerStatusRequest) DeepCopy() *ServerStatusRequest { + if in == nil { + return nil + } + out := new(ServerStatusRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServerStatusRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerStatusRequestList) DeepCopyInto(out *ServerStatusRequestList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServerStatusRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerStatusRequestList. 
+func (in *ServerStatusRequestList) DeepCopy() *ServerStatusRequestList { + if in == nil { + return nil + } + out := new(ServerStatusRequestList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServerStatusRequestList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerStatusRequestSpec) DeepCopyInto(out *ServerStatusRequestSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerStatusRequestSpec. +func (in *ServerStatusRequestSpec) DeepCopy() *ServerStatusRequestSpec { + if in == nil { + return nil + } + out := new(ServerStatusRequestSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerStatusRequestStatus) DeepCopyInto(out *ServerStatusRequestStatus) { + *out = *in + in.ProcessedTimestamp.DeepCopyInto(&out.ProcessedTimestamp) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerStatusRequestStatus. +func (in *ServerStatusRequestStatus) DeepCopy() *ServerStatusRequestStatus { + if in == nil { + return nil + } + out := new(ServerStatusRequestStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageType) DeepCopyInto(out *StorageType) { + *out = *in + if in.ObjectStorage != nil { + in, out := &in.ObjectStorage, &out.ObjectStorage + if *in == nil { + *out = nil + } else { + *out = new(ObjectStorageLocation) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageType. +func (in *StorageType) DeepCopy() *StorageType { + if in == nil { + return nil + } + out := new(StorageType) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeBackupInfo) DeepCopyInto(out *VolumeBackupInfo) { + *out = *in + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeBackupInfo. +func (in *VolumeBackupInfo) DeepCopy() *VolumeBackupInfo { + if in == nil { + return nil + } + out := new(VolumeBackupInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeSnapshotLocation) DeepCopyInto(out *VolumeSnapshotLocation) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotLocation. 
+func (in *VolumeSnapshotLocation) DeepCopy() *VolumeSnapshotLocation { + if in == nil { + return nil + } + out := new(VolumeSnapshotLocation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeSnapshotLocation) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeSnapshotLocationList) DeepCopyInto(out *VolumeSnapshotLocationList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VolumeSnapshotLocation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotLocationList. +func (in *VolumeSnapshotLocationList) DeepCopy() *VolumeSnapshotLocationList { + if in == nil { + return nil + } + out := new(VolumeSnapshotLocationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeSnapshotLocationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeSnapshotLocationSpec) DeepCopyInto(out *VolumeSnapshotLocationSpec) { + *out = *in + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotLocationSpec. +func (in *VolumeSnapshotLocationSpec) DeepCopy() *VolumeSnapshotLocationSpec { + if in == nil { + return nil + } + out := new(VolumeSnapshotLocationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeSnapshotLocationStatus) DeepCopyInto(out *VolumeSnapshotLocationStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSnapshotLocationStatus. 
+func (in *VolumeSnapshotLocationStatus) DeepCopy() *VolumeSnapshotLocationStatus { + if in == nil { + return nil + } + out := new(VolumeSnapshotLocationStatus) + in.DeepCopyInto(out) + return out +} diff --git a/pkg/backup/backup.go b/pkg/backup/backup.go index 50b27b0d3b..3fa01e3132 100644 --- a/pkg/backup/backup.go +++ b/pkg/backup/backup.go @@ -32,17 +32,17 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" kuberrs "k8s.io/apimachinery/pkg/util/errors" - api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/cloudprovider" - "github.com/heptio/ark/pkg/discovery" - "github.com/heptio/ark/pkg/podexec" - "github.com/heptio/ark/pkg/restic" - "github.com/heptio/ark/pkg/util/collections" - kubeutil "github.com/heptio/ark/pkg/util/kube" + api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/cloudprovider" + "github.com/heptio/velero/pkg/discovery" + "github.com/heptio/velero/pkg/podexec" + "github.com/heptio/velero/pkg/restic" + "github.com/heptio/velero/pkg/util/collections" + kubeutil "github.com/heptio/velero/pkg/util/kube" ) -// BackupVersion is the current backup version for Ark. +// BackupVersion is the current backup version for Velero. const BackupVersion = 1 // Backupper performs backups. diff --git a/pkg/backup/backup_pv_action.go b/pkg/backup/backup_pv_action.go index 44d9bdb5c5..46a0706f0a 100644 --- a/pkg/backup/backup_pv_action.go +++ b/pkg/backup/backup_pv_action.go @@ -22,8 +22,8 @@ import ( corev1api "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" - "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/kuberesource" + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/kuberesource" ) // backupPVAction inspects a PersistentVolumeClaim for the PersistentVolume diff --git a/pkg/backup/backup_pv_action_test.go b/pkg/backup/backup_pv_action_test.go index 5137cfa9d4..73114c8b36 100644 --- a/pkg/backup/backup_pv_action_test.go +++ b/pkg/backup/backup_pv_action_test.go @@ -24,9 +24,9 @@ import ( corev1api "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/kuberesource" - arktest "github.com/heptio/ark/pkg/util/test" + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/kuberesource" + velerotest "github.com/heptio/velero/pkg/util/test" ) func TestBackupPVAction(t *testing.T) { @@ -39,7 +39,7 @@ func TestBackupPVAction(t *testing.T) { backup := &v1.Backup{} - a := NewBackupPVAction(arktest.NewLogger()) + a := NewBackupPVAction(velerotest.NewLogger()) // no spec.volumeName should result in no error // and no additional items diff --git a/pkg/backup/backup_test.go b/pkg/backup/backup_test.go index 29f27fe3e3..c79e4f9f0d 100644 --- a/pkg/backup/backup_test.go +++ b/pkg/backup/backup_test.go @@ -35,15 +35,15 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/discovery" - "github.com/heptio/ark/pkg/podexec" - "github.com/heptio/ark/pkg/restic" - "github.com/heptio/ark/pkg/util/collections" - kubeutil "github.com/heptio/ark/pkg/util/kube" - "github.com/heptio/ark/pkg/util/logging" - arktest "github.com/heptio/ark/pkg/util/test" + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/client" + 
"github.com/heptio/velero/pkg/discovery" + "github.com/heptio/velero/pkg/podexec" + "github.com/heptio/velero/pkg/restic" + "github.com/heptio/velero/pkg/util/collections" + kubeutil "github.com/heptio/velero/pkg/util/kube" + "github.com/heptio/velero/pkg/util/logging" + velerotest "github.com/heptio/velero/pkg/util/test" ) var ( @@ -133,7 +133,7 @@ func TestResolveActions(t *testing.T) { {Resource: "bar"}: {Group: "anothergroup", Resource: "barnacles"}, {Resource: "baz"}: {Group: "anothergroup", Resource: "bazaars"}, } - discoveryHelper := arktest.NewFakeDiscoveryHelper(false, resources) + discoveryHelper := velerotest.NewFakeDiscoveryHelper(false, resources) actual, err := resolveActions(test.input, discoveryHelper) gotError := err != nil @@ -202,7 +202,7 @@ func TestGetResourceIncludesExcludes(t *testing.T) { {Resource: "bar"}: {Group: "anothergroup", Resource: "barnacles"}, {Resource: "baz"}: {Group: "anothergroup", Resource: "bazaars"}, } - discoveryHelper := arktest.NewFakeDiscoveryHelper(false, resources) + discoveryHelper := velerotest.NewFakeDiscoveryHelper(false, resources) actual := getResourceIncludesExcludes(discoveryHelper, test.includes, test.excludes) @@ -470,8 +470,8 @@ func TestBackup(t *testing.T) { Backup: test.backup, } - discoveryHelper := &arktest.FakeDiscoveryHelper{ - Mapper: &arktest.FakeMapper{ + discoveryHelper := &velerotest.FakeDiscoveryHelper{ + Mapper: &velerotest.FakeMapper{ Resources: map[schema.GroupVersionResource]schema.GroupVersionResource{ {Resource: "cm"}: {Group: "", Version: "v1", Resource: "configmaps"}, {Resource: "csr"}: {Group: "certificates.k8s.io", Version: "v1beta1", Resource: "certificatesigningrequests"}, @@ -485,9 +485,9 @@ func TestBackup(t *testing.T) { }, } - dynamicFactory := new(arktest.FakeDynamicFactory) + dynamicFactory := new(velerotest.FakeDynamicFactory) - podCommandExecutor := &arktest.MockPodCommandExecutor{} + podCommandExecutor := &velerotest.MockPodCommandExecutor{} defer podCommandExecutor.AssertExpectations(t) groupBackupperFactory := &mockGroupBackupperFactory{} @@ -540,7 +540,7 @@ func TestBackup(t *testing.T) { func TestBackupUsesNewCohabitatingResourcesForEachBackup(t *testing.T) { groupBackupperFactory := &mockGroupBackupperFactory{} kb := &kubernetesBackupper{ - discoveryHelper: new(arktest.FakeDiscoveryHelper), + discoveryHelper: new(velerotest.FakeDiscoveryHelper), groupBackupperFactory: groupBackupperFactory, } @@ -563,7 +563,7 @@ func TestBackupUsesNewCohabitatingResourcesForEachBackup(t *testing.T) { mock.Anything, ).Return(&mockGroupBackupper{}) - assert.NoError(t, kb.Backup(arktest.NewLogger(), &Request{Backup: &v1.Backup{}}, &bytes.Buffer{}, nil, nil)) + assert.NoError(t, kb.Backup(velerotest.NewLogger(), &Request{Backup: &v1.Backup{}}, &bytes.Buffer{}, nil, nil)) // mutate the cohabitatingResources map that was used in the first backup to simulate // the first backup process having done so. 
@@ -590,7 +590,7 @@ func TestBackupUsesNewCohabitatingResourcesForEachBackup(t *testing.T) { mock.Anything, ).Return(&mockGroupBackupper{}) - assert.NoError(t, kb.Backup(arktest.NewLogger(), &Request{Backup: new(v1.Backup)}, new(bytes.Buffer), nil, nil)) + assert.NoError(t, kb.Backup(velerotest.NewLogger(), &Request{Backup: new(v1.Backup)}, new(bytes.Buffer), nil, nil)) assert.NotEqual(t, firstCohabitatingResources, secondCohabitatingResources) for _, resource := range secondCohabitatingResources { assert.False(t, resource.seen) @@ -770,7 +770,7 @@ func TestGetResourceHook(t *testing.T) { {Resource: "bar"}: {Group: "anothergroup", Resource: "barnacles"}, {Resource: "baz"}: {Group: "anothergroup", Resource: "bazaars"}, } - discoveryHelper := arktest.NewFakeDiscoveryHelper(false, resources) + discoveryHelper := velerotest.NewFakeDiscoveryHelper(false, resources) actual, err := getResourceHook(test.hookSpec, discoveryHelper) require.NoError(t, err) diff --git a/pkg/backup/delete_helpers.go b/pkg/backup/delete_helpers.go index bf753ae08b..f6111be761 100644 --- a/pkg/backup/delete_helpers.go +++ b/pkg/backup/delete_helpers.go @@ -21,7 +21,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/heptio/ark/pkg/apis/ark/v1" + v1 "github.com/heptio/velero/pkg/apis/velero/v1" ) // NewDeleteBackupRequest creates a DeleteBackupRequest for the backup identified by name and uid. diff --git a/pkg/backup/group_backupper.go b/pkg/backup/group_backupper.go index 14920a99d5..f996125955 100644 --- a/pkg/backup/group_backupper.go +++ b/pkg/backup/group_backupper.go @@ -26,10 +26,10 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" kuberrs "k8s.io/apimachinery/pkg/util/errors" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/discovery" - "github.com/heptio/ark/pkg/podexec" - "github.com/heptio/ark/pkg/restic" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/discovery" + "github.com/heptio/velero/pkg/podexec" + "github.com/heptio/velero/pkg/restic" ) type groupBackupperFactory interface { diff --git a/pkg/backup/group_backupper_test.go b/pkg/backup/group_backupper_test.go index 602145d851..f5341f933a 100644 --- a/pkg/backup/group_backupper_test.go +++ b/pkg/backup/group_backupper_test.go @@ -25,11 +25,11 @@ import ( "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/discovery" - "github.com/heptio/ark/pkg/podexec" - "github.com/heptio/ark/pkg/restic" - arktest "github.com/heptio/ark/pkg/util/test" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/discovery" + "github.com/heptio/velero/pkg/podexec" + "github.com/heptio/velero/pkg/restic" + velerotest "github.com/heptio/velero/pkg/util/test" ) func TestBackupGroupBacksUpCorrectResourcesInCorrectOrder(t *testing.T) { @@ -54,7 +54,7 @@ func TestBackupGroupBacksUpCorrectResourcesInCorrectOrder(t *testing.T) { ).Return(resourceBackupper) gb := &defaultGroupBackupper{ - log: arktest.NewLogger(), + log: velerotest.NewLogger(), resourceBackupperFactory: resourceBackupperFactory, } diff --git a/pkg/backup/item_action.go b/pkg/backup/item_action.go index 9881ffa882..531cb1d42b 100644 --- a/pkg/backup/item_action.go +++ b/pkg/backup/item_action.go @@ -20,7 +20,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - api "github.com/heptio/ark/pkg/apis/ark/v1" + api "github.com/heptio/velero/pkg/apis/velero/v1" ) // ItemAction is an actor that performs an 
operation on an individual item being backed up. diff --git a/pkg/backup/item_backupper.go b/pkg/backup/item_backupper.go index 8a9489f1bd..42d808afd5 100644 --- a/pkg/backup/item_backupper.go +++ b/pkg/backup/item_backupper.go @@ -32,14 +32,14 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" kubeerrs "k8s.io/apimachinery/pkg/util/errors" - api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/cloudprovider" - "github.com/heptio/ark/pkg/discovery" - "github.com/heptio/ark/pkg/kuberesource" - "github.com/heptio/ark/pkg/podexec" - "github.com/heptio/ark/pkg/restic" - "github.com/heptio/ark/pkg/volume" + api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/cloudprovider" + "github.com/heptio/velero/pkg/discovery" + "github.com/heptio/velero/pkg/kuberesource" + "github.com/heptio/velero/pkg/podexec" + "github.com/heptio/velero/pkg/restic" + "github.com/heptio/velero/pkg/volume" ) type itemBackupperFactory interface { @@ -446,8 +446,8 @@ func (ib *defaultItemBackupper) takePVSnapshot(obj runtime.Unstructured, log log log = log.WithField("volumeID", volumeID) tags := map[string]string{ - "ark.heptio.com/backup": ib.backupRequest.Name, - "ark.heptio.com/pv": metadata.GetName(), + "velero.io/backup": ib.backupRequest.Name, + "velero.io/pv": metadata.GetName(), } log.Info("Getting volume information") diff --git a/pkg/backup/item_backupper_test.go b/pkg/backup/item_backupper_test.go index 5fb3c77a01..bd9ba5be7f 100644 --- a/pkg/backup/item_backupper_test.go +++ b/pkg/backup/item_backupper_test.go @@ -38,12 +38,11 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" - "github.com/heptio/ark/pkg/apis/ark/v1" - api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/cloudprovider" - resticmocks "github.com/heptio/ark/pkg/restic/mocks" - "github.com/heptio/ark/pkg/util/collections" - arktest "github.com/heptio/ark/pkg/util/test" + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/cloudprovider" + resticmocks "github.com/heptio/velero/pkg/restic/mocks" + "github.com/heptio/velero/pkg/util/collections" + velerotest "github.com/heptio/velero/pkg/util/test" ) func TestBackupItemSkips(t *testing.T) { @@ -130,7 +129,7 @@ func TestBackupItemSkips(t *testing.T) { unstructuredObj, unmarshalErr := runtime.DefaultUnstructuredConverter.ToUnstructured(pod) require.NoError(t, unmarshalErr) u := &unstructured.Unstructured{Object: unstructuredObj} - err := ib.backupItem(arktest.NewLogger(), u, test.groupResource) + err := ib.backupItem(velerotest.NewLogger(), u, test.groupResource) assert.NoError(t, err) }) } @@ -150,8 +149,8 @@ func TestBackupItemSkipsClusterScopedResourceWhenIncludeClusterResourcesFalse(t }, } - u := arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Foo","metadata":{"name":"bar"}}`) - err := ib.backupItem(arktest.NewLogger(), u, schema.GroupResource{Group: "foo", Resource: "bar"}) + u := velerotest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Foo","metadata":{"name":"bar"}}`) + err := ib.backupItem(velerotest.NewLogger(), u, schema.GroupResource{Group: "foo", Resource: "bar"}) assert.NoError(t, err) } @@ -170,7 +169,7 @@ func TestBackupItemNoSkips(t *testing.T) { customActionAdditionalItemIdentifiers []ResourceIdentifier customActionAdditionalItems []runtime.Unstructured groupResource string - snapshottableVolumes map[string]api.VolumeBackupInfo + snapshottableVolumes 
map[string]v1.VolumeBackupInfo snapshotError error additionalItemError error trackedPVCs sets.String @@ -253,8 +252,8 @@ func TestBackupItemNoSkips(t *testing.T) { }, }, customActionAdditionalItems: []runtime.Unstructured{ - arktest.UnstructuredOrDie(`{"apiVersion":"g1/v1","kind":"r1","metadata":{"namespace":"ns1","name":"n1"}}`), - arktest.UnstructuredOrDie(`{"apiVersion":"g2/v1","kind":"r1","metadata":{"namespace":"ns2","name":"n2"}}`), + velerotest.UnstructuredOrDie(`{"apiVersion":"g1/v1","kind":"r1","metadata":{"namespace":"ns1","name":"n1"}}`), + velerotest.UnstructuredOrDie(`{"apiVersion":"g2/v1","kind":"r1","metadata":{"namespace":"ns2","name":"n2"}}`), }, }, { @@ -279,8 +278,8 @@ func TestBackupItemNoSkips(t *testing.T) { }, }, customActionAdditionalItems: []runtime.Unstructured{ - arktest.UnstructuredOrDie(`{"apiVersion":"g1/v1","kind":"r1","metadata":{"namespace":"ns1","name":"n1"}}`), - arktest.UnstructuredOrDie(`{"apiVersion":"g2/v1","kind":"r1","metadata":{"namespace":"ns2","name":"n2"}}`), + velerotest.UnstructuredOrDie(`{"apiVersion":"g1/v1","kind":"r1","metadata":{"namespace":"ns1","name":"n1"}}`), + velerotest.UnstructuredOrDie(`{"apiVersion":"g2/v1","kind":"r1","metadata":{"namespace":"ns2","name":"n2"}}`), }, additionalItemError: errors.New("foo"), }, @@ -301,7 +300,7 @@ func TestBackupItemNoSkips(t *testing.T) { expectExcluded: false, expectedTarHeaderName: "resources/persistentvolumes/cluster/mypv.json", groupResource: "persistentvolumes", - snapshottableVolumes: map[string]api.VolumeBackupInfo{ + snapshottableVolumes: map[string]v1.VolumeBackupInfo{ "vol-abc123": {SnapshotID: "snapshot-1", AvailabilityZone: "us-east-1c"}, }, }, @@ -315,7 +314,7 @@ func TestBackupItemNoSkips(t *testing.T) { groupResource: "persistentvolumes", // empty snapshottableVolumes causes a blockStore to be created, but no // snapshots are expected to be taken. 
- snapshottableVolumes: map[string]api.VolumeBackupInfo{}, + snapshottableVolumes: map[string]v1.VolumeBackupInfo{}, trackedPVCs: sets.NewString(key("pvc-ns", "pvc"), key("another-pvc-ns", "another-pvc")), }, { @@ -326,7 +325,7 @@ func TestBackupItemNoSkips(t *testing.T) { expectExcluded: false, expectedTarHeaderName: "resources/persistentvolumes/cluster/mypv.json", groupResource: "persistentvolumes", - snapshottableVolumes: map[string]api.VolumeBackupInfo{ + snapshottableVolumes: map[string]v1.VolumeBackupInfo{ "vol-abc123": {SnapshotID: "snapshot-1", AvailabilityZone: "us-east-1c"}, }, trackedPVCs: sets.NewString(key("another-pvc-ns", "another-pvc")), @@ -337,14 +336,14 @@ func TestBackupItemNoSkips(t *testing.T) { item: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv", "labels": {"failure-domain.beta.kubernetes.io/zone": "us-east-1c"}}, "spec": {"awsElasticBlockStore": {"volumeID": "aws://us-east-1c/vol-abc123"}}}`, expectError: true, groupResource: "persistentvolumes", - snapshottableVolumes: map[string]api.VolumeBackupInfo{ + snapshottableVolumes: map[string]v1.VolumeBackupInfo{ "vol-abc123": {SnapshotID: "snapshot-1", AvailabilityZone: "us-east-1c"}, }, snapshotError: fmt.Errorf("failure"), }, { name: "pod's restic PVC volume backups (only) are tracked", - item: `{"apiVersion": "v1", "kind": "Pod", "spec": {"volumes": [{"name": "volume-1", "persistentVolumeClaim": {"claimName": "bar"}},{"name": "volume-2", "persistentVolumeClaim": {"claimName": "baz"}},{"name": "volume-1", "emptyDir": {}}]}, "metadata":{"namespace":"foo","name":"bar", "annotations": {"backup.ark.heptio.com/backup-volumes": "volume-1,volume-2"}}}`, + item: `{"apiVersion": "v1", "kind": "Pod", "spec": {"volumes": [{"name": "volume-1", "persistentVolumeClaim": {"claimName": "bar"}},{"name": "volume-2", "persistentVolumeClaim": {"claimName": "baz"}},{"name": "volume-1", "emptyDir": {}}]}, "metadata":{"namespace":"foo","name":"bar", "annotations": {"backup.velero.io/backup-volumes": "volume-1,volume-2"}}}`, namespaceIncludesExcludes: collections.NewIncludesExcludes().Includes("*"), groupResource: "pods", expectError: false, @@ -375,7 +374,7 @@ func TestBackupItemNoSkips(t *testing.T) { groupResource = schema.ParseGroupResource(test.groupResource) } - item, err := arktest.GetAsMap(test.item) + item, err := velerotest.GetAsMap(test.item) if err != nil { t.Fatal(err) } @@ -406,13 +405,13 @@ func TestBackupItemNoSkips(t *testing.T) { } } - podCommandExecutor := &arktest.MockPodCommandExecutor{} + podCommandExecutor := &velerotest.MockPodCommandExecutor{} defer podCommandExecutor.AssertExpectations(t) - dynamicFactory := &arktest.FakeDynamicFactory{} + dynamicFactory := &velerotest.FakeDynamicFactory{} defer dynamicFactory.AssertExpectations(t) - discoveryHelper := arktest.NewFakeDiscoveryHelper(true, nil) + discoveryHelper := velerotest.NewFakeDiscoveryHelper(true, nil) blockStoreGetter := &blockStoreGetter{} @@ -428,9 +427,9 @@ func TestBackupItemNoSkips(t *testing.T) { blockStoreGetter, ).(*defaultItemBackupper) - var blockStore *arktest.FakeBlockStore + var blockStore *velerotest.FakeBlockStore if test.snapshottableVolumes != nil { - blockStore = &arktest.FakeBlockStore{ + blockStore = &velerotest.FakeBlockStore{ SnapshottableVolumes: test.snapshottableVolumes, VolumeID: "vol-abc123", Error: test.snapshotError, @@ -462,7 +461,7 @@ func TestBackupItemNoSkips(t *testing.T) { if test.additionalItemError != nil && i > 0 { break } - itemClient := &arktest.FakeDynamicClient{} + itemClient := 
&velerotest.FakeDynamicClient{} defer itemClient.AssertExpectations(t) dynamicFactory.On("ClientForGroupVersionResource", item.GroupResource.WithVersion("").GroupVersion(), metav1.APIResource{Name: item.Resource}, item.Namespace).Return(itemClient, nil) @@ -472,7 +471,7 @@ func TestBackupItemNoSkips(t *testing.T) { additionalItemBackupper.On("backupItem", mock.AnythingOfType("*logrus.Entry"), test.customActionAdditionalItems[i], item.GroupResource).Return(test.additionalItemError) } - err = b.backupItem(arktest.NewLogger(), obj, groupResource) + err = b.backupItem(velerotest.NewLogger(), obj, groupResource) gotError := err != nil if e, a := test.expectError, gotError; e != a { t.Fatalf("error: expected %t, got %t: %v", e, a, err) @@ -504,7 +503,7 @@ func TestBackupItemNoSkips(t *testing.T) { assert.False(t, w.headers[0].ModTime.IsZero(), "header.modTime set") assert.Equal(t, 1, len(w.data), "# of data") - actual, err := arktest.GetAsMap(string(w.data[0])) + actual, err := velerotest.GetAsMap(string(w.data[0])) if err != nil { t.Fatal(err) } @@ -614,8 +613,8 @@ func TestItemActionModificationsToItemPersist(t *testing.T) { make(map[itemKey]struct{}), nil, w, - &arktest.FakeDynamicFactory{}, - arktest.NewFakeDiscoveryHelper(true, nil), + &velerotest.FakeDynamicFactory{}, + velerotest.NewFakeDiscoveryHelper(true, nil), nil, newPVCSnapshotTracker(), nil, @@ -628,11 +627,11 @@ func TestItemActionModificationsToItemPersist(t *testing.T) { expected.SetAnnotations(map[string]string{"foo": "bar"}) // method under test - require.NoError(t, b.backupItem(arktest.NewLogger(), obj, schema.ParseGroupResource("resource.group"))) + require.NoError(t, b.backupItem(velerotest.NewLogger(), obj, schema.ParseGroupResource("resource.group"))) // get the actual backed-up item require.Len(t, w.data, 1) - actual, err := arktest.GetAsMap(string(w.data[0])) + actual, err := velerotest.GetAsMap(string(w.data[0])) require.NoError(t, err) assert.EqualValues(t, expected.Object, actual) @@ -647,7 +646,7 @@ func TestResticAnnotationsPersist(t *testing.T) { "namespace": "myns", "name": "bar", "annotations": map[string]interface{}{ - "backup.ark.heptio.com/backup-volumes": "volume-1,volume-2", + "backup.velero.io/backup-volumes": "volume-1,volume-2", }, }, }, @@ -670,8 +669,8 @@ func TestResticAnnotationsPersist(t *testing.T) { make(map[itemKey]struct{}), nil, w, - &arktest.FakeDynamicFactory{}, - arktest.NewFakeDiscoveryHelper(true, nil), + &velerotest.FakeDynamicFactory{}, + velerotest.NewFakeDiscoveryHelper(true, nil), resticBackupper, newPVCSnapshotTracker(), nil, @@ -691,16 +690,16 @@ func TestResticAnnotationsPersist(t *testing.T) { annotations = make(map[string]string) } annotations["foo"] = "bar" - annotations["snapshot.ark.heptio.com/volume-1"] = "snapshot-1" - annotations["snapshot.ark.heptio.com/volume-2"] = "snapshot-2" + annotations["snapshot.velero.io/volume-1"] = "snapshot-1" + annotations["snapshot.velero.io/volume-2"] = "snapshot-2" expected.SetAnnotations(annotations) // method under test - require.NoError(t, b.backupItem(arktest.NewLogger(), obj, schema.ParseGroupResource("pods"))) + require.NoError(t, b.backupItem(velerotest.NewLogger(), obj, schema.ParseGroupResource("pods"))) // get the actual backed-up item require.Len(t, w.data, 1) - actual, err := arktest.GetAsMap(string(w.data[0])) + actual, err := velerotest.GetAsMap(string(w.data[0])) require.NoError(t, err) assert.EqualValues(t, expected.Object, actual) @@ -788,7 +787,7 @@ func TestTakePVSnapshot(t *testing.T) { }, } - blockStore := 
&arktest.FakeBlockStore{ + blockStore := &velerotest.FakeBlockStore{ SnapshottableVolumes: test.volumeInfo, VolumeID: test.expectedVolumeID, } @@ -801,13 +800,13 @@ func TestTakePVSnapshot(t *testing.T) { blockStoreGetter: &blockStoreGetter{blockStore: blockStore}, } - pv, err := arktest.GetAsMap(test.pv) + pv, err := velerotest.GetAsMap(test.pv) if err != nil { t.Fatal(err) } // method under test - err = ib.takePVSnapshot(&unstructured.Unstructured{Object: pv}, arktest.NewLogger()) + err = ib.takePVSnapshot(&unstructured.Unstructured{Object: pv}, velerotest.NewLogger()) gotErr := err != nil diff --git a/pkg/backup/item_hook_handler.go b/pkg/backup/item_hook_handler.go index 1199861b59..429d2efe20 100644 --- a/pkg/backup/item_hook_handler.go +++ b/pkg/backup/item_hook_handler.go @@ -29,10 +29,10 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/kuberesource" - "github.com/heptio/ark/pkg/podexec" - "github.com/heptio/ark/pkg/util/collections" + api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/kuberesource" + "github.com/heptio/velero/pkg/podexec" + "github.com/heptio/velero/pkg/util/collections" ) type hookPhase string @@ -145,10 +145,16 @@ func (h *defaultItemHookHandler) handleHooks( } const ( - podBackupHookContainerAnnotationKey = "hook.backup.ark.heptio.com/container" - podBackupHookCommandAnnotationKey = "hook.backup.ark.heptio.com/command" - podBackupHookOnErrorAnnotationKey = "hook.backup.ark.heptio.com/on-error" - podBackupHookTimeoutAnnotationKey = "hook.backup.ark.heptio.com/timeout" + podBackupHookContainerAnnotationKey = "hook.backup.velero.io/container" + podBackupHookCommandAnnotationKey = "hook.backup.velero.io/command" + podBackupHookOnErrorAnnotationKey = "hook.backup.velero.io/on-error" + podBackupHookTimeoutAnnotationKey = "hook.backup.velero.io/timeout" + + // TODO(1.0) remove all of the legacy ark annotations + arkPodBackupHookContainerAnnotationKey = "hook.backup.ark.heptio.com/container" + arkPodBackupHookCommandAnnotationKey = "hook.backup.ark.heptio.com/command" + arkPodBackupHookOnErrorAnnotationKey = "hook.backup.ark.heptio.com/on-error" + arkPodBackupHookTimeoutAnnotationKey = "hook.backup.ark.heptio.com/timeout" ) func phasedKey(phase hookPhase, key string) string { @@ -162,9 +168,9 @@ func getHookAnnotation(annotations map[string]string, key string, phase hookPhas return annotations[phasedKey(phase, key)] } -// getPodExecHookFromAnnotations returns an ExecHook based on the annotations, as long as the -// 'command' annotation is present. If it is absent, this returns nil. 
-func getPodExecHookFromAnnotations(annotations map[string]string, phase hookPhase) *api.ExecHook { +// TODO(1.0): rename this function to getPodExecHookFromAnnotations (see +// corresponding comment in getPodExecHookFromAnnotations) +func getVeleroPodExecHookFromAnnotations(annotations map[string]string, phase hookPhase) *api.ExecHook { commandValue := getHookAnnotation(annotations, podBackupHookCommandAnnotationKey, phase) if commandValue == "" { return nil @@ -204,6 +210,62 @@ func getPodExecHookFromAnnotations(annotations map[string]string, phase hookPhas } } +// TODO(1.0) delete this function +func getArkPodExecHookFromAnnotations(annotations map[string]string, phase hookPhase) *api.ExecHook { + commandValue := getHookAnnotation(annotations, arkPodBackupHookCommandAnnotationKey, phase) + if commandValue == "" { + return nil + } + var command []string + // check for json array + if commandValue[0] == '[' { + if err := json.Unmarshal([]byte(commandValue), &command); err != nil { + command = []string{commandValue} + } + } else { + command = append(command, commandValue) + } + + container := getHookAnnotation(annotations, arkPodBackupHookContainerAnnotationKey, phase) + + onError := api.HookErrorMode(getHookAnnotation(annotations, arkPodBackupHookOnErrorAnnotationKey, phase)) + if onError != api.HookErrorModeContinue && onError != api.HookErrorModeFail { + onError = "" + } + + var timeout time.Duration + timeoutString := getHookAnnotation(annotations, arkPodBackupHookTimeoutAnnotationKey, phase) + if timeoutString != "" { + if temp, err := time.ParseDuration(timeoutString); err == nil { + timeout = temp + } else { + // TODO: log error that we couldn't parse duration + } + } + + return &api.ExecHook{ + Container: container, + Command: command, + OnError: onError, + Timeout: metav1.Duration{Duration: timeout}, + } + +} + +// getPodExecHookFromAnnotations returns an ExecHook based on the annotations, as long as the +// 'command' annotation is present. If it is absent, this returns nil. +func getPodExecHookFromAnnotations(annotations map[string]string, phase hookPhase) *api.ExecHook { + // TODO(1.0): delete this function implementation, as + // getVeleroPodExecHookFromAnnotations will be renamed + // in order to replace this implementation. 
+ + if hook := getVeleroPodExecHookFromAnnotations(annotations, phase); hook != nil { + return hook + } + + return getArkPodExecHookFromAnnotations(annotations, phase) +} + type resourceHook struct { name string namespaces *collections.IncludesExcludes diff --git a/pkg/backup/item_hook_handler_test.go b/pkg/backup/item_hook_handler_test.go index 64f7ac3f1d..a12bc7616c 100644 --- a/pkg/backup/item_hook_handler_test.go +++ b/pkg/backup/item_hook_handler_test.go @@ -31,9 +31,9 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/util/collections" - arktest "github.com/heptio/ark/pkg/util/test" + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/util/collections" + velerotest "github.com/heptio/velero/pkg/util/test" ) type mockItemHookHandler struct { @@ -58,7 +58,7 @@ func TestHandleHooksSkips(t *testing.T) { }, { name: "pod without annotation / no spec hooks", - item: arktest.UnstructuredOrDie( + item: velerotest.UnstructuredOrDie( ` { "apiVersion": "v1", @@ -74,7 +74,7 @@ func TestHandleHooksSkips(t *testing.T) { { name: "spec hooks not applicable", groupResource: "pods", - item: arktest.UnstructuredOrDie( + item: velerotest.UnstructuredOrDie( ` { "apiVersion": "v1", @@ -115,7 +115,7 @@ func TestHandleHooksSkips(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - podCommandExecutor := &arktest.MockPodCommandExecutor{} + podCommandExecutor := &velerotest.MockPodCommandExecutor{} defer podCommandExecutor.AssertExpectations(t) h := &defaultItemHookHandler{ @@ -123,7 +123,7 @@ func TestHandleHooksSkips(t *testing.T) { } groupResource := schema.ParseGroupResource(test.groupResource) - err := h.handleHooks(arktest.NewLogger(), groupResource, test.item, test.hooks, hookPhasePre) + err := h.handleHooks(velerotest.NewLogger(), groupResource, test.item, test.hooks, hookPhasePre) assert.NoError(t, err) }) } @@ -145,7 +145,7 @@ func TestHandleHooks(t *testing.T) { name: "pod, no annotation, spec (multiple pre hooks) = run spec", phase: hookPhasePre, groupResource: "pods", - item: arktest.UnstructuredOrDie(` + item: velerotest.UnstructuredOrDie(` { "apiVersion": "v1", "kind": "Pod", @@ -195,7 +195,7 @@ func TestHandleHooks(t *testing.T) { name: "pod, no annotation, spec (multiple post hooks) = run spec", phase: hookPhasePost, groupResource: "pods", - item: arktest.UnstructuredOrDie(` + item: velerotest.UnstructuredOrDie(` { "apiVersion": "v1", "kind": "Pod", @@ -245,7 +245,7 @@ func TestHandleHooks(t *testing.T) { name: "pod, annotation (legacy), no spec = run annotation", phase: hookPhasePre, groupResource: "pods", - item: arktest.UnstructuredOrDie(` + item: velerotest.UnstructuredOrDie(` { "apiVersion": "v1", "kind": "Pod", @@ -253,8 +253,8 @@ func TestHandleHooks(t *testing.T) { "namespace": "ns", "name": "name", "annotations": { - "hook.backup.ark.heptio.com/container": "c", - "hook.backup.ark.heptio.com/command": "/bin/ls" + "hook.backup.velero.io/container": "c", + "hook.backup.velero.io/command": "/bin/ls" } } }`), @@ -267,7 +267,7 @@ func TestHandleHooks(t *testing.T) { name: "pod, annotation (pre), no spec = run annotation", phase: hookPhasePre, groupResource: "pods", - item: arktest.UnstructuredOrDie(` + item: velerotest.UnstructuredOrDie(` { "apiVersion": "v1", "kind": "Pod", @@ -275,8 +275,8 @@ func TestHandleHooks(t *testing.T) { "namespace": "ns", "name": "name", "annotations": { - 
"pre.hook.backup.ark.heptio.com/container": "c", - "pre.hook.backup.ark.heptio.com/command": "/bin/ls" + "pre.hook.backup.velero.io/container": "c", + "pre.hook.backup.velero.io/command": "/bin/ls" } } }`), @@ -289,7 +289,7 @@ func TestHandleHooks(t *testing.T) { name: "pod, annotation (post), no spec = run annotation", phase: hookPhasePost, groupResource: "pods", - item: arktest.UnstructuredOrDie(` + item: velerotest.UnstructuredOrDie(` { "apiVersion": "v1", "kind": "Pod", @@ -297,8 +297,8 @@ func TestHandleHooks(t *testing.T) { "namespace": "ns", "name": "name", "annotations": { - "post.hook.backup.ark.heptio.com/container": "c", - "post.hook.backup.ark.heptio.com/command": "/bin/ls" + "post.hook.backup.velero.io/container": "c", + "post.hook.backup.velero.io/command": "/bin/ls" } } }`), @@ -311,7 +311,7 @@ func TestHandleHooks(t *testing.T) { name: "pod, annotation & spec = run annotation", phase: hookPhasePre, groupResource: "pods", - item: arktest.UnstructuredOrDie(` + item: velerotest.UnstructuredOrDie(` { "apiVersion": "v1", "kind": "Pod", @@ -319,8 +319,8 @@ func TestHandleHooks(t *testing.T) { "namespace": "ns", "name": "name", "annotations": { - "hook.backup.ark.heptio.com/container": "c", - "hook.backup.ark.heptio.com/command": "/bin/ls" + "hook.backup.velero.io/container": "c", + "hook.backup.velero.io/command": "/bin/ls" } } }`), @@ -346,7 +346,7 @@ func TestHandleHooks(t *testing.T) { name: "pod, annotation, onError=fail = return error", phase: hookPhasePre, groupResource: "pods", - item: arktest.UnstructuredOrDie(` + item: velerotest.UnstructuredOrDie(` { "apiVersion": "v1", "kind": "Pod", @@ -354,9 +354,9 @@ func TestHandleHooks(t *testing.T) { "namespace": "ns", "name": "name", "annotations": { - "hook.backup.ark.heptio.com/container": "c", - "hook.backup.ark.heptio.com/command": "/bin/ls", - "hook.backup.ark.heptio.com/on-error": "Fail" + "hook.backup.velero.io/container": "c", + "hook.backup.velero.io/command": "/bin/ls", + "hook.backup.velero.io/on-error": "Fail" } } }`), @@ -372,7 +372,7 @@ func TestHandleHooks(t *testing.T) { name: "pod, annotation, onError=continue = return nil", phase: hookPhasePre, groupResource: "pods", - item: arktest.UnstructuredOrDie(` + item: velerotest.UnstructuredOrDie(` { "apiVersion": "v1", "kind": "Pod", @@ -380,9 +380,9 @@ func TestHandleHooks(t *testing.T) { "namespace": "ns", "name": "name", "annotations": { - "hook.backup.ark.heptio.com/container": "c", - "hook.backup.ark.heptio.com/command": "/bin/ls", - "hook.backup.ark.heptio.com/on-error": "Continue" + "hook.backup.velero.io/container": "c", + "hook.backup.velero.io/command": "/bin/ls", + "hook.backup.velero.io/on-error": "Continue" } } }`), @@ -398,7 +398,7 @@ func TestHandleHooks(t *testing.T) { name: "pod, spec, onError=fail = don't run other hooks", phase: hookPhasePre, groupResource: "pods", - item: arktest.UnstructuredOrDie(` + item: velerotest.UnstructuredOrDie(` { "apiVersion": "v1", "kind": "Pod", @@ -460,7 +460,7 @@ func TestHandleHooks(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - podCommandExecutor := &arktest.MockPodCommandExecutor{} + podCommandExecutor := &velerotest.MockPodCommandExecutor{} defer podCommandExecutor.AssertExpectations(t) h := &defaultItemHookHandler{ @@ -490,7 +490,7 @@ func TestHandleHooks(t *testing.T) { } groupResource := schema.ParseGroupResource(test.groupResource) - err := h.handleHooks(arktest.NewLogger(), groupResource, test.item, test.hooks, test.phase) + err := h.handleHooks(velerotest.NewLogger(), 
groupResource, test.item, test.hooks, test.phase) if test.expectedError != nil { assert.EqualError(t, err, test.expectedError.Error()) @@ -595,6 +595,37 @@ func TestGetPodExecHookFromAnnotations(t *testing.T) { Command: []string{"/usr/bin/foo"}, }, }, + { + name: "legacy ark-based annotations are supported", + annotations: map[string]string{ + phasedKey(phase, arkPodBackupHookContainerAnnotationKey): "some-container", + phasedKey(phase, arkPodBackupHookCommandAnnotationKey): "/usr/bin/foo", + }, + expectedHook: &v1.ExecHook{ + Container: "some-container", + Command: []string{"/usr/bin/foo"}, + }, + }, + { + name: "when both current and legacy ark-based annotations are specified, current takes precedence", + annotations: map[string]string{ + phasedKey(phase, podBackupHookContainerAnnotationKey): "current-container", + phasedKey(phase, podBackupHookCommandAnnotationKey): "/usr/bin/current", + phasedKey(phase, podBackupHookOnErrorAnnotationKey): string(v1.HookErrorModeContinue), + phasedKey(phase, podBackupHookTimeoutAnnotationKey): "10m", + + phasedKey(phase, arkPodBackupHookContainerAnnotationKey): "legacy-container", + phasedKey(phase, arkPodBackupHookCommandAnnotationKey): "/usr/bin/legacy", + phasedKey(phase, arkPodBackupHookOnErrorAnnotationKey): string(v1.HookErrorModeFail), + phasedKey(phase, arkPodBackupHookTimeoutAnnotationKey): "5m", + }, + expectedHook: &v1.ExecHook{ + Container: "current-container", + Command: []string{"/usr/bin/current"}, + OnError: v1.HookErrorModeContinue, + Timeout: metav1.Duration{Duration: 10 * time.Minute}, + }, + }, } for _, test := range tests { diff --git a/pkg/backup/mocks/item_action.go b/pkg/backup/mocks/item_action.go index a0046d556d..381edb30dc 100644 --- a/pkg/backup/mocks/item_action.go +++ b/pkg/backup/mocks/item_action.go @@ -16,10 +16,10 @@ limitations under the License. // Code generated by mockery v1.0.0. DO NOT EDIT. package mocks -import backup "github.com/heptio/ark/pkg/backup" +import backup "github.com/heptio/velero/pkg/backup" import mock "github.com/stretchr/testify/mock" import runtime "k8s.io/apimachinery/pkg/runtime" -import v1 "github.com/heptio/ark/pkg/apis/ark/v1" +import v1 "github.com/heptio/velero/pkg/apis/velero/v1" // ItemAction is an autogenerated mock type for the ItemAction type type ItemAction struct { diff --git a/pkg/backup/pod_action.go b/pkg/backup/pod_action.go index 50b6297f64..d779883e8e 100644 --- a/pkg/backup/pod_action.go +++ b/pkg/backup/pod_action.go @@ -22,9 +22,9 @@ import ( "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" - "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/kuberesource" - "github.com/heptio/ark/pkg/util/collections" + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/kuberesource" + "github.com/heptio/velero/pkg/util/collections" ) // podAction implements ItemAction. 
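
Note on the `pkg/backup/item_hook_handler.go` changes above: the new `hook.backup.velero.io/*` annotation keys are read first, and the legacy `hook.backup.ark.heptio.com/*` keys are consulted only when the new ones yield no hook, so pods annotated for Ark keep working while current annotations take precedence. The following is a minimal, self-contained sketch of that precedence rule only; the `hookCommand` helper and the sample values are illustrative and are not part of the Velero codebase.

```go
package main

import "fmt"

// Annotation keys introduced/retained in the diff above. The ark key is
// legacy and is only read as a fallback.
const (
	veleroCommandKey = "hook.backup.velero.io/command"
	arkCommandKey    = "hook.backup.ark.heptio.com/command"
)

// hookCommand returns the hook command from a pod's annotations, preferring
// the current velero.io key and falling back to the legacy ark.heptio.com key.
// (Illustrative helper, not an actual Velero function.)
func hookCommand(annotations map[string]string) (string, bool) {
	if v, ok := annotations[veleroCommandKey]; ok && v != "" {
		return v, true
	}
	if v, ok := annotations[arkCommandKey]; ok && v != "" {
		return v, true
	}
	return "", false
}

func main() {
	legacyOnly := map[string]string{arkCommandKey: "/bin/ls"}
	both := map[string]string{
		arkCommandKey:    "/usr/bin/legacy",
		veleroCommandKey: "/usr/bin/current",
	}

	if cmd, ok := hookCommand(legacyOnly); ok {
		fmt.Println("legacy-only pod runs:", cmd) // prints /bin/ls
	}
	if cmd, ok := hookCommand(both); ok {
		fmt.Println("pod with both runs:", cmd) // prints /usr/bin/current
	}
}
```

The same ordering is what the new test case "when both current and legacy ark-based annotations are specified, current takes precedence" in `item_hook_handler_test.go` above asserts against the real `getPodExecHookFromAnnotations` wrapper.
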
diff --git a/pkg/backup/pod_action_test.go b/pkg/backup/pod_action_test.go index 88f0bcbe9a..6ce6e3daa1 100644 --- a/pkg/backup/pod_action_test.go +++ b/pkg/backup/pod_action_test.go @@ -23,12 +23,12 @@ import ( "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/runtime" - "github.com/heptio/ark/pkg/kuberesource" - arktest "github.com/heptio/ark/pkg/util/test" + "github.com/heptio/velero/pkg/kuberesource" + velerotest "github.com/heptio/velero/pkg/util/test" ) func TestPodActionAppliesTo(t *testing.T) { - a := NewPodAction(arktest.NewLogger()) + a := NewPodAction(velerotest.NewLogger()) actual, err := a.AppliesTo() require.NoError(t, err) @@ -47,7 +47,7 @@ func TestPodActionExecute(t *testing.T) { }{ { name: "no spec.volumes", - pod: arktest.UnstructuredOrDie(` + pod: velerotest.UnstructuredOrDie(` { "apiVersion": "v1", "kind": "Pod", @@ -60,7 +60,7 @@ func TestPodActionExecute(t *testing.T) { }, { name: "persistentVolumeClaim without claimName", - pod: arktest.UnstructuredOrDie(` + pod: velerotest.UnstructuredOrDie(` { "apiVersion": "v1", "kind": "Pod", @@ -80,7 +80,7 @@ func TestPodActionExecute(t *testing.T) { }, { name: "full test, mix of volume types", - pod: arktest.UnstructuredOrDie(` + pod: velerotest.UnstructuredOrDie(` { "apiVersion": "v1", "kind": "Pod", @@ -118,7 +118,7 @@ func TestPodActionExecute(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - a := NewPodAction(arktest.NewLogger()) + a := NewPodAction(velerotest.NewLogger()) updated, additionalItems, err := a.Execute(test.pod, nil) require.NoError(t, err) diff --git a/pkg/backup/request.go b/pkg/backup/request.go index 7f12da180a..d405ea6069 100644 --- a/pkg/backup/request.go +++ b/pkg/backup/request.go @@ -1,18 +1,18 @@ package backup import ( - arkv1api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/util/collections" - "github.com/heptio/ark/pkg/volume" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/util/collections" + "github.com/heptio/velero/pkg/volume" ) // Request is a request for a backup, with all references to other objects // materialized (e.g. backup/snapshot locations, includes/excludes, etc.) 
type Request struct { - *arkv1api.Backup + *velerov1api.Backup - StorageLocation *arkv1api.BackupStorageLocation - SnapshotLocations []*arkv1api.VolumeSnapshotLocation + StorageLocation *velerov1api.BackupStorageLocation + SnapshotLocations []*velerov1api.VolumeSnapshotLocation NamespaceIncludesExcludes *collections.IncludesExcludes ResourceIncludesExcludes *collections.IncludesExcludes ResourceHooks []resourceHook diff --git a/pkg/backup/resource_backupper.go b/pkg/backup/resource_backupper.go index 38475ec470..e1d29075ad 100644 --- a/pkg/backup/resource_backupper.go +++ b/pkg/backup/resource_backupper.go @@ -26,12 +26,12 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" kuberrs "k8s.io/apimachinery/pkg/util/errors" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/discovery" - "github.com/heptio/ark/pkg/kuberesource" - "github.com/heptio/ark/pkg/podexec" - "github.com/heptio/ark/pkg/restic" - "github.com/heptio/ark/pkg/util/collections" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/discovery" + "github.com/heptio/velero/pkg/kuberesource" + "github.com/heptio/velero/pkg/podexec" + "github.com/heptio/velero/pkg/restic" + "github.com/heptio/velero/pkg/util/collections" ) type resourceBackupperFactory interface { diff --git a/pkg/backup/resource_backupper_test.go b/pkg/backup/resource_backupper_test.go index f27fec9eed..ff403d2b33 100644 --- a/pkg/backup/resource_backupper_test.go +++ b/pkg/backup/resource_backupper_test.go @@ -26,14 +26,14 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" - "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/discovery" - "github.com/heptio/ark/pkg/kuberesource" - "github.com/heptio/ark/pkg/podexec" - "github.com/heptio/ark/pkg/restic" - "github.com/heptio/ark/pkg/util/collections" - arktest "github.com/heptio/ark/pkg/util/test" + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/discovery" + "github.com/heptio/velero/pkg/kuberesource" + "github.com/heptio/velero/pkg/podexec" + "github.com/heptio/velero/pkg/restic" + "github.com/heptio/velero/pkg/util/collections" + velerotest "github.com/heptio/velero/pkg/util/test" ) func TestBackupResource(t *testing.T) { @@ -76,8 +76,8 @@ func TestBackupResource(t *testing.T) { groupResource: schema.GroupResource{Group: "", Resource: "pods"}, listResponses: [][]*unstructured.Unstructured{ { - arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"myns","name":"myname1"}}`), - arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"myns","name":"myname2"}}`), + velerotest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"myns","name":"myname1"}}`), + velerotest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"myns","name":"myname2"}}`), }, }, }, @@ -92,12 +92,12 @@ func TestBackupResource(t *testing.T) { groupResource: schema.GroupResource{Group: "", Resource: "pods"}, listResponses: [][]*unstructured.Unstructured{ { - arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"a","name":"myname1"}}`), - arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"a","name":"myname2"}}`), + velerotest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"a","name":"myname1"}}`), + 
velerotest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"a","name":"myname2"}}`), }, { - arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"b","name":"myname3"}}`), - arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"b","name":"myname4"}}`), + velerotest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"b","name":"myname3"}}`), + velerotest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Pod","metadata":{"namespace":"b","name":"myname4"}}`), }, }, }, @@ -112,8 +112,8 @@ func TestBackupResource(t *testing.T) { groupResource: schema.GroupResource{Group: "certificates.k8s.io", Resource: "certificatesigningrequests"}, listResponses: [][]*unstructured.Unstructured{ { - arktest.UnstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname1"}}`), - arktest.UnstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname2"}}`), + velerotest.UnstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname1"}}`), + velerotest.UnstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname2"}}`), }, }, }, @@ -129,8 +129,8 @@ func TestBackupResource(t *testing.T) { groupResource: schema.GroupResource{Group: "certificates.k8s.io", Resource: "certificatesigningrequests"}, listResponses: [][]*unstructured.Unstructured{ { - arktest.UnstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname1"}}`), - arktest.UnstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname2"}}`), + velerotest.UnstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname1"}}`), + velerotest.UnstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname2"}}`), }, }, }, @@ -168,8 +168,8 @@ func TestBackupResource(t *testing.T) { groupResource: schema.GroupResource{Group: "certificates.k8s.io", Resource: "certificatesigningrequests"}, listResponses: [][]*unstructured.Unstructured{ { - arktest.UnstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname1"}}`), - arktest.UnstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname2"}}`), + velerotest.UnstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname1"}}`), + velerotest.UnstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname2"}}`), }, }, }, @@ -196,8 +196,8 @@ func TestBackupResource(t *testing.T) { groupResource: schema.GroupResource{Group: "certificates.k8s.io", Resource: "certificatesigningrequests"}, listResponses: [][]*unstructured.Unstructured{ { - arktest.UnstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname1"}}`), - arktest.UnstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname2"}}`), + 
velerotest.UnstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname1"}}`), + velerotest.UnstructuredOrDie(`{"apiVersion":"certificates.k8s.io/v1beta1","kind":"CertificateSigningRequest","metadata":{"name":"myname2"}}`), }, }, }, @@ -213,8 +213,8 @@ func TestBackupResource(t *testing.T) { groupResource: schema.GroupResource{Group: "", Resource: "namespaces"}, expectSkip: false, getResponses: []*unstructured.Unstructured{ - arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-1"}}`), - arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-2"}}`), + velerotest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-1"}}`), + velerotest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-2"}}`), }, }, } @@ -239,10 +239,10 @@ func TestBackupResource(t *testing.T) { NamespaceIncludesExcludes: test.namespaces, } - dynamicFactory := &arktest.FakeDynamicFactory{} + dynamicFactory := &velerotest.FakeDynamicFactory{} defer dynamicFactory.AssertExpectations(t) - discoveryHelper := arktest.NewFakeDiscoveryHelper(true, nil) + discoveryHelper := velerotest.NewFakeDiscoveryHelper(true, nil) backedUpItems := map[itemKey]struct{}{ {resource: "foo", namespace: "ns", name: "name"}: {}, @@ -253,14 +253,14 @@ func TestBackupResource(t *testing.T) { "networkpolicies": newCohabitatingResource("networkpolicies", "extensions", "networking.k8s.io"), } - podCommandExecutor := &arktest.MockPodCommandExecutor{} + podCommandExecutor := &velerotest.MockPodCommandExecutor{} defer podCommandExecutor.AssertExpectations(t) tarWriter := &fakeTarWriter{} t.Run(test.name, func(t *testing.T) { rb := (&defaultResourceBackupperFactory{}).newResourceBackupper( - arktest.NewLogger(), + velerotest.NewLogger(), req, dynamicFactory, discoveryHelper, @@ -295,7 +295,7 @@ func TestBackupResource(t *testing.T) { if len(test.listResponses) > 0 { for i, namespace := range test.expectedListedNamespaces { - client := &arktest.FakeDynamicClient{} + client := &velerotest.FakeDynamicClient{} defer client.AssertExpectations(t) dynamicFactory.On("ClientForGroupVersionResource", test.groupVersion, test.apiResource, namespace).Return(client, nil) @@ -312,7 +312,7 @@ func TestBackupResource(t *testing.T) { } if len(test.getResponses) > 0 { - client := &arktest.FakeDynamicClient{} + client := &velerotest.FakeDynamicClient{} defer client.AssertExpectations(t) dynamicFactory.On("ClientForGroupVersionResource", test.groupVersion, test.apiResource, "").Return(client, nil) @@ -399,10 +399,10 @@ func TestBackupResourceCohabitation(t *testing.T) { }, } - dynamicFactory := &arktest.FakeDynamicFactory{} + dynamicFactory := &velerotest.FakeDynamicFactory{} defer dynamicFactory.AssertExpectations(t) - discoveryHelper := arktest.NewFakeDiscoveryHelper(true, nil) + discoveryHelper := velerotest.NewFakeDiscoveryHelper(true, nil) backedUpItems := map[itemKey]struct{}{ {resource: "foo", namespace: "ns", name: "name"}: {}, @@ -413,13 +413,13 @@ func TestBackupResourceCohabitation(t *testing.T) { "networkpolicies": newCohabitatingResource("networkpolicies", "extensions", "networking.k8s.io"), } - podCommandExecutor := &arktest.MockPodCommandExecutor{} + podCommandExecutor := &velerotest.MockPodCommandExecutor{} defer podCommandExecutor.AssertExpectations(t) tarWriter := &fakeTarWriter{} rb := (&defaultResourceBackupperFactory{}).newResourceBackupper( - arktest.NewLogger(), + 
velerotest.NewLogger(), req, dynamicFactory, discoveryHelper, @@ -452,7 +452,7 @@ func TestBackupResourceCohabitation(t *testing.T) { mock.Anything, ).Return(itemBackupper) - client := &arktest.FakeDynamicClient{} + client := &velerotest.FakeDynamicClient{} defer client.AssertExpectations(t) // STEP 1: make sure the initial backup goes through @@ -479,20 +479,20 @@ func TestBackupResourceOnlyIncludesSpecifiedNamespaces(t *testing.T) { backedUpItems := map[itemKey]struct{}{} - dynamicFactory := &arktest.FakeDynamicFactory{} + dynamicFactory := &velerotest.FakeDynamicFactory{} defer dynamicFactory.AssertExpectations(t) - discoveryHelper := arktest.NewFakeDiscoveryHelper(true, nil) + discoveryHelper := velerotest.NewFakeDiscoveryHelper(true, nil) cohabitatingResources := map[string]*cohabitatingResource{} - podCommandExecutor := &arktest.MockPodCommandExecutor{} + podCommandExecutor := &velerotest.MockPodCommandExecutor{} defer podCommandExecutor.AssertExpectations(t) tarWriter := &fakeTarWriter{} rb := (&defaultResourceBackupperFactory{}).newResourceBackupper( - arktest.NewLogger(), + velerotest.NewLogger(), req, dynamicFactory, discoveryHelper, @@ -533,12 +533,12 @@ func TestBackupResourceOnlyIncludesSpecifiedNamespaces(t *testing.T) { mock.Anything, ).Return(itemBackupper) - client := &arktest.FakeDynamicClient{} + client := &velerotest.FakeDynamicClient{} defer client.AssertExpectations(t) coreV1Group := schema.GroupVersion{Group: "", Version: "v1"} dynamicFactory.On("ClientForGroupVersionResource", coreV1Group, namespacesResource, "").Return(client, nil) - ns1 := arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-1"}}`) + ns1 := velerotest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-1"}}`) client.On("Get", "ns-1", metav1.GetOptions{}).Return(ns1, nil) itemHookHandler.On("handleHooks", mock.Anything, schema.GroupResource{Group: "", Resource: "namespaces"}, ns1, req.ResourceHooks, hookPhasePre).Return(nil) @@ -568,20 +568,20 @@ func TestBackupResourceListAllNamespacesExcludesCorrectly(t *testing.T) { backedUpItems := map[itemKey]struct{}{} - dynamicFactory := &arktest.FakeDynamicFactory{} + dynamicFactory := &velerotest.FakeDynamicFactory{} defer dynamicFactory.AssertExpectations(t) - discoveryHelper := arktest.NewFakeDiscoveryHelper(true, nil) + discoveryHelper := velerotest.NewFakeDiscoveryHelper(true, nil) cohabitatingResources := map[string]*cohabitatingResource{} - podCommandExecutor := &arktest.MockPodCommandExecutor{} + podCommandExecutor := &velerotest.MockPodCommandExecutor{} defer podCommandExecutor.AssertExpectations(t) tarWriter := &fakeTarWriter{} rb := (&defaultResourceBackupperFactory{}).newResourceBackupper( - arktest.NewLogger(), + velerotest.NewLogger(), req, dynamicFactory, discoveryHelper, @@ -616,14 +616,14 @@ func TestBackupResourceListAllNamespacesExcludesCorrectly(t *testing.T) { mock.Anything, ).Return(itemBackupper) - client := &arktest.FakeDynamicClient{} + client := &velerotest.FakeDynamicClient{} defer client.AssertExpectations(t) coreV1Group := schema.GroupVersion{Group: "", Version: "v1"} dynamicFactory.On("ClientForGroupVersionResource", coreV1Group, namespacesResource, "").Return(client, nil) - ns1 := arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-1"}}`) - ns2 := arktest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-2"}}`) + ns1 := 
velerotest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-1"}}`) + ns2 := velerotest.UnstructuredOrDie(`{"apiVersion":"v1","kind":"Namespace","metadata":{"name":"ns-2"}}`) list := &unstructured.UnstructuredList{ Items: []unstructured.Unstructured{*ns1, *ns2}, } diff --git a/pkg/backup/service_account_action.go b/pkg/backup/service_account_action.go index a6605e6a3e..057f1e8b00 100644 --- a/pkg/backup/service_account_action.go +++ b/pkg/backup/service_account_action.go @@ -25,9 +25,9 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" - "github.com/heptio/ark/pkg/apis/ark/v1" - arkdiscovery "github.com/heptio/ark/pkg/discovery" - "github.com/heptio/ark/pkg/kuberesource" + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + velerodiscovery "github.com/heptio/velero/pkg/discovery" + "github.com/heptio/velero/pkg/kuberesource" ) // serviceAccountAction implements ItemAction. @@ -37,7 +37,7 @@ type serviceAccountAction struct { } // NewServiceAccountAction creates a new ItemAction for service accounts. -func NewServiceAccountAction(logger logrus.FieldLogger, clusterRoleBindingListers map[string]ClusterRoleBindingLister, discoveryHelper arkdiscovery.Helper) (ItemAction, error) { +func NewServiceAccountAction(logger logrus.FieldLogger, clusterRoleBindingListers map[string]ClusterRoleBindingLister, discoveryHelper velerodiscovery.Helper) (ItemAction, error) { // Look up the supported RBAC version var supportedAPI metav1.GroupVersionForDiscovery for _, ag := range discoveryHelper.APIGroups() { diff --git a/pkg/backup/service_account_action_test.go b/pkg/backup/service_account_action_test.go index 5053c63304..370ff5e3dd 100644 --- a/pkg/backup/service_account_action_test.go +++ b/pkg/backup/service_account_action_test.go @@ -28,8 +28,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "github.com/heptio/ark/pkg/kuberesource" - arktest "github.com/heptio/ark/pkg/util/test" + "github.com/heptio/velero/pkg/kuberesource" + velerotest "github.com/heptio/velero/pkg/util/test" ) func newV1ClusterRoleBindingList(rbacCRBList []rbac.ClusterRoleBinding) []ClusterRoleBinding { @@ -141,8 +141,8 @@ func TestNewServiceAccountAction(t *testing.T) { }, } // Set up all of our fakes outside the test loop - discoveryHelper := arktest.FakeDiscoveryHelper{} - logger := arktest.NewLogger() + discoveryHelper := velerotest.FakeDiscoveryHelper{} + logger := velerotest.NewLogger() v1crbs := []rbac.ClusterRoleBinding{ { @@ -205,13 +205,13 @@ func TestServiceAccountActionExecute(t *testing.T) { }{ { name: "no crbs", - serviceAccount: arktest.UnstructuredOrDie(` + serviceAccount: velerotest.UnstructuredOrDie(` { "apiVersion": "v1", "kind": "ServiceAccount", "metadata": { - "namespace": "heptio-ark", - "name": "ark" + "namespace": "velero", + "name": "velero" } } `), @@ -220,13 +220,13 @@ func TestServiceAccountActionExecute(t *testing.T) { }, { name: "no matching crbs", - serviceAccount: arktest.UnstructuredOrDie(` + serviceAccount: velerotest.UnstructuredOrDie(` { "apiVersion": "v1", "kind": "ServiceAccount", "metadata": { - "namespace": "heptio-ark", - "name": "ark" + "namespace": "velero", + "name": "velero" } } `), @@ -240,17 +240,17 @@ func TestServiceAccountActionExecute(t *testing.T) { }, { Kind: "non-matching-kind", - Namespace: "heptio-ark", - Name: "ark", + Namespace: "velero", + Name: "velero", }, { Kind: rbac.ServiceAccountKind, Namespace: "non-matching-ns", - Name: "ark", + Name: "velero", }, { Kind: 
rbac.ServiceAccountKind, - Namespace: "heptio-ark", + Namespace: "velero", Name: "non-matching-name", }, }, @@ -263,13 +263,13 @@ func TestServiceAccountActionExecute(t *testing.T) { }, { name: "some matching crbs", - serviceAccount: arktest.UnstructuredOrDie(` + serviceAccount: velerotest.UnstructuredOrDie(` { "apiVersion": "v1", "kind": "ServiceAccount", "metadata": { - "namespace": "heptio-ark", - "name": "ark" + "namespace": "velero", + "name": "velero" } } `), @@ -301,8 +301,8 @@ func TestServiceAccountActionExecute(t *testing.T) { }, { Kind: rbac.ServiceAccountKind, - Namespace: "heptio-ark", - Name: "ark", + Namespace: "velero", + Name: "velero", }, }, RoleRef: rbac.RoleRef{ @@ -316,8 +316,8 @@ func TestServiceAccountActionExecute(t *testing.T) { Subjects: []rbac.Subject{ { Kind: rbac.ServiceAccountKind, - Namespace: "heptio-ark", - Name: "ark", + Namespace: "velero", + Name: "velero", }, }, RoleRef: rbac.RoleRef{ @@ -331,8 +331,8 @@ func TestServiceAccountActionExecute(t *testing.T) { Subjects: []rbac.Subject{ { Kind: rbac.ServiceAccountKind, - Namespace: "heptio-ark", - Name: "ark", + Namespace: "velero", + Name: "velero", }, { Kind: "non-matching-kind", @@ -378,7 +378,7 @@ func TestServiceAccountActionExecute(t *testing.T) { t.Run(test.name, func(t *testing.T) { // Create the action struct directly so we don't need to mock a clientset action := &serviceAccountAction{ - log: arktest.NewLogger(), + log: velerotest.NewLogger(), clusterRoleBindings: newV1ClusterRoleBindingList(test.crbs), } @@ -413,13 +413,13 @@ func TestServiceAccountActionExecuteOnBeta1(t *testing.T) { }{ { name: "no crbs", - serviceAccount: arktest.UnstructuredOrDie(` + serviceAccount: velerotest.UnstructuredOrDie(` { "apiVersion": "v1", "kind": "ServiceAccount", "metadata": { - "namespace": "heptio-ark", - "name": "ark" + "namespace": "velero", + "name": "velero" } } `), @@ -428,13 +428,13 @@ func TestServiceAccountActionExecuteOnBeta1(t *testing.T) { }, { name: "no matching crbs", - serviceAccount: arktest.UnstructuredOrDie(` + serviceAccount: velerotest.UnstructuredOrDie(` { "apiVersion": "v1", "kind": "ServiceAccount", "metadata": { - "namespace": "heptio-ark", - "name": "ark" + "namespace": "velero", + "name": "velero" } } `), @@ -448,17 +448,17 @@ func TestServiceAccountActionExecuteOnBeta1(t *testing.T) { }, { Kind: "non-matching-kind", - Namespace: "heptio-ark", - Name: "ark", + Namespace: "velero", + Name: "velero", }, { Kind: rbacbeta.ServiceAccountKind, Namespace: "non-matching-ns", - Name: "ark", + Name: "velero", }, { Kind: rbacbeta.ServiceAccountKind, - Namespace: "heptio-ark", + Namespace: "velero", Name: "non-matching-name", }, }, @@ -471,13 +471,13 @@ func TestServiceAccountActionExecuteOnBeta1(t *testing.T) { }, { name: "some matching crbs", - serviceAccount: arktest.UnstructuredOrDie(` + serviceAccount: velerotest.UnstructuredOrDie(` { "apiVersion": "v1", "kind": "ServiceAccount", "metadata": { - "namespace": "heptio-ark", - "name": "ark" + "namespace": "velero", + "name": "velero" } } `), @@ -509,8 +509,8 @@ func TestServiceAccountActionExecuteOnBeta1(t *testing.T) { }, { Kind: rbacbeta.ServiceAccountKind, - Namespace: "heptio-ark", - Name: "ark", + Namespace: "velero", + Name: "velero", }, }, RoleRef: rbacbeta.RoleRef{ @@ -524,8 +524,8 @@ func TestServiceAccountActionExecuteOnBeta1(t *testing.T) { Subjects: []rbacbeta.Subject{ { Kind: rbacbeta.ServiceAccountKind, - Namespace: "heptio-ark", - Name: "ark", + Namespace: "velero", + Name: "velero", }, }, RoleRef: rbacbeta.RoleRef{ @@ -539,8 +539,8 
@@ func TestServiceAccountActionExecuteOnBeta1(t *testing.T) { Subjects: []rbacbeta.Subject{ { Kind: rbacbeta.ServiceAccountKind, - Namespace: "heptio-ark", - Name: "ark", + Namespace: "velero", + Name: "velero", }, { Kind: "non-matching-kind", @@ -586,7 +586,7 @@ func TestServiceAccountActionExecuteOnBeta1(t *testing.T) { t.Run(test.name, func(t *testing.T) { // Create the action struct directly so we don't need to mock a clientset action := &serviceAccountAction{ - log: arktest.NewLogger(), + log: velerotest.NewLogger(), clusterRoleBindings: newV1beta1ClusterRoleBindingList(test.crbs), } diff --git a/pkg/buildinfo/version.go b/pkg/buildinfo/version.go index 0d90e6625b..8ba305019d 100644 --- a/pkg/buildinfo/version.go +++ b/pkg/buildinfo/version.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package buildinfo holds build-time information like the ark version. +// Package buildinfo holds build-time information like the velero version. // This is a separate package so that other packages can import it without // worrying about introducing circular dependencies. package buildinfo @@ -22,7 +22,7 @@ package buildinfo import "fmt" var ( - // Version is the current version of Ark, set by the go linker's -X flag at build time. + // Version is the current version of Velero, set by the go linker's -X flag at build time. Version string // GitSHA is the actual commit that is being built, set by the go linker's -X flag at build time. diff --git a/pkg/client/client.go b/pkg/client/client.go index 2d7a56c580..aa35becd9b 100644 --- a/pkg/client/client.go +++ b/pkg/client/client.go @@ -24,7 +24,7 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" - "github.com/heptio/ark/pkg/buildinfo" + "github.com/heptio/velero/pkg/buildinfo" ) // Config returns a *rest.Config, using either the kubeconfig (if specified) or an in-cluster diff --git a/pkg/client/client_test.go b/pkg/client/client_test.go index 7aaf4cadf1..b765cd6724 100644 --- a/pkg/client/client_test.go +++ b/pkg/client/client_test.go @@ -33,12 +33,12 @@ func TestBuildUserAgent(t *testing.T) { }{ { name: "Test general interpolation in correct order", - command: "ark", + command: "velero", os: "darwin", arch: "amd64", gitSha: "abc123", version: "v0.1.1", - expected: "ark/v0.1.1 (darwin/amd64) abc123", + expected: "velero/v0.1.1 (darwin/amd64) abc123", }, } diff --git a/pkg/client/config.go b/pkg/client/config.go index b5ec760a63..a0d6487f77 100644 --- a/pkg/client/config.go +++ b/pkg/client/config.go @@ -28,11 +28,47 @@ const ( ConfigKeyNamespace = "namespace" ) -// LoadConfig loads the Ark client configuration file and returns it as a map[string]string. If the +// LoadConfig loads the Velero client configuration file and returns it as a map[string]string. If the // file does not exist, an empty map is returned. func LoadConfig() (map[string]string, error) { fileName := configFileName() + _, err := os.Stat(fileName) + if os.IsNotExist(err) { + // if the file isn't there, try loading from the legacy + // location + // TODO(1.0): remove this line and uncomment the code + // just below. 
+ return LoadLegacyConfig() + + // If the file isn't there, just return an empty map + // return map[string]string{}, nil + } + if err != nil { + // For any other Stat() error, return it + return nil, errors.WithStack(err) + } + + configFile, err := os.Open(fileName) + if err != nil { + return nil, errors.WithStack(err) + } + defer configFile.Close() + + var config map[string]string + if err := json.NewDecoder(configFile).Decode(&config); err != nil { + return nil, errors.WithStack(err) + } + + return config, nil +} + +// LoadLegacyConfig loads the Ark client configuration file and returns it as a map[string]string. If the +// file does not exist, an empty map is returned. +// TODO(1.0): remove this function +func LoadLegacyConfig() (map[string]string, error) { + fileName := legacyConfigFileName() + _, err := os.Stat(fileName) if os.IsNotExist(err) { // If the file isn't there, just return an empty map @@ -57,7 +93,7 @@ func LoadConfig() (map[string]string, error) { return config, nil } -// SaveConfig saves the passed in config map to the Ark client configuration file. +// SaveConfig saves the passed in config map to the Velero client configuration file. func SaveConfig(config map[string]string) error { fileName := configFileName() @@ -77,5 +113,10 @@ func SaveConfig(config map[string]string) error { } func configFileName() string { + return filepath.Join(os.Getenv("HOME"), ".config", "velero", "config.json") +} + +// TODO(1.0): remove this function +func legacyConfigFileName() string { return filepath.Join(os.Getenv("HOME"), ".config", "ark", "config.json") } diff --git a/pkg/client/dynamic.go b/pkg/client/dynamic.go index ead119d8be..a5408bb5c3 100644 --- a/pkg/client/dynamic.go +++ b/pkg/client/dynamic.go @@ -81,7 +81,7 @@ type Patcher interface { Patch(name string, data []byte) (*unstructured.Unstructured, error) } -// Dynamic contains client methods that Ark needs for backing up and restoring resources. +// Dynamic contains client methods that Velero needs for backing up and restoring resources. type Dynamic interface { Creator Lister diff --git a/pkg/client/factory.go b/pkg/client/factory.go index 860719b5e9..ec35ce63a9 100644 --- a/pkg/client/factory.go +++ b/pkg/client/factory.go @@ -24,15 +24,15 @@ import ( "github.com/spf13/pflag" "k8s.io/client-go/kubernetes" - "github.com/heptio/ark/pkg/apis/ark/v1" - clientset "github.com/heptio/ark/pkg/generated/clientset/versioned" + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + clientset "github.com/heptio/velero/pkg/generated/clientset/versioned" ) -// Factory knows how to create an ArkClient and Kubernetes client. +// Factory knows how to create a VeleroClient and Kubernetes client. type Factory interface { // BindFlags binds common flags (--kubeconfig, --namespace) to the passed-in FlagSet. BindFlags(flags *pflag.FlagSet) - // Client returns an ArkClient. It uses the following priority to specify the cluster + // Client returns a VeleroClient. It uses the following priority to specify the cluster // configuration: --kubeconfig flag, KUBECONFIG environment variable, in-cluster configuration. Client() (clientset.Interface, error) // KubeClient returns a Kubernetes client. It uses the following priority to specify the cluster @@ -67,7 +67,7 @@ func NewFactory(baseName string) Factory { } f.flags.StringVar(&f.kubeconfig, "kubeconfig", "", "Path to the kubeconfig file to use to talk to the Kubernetes apiserver. 
If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration") - f.flags.StringVarP(&f.namespace, "namespace", "n", f.namespace, "The namespace in which Ark should operate") + f.flags.StringVarP(&f.namespace, "namespace", "n", f.namespace, "The namespace in which Velero should operate") f.flags.StringVar(&f.kubecontext, "kubecontext", "", "The context to use to talk to the Kubernetes apiserver. If unset defaults to whatever your current-context is (kubectl config current-context)") return f @@ -83,11 +83,11 @@ func (f *factory) Client() (clientset.Interface, error) { return nil, err } - arkClient, err := clientset.NewForConfig(clientConfig) + veleroClient, err := clientset.NewForConfig(clientConfig) if err != nil { return nil, errors.WithStack(err) } - return arkClient, nil + return veleroClient, nil } func (f *factory) KubeClient() (kubernetes.Interface, error) { diff --git a/pkg/cloudprovider/aws/block_store.go b/pkg/cloudprovider/aws/block_store.go index d637f9f4cf..1b0e53a308 100644 --- a/pkg/cloudprovider/aws/block_store.go +++ b/pkg/cloudprovider/aws/block_store.go @@ -30,8 +30,8 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" - "github.com/heptio/ark/pkg/cloudprovider" - "github.com/heptio/ark/pkg/util/collections" + "github.com/heptio/velero/pkg/cloudprovider" + "github.com/heptio/velero/pkg/util/collections" ) const regionKey = "region" @@ -206,19 +206,19 @@ func getTagsForCluster(snapshotTags []*ec2.Tag) []*ec2.Tag { return result } -func getTags(arkTags map[string]string, volumeTags []*ec2.Tag) []*ec2.Tag { +func getTags(veleroTags map[string]string, volumeTags []*ec2.Tag) []*ec2.Tag { var result []*ec2.Tag - // set Ark-assigned tags - for k, v := range arkTags { + // set Velero-assigned tags + for k, v := range veleroTags { result = append(result, ec2Tag(k, v)) } // copy tags from volume to snapshot for _, tag := range volumeTags { - // we want current Ark-assigned tags to overwrite any older versions + // we want current Velero-assigned tags to overwrite any older versions // of them that may exist due to prior snapshots/restores - if _, found := arkTags[*tag.Key]; found { + if _, found := veleroTags[*tag.Key]; found { continue } diff --git a/pkg/cloudprovider/aws/block_store_test.go b/pkg/cloudprovider/aws/block_store_test.go index fbaafde1fa..42b6850a4e 100644 --- a/pkg/cloudprovider/aws/block_store_test.go +++ b/pkg/cloudprovider/aws/block_store_test.go @@ -26,7 +26,7 @@ import ( "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "github.com/heptio/ark/pkg/util/collections" + "github.com/heptio/velero/pkg/util/collections" ) func TestGetVolumeID(t *testing.T) { @@ -180,31 +180,31 @@ func TestGetTagsForCluster(t *testing.T) { func TestGetTags(t *testing.T) { tests := []struct { name string - arkTags map[string]string + veleroTags map[string]string volumeTags []*ec2.Tag expected []*ec2.Tag }{ { name: "degenerate case (no tags)", - arkTags: nil, + veleroTags: nil, volumeTags: nil, expected: nil, }, { - name: "ark tags only get applied", - arkTags: map[string]string{ - "ark-key1": "ark-val1", - "ark-key2": "ark-val2", + name: "velero tags only get applied", + veleroTags: map[string]string{ + "velero-key1": "velero-val1", + "velero-key2": "velero-val2", }, volumeTags: nil, expected: []*ec2.Tag{ - ec2Tag("ark-key1", "ark-val1"), - ec2Tag("ark-key2", "ark-val2"), + ec2Tag("velero-key1", "velero-val1"), + ec2Tag("velero-key2", "velero-val2"), }, }, { - name: "volume tags only get 
applied", - arkTags: nil, + name: "volume tags only get applied", + veleroTags: nil, volumeTags: []*ec2.Tag{ ec2Tag("aws-key1", "aws-val1"), ec2Tag("aws-key2", "aws-val2"), @@ -215,27 +215,27 @@ func TestGetTags(t *testing.T) { }, }, { - name: "non-overlapping ark and volume tags both get applied", - arkTags: map[string]string{"ark-key": "ark-val"}, + name: "non-overlapping velero and volume tags both get applied", + veleroTags: map[string]string{"velero-key": "velero-val"}, volumeTags: []*ec2.Tag{ec2Tag("aws-key", "aws-val")}, expected: []*ec2.Tag{ - ec2Tag("ark-key", "ark-val"), + ec2Tag("velero-key", "velero-val"), ec2Tag("aws-key", "aws-val"), }, }, { - name: "when tags overlap, ark tags take precedence", - arkTags: map[string]string{ - "ark-key": "ark-val", - "overlapping-key": "ark-val", + name: "when tags overlap, velero tags take precedence", + veleroTags: map[string]string{ + "velero-key": "velero-val", + "overlapping-key": "velero-val", }, volumeTags: []*ec2.Tag{ ec2Tag("aws-key", "aws-val"), ec2Tag("overlapping-key", "aws-val"), }, expected: []*ec2.Tag{ - ec2Tag("ark-key", "ark-val"), - ec2Tag("overlapping-key", "ark-val"), + ec2Tag("velero-key", "velero-val"), + ec2Tag("overlapping-key", "velero-val"), ec2Tag("aws-key", "aws-val"), }, }, @@ -243,7 +243,7 @@ func TestGetTags(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - res := getTags(test.arkTags, test.volumeTags) + res := getTags(test.veleroTags, test.volumeTags) sort.Slice(res, func(i, j int) bool { return *res[i].Key < *res[j].Key diff --git a/pkg/cloudprovider/aws/object_store.go b/pkg/cloudprovider/aws/object_store.go index a657e80f4f..445be734cb 100644 --- a/pkg/cloudprovider/aws/object_store.go +++ b/pkg/cloudprovider/aws/object_store.go @@ -30,7 +30,7 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" - "github.com/heptio/ark/pkg/cloudprovider" + "github.com/heptio/velero/pkg/cloudprovider" ) const ( @@ -235,7 +235,7 @@ func (o *objectStore) ListObjects(bucket, prefix string) ([]string, error) { // ensure that returned objects are in a consistent order so that the deletion logic deletes the objects before // the pseudo-folder prefix object for s3 providers (such as Quobyte) that return the pseudo-folder as an object. 
- // See https://github.com/heptio/ark/pull/999 + // See https://github.com/heptio/velero/pull/999 sort.Sort(sort.Reverse(sort.StringSlice(ret))) return ret, nil diff --git a/pkg/cloudprovider/azure/block_store.go b/pkg/cloudprovider/azure/block_store.go index 112a957b88..4aa47e34fa 100644 --- a/pkg/cloudprovider/azure/block_store.go +++ b/pkg/cloudprovider/azure/block_store.go @@ -33,8 +33,8 @@ import ( "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/runtime" - "github.com/heptio/ark/pkg/cloudprovider" - "github.com/heptio/ark/pkg/util/collections" + "github.com/heptio/velero/pkg/cloudprovider" + "github.com/heptio/velero/pkg/util/collections" ) const ( @@ -215,8 +215,8 @@ func (b *blockStore) CreateSnapshot(volumeID, volumeAZ string, tags map[string]s return getComputeResourceName(b.subscription, b.snapsResourceGroup, snapshotsResource, snapshotName), nil } -func getSnapshotTags(arkTags map[string]string, diskTags *map[string]*string) *map[string]*string { - if diskTags == nil && len(arkTags) == 0 { +func getSnapshotTags(veleroTags map[string]string, diskTags *map[string]*string) *map[string]*string { + if diskTags == nil && len(veleroTags) == 0 { return nil } @@ -229,10 +229,10 @@ func getSnapshotTags(arkTags map[string]string, diskTags *map[string]*string) *m } } - // merge Ark-assigned tags with the disk's tags (note that we want current - // Ark-assigned tags to overwrite any older versions of them that may exist + // merge Velero-assigned tags with the disk's tags (note that we want current + // Velero-assigned tags to overwrite any older versions of them that may exist // due to prior snapshots/restores) - for k, v := range arkTags { + for k, v := range veleroTags { // Azure does not allow slashes in tag keys, so replace // with dash (inline with what Kubernetes does) key := strings.Replace(k, "/", "-", -1) @@ -291,7 +291,7 @@ func (b *blockStore) parseSnapshotName(name string) (*snapshotIdentifier, error) case !strings.Contains(name, "/"): return &snapshotIdentifier{ subscription: b.subscription, - // use the disksResourceGroup here because Ark only + // use the disksResourceGroup here because Velero only // supported storing snapshots in that resource group // when the legacy snapshot format was used. 
resourceGroup: b.disksResourceGroup, diff --git a/pkg/cloudprovider/azure/block_store_test.go b/pkg/cloudprovider/azure/block_store_test.go index 6e51be3a2b..e1c986d49c 100644 --- a/pkg/cloudprovider/azure/block_store_test.go +++ b/pkg/cloudprovider/azure/block_store_test.go @@ -23,7 +23,7 @@ import ( "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "github.com/heptio/ark/pkg/util/collections" + "github.com/heptio/velero/pkg/util/collections" ) func TestGetVolumeID(t *testing.T) { @@ -136,44 +136,44 @@ func TestGetComputeResourceName(t *testing.T) { func TestGetSnapshotTags(t *testing.T) { tests := []struct { - name string - arkTags map[string]string - diskTags *map[string]*string - expected *map[string]*string + name string + veleroTags map[string]string + diskTags *map[string]*string + expected *map[string]*string }{ { - name: "degenerate case (no tags)", - arkTags: nil, - diskTags: nil, - expected: nil, + name: "degenerate case (no tags)", + veleroTags: nil, + diskTags: nil, + expected: nil, }, { - name: "ark tags only get applied", - arkTags: map[string]string{ - "ark-key1": "ark-val1", - "ark-key2": "ark-val2", + name: "velero tags only get applied", + veleroTags: map[string]string{ + "velero-key1": "velero-val1", + "velero-key2": "velero-val2", }, diskTags: nil, expected: &map[string]*string{ - "ark-key1": stringPtr("ark-val1"), - "ark-key2": stringPtr("ark-val2"), + "velero-key1": stringPtr("velero-val1"), + "velero-key2": stringPtr("velero-val2"), }, }, { - name: "slashes in ark tag keys get replaces with dashes", - arkTags: map[string]string{ - "ark/key1": "ark-val1", - "ark/key/2": "ark-val2", + name: "slashes in velero tag keys get replaces with dashes", + veleroTags: map[string]string{ + "velero/key1": "velero-val1", + "velero/key/2": "velero-val2", }, diskTags: nil, expected: &map[string]*string{ - "ark-key1": stringPtr("ark-val1"), - "ark-key-2": stringPtr("ark-val2"), + "velero-key1": stringPtr("velero-val1"), + "velero-key-2": stringPtr("velero-val2"), }, }, { - name: "volume tags only get applied", - arkTags: nil, + name: "volume tags only get applied", + veleroTags: nil, diskTags: &map[string]*string{ "azure-key1": stringPtr("azure-val1"), "azure-key2": stringPtr("azure-val2"), @@ -184,35 +184,35 @@ func TestGetSnapshotTags(t *testing.T) { }, }, { - name: "non-overlapping ark and volume tags both get applied", - arkTags: map[string]string{"ark-key": "ark-val"}, - diskTags: &map[string]*string{"azure-key": stringPtr("azure-val")}, + name: "non-overlapping velero and volume tags both get applied", + veleroTags: map[string]string{"velero-key": "velero-val"}, + diskTags: &map[string]*string{"azure-key": stringPtr("azure-val")}, expected: &map[string]*string{ - "ark-key": stringPtr("ark-val"), - "azure-key": stringPtr("azure-val"), + "velero-key": stringPtr("velero-val"), + "azure-key": stringPtr("azure-val"), }, }, { - name: "when tags overlap, ark tags take precedence", - arkTags: map[string]string{ - "ark-key": "ark-val", - "overlapping-key": "ark-val", + name: "when tags overlap, velero tags take precedence", + veleroTags: map[string]string{ + "velero-key": "velero-val", + "overlapping-key": "velero-val", }, diskTags: &map[string]*string{ "azure-key": stringPtr("azure-val"), "overlapping-key": stringPtr("azure-val"), }, expected: &map[string]*string{ - "ark-key": stringPtr("ark-val"), + "velero-key": stringPtr("velero-val"), "azure-key": stringPtr("azure-val"), - "overlapping-key": stringPtr("ark-val"), + "overlapping-key": 
stringPtr("velero-val"), }, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - res := getSnapshotTags(test.arkTags, test.diskTags) + res := getSnapshotTags(test.veleroTags, test.diskTags) if test.expected == nil { assert.Nil(t, res) diff --git a/pkg/cloudprovider/azure/object_store.go b/pkg/cloudprovider/azure/object_store.go index 6fe4262e0a..2b3f2e5bb3 100644 --- a/pkg/cloudprovider/azure/object_store.go +++ b/pkg/cloudprovider/azure/object_store.go @@ -29,7 +29,7 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" - "github.com/heptio/ark/pkg/cloudprovider" + "github.com/heptio/velero/pkg/cloudprovider" ) const ( diff --git a/pkg/cloudprovider/block_store.go b/pkg/cloudprovider/block_store.go index 60a2ffdc25..52ccb4c2d0 100644 --- a/pkg/cloudprovider/block_store.go +++ b/pkg/cloudprovider/block_store.go @@ -21,7 +21,7 @@ import ( ) // BlockStore exposes basic block-storage operations required -// by Ark. +// by Velero. type BlockStore interface { // Init prepares the BlockStore for usage using the provided map of // configuration key-value pairs. It returns an error if the BlockStore diff --git a/pkg/cloudprovider/gcp/block_store.go b/pkg/cloudprovider/gcp/block_store.go index 6b738d7ec4..4fa9243c1b 100644 --- a/pkg/cloudprovider/gcp/block_store.go +++ b/pkg/cloudprovider/gcp/block_store.go @@ -32,8 +32,8 @@ import ( "google.golang.org/api/googleapi" "k8s.io/apimachinery/pkg/runtime" - "github.com/heptio/ark/pkg/cloudprovider" - "github.com/heptio/ark/pkg/util/collections" + "github.com/heptio/velero/pkg/cloudprovider" + "github.com/heptio/velero/pkg/util/collections" ) const projectKey = "project" @@ -133,7 +133,7 @@ func (b *blockStore) CreateVolumeFromSnapshot(snapshotID, volumeType, volumeAZ s // tags. // // use the snapshot's description (which contains tags from the snapshotted disk - // plus Ark-specific tags) to set the new disk's description. + // plus Velero-specific tags) to set the new disk's description. disk := &compute.Disk{ Name: "restore-" + uuid.NewV4().String(), SourceSnapshot: res.SelfLink, @@ -243,7 +243,7 @@ func (b *blockStore) createRegionSnapshot(snapshotName, volumeID, volumeRegion s return gceSnap.Name, nil } -func getSnapshotTags(arkTags map[string]string, diskDescription string, log logrus.FieldLogger) string { +func getSnapshotTags(veleroTags map[string]string, diskDescription string, log logrus.FieldLogger) string { // Kubernetes uses the description field of GCP disks to store a JSON doc containing // tags. // @@ -251,15 +251,15 @@ func getSnapshotTags(arkTags map[string]string, diskDescription string, log logr // to set the snapshot's description. var snapshotTags map[string]string if err := json.Unmarshal([]byte(diskDescription), &snapshotTags); err != nil { - // error decoding the disk's description, so just use the Ark-assigned tags + // error decoding the disk's description, so just use the Velero-assigned tags log.WithError(err). 
- Error("unable to decode disk's description as JSON, so only applying Ark-assigned tags to snapshot") - snapshotTags = arkTags + Error("unable to decode disk's description as JSON, so only applying Velero-assigned tags to snapshot") + snapshotTags = veleroTags } else { - // merge Ark-assigned tags with the disk's tags (note that we want current - // Ark-assigned tags to overwrite any older versions of them that may exist + // merge Velero-assigned tags with the disk's tags (note that we want current + // Velero-assigned tags to overwrite any older versions of them that may exist // due to prior snapshots/restores) - for k, v := range arkTags { + for k, v := range veleroTags { snapshotTags[k] = v } } diff --git a/pkg/cloudprovider/gcp/block_store_test.go b/pkg/cloudprovider/gcp/block_store_test.go index dd6f3cd154..c72e7acc8b 100644 --- a/pkg/cloudprovider/gcp/block_store_test.go +++ b/pkg/cloudprovider/gcp/block_store_test.go @@ -25,8 +25,8 @@ import ( "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "github.com/heptio/ark/pkg/util/collections" - arktest "github.com/heptio/ark/pkg/util/test" + "github.com/heptio/velero/pkg/util/collections" + velerotest "github.com/heptio/velero/pkg/util/test" ) func TestGetVolumeID(t *testing.T) { @@ -83,57 +83,57 @@ func TestSetVolumeID(t *testing.T) { func TestGetSnapshotTags(t *testing.T) { tests := []struct { name string - arkTags map[string]string + veleroTags map[string]string diskDescription string expected string }{ { name: "degenerate case (no tags)", - arkTags: nil, + veleroTags: nil, diskDescription: "", expected: "", }, { - name: "ark tags only get applied", - arkTags: map[string]string{ - "ark-key1": "ark-val1", - "ark-key2": "ark-val2", + name: "velero tags only get applied", + veleroTags: map[string]string{ + "velero-key1": "velero-val1", + "velero-key2": "velero-val2", }, diskDescription: "", - expected: `{"ark-key1":"ark-val1","ark-key2":"ark-val2"}`, + expected: `{"velero-key1":"velero-val1","velero-key2":"velero-val2"}`, }, { name: "disk tags only get applied", - arkTags: nil, + veleroTags: nil, diskDescription: `{"aws-key1":"aws-val1","aws-key2":"aws-val2"}`, expected: `{"aws-key1":"aws-val1","aws-key2":"aws-val2"}`, }, { - name: "non-overlapping ark and disk tags both get applied", - arkTags: map[string]string{"ark-key": "ark-val"}, + name: "non-overlapping velero and disk tags both get applied", + veleroTags: map[string]string{"velero-key": "velero-val"}, diskDescription: `{"aws-key":"aws-val"}`, - expected: `{"ark-key":"ark-val","aws-key":"aws-val"}`, + expected: `{"velero-key":"velero-val","aws-key":"aws-val"}`, }, { - name: "when tags overlap, ark tags take precedence", - arkTags: map[string]string{ - "ark-key": "ark-val", - "overlapping-key": "ark-val", + name: "when tags overlap, velero tags take precedence", + veleroTags: map[string]string{ + "velero-key": "velero-val", + "overlapping-key": "velero-val", }, diskDescription: `{"aws-key":"aws-val","overlapping-key":"aws-val"}`, - expected: `{"ark-key":"ark-val","aws-key":"aws-val","overlapping-key":"ark-val"}`, + expected: `{"velero-key":"velero-val","aws-key":"aws-val","overlapping-key":"velero-val"}`, }, { - name: "if disk description is invalid JSON, apply just ark tags", - arkTags: map[string]string{"ark-key": "ark-val"}, + name: "if disk description is invalid JSON, apply just velero tags", + veleroTags: map[string]string{"velero-key": "velero-val"}, diskDescription: `THIS IS INVALID JSON`, - expected: `{"ark-key":"ark-val"}`, + 
expected: `{"velero-key":"velero-val"}`, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - res := getSnapshotTags(test.arkTags, test.diskDescription, arktest.NewLogger()) + res := getSnapshotTags(test.veleroTags, test.diskDescription, velerotest.NewLogger()) if test.expected == "" { assert.Equal(t, test.expected, res) diff --git a/pkg/cloudprovider/gcp/object_store.go b/pkg/cloudprovider/gcp/object_store.go index 6033e19143..f9dd7f428f 100644 --- a/pkg/cloudprovider/gcp/object_store.go +++ b/pkg/cloudprovider/gcp/object_store.go @@ -30,7 +30,7 @@ import ( "google.golang.org/api/iterator" "google.golang.org/api/option" - "github.com/heptio/ark/pkg/cloudprovider" + "github.com/heptio/velero/pkg/cloudprovider" ) const credentialsEnvVar = "GOOGLE_APPLICATION_CREDENTIALS" diff --git a/pkg/cloudprovider/gcp/object_store_test.go b/pkg/cloudprovider/gcp/object_store_test.go index 203695a173..1367921ef9 100644 --- a/pkg/cloudprovider/gcp/object_store_test.go +++ b/pkg/cloudprovider/gcp/object_store_test.go @@ -23,7 +23,7 @@ import ( "github.com/stretchr/testify/assert" - arktest "github.com/heptio/ark/pkg/util/test" + velerotest "github.com/heptio/velero/pkg/util/test" ) type mockWriteCloser struct { @@ -89,7 +89,7 @@ func TestPutObject(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { wc := newMockWriteCloser(test.writeErr, test.closeErr) - o := NewObjectStore(arktest.NewLogger()).(*objectStore) + o := NewObjectStore(velerotest.NewLogger()).(*objectStore) o.bucketWriter = newFakeWriter(wc) err := o.PutObject("bucket", "key", strings.NewReader("contents")) diff --git a/pkg/cloudprovider/object_store.go b/pkg/cloudprovider/object_store.go index f849ca7a02..b5edb1068a 100644 --- a/pkg/cloudprovider/object_store.go +++ b/pkg/cloudprovider/object_store.go @@ -22,7 +22,7 @@ import ( ) // ObjectStore exposes basic object-storage operations required -// by Ark. +// by Velero. type ObjectStore interface { // Init prepares the ObjectStore for usage using the provided map of // configuration key-value pairs. 
It returns an error if the ObjectStore diff --git a/pkg/cmd/cli/backup/backup.go b/pkg/cmd/cli/backup/backup.go index a11d29ccc3..3c723050b6 100644 --- a/pkg/cmd/cli/backup/backup.go +++ b/pkg/cmd/cli/backup/backup.go @@ -19,7 +19,7 @@ package backup import ( "github.com/spf13/cobra" - "github.com/heptio/ark/pkg/client" + "github.com/heptio/velero/pkg/client" ) func NewCommand(f client.Factory) *cobra.Command { diff --git a/pkg/cmd/cli/backup/create.go b/pkg/cmd/cli/backup/create.go index 1b9597be9a..ab441b9e41 100644 --- a/pkg/cmd/cli/backup/create.go +++ b/pkg/cmd/cli/backup/create.go @@ -25,13 +25,13 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/cache" - api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/cmd" - "github.com/heptio/ark/pkg/cmd/util/flag" - "github.com/heptio/ark/pkg/cmd/util/output" - arkclient "github.com/heptio/ark/pkg/generated/clientset/versioned" - "github.com/heptio/ark/pkg/generated/informers/externalversions/ark/v1" + api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/cmd" + "github.com/heptio/velero/pkg/cmd/util/flag" + "github.com/heptio/velero/pkg/cmd/util/output" + veleroclient "github.com/heptio/velero/pkg/generated/clientset/versioned" + v1 "github.com/heptio/velero/pkg/generated/informers/externalversions/velero/v1" ) func NewCreateCommand(f client.Factory, use string) *cobra.Command { @@ -71,7 +71,7 @@ type CreateOptions struct { StorageLocation string SnapshotLocations []string - client arkclient.Interface + client veleroclient.Interface } func NewCreateOptions() *CreateOptions { @@ -115,13 +115,13 @@ func (o *CreateOptions) Validate(c *cobra.Command, args []string, f client.Facto } if o.StorageLocation != "" { - if _, err := o.client.ArkV1().BackupStorageLocations(f.Namespace()).Get(o.StorageLocation, metav1.GetOptions{}); err != nil { + if _, err := o.client.VeleroV1().BackupStorageLocations(f.Namespace()).Get(o.StorageLocation, metav1.GetOptions{}); err != nil { return err } } for _, loc := range o.SnapshotLocations { - if _, err := o.client.ArkV1().VolumeSnapshotLocations(f.Namespace()).Get(loc, metav1.GetOptions{}); err != nil { + if _, err := o.client.VeleroV1().VolumeSnapshotLocations(f.Namespace()).Get(loc, metav1.GetOptions{}); err != nil { return err } } @@ -204,7 +204,7 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error { go backupInformer.Run(stop) } - _, err := o.client.ArkV1().Backups(backup.Namespace).Create(backup) + _, err := o.client.VeleroV1().Backups(backup.Namespace).Create(backup) if err != nil { return err } @@ -226,7 +226,7 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error { } if backup.Status.Phase != api.BackupPhaseNew && backup.Status.Phase != api.BackupPhaseInProgress { - fmt.Printf("\nBackup completed with status: %s. You may check for more information using the commands `ark backup describe %s` and `ark backup logs %s`.\n", backup.Status.Phase, backup.Name, backup.Name) + fmt.Printf("\nBackup completed with status: %s. 
You may check for more information using the commands `velero backup describe %s` and `velero backup logs %s`.\n", backup.Status.Phase, backup.Name, backup.Name) return nil } } @@ -235,7 +235,7 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error { // Not waiting - fmt.Printf("Run `ark backup describe %s` or `ark backup logs %s` for more details.\n", backup.Name, backup.Name) + fmt.Printf("Run `velero backup describe %s` or `velero backup logs %s` for more details.\n", backup.Name, backup.Name) return nil } diff --git a/pkg/cmd/cli/backup/delete.go b/pkg/cmd/cli/backup/delete.go index 1ca2de406b..5a044dc771 100644 --- a/pkg/cmd/cli/backup/delete.go +++ b/pkg/cmd/cli/backup/delete.go @@ -25,11 +25,11 @@ import ( "k8s.io/apimachinery/pkg/labels" kubeerrs "k8s.io/apimachinery/pkg/util/errors" - arkv1api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/backup" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/cmd" - "github.com/heptio/ark/pkg/cmd/cli" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/backup" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/cmd" + "github.com/heptio/velero/pkg/cmd/cli" ) // NewDeleteCommand creates a new command that deletes a backup. @@ -40,19 +40,19 @@ func NewDeleteCommand(f client.Factory, use string) *cobra.Command { Use: fmt.Sprintf("%s [NAMES]", use), Short: "Delete backups", Example: ` # delete a backup named "backup-1" - ark backup delete backup-1 + velero backup delete backup-1 # delete a backup named "backup-1" without prompting for confirmation - ark backup delete backup-1 --confirm + velero backup delete backup-1 --confirm # delete backups named "backup-1" and "backup-2" - ark backup delete backup-1 backup-2 + velero backup delete backup-1 backup-2 # delete all backups triggered by schedule "schedule-1" - ark backup delete --selector ark-schedule=schedule-1 + velero backup delete --selector velero.io/schedule-name=schedule-1 # delete all backups - ark backup delete --all + velero backup delete --all `, Run: func(c *cobra.Command, args []string) { cmd.CheckError(o.Complete(f, args)) @@ -74,7 +74,7 @@ func Run(o *cli.DeleteOptions) error { } var ( - backups []*arkv1api.Backup + backups []*velerov1api.Backup errs []error ) @@ -82,7 +82,7 @@ func Run(o *cli.DeleteOptions) error { switch { case len(o.Names) > 0: for _, name := range o.Names { - backup, err := o.Client.ArkV1().Backups(o.Namespace).Get(name, metav1.GetOptions{}) + backup, err := o.Client.VeleroV1().Backups(o.Namespace).Get(name, metav1.GetOptions{}) if err != nil { errs = append(errs, errors.WithStack(err)) continue @@ -96,7 +96,7 @@ func Run(o *cli.DeleteOptions) error { selector = o.Selector.String() } - res, err := o.Client.ArkV1().Backups(o.Namespace).List(metav1.ListOptions{LabelSelector: selector}) + res, err := o.Client.VeleroV1().Backups(o.Namespace).List(metav1.ListOptions{LabelSelector: selector}) if err != nil { return errors.WithStack(err) } @@ -114,7 +114,7 @@ func Run(o *cli.DeleteOptions) error { for _, b := range backups { deleteRequest := backup.NewDeleteBackupRequest(b.Name, string(b.UID)) - if _, err := o.Client.ArkV1().DeleteBackupRequests(o.Namespace).Create(deleteRequest); err != nil { + if _, err := o.Client.VeleroV1().DeleteBackupRequests(o.Namespace).Create(deleteRequest); err != nil { errs = append(errs, err) continue } diff --git a/pkg/cmd/cli/backup/describe.go b/pkg/cmd/cli/backup/describe.go index c1e3ea56ba..00aa3d98b5 100644 --- 
a/pkg/cmd/cli/backup/describe.go +++ b/pkg/cmd/cli/backup/describe.go @@ -23,12 +23,12 @@ import ( "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/heptio/ark/pkg/apis/ark/v1" - pkgbackup "github.com/heptio/ark/pkg/backup" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/cmd" - "github.com/heptio/ark/pkg/cmd/util/output" - "github.com/heptio/ark/pkg/restic" + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + pkgbackup "github.com/heptio/velero/pkg/backup" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/cmd" + "github.com/heptio/velero/pkg/cmd/util/output" + "github.com/heptio/velero/pkg/restic" ) func NewDescribeCommand(f client.Factory, use string) *cobra.Command { @@ -41,37 +41,37 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command { Use: use + " [NAME1] [NAME2] [NAME...]", Short: "Describe backups", Run: func(c *cobra.Command, args []string) { - arkClient, err := f.Client() + veleroClient, err := f.Client() cmd.CheckError(err) var backups *v1.BackupList if len(args) > 0 { backups = new(v1.BackupList) for _, name := range args { - backup, err := arkClient.Ark().Backups(f.Namespace()).Get(name, metav1.GetOptions{}) + backup, err := veleroClient.VeleroV1().Backups(f.Namespace()).Get(name, metav1.GetOptions{}) cmd.CheckError(err) backups.Items = append(backups.Items, *backup) } } else { - backups, err = arkClient.ArkV1().Backups(f.Namespace()).List(listOptions) + backups, err = veleroClient.VeleroV1().Backups(f.Namespace()).List(listOptions) cmd.CheckError(err) } first := true for _, backup := range backups.Items { deleteRequestListOptions := pkgbackup.NewDeleteBackupRequestListOptions(backup.Name, string(backup.UID)) - deleteRequestList, err := arkClient.ArkV1().DeleteBackupRequests(f.Namespace()).List(deleteRequestListOptions) + deleteRequestList, err := veleroClient.VeleroV1().DeleteBackupRequests(f.Namespace()).List(deleteRequestListOptions) if err != nil { fmt.Fprintf(os.Stderr, "error getting DeleteBackupRequests for backup %s: %v\n", backup.Name, err) } - opts := restic.NewPodVolumeBackupListOptions(backup.Name, string(backup.UID)) - podVolumeBackupList, err := arkClient.ArkV1().PodVolumeBackups(f.Namespace()).List(opts) + opts := restic.NewPodVolumeBackupListOptions(backup.Name) + podVolumeBackupList, err := veleroClient.VeleroV1().PodVolumeBackups(f.Namespace()).List(opts) if err != nil { fmt.Fprintf(os.Stderr, "error getting PodVolumeBackups for backup %s: %v\n", backup.Name, err) } - s := output.DescribeBackup(&backup, deleteRequestList.Items, podVolumeBackupList.Items, details, arkClient) + s := output.DescribeBackup(&backup, deleteRequestList.Items, podVolumeBackupList.Items, details, veleroClient) if first { first = false fmt.Print(s) diff --git a/pkg/cmd/cli/backup/download.go b/pkg/cmd/cli/backup/download.go index 33c9ac0799..7528ff54cf 100644 --- a/pkg/cmd/cli/backup/download.go +++ b/pkg/cmd/cli/backup/download.go @@ -26,10 +26,10 @@ import ( "github.com/spf13/cobra" "github.com/spf13/pflag" - "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/cmd" - "github.com/heptio/ark/pkg/cmd/util/downloadrequest" + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/cmd" + "github.com/heptio/velero/pkg/cmd/util/downloadrequest" ) func NewDownloadCommand(f client.Factory) *cobra.Command { @@ -94,7 +94,7 @@ func (o *DownloadOptions) Complete(args []string) error { } 
func (o *DownloadOptions) Run(c *cobra.Command, f client.Factory) error { - arkClient, err := f.Client() + veleroClient, err := f.Client() cmd.CheckError(err) backupDest, err := os.OpenFile(o.Output, o.writeOptions, 0600) @@ -103,7 +103,7 @@ func (o *DownloadOptions) Run(c *cobra.Command, f client.Factory) error { } defer backupDest.Close() - err = downloadrequest.Stream(arkClient.ArkV1(), f.Namespace(), o.Name, v1.DownloadTargetKindBackupContents, backupDest, o.Timeout) + err = downloadrequest.Stream(veleroClient.VeleroV1(), f.Namespace(), o.Name, v1.DownloadTargetKindBackupContents, backupDest, o.Timeout) if err != nil { os.Remove(o.Output) cmd.CheckError(err) diff --git a/pkg/cmd/cli/backup/get.go b/pkg/cmd/cli/backup/get.go index 916d59115a..847738f04b 100644 --- a/pkg/cmd/cli/backup/get.go +++ b/pkg/cmd/cli/backup/get.go @@ -20,10 +20,10 @@ import ( "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/cmd" - "github.com/heptio/ark/pkg/cmd/util/output" + api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/cmd" + "github.com/heptio/velero/pkg/cmd/util/output" ) func NewGetCommand(f client.Factory, use string) *cobra.Command { @@ -36,19 +36,19 @@ func NewGetCommand(f client.Factory, use string) *cobra.Command { err := output.ValidateFlags(c) cmd.CheckError(err) - arkClient, err := f.Client() + veleroClient, err := f.Client() cmd.CheckError(err) var backups *api.BackupList if len(args) > 0 { backups = new(api.BackupList) for _, name := range args { - backup, err := arkClient.Ark().Backups(f.Namespace()).Get(name, metav1.GetOptions{}) + backup, err := veleroClient.VeleroV1().Backups(f.Namespace()).Get(name, metav1.GetOptions{}) cmd.CheckError(err) backups.Items = append(backups.Items, *backup) } } else { - backups, err = arkClient.ArkV1().Backups(f.Namespace()).List(listOptions) + backups, err = veleroClient.VeleroV1().Backups(f.Namespace()).List(listOptions) cmd.CheckError(err) } diff --git a/pkg/cmd/cli/backup/logs.go b/pkg/cmd/cli/backup/logs.go index fd43427b0e..4abd9d9bc8 100644 --- a/pkg/cmd/cli/backup/logs.go +++ b/pkg/cmd/cli/backup/logs.go @@ -22,10 +22,10 @@ import ( "github.com/spf13/cobra" - "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/cmd" - "github.com/heptio/ark/pkg/cmd/util/downloadrequest" + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/cmd" + "github.com/heptio/velero/pkg/cmd/util/downloadrequest" ) func NewLogsCommand(f client.Factory) *cobra.Command { @@ -36,10 +36,10 @@ func NewLogsCommand(f client.Factory) *cobra.Command { Short: "Get backup logs", Args: cobra.ExactArgs(1), Run: func(c *cobra.Command, args []string) { - arkClient, err := f.Client() + veleroClient, err := f.Client() cmd.CheckError(err) - err = downloadrequest.Stream(arkClient.ArkV1(), f.Namespace(), args[0], v1.DownloadTargetKindBackupLog, os.Stdout, timeout) + err = downloadrequest.Stream(veleroClient.VeleroV1(), f.Namespace(), args[0], v1.DownloadTargetKindBackupLog, os.Stdout, timeout) cmd.CheckError(err) }, } diff --git a/pkg/cmd/cli/backuplocation/backup_location.go b/pkg/cmd/cli/backuplocation/backup_location.go index 8c0fd8b91e..4a00d7b9be 100644 --- a/pkg/cmd/cli/backuplocation/backup_location.go +++ b/pkg/cmd/cli/backuplocation/backup_location.go @@ -19,7 +19,7 @@ 
package backuplocation import ( "github.com/spf13/cobra" - "github.com/heptio/ark/pkg/client" + "github.com/heptio/velero/pkg/client" ) func NewCommand(f client.Factory) *cobra.Command { diff --git a/pkg/cmd/cli/backuplocation/create.go b/pkg/cmd/cli/backuplocation/create.go index 9837152200..a86d7f69da 100644 --- a/pkg/cmd/cli/backuplocation/create.go +++ b/pkg/cmd/cli/backuplocation/create.go @@ -24,11 +24,11 @@ import ( "github.com/spf13/pflag" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/cmd" - "github.com/heptio/ark/pkg/cmd/util/flag" - "github.com/heptio/ark/pkg/cmd/util/output" + api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/cmd" + "github.com/heptio/velero/pkg/cmd/util/flag" + "github.com/heptio/velero/pkg/cmd/util/output" ) func NewCreateCommand(f client.Factory, use string) *cobra.Command { @@ -70,7 +70,7 @@ func NewCreateOptions() *CreateOptions { func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) { flags.StringVar(&o.Provider, "provider", o.Provider, "name of the backup storage provider (e.g. aws, azure, gcp)") flags.StringVar(&o.Bucket, "bucket", o.Bucket, "name of the object storage bucket where backups should be stored") - flags.StringVar(&o.Prefix, "prefix", o.Prefix, "prefix under which all Ark data should be stored within the bucket. Optional.") + flags.StringVar(&o.Prefix, "prefix", o.Prefix, "prefix under which all Velero data should be stored within the bucket. Optional.") flags.Var(&o.Config, "config", "configuration key-value pairs") flags.Var(&o.Labels, "labels", "labels to apply to the backup storage location") } @@ -124,7 +124,7 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error { return err } - if _, err := client.ArkV1().BackupStorageLocations(backupStorageLocation.Namespace).Create(backupStorageLocation); err != nil { + if _, err := client.VeleroV1().BackupStorageLocations(backupStorageLocation.Namespace).Create(backupStorageLocation); err != nil { return errors.WithStack(err) } diff --git a/pkg/cmd/cli/backuplocation/get.go b/pkg/cmd/cli/backuplocation/get.go index 9cef3d3e6a..7db323429c 100644 --- a/pkg/cmd/cli/backuplocation/get.go +++ b/pkg/cmd/cli/backuplocation/get.go @@ -20,10 +20,10 @@ import ( "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/cmd" - "github.com/heptio/ark/pkg/cmd/util/output" + api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/cmd" + "github.com/heptio/velero/pkg/cmd/util/output" ) func NewGetCommand(f client.Factory, use string) *cobra.Command { @@ -36,19 +36,19 @@ func NewGetCommand(f client.Factory, use string) *cobra.Command { err := output.ValidateFlags(c) cmd.CheckError(err) - arkClient, err := f.Client() + veleroClient, err := f.Client() cmd.CheckError(err) var locations *api.BackupStorageLocationList if len(args) > 0 { locations = new(api.BackupStorageLocationList) for _, name := range args { - location, err := arkClient.Ark().BackupStorageLocations(f.Namespace()).Get(name, metav1.GetOptions{}) + location, err := veleroClient.VeleroV1().BackupStorageLocations(f.Namespace()).Get(name, metav1.GetOptions{}) cmd.CheckError(err) locations.Items = append(locations.Items, *location) } } else { - locations, err = 
arkClient.ArkV1().BackupStorageLocations(f.Namespace()).List(listOptions) + locations, err = veleroClient.VeleroV1().BackupStorageLocations(f.Namespace()).List(listOptions) cmd.CheckError(err) } diff --git a/pkg/cmd/cli/bug/bug.go b/pkg/cmd/cli/bug/bug.go index 33f2c5bc33..a4393f7cea 100644 --- a/pkg/cmd/cli/bug/bug.go +++ b/pkg/cmd/cli/bug/bug.go @@ -30,18 +30,18 @@ import ( "github.com/spf13/cobra" - "github.com/heptio/ark/pkg/buildinfo" - "github.com/heptio/ark/pkg/cmd" + "github.com/heptio/velero/pkg/buildinfo" + "github.com/heptio/velero/pkg/cmd" ) const ( // kubectlTimeout is how long we wait in seconds for `kubectl version` // before killing the process kubectlTimeout = 5 * time.Second - issueURL = "https://github.com/heptio/ark/issues/new" + issueURL = "https://github.com/heptio/velero/issues/new" // IssueTemplate is used to generate .github/ISSUE_TEMPLATE/bug_report.md // as well as the initial text that's place in a new Github issue as - // the result of running `ark bug`. + // the result of running `velero bug`. IssueTemplate = `--- name: Bug report about: Tell us about a problem you are experiencing @@ -58,11 +58,11 @@ about: Tell us about a problem you are experiencing **The output of the following commands will help us better understand what's going on**: (Pasting long output into a [GitHub gist](https://gist.github.com) or other pastebin is fine.) -* ` + "`kubectl logs deployment/ark -n heptio-ark`" + ` -* ` + "`ark backup describe ` or `kubectl get backup/ -n heptio-ark -o yaml`" + ` -* ` + "`ark backup logs `" + ` -* ` + "`ark restore describe ` or `kubectl get restore/ -n heptio-ark -o yaml`" + ` -* ` + "`ark restore logs `" + ` +* ` + "`kubectl logs deployment/velero -n velero`" + ` +* ` + "`velero backup describe ` or `kubectl get backup/ -n velero -o yaml`" + ` +* ` + "`velero backup logs `" + ` +* ` + "`velero restore describe ` or `kubectl get restore/ -n velero -o yaml`" + ` +* ` + "`velero restore logs `" + ` **Anything else you would like to add:** @@ -71,7 +71,7 @@ about: Tell us about a problem you are experiencing **Environment:** -- Ark version (use ` + "`ark version`" + `):{{.ArkVersion}} {{.GitCommit}} +- Velero version (use ` + "`velero version`" + `):{{.VeleroVersion}} {{.GitCommit}} - Kubernetes version (use ` + "`kubectl version`" + `): {{- if .KubectlVersion}} ` + "```" + ` @@ -89,8 +89,8 @@ about: Tell us about a problem you are experiencing func NewCommand() *cobra.Command { c := &cobra.Command{ Use: "bug", - Short: "Report an Ark bug", - Long: "Open a browser window to report an Ark bug", + Short: "Report a Velero bug", + Long: "Open a browser window to report a Velero bug", Run: func(c *cobra.Command, args []string) { kubectlVersion, err := getKubectlVersion() if err != nil { @@ -106,8 +106,8 @@ func NewCommand() *cobra.Command { return c } -type ArkBugInfo struct { - ArkVersion string +type VeleroBugInfo struct { + VeleroVersion string GitCommit string RuntimeOS string RuntimeArch string @@ -158,9 +158,9 @@ func getKubectlVersion() (string, error) { return kubectlVersion, nil } -func newBugInfo(kubectlVersion string) *ArkBugInfo { - return &ArkBugInfo{ - ArkVersion: buildinfo.Version, +func newBugInfo(kubectlVersion string) *VeleroBugInfo { + return &VeleroBugInfo{ + VeleroVersion: buildinfo.Version, GitCommit: buildinfo.FormattedGitSHA(), RuntimeOS: runtime.GOOS, RuntimeArch: runtime.GOARCH, @@ -168,8 +168,8 @@ func newBugInfo(kubectlVersion string) *ArkBugInfo { } // renderToString renders IssueTemplate to a string using the -// supplied 
*ArkBugInfo -func renderToString(bugInfo *ArkBugInfo) (string, error) { +// supplied *VeleroBugInfo +func renderToString(bugInfo *VeleroBugInfo) (string, error) { outputTemplate, err := template.New("ghissue").Parse(IssueTemplate) if err != nil { return "", err @@ -193,10 +193,10 @@ func showIssueInBrowser(body string) error { if cmdExistsOnPath("xdg-open") { return exec.Command("xdg-open", url).Start() } - return fmt.Errorf("ark can't open a browser window using the command '%s'", "xdg-open") + return fmt.Errorf("velero can't open a browser window using the command '%s'", "xdg-open") case "windows": return exec.Command("rundll32", "url.dll,FileProtocolHandler", url).Start() default: - return fmt.Errorf("ark can't open a browser window on platform %s", runtime.GOOS) + return fmt.Errorf("velero can't open a browser window on platform %s", runtime.GOOS) } } diff --git a/pkg/cmd/cli/client/client.go b/pkg/cmd/cli/client/client.go index a4fd303b42..ba71677830 100644 --- a/pkg/cmd/cli/client/client.go +++ b/pkg/cmd/cli/client/client.go @@ -19,13 +19,13 @@ package client import ( "github.com/spf13/cobra" - "github.com/heptio/ark/pkg/cmd/cli/client/config" + "github.com/heptio/velero/pkg/cmd/cli/client/config" ) func NewCommand() *cobra.Command { c := &cobra.Command{ Use: "client", - Short: "Ark client related commands", + Short: "Velero client related commands", } c.AddCommand( diff --git a/pkg/cmd/cli/client/config/get.go b/pkg/cmd/cli/client/config/get.go index a46a123f53..3639317101 100644 --- a/pkg/cmd/cli/client/config/get.go +++ b/pkg/cmd/cli/client/config/get.go @@ -22,8 +22,8 @@ import ( "github.com/spf13/cobra" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/cmd" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/cmd" ) func NewGetCommand() *cobra.Command { diff --git a/pkg/cmd/cli/client/config/set.go b/pkg/cmd/cli/client/config/set.go index 85c6536ebd..eb42843e9a 100644 --- a/pkg/cmd/cli/client/config/set.go +++ b/pkg/cmd/cli/client/config/set.go @@ -23,8 +23,8 @@ import ( "github.com/spf13/cobra" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/cmd" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/cmd" ) func NewSetCommand() *cobra.Command { diff --git a/pkg/cmd/cli/completion/completion.go b/pkg/cmd/cli/completion/completion.go index 6120473185..24ffd65e49 100644 --- a/pkg/cmd/cli/completion/completion.go +++ b/pkg/cmd/cli/completion/completion.go @@ -22,7 +22,7 @@ import ( "github.com/spf13/cobra" - kubectlcmd "github.com/heptio/ark/third_party/kubernetes/pkg/kubectl/cmd" + kubectlcmd "github.com/heptio/velero/third_party/kubernetes/pkg/kubectl/cmd" ) func NewCommand() *cobra.Command { @@ -33,11 +33,11 @@ func NewCommand() *cobra.Command { Auto completion supports both bash and zsh. Output is to STDOUT. 
-Load the ark completion code for bash into the current shell - -source <(ark completion bash) +Load the velero completion code for bash into the current shell - +source <(velero completion bash) -Load the ark completion code for zsh into the current shell - -source <(ark completion zsh) +Load the velero completion code for zsh into the current shell - +source <(velero completion zsh) `, Args: cobra.ExactArgs(1), ValidArgs: []string{"bash", "zsh"}, diff --git a/pkg/cmd/cli/create/create.go b/pkg/cmd/cli/create/create.go index 83881dbdff..12afd12a38 100644 --- a/pkg/cmd/cli/create/create.go +++ b/pkg/cmd/cli/create/create.go @@ -19,18 +19,18 @@ package create import ( "github.com/spf13/cobra" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/cmd/cli/backup" - "github.com/heptio/ark/pkg/cmd/cli/backuplocation" - "github.com/heptio/ark/pkg/cmd/cli/restore" - "github.com/heptio/ark/pkg/cmd/cli/schedule" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/cmd/cli/backup" + "github.com/heptio/velero/pkg/cmd/cli/backuplocation" + "github.com/heptio/velero/pkg/cmd/cli/restore" + "github.com/heptio/velero/pkg/cmd/cli/schedule" ) func NewCommand(f client.Factory) *cobra.Command { c := &cobra.Command{ Use: "create", - Short: "Create ark resources", - Long: "Create ark resources", + Short: "Create velero resources", + Long: "Create velero resources", } c.AddCommand( diff --git a/pkg/cmd/cli/delete/delete.go b/pkg/cmd/cli/delete/delete.go index c9616c8871..6e392db6fb 100644 --- a/pkg/cmd/cli/delete/delete.go +++ b/pkg/cmd/cli/delete/delete.go @@ -19,17 +19,17 @@ package delete import ( "github.com/spf13/cobra" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/cmd/cli/backup" - "github.com/heptio/ark/pkg/cmd/cli/restore" - "github.com/heptio/ark/pkg/cmd/cli/schedule" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/cmd/cli/backup" + "github.com/heptio/velero/pkg/cmd/cli/restore" + "github.com/heptio/velero/pkg/cmd/cli/schedule" ) func NewCommand(f client.Factory) *cobra.Command { c := &cobra.Command{ Use: "delete", - Short: "Delete ark resources", - Long: "Delete ark resources", + Short: "Delete velero resources", + Long: "Delete velero resources", } backupCommand := backup.NewDeleteCommand(f, "backup") diff --git a/pkg/cmd/cli/delete_options.go b/pkg/cmd/cli/delete_options.go index d3807c057b..ba28dbf723 100644 --- a/pkg/cmd/cli/delete_options.go +++ b/pkg/cmd/cli/delete_options.go @@ -26,9 +26,9 @@ import ( "github.com/spf13/cobra" "github.com/spf13/pflag" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/cmd/util/flag" - clientset "github.com/heptio/ark/pkg/generated/clientset/versioned" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/cmd/util/flag" + clientset "github.com/heptio/velero/pkg/generated/clientset/versioned" ) // DeleteOptions contains parameters used for deleting a restore. @@ -63,7 +63,7 @@ func (o *DeleteOptions) Complete(f client.Factory, args []string) error { // Validate validates the fields of the DeleteOptions struct. 
func (o *DeleteOptions) Validate(c *cobra.Command, f client.Factory, args []string) error { if o.Client == nil { - return errors.New("Ark client is not set; unable to proceed") + return errors.New("Velero client is not set; unable to proceed") } var ( hasNames = len(o.Names) > 0 diff --git a/pkg/cmd/cli/describe/describe.go b/pkg/cmd/cli/describe/describe.go index 8290fdfd17..000e585fe5 100644 --- a/pkg/cmd/cli/describe/describe.go +++ b/pkg/cmd/cli/describe/describe.go @@ -19,17 +19,17 @@ package describe import ( "github.com/spf13/cobra" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/cmd/cli/backup" - "github.com/heptio/ark/pkg/cmd/cli/restore" - "github.com/heptio/ark/pkg/cmd/cli/schedule" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/cmd/cli/backup" + "github.com/heptio/velero/pkg/cmd/cli/restore" + "github.com/heptio/velero/pkg/cmd/cli/schedule" ) func NewCommand(f client.Factory) *cobra.Command { c := &cobra.Command{ Use: "describe", - Short: "Describe ark resources", - Long: "Describe ark resources", + Short: "Describe velero resources", + Long: "Describe velero resources", } backupCommand := backup.NewDescribeCommand(f, "backups") diff --git a/pkg/cmd/cli/get/get.go b/pkg/cmd/cli/get/get.go index 1bfc7c0f5e..c612aae8b4 100644 --- a/pkg/cmd/cli/get/get.go +++ b/pkg/cmd/cli/get/get.go @@ -19,19 +19,19 @@ package get import ( "github.com/spf13/cobra" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/cmd/cli/backup" - "github.com/heptio/ark/pkg/cmd/cli/backuplocation" - "github.com/heptio/ark/pkg/cmd/cli/restore" - "github.com/heptio/ark/pkg/cmd/cli/schedule" - "github.com/heptio/ark/pkg/cmd/cli/snapshotlocation" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/cmd/cli/backup" + "github.com/heptio/velero/pkg/cmd/cli/backuplocation" + "github.com/heptio/velero/pkg/cmd/cli/restore" + "github.com/heptio/velero/pkg/cmd/cli/schedule" + "github.com/heptio/velero/pkg/cmd/cli/snapshotlocation" ) func NewCommand(f client.Factory) *cobra.Command { c := &cobra.Command{ Use: "get", - Short: "Get ark resources", - Long: "Get ark resources", + Short: "Get velero resources", + Long: "Get velero resources", } backupCommand := backup.NewGetCommand(f, "backups") diff --git a/pkg/cmd/cli/plugin/add.go b/pkg/cmd/cli/plugin/add.go index 25c2aed8de..847937e70c 100644 --- a/pkg/cmd/cli/plugin/add.go +++ b/pkg/cmd/cli/plugin/add.go @@ -28,15 +28,15 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/cmd" - "github.com/heptio/ark/pkg/cmd/util/flag" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/cmd" + "github.com/heptio/velero/pkg/cmd/util/flag" ) const ( pluginsVolumeName = "plugins" - arkDeployment = "ark" - arkContainer = "ark" + veleroDeployment = "velero" + veleroContainer = "velero" ) func NewAddCommand(f client.Factory) *cobra.Command { @@ -55,17 +55,17 @@ func NewAddCommand(f client.Factory) *cobra.Command { cmd.CheckError(err) } - arkDeploy, err := kubeClient.AppsV1beta1().Deployments(f.Namespace()).Get(arkDeployment, metav1.GetOptions{}) + veleroDeploy, err := kubeClient.AppsV1beta1().Deployments(f.Namespace()).Get(veleroDeployment, metav1.GetOptions{}) if err != nil { cmd.CheckError(err) } - original, err := json.Marshal(arkDeploy) + original, err := json.Marshal(veleroDeploy) cmd.CheckError(err) // ensure the plugins volume & mount exist volumeExists := false - for _, volume := range 
arkDeploy.Spec.Template.Spec.Volumes { + for _, volume := range veleroDeploy.Spec.Template.Spec.Volumes { if volume.Name == pluginsVolumeName { volumeExists = true break @@ -85,19 +85,19 @@ func NewAddCommand(f client.Factory) *cobra.Command { MountPath: "/plugins", } - arkDeploy.Spec.Template.Spec.Volumes = append(arkDeploy.Spec.Template.Spec.Volumes, volume) + veleroDeploy.Spec.Template.Spec.Volumes = append(veleroDeploy.Spec.Template.Spec.Volumes, volume) - containers := arkDeploy.Spec.Template.Spec.Containers + containers := veleroDeploy.Spec.Template.Spec.Containers containerIndex := -1 for x, container := range containers { - if container.Name == arkContainer { + if container.Name == veleroContainer { containerIndex = x break } } if containerIndex < 0 { - cmd.CheckError(errors.New("ark container not found in ark deployment")) + cmd.CheckError(errors.New("velero container not found in velero deployment")) } containers[containerIndex].VolumeMounts = append(containers[containerIndex].VolumeMounts, volumeMount) @@ -116,16 +116,16 @@ func NewAddCommand(f client.Factory) *cobra.Command { }, } - arkDeploy.Spec.Template.Spec.InitContainers = append(arkDeploy.Spec.Template.Spec.InitContainers, plugin) + veleroDeploy.Spec.Template.Spec.InitContainers = append(veleroDeploy.Spec.Template.Spec.InitContainers, plugin) // create & apply the patch - updated, err := json.Marshal(arkDeploy) + updated, err := json.Marshal(veleroDeploy) cmd.CheckError(err) patchBytes, err := jsonpatch.CreateMergePatch(original, updated) cmd.CheckError(err) - _, err = kubeClient.AppsV1beta1().Deployments(arkDeploy.Namespace).Patch(arkDeploy.Name, types.MergePatchType, patchBytes) + _, err = kubeClient.AppsV1beta1().Deployments(veleroDeploy.Namespace).Patch(veleroDeploy.Name, types.MergePatchType, patchBytes) cmd.CheckError(err) }, } diff --git a/pkg/cmd/cli/plugin/plugin.go b/pkg/cmd/cli/plugin/plugin.go index 918ec4e7d5..e42465cdf7 100644 --- a/pkg/cmd/cli/plugin/plugin.go +++ b/pkg/cmd/cli/plugin/plugin.go @@ -19,7 +19,7 @@ package plugin import ( "github.com/spf13/cobra" - "github.com/heptio/ark/pkg/client" + "github.com/heptio/velero/pkg/client" ) func NewCommand(f client.Factory) *cobra.Command { diff --git a/pkg/cmd/cli/plugin/remove.go b/pkg/cmd/cli/plugin/remove.go index 4f1b93b85c..6bbe1e26cb 100644 --- a/pkg/cmd/cli/plugin/remove.go +++ b/pkg/cmd/cli/plugin/remove.go @@ -25,8 +25,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/cmd" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/cmd" ) func NewRemoveCommand(f client.Factory) *cobra.Command { @@ -40,16 +40,16 @@ func NewRemoveCommand(f client.Factory) *cobra.Command { cmd.CheckError(err) } - arkDeploy, err := kubeClient.AppsV1beta1().Deployments(f.Namespace()).Get(arkDeployment, metav1.GetOptions{}) + veleroDeploy, err := kubeClient.AppsV1beta1().Deployments(f.Namespace()).Get(veleroDeployment, metav1.GetOptions{}) if err != nil { cmd.CheckError(err) } - original, err := json.Marshal(arkDeploy) + original, err := json.Marshal(veleroDeploy) cmd.CheckError(err) var ( - initContainers = arkDeploy.Spec.Template.Spec.InitContainers + initContainers = veleroDeploy.Spec.Template.Spec.InitContainers index = -1 ) @@ -61,18 +61,18 @@ func NewRemoveCommand(f client.Factory) *cobra.Command { } if index == -1 { - cmd.CheckError(errors.Errorf("init container %s not found in Ark server deployment", args[0])) + cmd.CheckError(errors.Errorf("init 
container %s not found in Velero server deployment", args[0])) } - arkDeploy.Spec.Template.Spec.InitContainers = append(initContainers[0:index], initContainers[index+1:]...) + veleroDeploy.Spec.Template.Spec.InitContainers = append(initContainers[0:index], initContainers[index+1:]...) - updated, err := json.Marshal(arkDeploy) + updated, err := json.Marshal(veleroDeploy) cmd.CheckError(err) patchBytes, err := jsonpatch.CreateMergePatch(original, updated) cmd.CheckError(err) - _, err = kubeClient.AppsV1beta1().Deployments(arkDeploy.Namespace).Patch(arkDeploy.Name, types.MergePatchType, patchBytes) + _, err = kubeClient.AppsV1beta1().Deployments(veleroDeploy.Namespace).Patch(veleroDeploy.Name, types.MergePatchType, patchBytes) cmd.CheckError(err) }, } diff --git a/pkg/cmd/cli/restic/repo/get.go b/pkg/cmd/cli/restic/repo/get.go index bb452fbcd6..39d0edc587 100644 --- a/pkg/cmd/cli/restic/repo/get.go +++ b/pkg/cmd/cli/restic/repo/get.go @@ -20,10 +20,10 @@ import ( "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/cmd" - "github.com/heptio/ark/pkg/cmd/util/output" + api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/cmd" + "github.com/heptio/velero/pkg/cmd/util/output" ) func NewGetCommand(f client.Factory, use string) *cobra.Command { @@ -36,19 +36,19 @@ func NewGetCommand(f client.Factory, use string) *cobra.Command { err := output.ValidateFlags(c) cmd.CheckError(err) - arkClient, err := f.Client() + veleroClient, err := f.Client() cmd.CheckError(err) var repos *api.ResticRepositoryList if len(args) > 0 { repos = new(api.ResticRepositoryList) for _, name := range args { - repo, err := arkClient.Ark().ResticRepositories(f.Namespace()).Get(name, metav1.GetOptions{}) + repo, err := veleroClient.VeleroV1().ResticRepositories(f.Namespace()).Get(name, metav1.GetOptions{}) cmd.CheckError(err) repos.Items = append(repos.Items, *repo) } } else { - repos, err = arkClient.ArkV1().ResticRepositories(f.Namespace()).List(listOptions) + repos, err = veleroClient.VeleroV1().ResticRepositories(f.Namespace()).List(listOptions) cmd.CheckError(err) } diff --git a/pkg/cmd/cli/restic/repo/repo.go b/pkg/cmd/cli/restic/repo/repo.go index e8b9bcaeb0..a66fe9d475 100644 --- a/pkg/cmd/cli/restic/repo/repo.go +++ b/pkg/cmd/cli/restic/repo/repo.go @@ -19,7 +19,7 @@ package repo import ( "github.com/spf13/cobra" - "github.com/heptio/ark/pkg/client" + "github.com/heptio/velero/pkg/client" ) func NewRepositoryCommand(f client.Factory) *cobra.Command { diff --git a/pkg/cmd/cli/restic/restic.go b/pkg/cmd/cli/restic/restic.go index 171f6e81e2..e03821653b 100644 --- a/pkg/cmd/cli/restic/restic.go +++ b/pkg/cmd/cli/restic/restic.go @@ -19,8 +19,8 @@ package restic import ( "github.com/spf13/cobra" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/cmd/cli/restic/repo" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/cmd/cli/restic/repo" ) func NewCommand(f client.Factory) *cobra.Command { diff --git a/pkg/cmd/cli/restic/server.go b/pkg/cmd/cli/restic/server.go index 9847e419c2..dbf5aa4f36 100644 --- a/pkg/cmd/cli/restic/server.go +++ b/pkg/cmd/cli/restic/server.go @@ -31,15 +31,15 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" - "github.com/heptio/ark/pkg/buildinfo" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/cmd" - 
"github.com/heptio/ark/pkg/cmd/util/signals" - "github.com/heptio/ark/pkg/controller" - clientset "github.com/heptio/ark/pkg/generated/clientset/versioned" - informers "github.com/heptio/ark/pkg/generated/informers/externalversions" - "github.com/heptio/ark/pkg/restic" - "github.com/heptio/ark/pkg/util/logging" + "github.com/heptio/velero/pkg/buildinfo" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/cmd" + "github.com/heptio/velero/pkg/cmd/util/signals" + "github.com/heptio/velero/pkg/controller" + clientset "github.com/heptio/velero/pkg/generated/clientset/versioned" + informers "github.com/heptio/velero/pkg/generated/informers/externalversions" + "github.com/heptio/velero/pkg/restic" + "github.com/heptio/velero/pkg/util/logging" ) func NewServerCommand(f client.Factory) *cobra.Command { @@ -47,14 +47,14 @@ func NewServerCommand(f client.Factory) *cobra.Command { command := &cobra.Command{ Use: "server", - Short: "Run the ark restic server", - Long: "Run the ark restic server", + Short: "Run the velero restic server", + Long: "Run the velero restic server", Run: func(c *cobra.Command, args []string) { logLevel := logLevelFlag.Parse() logrus.Infof("Setting log-level to %s", strings.ToUpper(logLevel.String())) logger := logging.DefaultLogger(logLevel) - logger.Infof("Starting Ark restic server %s", buildinfo.FormattedGitSHA()) + logger.Infof("Starting Velero restic server %s", buildinfo.FormattedGitSHA()) s, err := newResticServer(logger, fmt.Sprintf("%s-%s", c.Parent().Name(), c.Name())) cmd.CheckError(err) @@ -69,15 +69,15 @@ func NewServerCommand(f client.Factory) *cobra.Command { } type resticServer struct { - kubeClient kubernetes.Interface - arkClient clientset.Interface - arkInformerFactory informers.SharedInformerFactory - kubeInformerFactory kubeinformers.SharedInformerFactory - podInformer cache.SharedIndexInformer - secretInformer cache.SharedIndexInformer - logger logrus.FieldLogger - ctx context.Context - cancelFunc context.CancelFunc + kubeClient kubernetes.Interface + veleroClient clientset.Interface + veleroInformerFactory informers.SharedInformerFactory + kubeInformerFactory kubeinformers.SharedInformerFactory + podInformer cache.SharedIndexInformer + secretInformer cache.SharedIndexInformer + logger logrus.FieldLogger + ctx context.Context + cancelFunc context.CancelFunc } func newResticServer(logger logrus.FieldLogger, baseName string) (*resticServer, error) { @@ -91,7 +91,7 @@ func newResticServer(logger logrus.FieldLogger, baseName string) (*resticServer, return nil, errors.WithStack(err) } - arkClient, err := clientset.NewForConfig(clientConfig) + veleroClient, err := clientset.NewForConfig(clientConfig) if err != nil { return nil, errors.WithStack(err) } @@ -109,14 +109,14 @@ func newResticServer(logger logrus.FieldLogger, baseName string) (*resticServer, ) // use a stand-alone secrets informer so we can filter to only the restic credentials - // secret(s) within the heptio-ark namespace + // secret(s) within the velero namespace // - // note: using an informer to access the single secret for all ark-managed + // note: using an informer to access the single secret for all velero-managed // restic repositories is overkill for now, but will be useful when we move // to fully-encrypted backups and have unique keys per repository. 
secretInformer := corev1informers.NewFilteredSecretInformer( kubeClient, - os.Getenv("HEPTIO_ARK_NAMESPACE"), + os.Getenv("VELERO_NAMESPACE"), 0, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, func(opts *metav1.ListOptions) { @@ -127,15 +127,15 @@ func newResticServer(logger logrus.FieldLogger, baseName string) (*resticServer, ctx, cancelFunc := context.WithCancel(context.Background()) return &resticServer{ - kubeClient: kubeClient, - arkClient: arkClient, - arkInformerFactory: informers.NewFilteredSharedInformerFactory(arkClient, 0, os.Getenv("HEPTIO_ARK_NAMESPACE"), nil), - kubeInformerFactory: kubeinformers.NewSharedInformerFactory(kubeClient, 0), - podInformer: podInformer, - secretInformer: secretInformer, - logger: logger, - ctx: ctx, - cancelFunc: cancelFunc, + kubeClient: kubeClient, + veleroClient: veleroClient, + veleroInformerFactory: informers.NewFilteredSharedInformerFactory(veleroClient, 0, os.Getenv("VELERO_NAMESPACE"), nil), + kubeInformerFactory: kubeinformers.NewSharedInformerFactory(kubeClient, 0), + podInformer: podInformer, + secretInformer: secretInformer, + logger: logger, + ctx: ctx, + cancelFunc: cancelFunc, }, nil } @@ -148,12 +148,12 @@ func (s *resticServer) run() { backupController := controller.NewPodVolumeBackupController( s.logger, - s.arkInformerFactory.Ark().V1().PodVolumeBackups(), - s.arkClient.ArkV1(), + s.veleroInformerFactory.Velero().V1().PodVolumeBackups(), + s.veleroClient.VeleroV1(), s.podInformer, s.secretInformer, s.kubeInformerFactory.Core().V1().PersistentVolumeClaims(), - s.arkInformerFactory.Ark().V1().BackupStorageLocations(), + s.veleroInformerFactory.Velero().V1().BackupStorageLocations(), os.Getenv("NODE_NAME"), ) wg.Add(1) @@ -164,12 +164,12 @@ func (s *resticServer) run() { restoreController := controller.NewPodVolumeRestoreController( s.logger, - s.arkInformerFactory.Ark().V1().PodVolumeRestores(), - s.arkClient.ArkV1(), + s.veleroInformerFactory.Velero().V1().PodVolumeRestores(), + s.veleroClient.VeleroV1(), s.podInformer, s.secretInformer, s.kubeInformerFactory.Core().V1().PersistentVolumeClaims(), - s.arkInformerFactory.Ark().V1().BackupStorageLocations(), + s.veleroInformerFactory.Velero().V1().BackupStorageLocations(), os.Getenv("NODE_NAME"), ) wg.Add(1) @@ -178,7 +178,7 @@ func (s *resticServer) run() { restoreController.Run(s.ctx, 1) }() - go s.arkInformerFactory.Start(s.ctx.Done()) + go s.veleroInformerFactory.Start(s.ctx.Done()) go s.kubeInformerFactory.Start(s.ctx.Done()) go s.podInformer.Run(s.ctx.Done()) go s.secretInformer.Run(s.ctx.Done()) diff --git a/pkg/cmd/cli/restore/create.go b/pkg/cmd/cli/restore/create.go index 9a4e9e6f52..a3e17c9d4e 100644 --- a/pkg/cmd/cli/restore/create.go +++ b/pkg/cmd/cli/restore/create.go @@ -26,13 +26,13 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/cache" - api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/cmd" - "github.com/heptio/ark/pkg/cmd/util/flag" - "github.com/heptio/ark/pkg/cmd/util/output" - arkclient "github.com/heptio/ark/pkg/generated/clientset/versioned" - "github.com/heptio/ark/pkg/generated/informers/externalversions/ark/v1" + api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/cmd" + "github.com/heptio/velero/pkg/cmd/util/flag" + "github.com/heptio/velero/pkg/cmd/util/output" + veleroclient "github.com/heptio/velero/pkg/generated/clientset/versioned" + v1 
"github.com/heptio/velero/pkg/generated/informers/externalversions/velero/v1" ) func NewCreateCommand(f client.Factory, use string) *cobra.Command { @@ -42,13 +42,13 @@ func NewCreateCommand(f client.Factory, use string) *cobra.Command { Use: use + " [RESTORE_NAME] [--from-backup BACKUP_NAME | --from-schedule SCHEDULE_NAME]", Short: "Create a restore", Example: ` # create a restore named "restore-1" from backup "backup-1" - ark restore create restore-1 --from-backup backup-1 + velero restore create restore-1 --from-backup backup-1 # create a restore with a default name ("backup-1-") from backup "backup-1" - ark restore create --from-backup backup-1 + velero restore create --from-backup backup-1 # create a restore from the latest successful backup triggered by schedule "schedule-1" - ark restore create --from-schedule schedule-1 + velero restore create --from-schedule schedule-1 `, Args: cobra.MaximumNArgs(1), Run: func(c *cobra.Command, args []string) { @@ -80,7 +80,7 @@ type CreateOptions struct { IncludeClusterResources flag.OptionalBool Wait bool - client arkclient.Interface + client veleroclient.Interface } func NewCreateOptions() *CreateOptions { @@ -150,16 +150,16 @@ func (o *CreateOptions) Validate(c *cobra.Command, args []string, f client.Facto if o.client == nil { // This should never happen - return errors.New("Ark client is not set; unable to proceed") + return errors.New("Velero client is not set; unable to proceed") } switch { case o.BackupName != "": - if _, err := o.client.ArkV1().Backups(f.Namespace()).Get(o.BackupName, metav1.GetOptions{}); err != nil { + if _, err := o.client.VeleroV1().Backups(f.Namespace()).Get(o.BackupName, metav1.GetOptions{}); err != nil { return err } case o.ScheduleName != "": - if _, err := o.client.ArkV1().Schedules(f.Namespace()).Get(o.ScheduleName, metav1.GetOptions{}); err != nil { + if _, err := o.client.VeleroV1().Schedules(f.Namespace()).Get(o.ScheduleName, metav1.GetOptions{}); err != nil { return err } } @@ -170,7 +170,7 @@ func (o *CreateOptions) Validate(c *cobra.Command, args []string, f client.Facto func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error { if o.client == nil { // This should never happen - return errors.New("Ark client is not set; unable to proceed") + return errors.New("Velero client is not set; unable to proceed") } restore := &api.Restore{ @@ -237,7 +237,7 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error { go restoreInformer.Run(stop) } - restore, err := o.client.ArkV1().Restores(restore.Namespace).Create(restore) + restore, err := o.client.VeleroV1().Restores(restore.Namespace).Create(restore) if err != nil { return err } @@ -259,7 +259,7 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error { } if restore.Status.Phase != api.RestorePhaseNew && restore.Status.Phase != api.RestorePhaseInProgress { - fmt.Printf("\nRestore completed with status: %s. You may check for more information using the commands `ark restore describe %s` and `ark restore logs %s`.\n", restore.Status.Phase, restore.Name, restore.Name) + fmt.Printf("\nRestore completed with status: %s. 
You may check for more information using the commands `velero restore describe %s` and `velero restore logs %s`.\n", restore.Status.Phase, restore.Name, restore.Name) return nil } } @@ -268,7 +268,7 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error { // Not waiting - fmt.Printf("Run `ark restore describe %s` or `ark restore logs %s` for more details.\n", restore.Name, restore.Name) + fmt.Printf("Run `velero restore describe %s` or `velero restore logs %s` for more details.\n", restore.Name, restore.Name) return nil } diff --git a/pkg/cmd/cli/restore/delete.go b/pkg/cmd/cli/restore/delete.go index 824eacdc58..7cc5b3a41a 100644 --- a/pkg/cmd/cli/restore/delete.go +++ b/pkg/cmd/cli/restore/delete.go @@ -25,10 +25,10 @@ import ( "k8s.io/apimachinery/pkg/labels" kubeerrs "k8s.io/apimachinery/pkg/util/errors" - arkv1api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/cmd" - "github.com/heptio/ark/pkg/cmd/cli" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/cmd" + "github.com/heptio/velero/pkg/cmd/cli" ) // NewDeleteCommand creates and returns a new cobra command for deleting restores. @@ -39,19 +39,19 @@ func NewDeleteCommand(f client.Factory, use string) *cobra.Command { Use: fmt.Sprintf("%s [NAMES]", use), Short: "Delete restores", Example: ` # delete a restore named "restore-1" - ark restore delete restore-1 + velero restore delete restore-1 # delete a restore named "restore-1" without prompting for confirmation - ark restore delete restore-1 --confirm + velero restore delete restore-1 --confirm # delete restores named "restore-1" and "restore-2" - ark restore delete restore-1 restore-2 + velero restore delete restore-1 restore-2 # delete all restores labelled with foo=bar" - ark restore delete --selector foo=bar + velero restore delete --selector foo=bar # delete all restores - ark restore delete --all`, + velero restore delete --all`, Run: func(c *cobra.Command, args []string) { cmd.CheckError(o.Complete(f, args)) @@ -70,14 +70,14 @@ func Run(o *cli.DeleteOptions) error { return nil } var ( - restores []*arkv1api.Restore + restores []*velerov1api.Restore errs []error ) switch { case len(o.Names) > 0: for _, name := range o.Names { - restore, err := o.Client.ArkV1().Restores(o.Namespace).Get(name, metav1.GetOptions{}) + restore, err := o.Client.VeleroV1().Restores(o.Namespace).Get(name, metav1.GetOptions{}) if err != nil { errs = append(errs, errors.WithStack(err)) continue @@ -89,7 +89,7 @@ func Run(o *cli.DeleteOptions) error { if o.Selector.LabelSelector != nil { selector = o.Selector.String() } - res, err := o.Client.ArkV1().Restores(o.Namespace).List(metav1.ListOptions{ + res, err := o.Client.VeleroV1().Restores(o.Namespace).List(metav1.ListOptions{ LabelSelector: selector, }) if err != nil { @@ -105,7 +105,7 @@ func Run(o *cli.DeleteOptions) error { return nil } for _, r := range restores { - err := o.Client.ArkV1().Restores(r.Namespace).Delete(r.Name, nil) + err := o.Client.VeleroV1().Restores(r.Namespace).Delete(r.Name, nil) if err != nil { errs = append(errs, errors.WithStack(err)) continue diff --git a/pkg/cmd/cli/restore/describe.go b/pkg/cmd/cli/restore/describe.go index 8b5974f60c..6137b41dd9 100644 --- a/pkg/cmd/cli/restore/describe.go +++ b/pkg/cmd/cli/restore/describe.go @@ -23,11 +23,11 @@ import ( "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - api "github.com/heptio/ark/pkg/apis/ark/v1" 
- "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/cmd" - "github.com/heptio/ark/pkg/cmd/util/output" - "github.com/heptio/ark/pkg/restic" + api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/cmd" + "github.com/heptio/velero/pkg/cmd/util/output" + "github.com/heptio/velero/pkg/restic" ) func NewDescribeCommand(f client.Factory, use string) *cobra.Command { @@ -40,31 +40,31 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command { Use: use + " [NAME1] [NAME2] [NAME...]", Short: "Describe restores", Run: func(c *cobra.Command, args []string) { - arkClient, err := f.Client() + veleroClient, err := f.Client() cmd.CheckError(err) var restores *api.RestoreList if len(args) > 0 { restores = new(api.RestoreList) for _, name := range args { - restore, err := arkClient.Ark().Restores(f.Namespace()).Get(name, metav1.GetOptions{}) + restore, err := veleroClient.VeleroV1().Restores(f.Namespace()).Get(name, metav1.GetOptions{}) cmd.CheckError(err) restores.Items = append(restores.Items, *restore) } } else { - restores, err = arkClient.ArkV1().Restores(f.Namespace()).List(listOptions) + restores, err = veleroClient.VeleroV1().Restores(f.Namespace()).List(listOptions) cmd.CheckError(err) } first := true for _, restore := range restores.Items { - opts := restic.NewPodVolumeRestoreListOptions(restore.Name, string(restore.UID)) - podvolumeRestoreList, err := arkClient.ArkV1().PodVolumeRestores(f.Namespace()).List(opts) + opts := restic.NewPodVolumeRestoreListOptions(restore.Name) + podvolumeRestoreList, err := veleroClient.VeleroV1().PodVolumeRestores(f.Namespace()).List(opts) if err != nil { fmt.Fprintf(os.Stderr, "error getting PodVolumeRestores for restore %s: %v\n", restore.Name, err) } - s := output.DescribeRestore(&restore, podvolumeRestoreList.Items, details, arkClient) + s := output.DescribeRestore(&restore, podvolumeRestoreList.Items, details, veleroClient) if first { first = false fmt.Print(s) diff --git a/pkg/cmd/cli/restore/get.go b/pkg/cmd/cli/restore/get.go index 0472c59cd0..ffcda39f61 100644 --- a/pkg/cmd/cli/restore/get.go +++ b/pkg/cmd/cli/restore/get.go @@ -20,10 +20,10 @@ import ( "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/cmd" - "github.com/heptio/ark/pkg/cmd/util/output" + api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/cmd" + "github.com/heptio/velero/pkg/cmd/util/output" ) func NewGetCommand(f client.Factory, use string) *cobra.Command { @@ -36,19 +36,19 @@ func NewGetCommand(f client.Factory, use string) *cobra.Command { err := output.ValidateFlags(c) cmd.CheckError(err) - arkClient, err := f.Client() + veleroClient, err := f.Client() cmd.CheckError(err) var restores *api.RestoreList if len(args) > 0 { restores = new(api.RestoreList) for _, name := range args { - restore, err := arkClient.Ark().Restores(f.Namespace()).Get(name, metav1.GetOptions{}) + restore, err := veleroClient.VeleroV1().Restores(f.Namespace()).Get(name, metav1.GetOptions{}) cmd.CheckError(err) restores.Items = append(restores.Items, *restore) } } else { - restores, err = arkClient.ArkV1().Restores(f.Namespace()).List(listOptions) + restores, err = veleroClient.VeleroV1().Restores(f.Namespace()).List(listOptions) cmd.CheckError(err) } diff --git a/pkg/cmd/cli/restore/logs.go b/pkg/cmd/cli/restore/logs.go 
index e7454b1c9c..a953205c7f 100644 --- a/pkg/cmd/cli/restore/logs.go +++ b/pkg/cmd/cli/restore/logs.go @@ -24,11 +24,11 @@ import ( "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/cmd" - "github.com/heptio/ark/pkg/cmd/util/downloadrequest" - arkclient "github.com/heptio/ark/pkg/generated/clientset/versioned" + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/cmd" + "github.com/heptio/velero/pkg/cmd/util/downloadrequest" + veleroclient "github.com/heptio/velero/pkg/generated/clientset/versioned" ) func NewLogsCommand(f client.Factory) *cobra.Command { @@ -42,9 +42,9 @@ func NewLogsCommand(f client.Factory) *cobra.Command { l := NewLogsOptions() cmd.CheckError(l.Complete(args)) cmd.CheckError(l.Validate(f)) - arkClient, err := f.Client() + veleroClient, err := f.Client() cmd.CheckError(err) - err = downloadrequest.Stream(arkClient.ArkV1(), f.Namespace(), args[0], v1.DownloadTargetKindRestoreLog, os.Stdout, timeout) + err = downloadrequest.Stream(veleroClient.VeleroV1(), f.Namespace(), args[0], v1.DownloadTargetKindRestoreLog, os.Stdout, timeout) cmd.CheckError(err) }, } @@ -58,7 +58,7 @@ func NewLogsCommand(f client.Factory) *cobra.Command { type LogsOptions struct { RestoreName string - client arkclient.Interface + client veleroclient.Interface } // NewLogsOptions returns a new instance of LogsOptions @@ -82,7 +82,7 @@ func (l *LogsOptions) Validate(f client.Factory) error { } l.client = c - r, err := l.client.ArkV1().Restores(f.Namespace()).Get(l.RestoreName, metav1.GetOptions{}) + r, err := l.client.VeleroV1().Restores(f.Namespace()).Get(l.RestoreName, metav1.GetOptions{}) if err != nil { return err } diff --git a/pkg/cmd/cli/restore/restore.go b/pkg/cmd/cli/restore/restore.go index 90aba1135b..12b7642554 100644 --- a/pkg/cmd/cli/restore/restore.go +++ b/pkg/cmd/cli/restore/restore.go @@ -19,7 +19,7 @@ package restore import ( "github.com/spf13/cobra" - "github.com/heptio/ark/pkg/client" + "github.com/heptio/velero/pkg/client" ) func NewCommand(f client.Factory) *cobra.Command { diff --git a/pkg/cmd/cli/schedule/create.go b/pkg/cmd/cli/schedule/create.go index 91f7bda97b..c2a8940a95 100644 --- a/pkg/cmd/cli/schedule/create.go +++ b/pkg/cmd/cli/schedule/create.go @@ -24,11 +24,11 @@ import ( "github.com/spf13/pflag" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/cmd" - "github.com/heptio/ark/pkg/cmd/cli/backup" - "github.com/heptio/ark/pkg/cmd/util/output" + api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/cmd" + "github.com/heptio/velero/pkg/cmd/cli/backup" + "github.com/heptio/velero/pkg/cmd/util/output" ) func NewCreateCommand(f client.Factory, use string) *cobra.Command { @@ -47,7 +47,7 @@ func NewCreateCommand(f client.Factory, use string) *cobra.Command { | 4 | Month | 1-12,* | | 5 | Day of Week | 0-7,* |`, - Example: `ark create schedule NAME --schedule="0 */6 * * *"`, + Example: `velero create schedule NAME --schedule="0 */6 * * *"`, Args: cobra.ExactArgs(1), Run: func(c *cobra.Command, args []string) { cmd.CheckError(o.Complete(args, f)) @@ -94,7 +94,7 @@ func (o *CreateOptions) Complete(args []string, f client.Factory) error { } func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error { 
- arkClient, err := f.Client() + veleroClient, err := f.Client() if err != nil { return err } @@ -126,7 +126,7 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error { return err } - _, err = arkClient.ArkV1().Schedules(schedule.Namespace).Create(schedule) + _, err = veleroClient.VeleroV1().Schedules(schedule.Namespace).Create(schedule) if err != nil { return err } diff --git a/pkg/cmd/cli/schedule/delete.go b/pkg/cmd/cli/schedule/delete.go index 95a264b1ed..18fbe3158e 100644 --- a/pkg/cmd/cli/schedule/delete.go +++ b/pkg/cmd/cli/schedule/delete.go @@ -25,10 +25,10 @@ import ( "k8s.io/apimachinery/pkg/labels" kubeerrs "k8s.io/apimachinery/pkg/util/errors" - arkv1api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/cmd" - "github.com/heptio/ark/pkg/cmd/cli" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/cmd" + "github.com/heptio/velero/pkg/cmd/cli" ) // NewDeleteCommand creates and returns a new cobra command for deleting schedules. @@ -39,19 +39,19 @@ func NewDeleteCommand(f client.Factory, use string) *cobra.Command { Use: fmt.Sprintf("%s [NAMES]", use), Short: "Delete schedules", Example: ` # delete a schedule named "schedule-1" - ark schedule delete schedule-1 + velero schedule delete schedule-1 # delete a schedule named "schedule-1" without prompting for confirmation - ark schedule delete schedule-1 --confirm + velero schedule delete schedule-1 --confirm # delete schedules named "schedule-1" and "schedule-2" - ark schedule delete schedule-1 schedule-2 + velero schedule delete schedule-1 schedule-2 # delete all schedules labelled with foo=bar" - ark schedule delete --selector foo=bar + velero schedule delete --selector foo=bar # delete all schedules - ark schedule delete --all`, + velero schedule delete --all`, Run: func(c *cobra.Command, args []string) { cmd.CheckError(o.Complete(f, args)) @@ -70,13 +70,13 @@ func Run(o *cli.DeleteOptions) error { return nil } var ( - schedules []*arkv1api.Schedule + schedules []*velerov1api.Schedule errs []error ) switch { case len(o.Names) > 0: for _, name := range o.Names { - schedule, err := o.Client.ArkV1().Schedules(o.Namespace).Get(name, metav1.GetOptions{}) + schedule, err := o.Client.VeleroV1().Schedules(o.Namespace).Get(name, metav1.GetOptions{}) if err != nil { errs = append(errs, errors.WithStack(err)) continue @@ -88,7 +88,7 @@ func Run(o *cli.DeleteOptions) error { if o.Selector.LabelSelector != nil { selector = o.Selector.String() } - res, err := o.Client.ArkV1().Schedules(o.Namespace).List(metav1.ListOptions{ + res, err := o.Client.VeleroV1().Schedules(o.Namespace).List(metav1.ListOptions{ LabelSelector: selector, }) if err != nil { @@ -105,7 +105,7 @@ func Run(o *cli.DeleteOptions) error { } for _, s := range schedules { - err := o.Client.ArkV1().Schedules(s.Namespace).Delete(s.Name, nil) + err := o.Client.VeleroV1().Schedules(s.Namespace).Delete(s.Name, nil) if err != nil { errs = append(errs, errors.WithStack(err)) continue diff --git a/pkg/cmd/cli/schedule/describe.go b/pkg/cmd/cli/schedule/describe.go index b375b13a06..b05eee01a2 100644 --- a/pkg/cmd/cli/schedule/describe.go +++ b/pkg/cmd/cli/schedule/describe.go @@ -22,10 +22,10 @@ import ( "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/cmd" - "github.com/heptio/ark/pkg/cmd/util/output" + v1 
"github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/cmd" + "github.com/heptio/velero/pkg/cmd/util/output" ) func NewDescribeCommand(f client.Factory, use string) *cobra.Command { @@ -35,19 +35,19 @@ func NewDescribeCommand(f client.Factory, use string) *cobra.Command { Use: use + " [NAME1] [NAME2] [NAME...]", Short: "Describe schedules", Run: func(c *cobra.Command, args []string) { - arkClient, err := f.Client() + veleroClient, err := f.Client() cmd.CheckError(err) var schedules *v1.ScheduleList if len(args) > 0 { schedules = new(v1.ScheduleList) for _, name := range args { - schedule, err := arkClient.Ark().Schedules(f.Namespace()).Get(name, metav1.GetOptions{}) + schedule, err := veleroClient.VeleroV1().Schedules(f.Namespace()).Get(name, metav1.GetOptions{}) cmd.CheckError(err) schedules.Items = append(schedules.Items, *schedule) } } else { - schedules, err = arkClient.ArkV1().Schedules(f.Namespace()).List(listOptions) + schedules, err = veleroClient.VeleroV1().Schedules(f.Namespace()).List(listOptions) cmd.CheckError(err) } diff --git a/pkg/cmd/cli/schedule/get.go b/pkg/cmd/cli/schedule/get.go index dd6cd101d7..1ebf6ec2ff 100644 --- a/pkg/cmd/cli/schedule/get.go +++ b/pkg/cmd/cli/schedule/get.go @@ -20,10 +20,10 @@ import ( "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/cmd" - "github.com/heptio/ark/pkg/cmd/util/output" + api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/cmd" + "github.com/heptio/velero/pkg/cmd/util/output" ) func NewGetCommand(f client.Factory, use string) *cobra.Command { @@ -36,19 +36,19 @@ func NewGetCommand(f client.Factory, use string) *cobra.Command { err := output.ValidateFlags(c) cmd.CheckError(err) - arkClient, err := f.Client() + veleroClient, err := f.Client() cmd.CheckError(err) var schedules *api.ScheduleList if len(args) > 0 { schedules = new(api.ScheduleList) for _, name := range args { - schedule, err := arkClient.Ark().Schedules(f.Namespace()).Get(name, metav1.GetOptions{}) + schedule, err := veleroClient.VeleroV1().Schedules(f.Namespace()).Get(name, metav1.GetOptions{}) cmd.CheckError(err) schedules.Items = append(schedules.Items, *schedule) } } else { - schedules, err = arkClient.ArkV1().Schedules(f.Namespace()).List(listOptions) + schedules, err = veleroClient.VeleroV1().Schedules(f.Namespace()).List(listOptions) cmd.CheckError(err) } diff --git a/pkg/cmd/cli/schedule/schedule.go b/pkg/cmd/cli/schedule/schedule.go index 1e4f84380a..fce55db0ed 100644 --- a/pkg/cmd/cli/schedule/schedule.go +++ b/pkg/cmd/cli/schedule/schedule.go @@ -19,7 +19,7 @@ package schedule import ( "github.com/spf13/cobra" - "github.com/heptio/ark/pkg/client" + "github.com/heptio/velero/pkg/client" ) func NewCommand(f client.Factory) *cobra.Command { diff --git a/pkg/cmd/cli/snapshotlocation/create.go b/pkg/cmd/cli/snapshotlocation/create.go index 9f65ad594e..bd6647255c 100644 --- a/pkg/cmd/cli/snapshotlocation/create.go +++ b/pkg/cmd/cli/snapshotlocation/create.go @@ -24,11 +24,11 @@ import ( "github.com/spf13/pflag" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/cmd" - "github.com/heptio/ark/pkg/cmd/util/flag" - "github.com/heptio/ark/pkg/cmd/util/output" + api 
"github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/cmd" + "github.com/heptio/velero/pkg/cmd/util/flag" + "github.com/heptio/velero/pkg/cmd/util/output" ) func NewCreateCommand(f client.Factory, use string) *cobra.Command { @@ -110,7 +110,7 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error { return err } - if _, err := client.ArkV1().VolumeSnapshotLocations(volumeSnapshotLocation.Namespace).Create(volumeSnapshotLocation); err != nil { + if _, err := client.VeleroV1().VolumeSnapshotLocations(volumeSnapshotLocation.Namespace).Create(volumeSnapshotLocation); err != nil { return errors.WithStack(err) } diff --git a/pkg/cmd/cli/snapshotlocation/get.go b/pkg/cmd/cli/snapshotlocation/get.go index c890e33195..7cc09839ca 100644 --- a/pkg/cmd/cli/snapshotlocation/get.go +++ b/pkg/cmd/cli/snapshotlocation/get.go @@ -20,10 +20,10 @@ import ( "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/cmd" - "github.com/heptio/ark/pkg/cmd/util/output" + api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/cmd" + "github.com/heptio/velero/pkg/cmd/util/output" ) func NewGetCommand(f client.Factory, use string) *cobra.Command { @@ -34,18 +34,18 @@ func NewGetCommand(f client.Factory, use string) *cobra.Command { Run: func(c *cobra.Command, args []string) { err := output.ValidateFlags(c) cmd.CheckError(err) - arkClient, err := f.Client() + veleroClient, err := f.Client() cmd.CheckError(err) var locations *api.VolumeSnapshotLocationList if len(args) > 0 { locations = new(api.VolumeSnapshotLocationList) for _, name := range args { - location, err := arkClient.Ark().VolumeSnapshotLocations(f.Namespace()).Get(name, metav1.GetOptions{}) + location, err := veleroClient.VeleroV1().VolumeSnapshotLocations(f.Namespace()).Get(name, metav1.GetOptions{}) cmd.CheckError(err) locations.Items = append(locations.Items, *location) } } else { - locations, err = arkClient.ArkV1().VolumeSnapshotLocations(f.Namespace()).List(listOptions) + locations, err = veleroClient.VeleroV1().VolumeSnapshotLocations(f.Namespace()).List(listOptions) cmd.CheckError(err) } _, err = output.PrintWithFormat(c, locations) diff --git a/pkg/cmd/cli/snapshotlocation/snapshot_location.go b/pkg/cmd/cli/snapshotlocation/snapshot_location.go index e7d7d05b9d..59e9966d45 100644 --- a/pkg/cmd/cli/snapshotlocation/snapshot_location.go +++ b/pkg/cmd/cli/snapshotlocation/snapshot_location.go @@ -19,7 +19,7 @@ package snapshotlocation import ( "github.com/spf13/cobra" - "github.com/heptio/ark/pkg/client" + "github.com/heptio/velero/pkg/client" ) func NewCommand(f client.Factory) *cobra.Command { diff --git a/pkg/cmd/server/plugin/plugin.go b/pkg/cmd/server/plugin/plugin.go index 966df8c0af..4425e55118 100644 --- a/pkg/cmd/server/plugin/plugin.go +++ b/pkg/cmd/server/plugin/plugin.go @@ -20,18 +20,18 @@ import ( "github.com/sirupsen/logrus" "github.com/spf13/cobra" - "github.com/heptio/ark/pkg/backup" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/cloudprovider/aws" - "github.com/heptio/ark/pkg/cloudprovider/azure" - "github.com/heptio/ark/pkg/cloudprovider/gcp" - arkdiscovery "github.com/heptio/ark/pkg/discovery" - arkplugin "github.com/heptio/ark/pkg/plugin" - "github.com/heptio/ark/pkg/restore" + "github.com/heptio/velero/pkg/backup" + 
"github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/cloudprovider/aws" + "github.com/heptio/velero/pkg/cloudprovider/azure" + "github.com/heptio/velero/pkg/cloudprovider/gcp" + velerodiscovery "github.com/heptio/velero/pkg/discovery" + veleroplugin "github.com/heptio/velero/pkg/plugin" + "github.com/heptio/velero/pkg/restore" ) func NewCommand(f client.Factory) *cobra.Command { - logger := arkplugin.NewLogger() + logger := veleroplugin.NewLogger() c := &cobra.Command{ Use: "run-plugins", @@ -40,7 +40,7 @@ func NewCommand(f client.Factory) *cobra.Command { Run: func(c *cobra.Command, args []string) { logger.Debug("Executing run-plugins command") - arkplugin.NewServer(logger). + veleroplugin.NewServer(logger). RegisterObjectStore("aws", newAwsObjectStore). RegisterObjectStore("azure", newAzureObjectStore). RegisterObjectStore("gcp", newGcpObjectStore). @@ -94,7 +94,7 @@ func newPodBackupItemAction(logger logrus.FieldLogger) (interface{}, error) { return backup.NewPodAction(logger), nil } -func newServiceAccountBackupItemAction(f client.Factory) arkplugin.HandlerInitializer { +func newServiceAccountBackupItemAction(f client.Factory) veleroplugin.HandlerInitializer { return func(logger logrus.FieldLogger) (interface{}, error) { // TODO(ncdc): consider a k8s style WantsKubernetesClientSet initialization approach clientset, err := f.KubeClient() @@ -102,7 +102,7 @@ func newServiceAccountBackupItemAction(f client.Factory) arkplugin.HandlerInitia return nil, err } - discoveryHelper, err := arkdiscovery.NewHelper(clientset.Discovery(), logger) + discoveryHelper, err := velerodiscovery.NewHelper(clientset.Discovery(), logger) if err != nil { return nil, err } diff --git a/pkg/cmd/server/server.go b/pkg/cmd/server/server.go index 7dd10e1818..867a763ecd 100644 --- a/pkg/cmd/server/server.go +++ b/pkg/cmd/server/server.go @@ -48,26 +48,26 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" - api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/backup" - "github.com/heptio/ark/pkg/buildinfo" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/cmd" - "github.com/heptio/ark/pkg/cmd/util/flag" - "github.com/heptio/ark/pkg/cmd/util/signals" - "github.com/heptio/ark/pkg/controller" - arkdiscovery "github.com/heptio/ark/pkg/discovery" - clientset "github.com/heptio/ark/pkg/generated/clientset/versioned" - informers "github.com/heptio/ark/pkg/generated/informers/externalversions" - "github.com/heptio/ark/pkg/metrics" - "github.com/heptio/ark/pkg/persistence" - "github.com/heptio/ark/pkg/plugin" - "github.com/heptio/ark/pkg/podexec" - "github.com/heptio/ark/pkg/restic" - "github.com/heptio/ark/pkg/restore" - "github.com/heptio/ark/pkg/util/kube" - "github.com/heptio/ark/pkg/util/logging" - "github.com/heptio/ark/pkg/util/stringslice" + api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/backup" + "github.com/heptio/velero/pkg/buildinfo" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/cmd" + "github.com/heptio/velero/pkg/cmd/util/flag" + "github.com/heptio/velero/pkg/cmd/util/signals" + "github.com/heptio/velero/pkg/controller" + velerodiscovery "github.com/heptio/velero/pkg/discovery" + clientset "github.com/heptio/velero/pkg/generated/clientset/versioned" + informers "github.com/heptio/velero/pkg/generated/informers/externalversions" + "github.com/heptio/velero/pkg/metrics" + "github.com/heptio/velero/pkg/persistence" + "github.com/heptio/velero/pkg/plugin" + 
"github.com/heptio/velero/pkg/podexec" + "github.com/heptio/velero/pkg/restic" + "github.com/heptio/velero/pkg/restore" + "github.com/heptio/velero/pkg/util/kube" + "github.com/heptio/velero/pkg/util/logging" + "github.com/heptio/velero/pkg/util/stringslice" ) const ( @@ -115,8 +115,8 @@ func NewCommand() *cobra.Command { var command = &cobra.Command{ Use: "server", - Short: "Run the ark server", - Long: "Run the ark server", + Short: "Run the velero server", + Long: "Run the velero server", Run: func(c *cobra.Command, args []string) { // go-plugin uses log.Println to log when it's waiting for all plugin processes to complete so we need to // set its output to stdout. @@ -127,11 +127,11 @@ func NewCommand() *cobra.Command { logrus.SetOutput(os.Stdout) logrus.Infof("setting log-level to %s", strings.ToUpper(logLevel.String())) - // Ark's DefaultLogger logs to stdout, so all is good there. + // Velero's DefaultLogger logs to stdout, so all is good there. logger := logging.DefaultLogger(logLevel) - logger.Infof("Starting Ark server %s", buildinfo.FormattedGitSHA()) + logger.Infof("Starting Velero server %s", buildinfo.FormattedGitSHA()) - // NOTE: the namespace flag is bound to ark's persistent flags when the root ark command + // NOTE: the namespace flag is bound to velero's persistent flags when the root velero command // creates the client Factory and binds the Factory's flags. We're not using a Factory here in // the server because the Factory gets its basename set at creation time, and the basename is // used to construct the user-agent for clients. Also, the Factory's Namespace() method uses @@ -157,9 +157,9 @@ func NewCommand() *cobra.Command { } command.Flags().Var(logLevelFlag, "log-level", fmt.Sprintf("the level at which to log. Valid values are %s.", strings.Join(logLevelFlag.AllowedValues(), ", "))) - command.Flags().StringVar(&config.pluginDir, "plugin-dir", config.pluginDir, "directory containing Ark plugins") + command.Flags().StringVar(&config.pluginDir, "plugin-dir", config.pluginDir, "directory containing Velero plugins") command.Flags().StringVar(&config.metricsAddress, "metrics-address", config.metricsAddress, "the address to expose prometheus metrics") - command.Flags().DurationVar(&config.backupSyncPeriod, "backup-sync-period", config.backupSyncPeriod, "how often to ensure all Ark backups in object storage exist as Backup API objects in the cluster") + command.Flags().DurationVar(&config.backupSyncPeriod, "backup-sync-period", config.backupSyncPeriod, "how often to ensure all Velero backups in object storage exist as Backup API objects in the cluster") command.Flags().DurationVar(&config.podVolumeOperationTimeout, "restic-timeout", config.podVolumeOperationTimeout, "how long backups/restores of pod volumes should be allowed to run before timing out") command.Flags().BoolVar(&config.restoreOnly, "restore-only", config.restoreOnly, "run in a mode where only restores are allowed; backups, schedules, and garbage-collection are all disabled") command.Flags().StringSliceVar(&config.restoreResourcePriorities, "restore-resource-priorities", config.restoreResourcePriorities, "desired order of resource restores; any resource not in the list will be restored alphabetically after the prioritized resources") @@ -191,9 +191,9 @@ type server struct { metricsAddress string kubeClientConfig *rest.Config kubeClient kubernetes.Interface - arkClient clientset.Interface + veleroClient clientset.Interface discoveryClient discovery.DiscoveryInterface - discoveryHelper arkdiscovery.Helper + 
discoveryHelper velerodiscovery.Helper dynamicClient dynamic.Interface sharedInformerFactory informers.SharedInformerFactory ctx context.Context @@ -227,7 +227,7 @@ func newServer(namespace, baseName string, config serverConfig, logger *logrus.L return nil, errors.WithStack(err) } - arkClient, err := clientset.NewForConfig(clientConfig) + veleroClient, err := clientset.NewForConfig(clientConfig) if err != nil { return nil, errors.WithStack(err) } @@ -253,10 +253,10 @@ func newServer(namespace, baseName string, config serverConfig, logger *logrus.L metricsAddress: config.metricsAddress, kubeClientConfig: clientConfig, kubeClient: kubeClient, - arkClient: arkClient, - discoveryClient: arkClient.Discovery(), + veleroClient: veleroClient, + discoveryClient: veleroClient.Discovery(), dynamicClient: dynamicClient, - sharedInformerFactory: informers.NewSharedInformerFactoryWithOptions(arkClient, 0, informers.WithNamespace(namespace)), + sharedInformerFactory: informers.NewSharedInformerFactoryWithOptions(veleroClient, 0, informers.WithNamespace(namespace)), ctx: ctx, cancelFunc: cancelFunc, logger: logger, @@ -279,7 +279,7 @@ func (s *server) run() error { } // Since s.namespace, which specifies where backups/restores/schedules/etc. should live, - // *could* be different from the namespace where the Ark server pod runs, check to make + // *could* be different from the namespace where the Velero server pod runs, check to make // sure it exists, and fail fast if it doesn't. if err := s.namespaceExists(s.namespace); err != nil { return err @@ -289,7 +289,7 @@ func (s *server) run() error { return err } - if err := s.arkResourcesExist(); err != nil { + if err := s.veleroResourcesExist(); err != nil { return err } @@ -297,7 +297,7 @@ func (s *server) run() error { return err } - if _, err := s.arkClient.ArkV1().BackupStorageLocations(s.namespace).Get(s.config.defaultBackupLocation, metav1.GetOptions{}); err != nil { + if _, err := s.veleroClient.VeleroV1().BackupStorageLocations(s.namespace).Get(s.config.defaultBackupLocation, metav1.GetOptions{}); err != nil { s.logger.WithError(errors.WithStack(err)). Warnf("A backup storage location named %s has been specified for the server to use by default, but no corresponding backup storage location exists. Backups with a location not matching the default will need to explicitly specify an existing location", s.config.defaultBackupLocation) } @@ -329,7 +329,7 @@ func (s *server) namespaceExists(namespace string) error { // initDiscoveryHelper instantiates the server's discovery helper and spawns a // goroutine to call Refresh() every 5 minutes. func (s *server) initDiscoveryHelper() error { - discoveryHelper, err := arkdiscovery.NewHelper(s.discoveryClient, s.logger) + discoveryHelper, err := velerodiscovery.NewHelper(s.discoveryClient, s.logger) if err != nil { return err } @@ -348,25 +348,25 @@ func (s *server) initDiscoveryHelper() error { return nil } -// arkResourcesExist checks for the existence of each Ark CRD via discovery +// veleroResourcesExist checks for the existence of each Velero CRD via discovery // and returns an error if any of them don't exist. 
-func (s *server) arkResourcesExist() error { - s.logger.Info("Checking existence of Ark custom resource definitions") +func (s *server) veleroResourcesExist() error { + s.logger.Info("Checking existence of Velero custom resource definitions") - var arkGroupVersion *metav1.APIResourceList + var veleroGroupVersion *metav1.APIResourceList for _, gv := range s.discoveryHelper.Resources() { if gv.GroupVersion == api.SchemeGroupVersion.String() { - arkGroupVersion = gv + veleroGroupVersion = gv break } } - if arkGroupVersion == nil { - return errors.Errorf("Ark API group %s not found. Apply examples/common/00-prereqs.yaml to create it.", api.SchemeGroupVersion) + if veleroGroupVersion == nil { + return errors.Errorf("Velero API group %s not found. Apply examples/common/00-prereqs.yaml to create it.", api.SchemeGroupVersion) } foundResources := sets.NewString() - for _, resource := range arkGroupVersion.APIResources { + for _, resource := range veleroGroupVersion.APIResources { foundResources.Insert(resource.Kind) } @@ -377,15 +377,15 @@ func (s *server) arkResourcesExist() error { continue } - errs = append(errs, errors.Errorf("custom resource %s not found in Ark API group %s", kind, api.SchemeGroupVersion)) + errs = append(errs, errors.Errorf("custom resource %s not found in Velero API group %s", kind, api.SchemeGroupVersion)) } if len(errs) > 0 { - errs = append(errs, errors.New("Ark custom resources not found - apply examples/common/00-prereqs.yaml to update the custom resource definitions")) + errs = append(errs, errors.New("Velero custom resources not found - apply examples/common/00-prereqs.yaml to update the custom resource definitions")) return kubeerrs.NewAggregate(errs) } - s.logger.Info("All Ark custom resource definitions exist") + s.logger.Info("All Velero custom resource definitions exist") return nil } @@ -394,7 +394,7 @@ func (s *server) arkResourcesExist() error { func (s *server) validateBackupStorageLocations() error { s.logger.Info("Checking that all backup storage locations are valid") - locations, err := s.arkClient.ArkV1().BackupStorageLocations(s.namespace).List(metav1.ListOptions{}) + locations, err := s.veleroClient.VeleroV1().BackupStorageLocations(s.namespace).List(metav1.ListOptions{}) if err != nil { return errors.WithStack(err) } @@ -409,7 +409,7 @@ func (s *server) validateBackupStorageLocations() error { if err := backupStore.IsValid(); err != nil { invalid = append(invalid, errors.Wrapf(err, - "backup store for location %q is invalid (if upgrading from a pre-v0.10 version of Ark, please refer to https://heptio.github.io/ark/v0.10.0/storage-layout-reorg-v0.10 for instructions)", + "backup store for location %q is invalid (if upgrading from a pre-v0.10 version of Velero, please refer to https://heptio.github.io/velero/v0.10.0/storage-layout-reorg-v0.10 for instructions)", location.Name, ).Error()) } @@ -451,9 +451,9 @@ var defaultRestorePriorities = []string{ func (s *server) initRestic() error { // warn if restic daemonset does not exist if _, err := s.kubeClient.AppsV1().DaemonSets(s.namespace).Get(restic.DaemonSet, metav1.GetOptions{}); apierrors.IsNotFound(err) { - s.logger.Warn("Ark restic daemonset not found; restic backups/restores will not work until it's created") + s.logger.Warn("Velero restic daemonset not found; restic backups/restores will not work until it's created") } else if err != nil { - s.logger.WithError(errors.WithStack(err)).Warn("Error checking for existence of ark restic daemonset") + s.logger.WithError(errors.WithStack(err)).Warn("Error 
checking for existence of velero restic daemonset") } // ensure the repo key secret is set up @@ -462,9 +462,9 @@ func (s *server) initRestic() error { } // use a stand-alone secrets informer so we can filter to only the restic credentials - // secret(s) within the heptio-ark namespace + // secret(s) within the velero namespace // - // note: using an informer to access the single secret for all ark-managed + // note: using an informer to access the single secret for all velero-managed // restic repositories is overkill for now, but will be useful when we move // to fully-encrypted backups and have unique keys per repository. secretsInformer := corev1informers.NewFilteredSecretInformer( @@ -481,11 +481,11 @@ func (s *server) initRestic() error { res, err := restic.NewRepositoryManager( s.ctx, s.namespace, - s.arkClient, + s.veleroClient, secretsInformer, - s.sharedInformerFactory.Ark().V1().ResticRepositories(), - s.arkClient.ArkV1(), - s.sharedInformerFactory.Ark().V1().BackupStorageLocations(), + s.sharedInformerFactory.Velero().V1().ResticRepositories(), + s.veleroClient.VeleroV1(), + s.sharedInformerFactory.Velero().V1().BackupStorageLocations(), s.logger, ) if err != nil { @@ -520,10 +520,10 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string } backupSyncController := controller.NewBackupSyncController( - s.arkClient.ArkV1(), - s.arkClient.ArkV1(), - s.sharedInformerFactory.Ark().V1().Backups(), - s.sharedInformerFactory.Ark().V1().BackupStorageLocations(), + s.veleroClient.VeleroV1(), + s.veleroClient.VeleroV1(), + s.sharedInformerFactory.Velero().V1().Backups(), + s.sharedInformerFactory.Velero().V1().BackupStorageLocations(), s.config.backupSyncPeriod, s.namespace, s.config.defaultBackupLocation, @@ -551,16 +551,16 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string cmd.CheckError(err) backupController := controller.NewBackupController( - s.sharedInformerFactory.Ark().V1().Backups(), - s.arkClient.ArkV1(), + s.sharedInformerFactory.Velero().V1().Backups(), + s.veleroClient.VeleroV1(), backupper, s.logger, s.logLevel, newPluginManager, backupTracker, - s.sharedInformerFactory.Ark().V1().BackupStorageLocations(), + s.sharedInformerFactory.Velero().V1().BackupStorageLocations(), s.config.defaultBackupLocation, - s.sharedInformerFactory.Ark().V1().VolumeSnapshotLocations(), + s.sharedInformerFactory.Velero().V1().VolumeSnapshotLocations(), defaultVolumeSnapshotLocations, s.metrics, ) @@ -572,9 +572,9 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string scheduleController := controller.NewScheduleController( s.namespace, - s.arkClient.ArkV1(), - s.arkClient.ArkV1(), - s.sharedInformerFactory.Ark().V1().Schedules(), + s.veleroClient.VeleroV1(), + s.veleroClient.VeleroV1(), + s.sharedInformerFactory.Velero().V1().Schedules(), s.logger, s.metrics, ) @@ -586,9 +586,9 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string gcController := controller.NewGCController( s.logger, - s.sharedInformerFactory.Ark().V1().Backups(), - s.sharedInformerFactory.Ark().V1().DeleteBackupRequests(), - s.arkClient.ArkV1(), + s.sharedInformerFactory.Velero().V1().Backups(), + s.sharedInformerFactory.Velero().V1().DeleteBackupRequests(), + s.veleroClient.VeleroV1(), ) wg.Add(1) go func() { @@ -598,16 +598,16 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string backupDeletionController := controller.NewBackupDeletionController( s.logger, - 
s.sharedInformerFactory.Ark().V1().DeleteBackupRequests(), - s.arkClient.ArkV1(), // deleteBackupRequestClient - s.arkClient.ArkV1(), // backupClient - s.sharedInformerFactory.Ark().V1().Restores(), - s.arkClient.ArkV1(), // restoreClient + s.sharedInformerFactory.Velero().V1().DeleteBackupRequests(), + s.veleroClient.VeleroV1(), // deleteBackupRequestClient + s.veleroClient.VeleroV1(), // backupClient + s.sharedInformerFactory.Velero().V1().Restores(), + s.veleroClient.VeleroV1(), // restoreClient backupTracker, s.resticManager, - s.sharedInformerFactory.Ark().V1().PodVolumeBackups(), - s.sharedInformerFactory.Ark().V1().BackupStorageLocations(), - s.sharedInformerFactory.Ark().V1().VolumeSnapshotLocations(), + s.sharedInformerFactory.Velero().V1().PodVolumeBackups(), + s.sharedInformerFactory.Velero().V1().BackupStorageLocations(), + s.sharedInformerFactory.Velero().V1().VolumeSnapshotLocations(), newPluginManager, ) wg.Add(1) @@ -631,13 +631,13 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string restoreController := controller.NewRestoreController( s.namespace, - s.sharedInformerFactory.Ark().V1().Restores(), - s.arkClient.ArkV1(), - s.arkClient.ArkV1(), + s.sharedInformerFactory.Velero().V1().Restores(), + s.veleroClient.VeleroV1(), + s.veleroClient.VeleroV1(), restorer, - s.sharedInformerFactory.Ark().V1().Backups(), - s.sharedInformerFactory.Ark().V1().BackupStorageLocations(), - s.sharedInformerFactory.Ark().V1().VolumeSnapshotLocations(), + s.sharedInformerFactory.Velero().V1().Backups(), + s.sharedInformerFactory.Velero().V1().BackupStorageLocations(), + s.sharedInformerFactory.Velero().V1().VolumeSnapshotLocations(), s.logger, s.logLevel, newPluginManager, @@ -652,11 +652,11 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string }() downloadRequestController := controller.NewDownloadRequestController( - s.arkClient.ArkV1(), - s.sharedInformerFactory.Ark().V1().DownloadRequests(), - s.sharedInformerFactory.Ark().V1().Restores(), - s.sharedInformerFactory.Ark().V1().BackupStorageLocations(), - s.sharedInformerFactory.Ark().V1().Backups(), + s.veleroClient.VeleroV1(), + s.sharedInformerFactory.Velero().V1().DownloadRequests(), + s.sharedInformerFactory.Velero().V1().Restores(), + s.sharedInformerFactory.Velero().V1().BackupStorageLocations(), + s.sharedInformerFactory.Velero().V1().Backups(), newPluginManager, s.logger, ) @@ -668,9 +668,9 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string resticRepoController := controller.NewResticRepositoryController( s.logger, - s.sharedInformerFactory.Ark().V1().ResticRepositories(), - s.arkClient.ArkV1(), - s.sharedInformerFactory.Ark().V1().BackupStorageLocations(), + s.sharedInformerFactory.Velero().V1().ResticRepositories(), + s.veleroClient.VeleroV1(), + s.sharedInformerFactory.Velero().V1().BackupStorageLocations(), s.resticManager, ) wg.Add(1) @@ -683,8 +683,8 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string serverStatusRequestController := controller.NewServerStatusRequestController( s.logger, - s.arkClient.ArkV1(), - s.sharedInformerFactory.Ark().V1().ServerStatusRequests(), + s.veleroClient.VeleroV1(), + s.sharedInformerFactory.Velero().V1().ServerStatusRequests(), ) wg.Add(1) go func() { @@ -696,7 +696,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string go s.sharedInformerFactory.Start(ctx.Done()) // TODO(1.0): remove - cache.WaitForCacheSync(ctx.Done(), 
s.sharedInformerFactory.Ark().V1().Backups().Informer().HasSynced) + cache.WaitForCacheSync(ctx.Done(), s.sharedInformerFactory.Velero().V1().Backups().Informer().HasSynced) s.removeDeprecatedGCFinalizer() s.logger.Info("Server started successfully") @@ -726,7 +726,7 @@ func (s *server) runProfiler() { func (s *server) removeDeprecatedGCFinalizer() { const gcFinalizer = "gc.ark.heptio.com" - backups, err := s.sharedInformerFactory.Ark().V1().Backups().Lister().List(labels.Everything()) + backups, err := s.sharedInformerFactory.Velero().V1().Backups().Lister().List(labels.Everything()) if err != nil { s.logger.WithError(errors.WithStack(err)).Error("error listing backups from cache - unable to remove old finalizers") return @@ -755,7 +755,7 @@ func (s *server) removeDeprecatedGCFinalizer() { continue } - _, err = s.arkClient.ArkV1().Backups(backup.Namespace).Patch(backup.Name, types.MergePatchType, patchBytes) + _, err = s.veleroClient.VeleroV1().Backups(backup.Namespace).Patch(backup.Name, types.MergePatchType, patchBytes) if err != nil { log.WithError(errors.WithStack(err)).Error("error marshaling finalizers patch") } diff --git a/pkg/cmd/server/server_test.go b/pkg/cmd/server/server_test.go index 372aff8a0c..5db2ac0e45 100644 --- a/pkg/cmd/server/server_test.go +++ b/pkg/cmd/server/server_test.go @@ -22,20 +22,20 @@ import ( "github.com/stretchr/testify/assert" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/heptio/ark/pkg/apis/ark/v1" - arktest "github.com/heptio/ark/pkg/util/test" + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + velerotest "github.com/heptio/velero/pkg/util/test" ) -func TestArkResourcesExist(t *testing.T) { +func TestVeleroResourcesExist(t *testing.T) { var ( - fakeDiscoveryHelper = &arktest.FakeDiscoveryHelper{} + fakeDiscoveryHelper = &velerotest.FakeDiscoveryHelper{} server = &server{ - logger: arktest.NewLogger(), + logger: velerotest.NewLogger(), discoveryHelper: fakeDiscoveryHelper, } ) - // Ark API group doesn't exist in discovery: should error + // Velero API group doesn't exist in discovery: should error fakeDiscoveryHelper.ResourceList = []*metav1.APIResourceList{ { GroupVersion: "foo/v1", @@ -47,25 +47,25 @@ func TestArkResourcesExist(t *testing.T) { }, }, } - assert.Error(t, server.arkResourcesExist()) + assert.Error(t, server.veleroResourcesExist()) - // Ark API group doesn't contain any custom resources: should error - arkAPIResourceList := &metav1.APIResourceList{ + // Velero API group doesn't contain any custom resources: should error + veleroAPIResourceList := &metav1.APIResourceList{ GroupVersion: v1.SchemeGroupVersion.String(), } - fakeDiscoveryHelper.ResourceList = append(fakeDiscoveryHelper.ResourceList, arkAPIResourceList) - assert.Error(t, server.arkResourcesExist()) + fakeDiscoveryHelper.ResourceList = append(fakeDiscoveryHelper.ResourceList, veleroAPIResourceList) + assert.Error(t, server.veleroResourcesExist()) - // Ark API group contains all custom resources: should not error + // Velero API group contains all custom resources: should not error for kind := range v1.CustomResources() { - arkAPIResourceList.APIResources = append(arkAPIResourceList.APIResources, metav1.APIResource{ + veleroAPIResourceList.APIResources = append(veleroAPIResourceList.APIResources, metav1.APIResource{ Kind: kind, }) } - assert.NoError(t, server.arkResourcesExist()) + assert.NoError(t, server.veleroResourcesExist()) - // Ark API group contains some but not all custom resources: should error - arkAPIResourceList.APIResources = 
arkAPIResourceList.APIResources[:3] - assert.Error(t, server.arkResourcesExist()) + // Velero API group contains some but not all custom resources: should error + veleroAPIResourceList.APIResources = veleroAPIResourceList.APIResources[:3] + assert.Error(t, server.veleroResourcesExist()) } diff --git a/pkg/cmd/util/downloadrequest/downloadrequest.go b/pkg/cmd/util/downloadrequest/downloadrequest.go index ff400e2deb..64b8d92f83 100644 --- a/pkg/cmd/util/downloadrequest/downloadrequest.go +++ b/pkg/cmd/util/downloadrequest/downloadrequest.go @@ -28,11 +28,11 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/watch" - "github.com/heptio/ark/pkg/apis/ark/v1" - arkclientv1 "github.com/heptio/ark/pkg/generated/clientset/versioned/typed/ark/v1" + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + velerov1client "github.com/heptio/velero/pkg/generated/clientset/versioned/typed/velero/v1" ) -func Stream(client arkclientv1.DownloadRequestsGetter, namespace, name string, kind v1.DownloadTargetKind, w io.Writer, timeout time.Duration) error { +func Stream(client velerov1client.DownloadRequestsGetter, namespace, name string, kind v1.DownloadTargetKind, w io.Writer, timeout time.Duration) error { req := &v1.DownloadRequest{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, diff --git a/pkg/cmd/util/downloadrequest/downloadrequest_test.go b/pkg/cmd/util/downloadrequest/downloadrequest_test.go index f46e19ebab..669cde434a 100644 --- a/pkg/cmd/util/downloadrequest/downloadrequest_test.go +++ b/pkg/cmd/util/downloadrequest/downloadrequest_test.go @@ -33,8 +33,8 @@ import ( "k8s.io/apimachinery/pkg/watch" core "k8s.io/client-go/testing" - "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/generated/clientset/versioned/fake" + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/generated/clientset/versioned/fake" ) func TestStream(t *testing.T) { @@ -151,7 +151,7 @@ func TestStream(t *testing.T) { output := new(bytes.Buffer) errCh := make(chan error) go func() { - err := Stream(client.ArkV1(), "namespace", "name", test.kind, output, timeout) + err := Stream(client.VeleroV1(), "namespace", "name", test.kind, output, timeout) errCh <- err }() diff --git a/pkg/cmd/util/output/backup_describer.go b/pkg/cmd/util/output/backup_describer.go index a2e4715270..156b9d1c23 100644 --- a/pkg/cmd/util/output/backup_describer.go +++ b/pkg/cmd/util/output/backup_describer.go @@ -25,19 +25,19 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - arkv1api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/cmd/util/downloadrequest" - clientset "github.com/heptio/ark/pkg/generated/clientset/versioned" - "github.com/heptio/ark/pkg/volume" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/cmd/util/downloadrequest" + clientset "github.com/heptio/velero/pkg/generated/clientset/versioned" + "github.com/heptio/velero/pkg/volume" ) // DescribeBackup describes a backup in human-readable format. 
func DescribeBackup( - backup *arkv1api.Backup, - deleteRequests []arkv1api.DeleteBackupRequest, - podVolumeBackups []arkv1api.PodVolumeBackup, + backup *velerov1api.Backup, + deleteRequests []velerov1api.DeleteBackupRequest, + podVolumeBackups []velerov1api.PodVolumeBackup, details bool, - arkClient clientset.Interface, + veleroClient clientset.Interface, ) string { return Describe(func(d *Describer) { d.DescribeMetadata(backup.ObjectMeta) @@ -45,7 +45,7 @@ func DescribeBackup( d.Println() phase := backup.Status.Phase if phase == "" { - phase = arkv1api.BackupPhaseNew + phase = velerov1api.BackupPhaseNew } d.Printf("Phase:\t%s\n", phase) @@ -53,7 +53,7 @@ func DescribeBackup( DescribeBackupSpec(d, backup.Spec) d.Println() - DescribeBackupStatus(d, backup, details, arkClient) + DescribeBackupStatus(d, backup, details, veleroClient) if len(deleteRequests) > 0 { d.Println() @@ -68,7 +68,7 @@ func DescribeBackup( } // DescribeBackupSpec describes a backup spec in human-readable format. -func DescribeBackupSpec(d *Describer, spec arkv1api.BackupSpec) { +func DescribeBackupSpec(d *Describer, spec velerov1api.BackupSpec) { // TODO make a helper for this and use it in all the describers. d.Printf("Namespaces:\n") var s string @@ -179,7 +179,7 @@ func DescribeBackupSpec(d *Describer, spec arkv1api.BackupSpec) { } // DescribeBackupStatus describes a backup status in human-readable format. -func DescribeBackupStatus(d *Describer, backup *arkv1api.Backup, details bool, arkClient clientset.Interface) { +func DescribeBackupStatus(d *Describer, backup *velerov1api.Backup, details bool, veleroClient clientset.Interface) { status := backup.Status d.Printf("Backup Format Version:\t%d\n", status.Version) @@ -228,7 +228,7 @@ func DescribeBackupStatus(d *Describer, backup *arkv1api.Backup, details bool, a } buf := new(bytes.Buffer) - if err := downloadrequest.Stream(arkClient.ArkV1(), backup.Namespace, backup.Name, arkv1api.DownloadTargetKindBackupVolumeSnapshots, buf, downloadRequestTimeout); err != nil { + if err := downloadrequest.Stream(veleroClient.VeleroV1(), backup.Namespace, backup.Name, velerov1api.DownloadTargetKindBackupVolumeSnapshots, buf, downloadRequestTimeout); err != nil { d.Printf("Persistent Volumes:\t\n", err) return } @@ -262,7 +262,7 @@ func printSnapshot(d *Describer, pvName, snapshotID, volumeType, volumeAZ string } // DescribeDeleteBackupRequests describes delete backup requests in human-readable format. -func DescribeDeleteBackupRequests(d *Describer, requests []arkv1api.DeleteBackupRequest) { +func DescribeDeleteBackupRequests(d *Describer, requests []velerov1api.DeleteBackupRequest) { d.Printf("Deletion Attempts") if count := failedDeletionCount(requests); count > 0 { d.Printf(" (%d failed)", count) @@ -287,10 +287,10 @@ func DescribeDeleteBackupRequests(d *Describer, requests []arkv1api.DeleteBackup } } -func failedDeletionCount(requests []arkv1api.DeleteBackupRequest) int { +func failedDeletionCount(requests []velerov1api.DeleteBackupRequest) int { var count int for _, req := range requests { - if req.Status.Phase == arkv1api.DeleteBackupRequestPhaseProcessed && len(req.Status.Errors) > 0 { + if req.Status.Phase == velerov1api.DeleteBackupRequestPhaseProcessed && len(req.Status.Errors) > 0 { count++ } } @@ -298,7 +298,7 @@ func failedDeletionCount(requests []arkv1api.DeleteBackupRequest) int { } // DescribePodVolumeBackups describes pod volume backups in human-readable format. 
-func DescribePodVolumeBackups(d *Describer, backups []arkv1api.PodVolumeBackup, details bool) { +func DescribePodVolumeBackups(d *Describer, backups []velerov1api.PodVolumeBackup, details bool) { if details { d.Printf("Restic Backups:\n") } else { @@ -310,10 +310,10 @@ func DescribePodVolumeBackups(d *Describer, backups []arkv1api.PodVolumeBackup, // go through phases in a specific order for _, phase := range []string{ - string(arkv1api.PodVolumeBackupPhaseCompleted), - string(arkv1api.PodVolumeBackupPhaseFailed), + string(velerov1api.PodVolumeBackupPhaseCompleted), + string(velerov1api.PodVolumeBackupPhaseFailed), "In Progress", - string(arkv1api.PodVolumeBackupPhaseNew), + string(velerov1api.PodVolumeBackupPhaseNew), } { if len(backupsByPhase[phase]) == 0 { continue @@ -342,15 +342,15 @@ func DescribePodVolumeBackups(d *Describer, backups []arkv1api.PodVolumeBackup, } } -func groupByPhase(backups []arkv1api.PodVolumeBackup) map[string][]arkv1api.PodVolumeBackup { - backupsByPhase := make(map[string][]arkv1api.PodVolumeBackup) +func groupByPhase(backups []velerov1api.PodVolumeBackup) map[string][]velerov1api.PodVolumeBackup { + backupsByPhase := make(map[string][]velerov1api.PodVolumeBackup) - phaseToGroup := map[arkv1api.PodVolumeBackupPhase]string{ - arkv1api.PodVolumeBackupPhaseCompleted: string(arkv1api.PodVolumeBackupPhaseCompleted), - arkv1api.PodVolumeBackupPhaseFailed: string(arkv1api.PodVolumeBackupPhaseFailed), - arkv1api.PodVolumeBackupPhaseInProgress: "In Progress", - arkv1api.PodVolumeBackupPhaseNew: string(arkv1api.PodVolumeBackupPhaseNew), - "": string(arkv1api.PodVolumeBackupPhaseNew), + phaseToGroup := map[velerov1api.PodVolumeBackupPhase]string{ + velerov1api.PodVolumeBackupPhaseCompleted: string(velerov1api.PodVolumeBackupPhaseCompleted), + velerov1api.PodVolumeBackupPhaseFailed: string(velerov1api.PodVolumeBackupPhaseFailed), + velerov1api.PodVolumeBackupPhaseInProgress: "In Progress", + velerov1api.PodVolumeBackupPhaseNew: string(velerov1api.PodVolumeBackupPhaseNew), + "": string(velerov1api.PodVolumeBackupPhaseNew), } for _, backup := range backups { diff --git a/pkg/cmd/util/output/backup_printer.go b/pkg/cmd/util/output/backup_printer.go index 1098dd7cef..f9f7aab907 100644 --- a/pkg/cmd/util/output/backup_printer.go +++ b/pkg/cmd/util/output/backup_printer.go @@ -27,14 +27,14 @@ import ( "k8s.io/apimachinery/pkg/util/duration" "k8s.io/kubernetes/pkg/printers" - arkv1api "github.com/heptio/ark/pkg/apis/ark/v1" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" ) var ( backupColumns = []string{"NAME", "STATUS", "CREATED", "EXPIRES", "STORAGE LOCATION", "SELECTOR"} ) -func printBackupList(list *arkv1api.BackupList, w io.Writer, options printers.PrintOptions) error { +func printBackupList(list *velerov1api.BackupList, w io.Writer, options printers.PrintOptions) error { sortBackupsByPrefixAndTimestamp(list) for i := range list.Items { @@ -45,7 +45,7 @@ func printBackupList(list *arkv1api.BackupList, w io.Writer, options printers.Pr return nil } -func sortBackupsByPrefixAndTimestamp(list *arkv1api.BackupList) { +func sortBackupsByPrefixAndTimestamp(list *velerov1api.BackupList) { // sort by default alphabetically, but if backups stem from a common schedule // (detected by the presence of a 14-digit timestamp suffix), then within that // group, sort by newest to oldest (i.e. 
prefix ASC, suffix DESC) @@ -70,7 +70,7 @@ func sortBackupsByPrefixAndTimestamp(list *arkv1api.BackupList) { }) } -func printBackup(backup *arkv1api.Backup, w io.Writer, options printers.PrintOptions) error { +func printBackup(backup *velerov1api.Backup, w io.Writer, options printers.PrintOptions) error { name := printers.FormatResourceName(options.Kind, backup.Name, options.WithKind) if options.WithNamespace { @@ -86,7 +86,7 @@ func printBackup(backup *arkv1api.Backup, w io.Writer, options printers.PrintOpt status := backup.Status.Phase if status == "" { - status = arkv1api.BackupPhaseNew + status = velerov1api.BackupPhaseNew } if backup.DeletionTimestamp != nil && !backup.DeletionTimestamp.Time.IsZero() { status = "Deleting" diff --git a/pkg/cmd/util/output/backup_printer_test.go b/pkg/cmd/util/output/backup_printer_test.go index 0e08139d0b..49365ce29a 100644 --- a/pkg/cmd/util/output/backup_printer_test.go +++ b/pkg/cmd/util/output/backup_printer_test.go @@ -22,7 +22,7 @@ import ( "github.com/stretchr/testify/assert" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/heptio/ark/pkg/apis/ark/v1" + v1 "github.com/heptio/velero/pkg/apis/velero/v1" ) func TestSortBackups(t *testing.T) { diff --git a/pkg/cmd/util/output/backup_storage_location_printer.go b/pkg/cmd/util/output/backup_storage_location_printer.go index 7fa0d773e4..a32568050c 100644 --- a/pkg/cmd/util/output/backup_storage_location_printer.go +++ b/pkg/cmd/util/output/backup_storage_location_printer.go @@ -22,7 +22,7 @@ import ( "k8s.io/kubernetes/pkg/printers" - "github.com/heptio/ark/pkg/apis/ark/v1" + v1 "github.com/heptio/velero/pkg/apis/velero/v1" ) var ( diff --git a/pkg/cmd/util/output/output.go b/pkg/cmd/util/output/output.go index 402d964023..bc56f5ffc0 100644 --- a/pkg/cmd/util/output/output.go +++ b/pkg/cmd/util/output/output.go @@ -28,8 +28,8 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/kubernetes/pkg/printers" - "github.com/heptio/ark/pkg/cmd/util/flag" - "github.com/heptio/ark/pkg/util/encode" + "github.com/heptio/velero/pkg/cmd/util/flag" + "github.com/heptio/velero/pkg/util/encode" ) const downloadRequestTimeout = 30 * time.Second @@ -159,7 +159,7 @@ func printTable(cmd *cobra.Command, obj runtime.Object) (bool, error) { } // NewPrinter returns a printer for doing human-readable table printing of -// Ark objects. +// Velero objects. 
func NewPrinter(cmd *cobra.Command) (*printers.HumanReadablePrinter, error) { options := printers.PrintOptions{ NoHeaders: flag.GetOptionalBoolFlag(cmd, "no-headers"), diff --git a/pkg/cmd/util/output/restic_repo_printer.go b/pkg/cmd/util/output/restic_repo_printer.go index a2e374232a..e7e9f8a37c 100644 --- a/pkg/cmd/util/output/restic_repo_printer.go +++ b/pkg/cmd/util/output/restic_repo_printer.go @@ -22,7 +22,7 @@ import ( "k8s.io/kubernetes/pkg/printers" - "github.com/heptio/ark/pkg/apis/ark/v1" + v1 "github.com/heptio/velero/pkg/apis/velero/v1" ) var ( diff --git a/pkg/cmd/util/output/restore_describer.go b/pkg/cmd/util/output/restore_describer.go index ba8f631db0..50d626e2e1 100644 --- a/pkg/cmd/util/output/restore_describer.go +++ b/pkg/cmd/util/output/restore_describer.go @@ -24,12 +24,12 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/cmd/util/downloadrequest" - clientset "github.com/heptio/ark/pkg/generated/clientset/versioned" + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/cmd/util/downloadrequest" + clientset "github.com/heptio/velero/pkg/generated/clientset/versioned" ) -func DescribeRestore(restore *v1.Restore, podVolumeRestores []v1.PodVolumeRestore, details bool, arkClient clientset.Interface) string { +func DescribeRestore(restore *v1.Restore, podVolumeRestores []v1.PodVolumeRestore, details bool, veleroClient clientset.Interface) string { return Describe(func(d *Describer) { d.DescribeMetadata(restore.ObjectMeta) @@ -96,7 +96,7 @@ func DescribeRestore(restore *v1.Restore, podVolumeRestores []v1.PodVolumeRestor } d.Println() - describeRestoreResults(d, restore, arkClient) + describeRestoreResults(d, restore, veleroClient) if len(podVolumeRestores) > 0 { d.Println() @@ -105,7 +105,7 @@ func DescribeRestore(restore *v1.Restore, podVolumeRestores []v1.PodVolumeRestor }) } -func describeRestoreResults(d *Describer, restore *v1.Restore, arkClient clientset.Interface) { +func describeRestoreResults(d *Describer, restore *v1.Restore, veleroClient clientset.Interface) { if restore.Status.Warnings == 0 && restore.Status.Errors == 0 { d.Printf("Warnings:\t\nErrors:\t\n") return @@ -114,7 +114,7 @@ func describeRestoreResults(d *Describer, restore *v1.Restore, arkClient clients var buf bytes.Buffer var resultMap map[string]v1.RestoreResult - if err := downloadrequest.Stream(arkClient.ArkV1(), restore.Namespace, restore.Name, v1.DownloadTargetKindRestoreResults, &buf, downloadRequestTimeout); err != nil { + if err := downloadrequest.Stream(veleroClient.VeleroV1(), restore.Namespace, restore.Name, v1.DownloadTargetKindRestoreResults, &buf, downloadRequestTimeout); err != nil { d.Printf("Warnings:\t\n\nErrors:\t\n", err, err) return } @@ -131,7 +131,8 @@ func describeRestoreResults(d *Describer, restore *v1.Restore, arkClient clients func describeRestoreResult(d *Describer, name string, result v1.RestoreResult) { d.Printf("%s:\n", name) - d.DescribeSlice(1, "Ark", result.Ark) + // TODO(1.0): only describe result.Velero + d.DescribeSlice(1, "Velero", append(result.Ark, result.Velero...)) d.DescribeSlice(1, "Cluster", result.Cluster) if len(result.Namespaces) == 0 { d.Printf("\tNamespaces: \n") diff --git a/pkg/cmd/util/output/restore_printer.go b/pkg/cmd/util/output/restore_printer.go index 5d8e5facb0..6a3d52e863 100644 --- a/pkg/cmd/util/output/restore_printer.go +++ b/pkg/cmd/util/output/restore_printer.go @@ -23,7 +23,7 @@ import ( metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/printers" - "github.com/heptio/ark/pkg/apis/ark/v1" + v1 "github.com/heptio/velero/pkg/apis/velero/v1" ) var ( diff --git a/pkg/cmd/util/output/schedule_describer.go b/pkg/cmd/util/output/schedule_describer.go index 7ef811650b..77c54e9200 100644 --- a/pkg/cmd/util/output/schedule_describer.go +++ b/pkg/cmd/util/output/schedule_describer.go @@ -19,7 +19,7 @@ package output import ( "fmt" - "github.com/heptio/ark/pkg/apis/ark/v1" + v1 "github.com/heptio/velero/pkg/apis/velero/v1" ) func DescribeSchedule(schedule *v1.Schedule) string { diff --git a/pkg/cmd/util/output/schedule_printer.go b/pkg/cmd/util/output/schedule_printer.go index e75fbb3e2a..8029198675 100644 --- a/pkg/cmd/util/output/schedule_printer.go +++ b/pkg/cmd/util/output/schedule_printer.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/printers" - "github.com/heptio/ark/pkg/apis/ark/v1" + v1 "github.com/heptio/velero/pkg/apis/velero/v1" ) var ( diff --git a/pkg/cmd/util/output/volume_snapshot_location_printer.go b/pkg/cmd/util/output/volume_snapshot_location_printer.go index 29d831d220..e1ad8bc5fb 100644 --- a/pkg/cmd/util/output/volume_snapshot_location_printer.go +++ b/pkg/cmd/util/output/volume_snapshot_location_printer.go @@ -22,7 +22,7 @@ import ( "k8s.io/kubernetes/pkg/printers" - "github.com/heptio/ark/pkg/apis/ark/v1" + v1 "github.com/heptio/velero/pkg/apis/velero/v1" ) var ( diff --git a/pkg/cmd/ark/ark.go b/pkg/cmd/velero/velero.go similarity index 58% rename from pkg/cmd/ark/ark.go rename to pkg/cmd/velero/velero.go index d0b22462c4..1135da47f0 100644 --- a/pkg/cmd/ark/ark.go +++ b/pkg/cmd/velero/velero.go @@ -14,44 +14,44 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package ark +package velero import ( "flag" "github.com/spf13/cobra" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/cmd/cli/backup" - "github.com/heptio/ark/pkg/cmd/cli/backuplocation" - "github.com/heptio/ark/pkg/cmd/cli/bug" - cliclient "github.com/heptio/ark/pkg/cmd/cli/client" - "github.com/heptio/ark/pkg/cmd/cli/completion" - "github.com/heptio/ark/pkg/cmd/cli/create" - "github.com/heptio/ark/pkg/cmd/cli/delete" - "github.com/heptio/ark/pkg/cmd/cli/describe" - "github.com/heptio/ark/pkg/cmd/cli/get" - "github.com/heptio/ark/pkg/cmd/cli/plugin" - "github.com/heptio/ark/pkg/cmd/cli/restic" - "github.com/heptio/ark/pkg/cmd/cli/restore" - "github.com/heptio/ark/pkg/cmd/cli/schedule" - "github.com/heptio/ark/pkg/cmd/cli/snapshotlocation" - "github.com/heptio/ark/pkg/cmd/server" - runplugin "github.com/heptio/ark/pkg/cmd/server/plugin" - "github.com/heptio/ark/pkg/cmd/version" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/cmd/cli/backup" + "github.com/heptio/velero/pkg/cmd/cli/backuplocation" + "github.com/heptio/velero/pkg/cmd/cli/bug" + cliclient "github.com/heptio/velero/pkg/cmd/cli/client" + "github.com/heptio/velero/pkg/cmd/cli/completion" + "github.com/heptio/velero/pkg/cmd/cli/create" + "github.com/heptio/velero/pkg/cmd/cli/delete" + "github.com/heptio/velero/pkg/cmd/cli/describe" + "github.com/heptio/velero/pkg/cmd/cli/get" + "github.com/heptio/velero/pkg/cmd/cli/plugin" + "github.com/heptio/velero/pkg/cmd/cli/restic" + "github.com/heptio/velero/pkg/cmd/cli/restore" + "github.com/heptio/velero/pkg/cmd/cli/schedule" + "github.com/heptio/velero/pkg/cmd/cli/snapshotlocation" + "github.com/heptio/velero/pkg/cmd/server" + runplugin "github.com/heptio/velero/pkg/cmd/server/plugin" + "github.com/heptio/velero/pkg/cmd/version" ) func NewCommand(name string) *cobra.Command { c := &cobra.Command{ Use: name, Short: "Back up and restore Kubernetes cluster resources.", - Long: `Heptio Ark is a tool for managing disaster recovery, specifically for Kubernetes + Long: `Velero is a tool for managing disaster recovery, specifically for Kubernetes cluster resources. It provides a simple, configurable, and operationally robust way to back up your application state and associated data. -If you're familiar with kubectl, Ark supports a similar model, allowing you to -execute commands such as 'ark get backup' and 'ark create schedule'. The same -operations can also be performed as 'ark backup get' and 'ark schedule create'.`, +If you're familiar with kubectl, Velero supports a similar model, allowing you to +execute commands such as 'velero get backup' and 'velero create schedule'. 
The same +operations can also be performed as 'velero backup get' and 'velero schedule create'.`, } f := client.NewFactory(name) diff --git a/pkg/cmd/version/version.go b/pkg/cmd/version/version.go index c829035c27..40660a1be8 100644 --- a/pkg/cmd/version/version.go +++ b/pkg/cmd/version/version.go @@ -27,12 +27,12 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/watch" - arkv1api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/buildinfo" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/cmd" - arkv1client "github.com/heptio/ark/pkg/generated/clientset/versioned/typed/ark/v1" - "github.com/heptio/ark/pkg/serverstatusrequest" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/buildinfo" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/cmd" + velerov1client "github.com/heptio/velero/pkg/generated/clientset/versioned/typed/velero/v1" + "github.com/heptio/velero/pkg/serverstatusrequest" ) func NewCommand(f client.Factory) *cobra.Command { @@ -44,28 +44,28 @@ func NewCommand(f client.Factory) *cobra.Command { c := &cobra.Command{ Use: "version", - Short: "Print the ark version and associated image", + Short: "Print the velero version and associated image", Run: func(c *cobra.Command, args []string) { - var arkClient arkv1client.ServerStatusRequestsGetter + var veleroClient velerov1client.ServerStatusRequestsGetter if !clientOnly { client, err := f.Client() cmd.CheckError(err) - arkClient = client.ArkV1() + veleroClient = client.VeleroV1() } - printVersion(os.Stdout, clientOnly, arkClient, serverStatusGetter) + printVersion(os.Stdout, clientOnly, veleroClient, serverStatusGetter) }, } c.Flags().DurationVar(&serverStatusGetter.timeout, "timeout", serverStatusGetter.timeout, "maximum time to wait for server version to be reported") - c.Flags().BoolVar(&clientOnly, "client-only", clientOnly, "only get ark client version, not server version") + c.Flags().BoolVar(&clientOnly, "client-only", clientOnly, "only get velero client version, not server version") return c } -func printVersion(w io.Writer, clientOnly bool, client arkv1client.ServerStatusRequestsGetter, serverStatusGetter serverStatusGetter) { +func printVersion(w io.Writer, clientOnly bool, client velerov1client.ServerStatusRequestsGetter, serverStatusGetter serverStatusGetter) { fmt.Fprintln(w, "Client:") fmt.Fprintf(w, "\tVersion: %s\n", buildinfo.Version) fmt.Fprintf(w, "\tGit commit: %s\n", buildinfo.FormattedGitSHA()) @@ -85,7 +85,7 @@ func printVersion(w io.Writer, clientOnly bool, client arkv1client.ServerStatusR } type serverStatusGetter interface { - getServerStatus(client arkv1client.ServerStatusRequestsGetter) (*arkv1api.ServerStatusRequest, error) + getServerStatus(client velerov1client.ServerStatusRequestsGetter) (*velerov1api.ServerStatusRequest, error) } type defaultServerStatusGetter struct { @@ -93,8 +93,8 @@ type defaultServerStatusGetter struct { timeout time.Duration } -func (g *defaultServerStatusGetter) getServerStatus(client arkv1client.ServerStatusRequestsGetter) (*arkv1api.ServerStatusRequest, error) { - req := serverstatusrequest.NewBuilder().Namespace(g.namespace).GenerateName("ark-cli-").Build() +func (g *defaultServerStatusGetter) getServerStatus(client velerov1client.ServerStatusRequestsGetter) (*velerov1api.ServerStatusRequest, error) { + req := serverstatusrequest.NewBuilder().Namespace(g.namespace).GenerateName("velero-cli-").Build() created, err := 
client.ServerStatusRequests(g.namespace).Create(req) if err != nil { @@ -123,7 +123,7 @@ Loop: case <-expired.C: return nil, errors.New("timed out waiting for server status request to be processed") case e := <-watcher.ResultChan(): - updated, ok := e.Object.(*arkv1api.ServerStatusRequest) + updated, ok := e.Object.(*velerov1api.ServerStatusRequest) if !ok { return nil, errors.Errorf("unexpected type %T", e.Object) } @@ -138,7 +138,7 @@ Loop: case watch.Deleted: return nil, errors.New("server status request was unexpectedly deleted") case watch.Modified: - if updated.Status.Phase == arkv1api.ServerStatusRequestPhaseProcessed { + if updated.Status.Phase == velerov1api.ServerStatusRequestPhaseProcessed { req = updated break Loop } diff --git a/pkg/cmd/version/version_test.go b/pkg/cmd/version/version_test.go index 960393fde0..2e7d27471e 100644 --- a/pkg/cmd/version/version_test.go +++ b/pkg/cmd/version/version_test.go @@ -25,11 +25,11 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" - arkv1 "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/buildinfo" - "github.com/heptio/ark/pkg/generated/clientset/versioned/fake" - v1 "github.com/heptio/ark/pkg/generated/clientset/versioned/typed/ark/v1" - "github.com/heptio/ark/pkg/serverstatusrequest" + velerov1 "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/buildinfo" + "github.com/heptio/velero/pkg/generated/clientset/versioned/fake" + v1 "github.com/heptio/velero/pkg/generated/clientset/versioned/typed/velero/v1" + "github.com/heptio/velero/pkg/serverstatusrequest" ) func TestPrintVersion(t *testing.T) { @@ -54,7 +54,7 @@ func TestPrintVersion(t *testing.T) { tests := []struct { name string clientOnly bool - serverStatusRequest *arkv1.ServerStatusRequest + serverStatusRequest *velerov1.ServerStatusRequest getterError error want string }{ @@ -90,10 +90,10 @@ func TestPrintVersion(t *testing.T) { // getServerStatus should only be called when clientOnly = false if !tc.clientOnly { - serverStatusGetter.On("getServerStatus", client.ArkV1()).Return(tc.serverStatusRequest, tc.getterError) + serverStatusGetter.On("getServerStatus", client.VeleroV1()).Return(tc.serverStatusRequest, tc.getterError) } - printVersion(buf, tc.clientOnly, client.ArkV1(), serverStatusGetter) + printVersion(buf, tc.clientOnly, client.VeleroV1(), serverStatusGetter) assert.Equal(t, tc.want, buf.String()) }) @@ -106,15 +106,15 @@ type mockServerStatusGetter struct { } // getServerStatus provides a mock function with given fields: client -func (_m *mockServerStatusGetter) getServerStatus(client v1.ServerStatusRequestsGetter) (*arkv1.ServerStatusRequest, error) { +func (_m *mockServerStatusGetter) getServerStatus(client v1.ServerStatusRequestsGetter) (*velerov1.ServerStatusRequest, error) { ret := _m.Called(client) - var r0 *arkv1.ServerStatusRequest - if rf, ok := ret.Get(0).(func(v1.ServerStatusRequestsGetter) *arkv1.ServerStatusRequest); ok { + var r0 *velerov1.ServerStatusRequest + if rf, ok := ret.Get(0).(func(v1.ServerStatusRequestsGetter) *velerov1.ServerStatusRequest); ok { r0 = rf(client) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*arkv1.ServerStatusRequest) + r0 = ret.Get(0).(*velerov1.ServerStatusRequest) } } diff --git a/pkg/controller/backup_controller.go b/pkg/controller/backup_controller.go index e6397e6b28..b06800c911 100644 --- a/pkg/controller/backup_controller.go +++ b/pkg/controller/backup_controller.go @@ -37,19 +37,19 @@ import ( kerrors "k8s.io/apimachinery/pkg/util/errors" 
"k8s.io/client-go/tools/cache" - api "github.com/heptio/ark/pkg/apis/ark/v1" - pkgbackup "github.com/heptio/ark/pkg/backup" - arkv1client "github.com/heptio/ark/pkg/generated/clientset/versioned/typed/ark/v1" - informers "github.com/heptio/ark/pkg/generated/informers/externalversions/ark/v1" - listers "github.com/heptio/ark/pkg/generated/listers/ark/v1" - "github.com/heptio/ark/pkg/metrics" - "github.com/heptio/ark/pkg/persistence" - "github.com/heptio/ark/pkg/plugin" - "github.com/heptio/ark/pkg/util/collections" - "github.com/heptio/ark/pkg/util/encode" - kubeutil "github.com/heptio/ark/pkg/util/kube" - "github.com/heptio/ark/pkg/util/logging" - "github.com/heptio/ark/pkg/volume" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + pkgbackup "github.com/heptio/velero/pkg/backup" + velerov1client "github.com/heptio/velero/pkg/generated/clientset/versioned/typed/velero/v1" + informers "github.com/heptio/velero/pkg/generated/informers/externalversions/velero/v1" + listers "github.com/heptio/velero/pkg/generated/listers/velero/v1" + "github.com/heptio/velero/pkg/metrics" + "github.com/heptio/velero/pkg/persistence" + "github.com/heptio/velero/pkg/plugin" + "github.com/heptio/velero/pkg/util/collections" + "github.com/heptio/velero/pkg/util/encode" + kubeutil "github.com/heptio/velero/pkg/util/kube" + "github.com/heptio/velero/pkg/util/logging" + "github.com/heptio/velero/pkg/volume" ) type backupController struct { @@ -57,7 +57,7 @@ type backupController struct { backupper pkgbackup.Backupper lister listers.BackupLister - client arkv1client.BackupsGetter + client velerov1client.BackupsGetter clock clock.Clock backupLogLevel logrus.Level newPluginManager func(logrus.FieldLogger) plugin.Manager @@ -67,12 +67,12 @@ type backupController struct { snapshotLocationLister listers.VolumeSnapshotLocationLister defaultSnapshotLocations map[string]string metrics *metrics.ServerMetrics - newBackupStore func(*api.BackupStorageLocation, persistence.ObjectStoreGetter, logrus.FieldLogger) (persistence.BackupStore, error) + newBackupStore func(*velerov1api.BackupStorageLocation, persistence.ObjectStoreGetter, logrus.FieldLogger) (persistence.BackupStore, error) } func NewBackupController( backupInformer informers.BackupInformer, - client arkv1client.BackupsGetter, + client velerov1client.BackupsGetter, backupper pkgbackup.Backupper, logger logrus.FieldLogger, backupLogLevel logrus.Level, @@ -112,10 +112,10 @@ func NewBackupController( backupInformer.Informer().AddEventHandler( cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { - backup := obj.(*api.Backup) + backup := obj.(*velerov1api.Backup) switch backup.Status.Phase { - case "", api.BackupPhaseNew: + case "", velerov1api.BackupPhaseNew: // only process new backups default: c.logger.WithFields(logrus.Fields{ @@ -162,7 +162,7 @@ func (c *backupController) processBackup(key string) error { // InProgress, we still need this check so we can return nil to indicate we've finished processing // this key (even though it was a no-op). 
switch original.Status.Phase { - case "", api.BackupPhaseNew: + case "", velerov1api.BackupPhaseNew: // only process new backups default: return nil @@ -172,9 +172,9 @@ func (c *backupController) processBackup(key string) error { request := c.prepareBackupRequest(original) if len(request.Status.ValidationErrors) > 0 { - request.Status.Phase = api.BackupPhaseFailedValidation + request.Status.Phase = velerov1api.BackupPhaseFailedValidation } else { - request.Status.Phase = api.BackupPhaseInProgress + request.Status.Phase = velerov1api.BackupPhaseInProgress } // update status @@ -186,7 +186,7 @@ func (c *backupController) processBackup(key string) error { original = updatedBackup request.Backup = updatedBackup.DeepCopy() - if request.Status.Phase == api.BackupPhaseFailedValidation { + if request.Status.Phase == velerov1api.BackupPhaseFailedValidation { return nil } @@ -195,12 +195,12 @@ func (c *backupController) processBackup(key string) error { log.Debug("Running backup") // execution & upload of backup - backupScheduleName := request.GetLabels()["ark-schedule"] + backupScheduleName := request.GetLabels()[velerov1api.ScheduleNameLabel] c.metrics.RegisterBackupAttempt(backupScheduleName) if err := c.runBackup(request); err != nil { log.WithError(err).Error("backup failed") - request.Status.Phase = api.BackupPhaseFailed + request.Status.Phase = velerov1api.BackupPhaseFailed c.metrics.RegisterBackupFailed(backupScheduleName) } else { c.metrics.RegisterBackupSuccess(backupScheduleName) @@ -214,7 +214,7 @@ func (c *backupController) processBackup(key string) error { return nil } -func patchBackup(original, updated *api.Backup, client arkv1client.BackupsGetter) (*api.Backup, error) { +func patchBackup(original, updated *velerov1api.Backup, client velerov1client.BackupsGetter) (*velerov1api.Backup, error) { origBytes, err := json.Marshal(original) if err != nil { return nil, errors.Wrap(err, "error marshalling original backup") @@ -238,7 +238,7 @@ func patchBackup(original, updated *api.Backup, client arkv1client.BackupsGetter return res, nil } -func (c *backupController) prepareBackupRequest(backup *api.Backup) *pkgbackup.Request { +func (c *backupController) prepareBackupRequest(backup *velerov1api.Backup) *pkgbackup.Request { request := &pkgbackup.Request{ Backup: backup.DeepCopy(), // don't modify items in the cache } @@ -260,7 +260,7 @@ func (c *backupController) prepareBackupRequest(backup *api.Backup) *pkgbackup.R if request.Labels == nil { request.Labels = make(map[string]string) } - request.Labels[api.StorageLocationLabel] = request.Spec.StorageLocation + request.Labels[velerov1api.StorageLocationLabel] = request.Spec.StorageLocation // validate the included/excluded resources and namespaces for _, err := range collections.ValidateIncludesExcludes(request.Spec.IncludedResources, request.Spec.ExcludedResources) { @@ -304,9 +304,9 @@ func (c *backupController) prepareBackupRequest(backup *api.Backup) *pkgbackup.R // - a given provider's default location name is added to .spec.volumeSnapshotLocations if one // is not explicitly specified for the provider (if there's only one location for the provider, // it will automatically be used) -func (c *backupController) validateAndGetSnapshotLocations(backup *api.Backup) (map[string]*api.VolumeSnapshotLocation, []string) { +func (c *backupController) validateAndGetSnapshotLocations(backup *velerov1api.Backup) (map[string]*velerov1api.VolumeSnapshotLocation, []string) { errors := []string{} - providerLocations := 
make(map[string]*api.VolumeSnapshotLocation) + providerLocations := make(map[string]*velerov1api.VolumeSnapshotLocation) for _, locationName := range backup.Spec.VolumeSnapshotLocations { // validate each locationName exists as a VolumeSnapshotLocation @@ -344,7 +344,7 @@ func (c *backupController) validateAndGetSnapshotLocations(backup *api.Backup) ( } // build a map of provider->list of all locations for the provider - allProviderLocations := make(map[string][]*api.VolumeSnapshotLocation) + allProviderLocations := make(map[string][]*velerov1api.VolumeSnapshotLocation) for i := range allLocations { loc := allLocations[i] allProviderLocations[loc.Spec.Provider] = append(allProviderLocations[loc.Spec.Provider], loc) @@ -434,9 +434,9 @@ func (c *backupController) runBackup(backup *pkgbackup.Request) error { // Do the actual backup if err := c.backupper.Backup(log, backup, backupFile, actions, pluginManager); err != nil { errs = append(errs, err) - backup.Status.Phase = api.BackupPhaseFailed + backup.Status.Phase = velerov1api.BackupPhaseFailed } else { - backup.Status.Phase = api.BackupPhaseCompleted + backup.Status.Phase = velerov1api.BackupPhaseCompleted } if err := gzippedLogFile.Close(); err != nil { @@ -462,8 +462,8 @@ func (c *backupController) runBackup(backup *pkgbackup.Request) error { return kerrors.NewAggregate(errs) } -func recordBackupMetrics(backup *api.Backup, backupFile *os.File, serverMetrics *metrics.ServerMetrics) error { - backupScheduleName := backup.GetLabels()["ark-schedule"] +func recordBackupMetrics(backup *velerov1api.Backup, backupFile *os.File, serverMetrics *metrics.ServerMetrics) error { + backupScheduleName := backup.GetLabels()[velerov1api.ScheduleNameLabel] var backupSizeBytes int64 var err error diff --git a/pkg/controller/backup_controller_test.go b/pkg/controller/backup_controller_test.go index 41dfb4f2b2..edf4fa5aa0 100644 --- a/pkg/controller/backup_controller_test.go +++ b/pkg/controller/backup_controller_test.go @@ -33,17 +33,17 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/clock" - "github.com/heptio/ark/pkg/apis/ark/v1" - pkgbackup "github.com/heptio/ark/pkg/backup" - "github.com/heptio/ark/pkg/generated/clientset/versioned/fake" - informers "github.com/heptio/ark/pkg/generated/informers/externalversions" - "github.com/heptio/ark/pkg/metrics" - "github.com/heptio/ark/pkg/persistence" - persistencemocks "github.com/heptio/ark/pkg/persistence/mocks" - "github.com/heptio/ark/pkg/plugin" - pluginmocks "github.com/heptio/ark/pkg/plugin/mocks" - "github.com/heptio/ark/pkg/util/logging" - arktest "github.com/heptio/ark/pkg/util/test" + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + pkgbackup "github.com/heptio/velero/pkg/backup" + "github.com/heptio/velero/pkg/generated/clientset/versioned/fake" + informers "github.com/heptio/velero/pkg/generated/informers/externalversions" + "github.com/heptio/velero/pkg/metrics" + "github.com/heptio/velero/pkg/persistence" + persistencemocks "github.com/heptio/velero/pkg/persistence/mocks" + "github.com/heptio/velero/pkg/plugin" + pluginmocks "github.com/heptio/velero/pkg/plugin/mocks" + "github.com/heptio/velero/pkg/util/logging" + velerotest "github.com/heptio/velero/pkg/util/test" ) type fakeBackupper struct { @@ -70,27 +70,27 @@ func TestProcessBackupNonProcessedItems(t *testing.T) { { name: "backup not found in lister returns error", key: "nonexistent/backup", - expectedErr: "error getting backup: backup.ark.heptio.com \"backup\" not found", + expectedErr: "error getting 
backup: backup.velero.io \"backup\" not found", }, { name: "FailedValidation backup is not processed", - key: "heptio-ark/backup-1", - backup: arktest.NewTestBackup().WithName("backup-1").WithPhase(v1.BackupPhaseFailedValidation).Backup, + key: "velero/backup-1", + backup: velerotest.NewTestBackup().WithName("backup-1").WithPhase(v1.BackupPhaseFailedValidation).Backup, }, { name: "InProgress backup is not processed", - key: "heptio-ark/backup-1", - backup: arktest.NewTestBackup().WithName("backup-1").WithPhase(v1.BackupPhaseInProgress).Backup, + key: "velero/backup-1", + backup: velerotest.NewTestBackup().WithName("backup-1").WithPhase(v1.BackupPhaseInProgress).Backup, }, { name: "Completed backup is not processed", - key: "heptio-ark/backup-1", - backup: arktest.NewTestBackup().WithName("backup-1").WithPhase(v1.BackupPhaseCompleted).Backup, + key: "velero/backup-1", + backup: velerotest.NewTestBackup().WithName("backup-1").WithPhase(v1.BackupPhaseCompleted).Backup, }, { name: "Failed backup is not processed", - key: "heptio-ark/backup-1", - backup: arktest.NewTestBackup().WithName("backup-1").WithPhase(v1.BackupPhaseFailed).Backup, + key: "velero/backup-1", + backup: velerotest.NewTestBackup().WithName("backup-1").WithPhase(v1.BackupPhaseFailed).Backup, }, } @@ -103,11 +103,11 @@ func TestProcessBackupNonProcessedItems(t *testing.T) { c := &backupController{ genericController: newGenericController("backup-test", logger), - lister: sharedInformers.Ark().V1().Backups().Lister(), + lister: sharedInformers.Velero().V1().Backups().Lister(), } if test.backup != nil { - require.NoError(t, sharedInformers.Ark().V1().Backups().Informer().GetStore().Add(test.backup)) + require.NoError(t, sharedInformers.Velero().V1().Backups().Informer().GetStore().Add(test.backup)) } err := c.processBackup(test.key) @@ -127,7 +127,7 @@ func TestProcessBackupNonProcessedItems(t *testing.T) { } func TestProcessBackupValidationFailures(t *testing.T) { - defaultBackupLocation := arktest.NewTestBackupStorageLocation().WithName("loc-1").BackupStorageLocation + defaultBackupLocation := velerotest.NewTestBackupStorageLocation().WithName("loc-1").BackupStorageLocation tests := []struct { name string @@ -137,20 +137,20 @@ func TestProcessBackupValidationFailures(t *testing.T) { }{ { name: "invalid included/excluded resources fails validation", - backup: arktest.NewTestBackup().WithName("backup-1").WithIncludedResources("foo").WithExcludedResources("foo").Backup, + backup: velerotest.NewTestBackup().WithName("backup-1").WithIncludedResources("foo").WithExcludedResources("foo").Backup, backupLocation: defaultBackupLocation, expectedErrs: []string{"Invalid included/excluded resource lists: excludes list cannot contain an item in the includes list: foo"}, }, { name: "invalid included/excluded namespaces fails validation", - backup: arktest.NewTestBackup().WithName("backup-1").WithIncludedNamespaces("foo").WithExcludedNamespaces("foo").Backup, + backup: velerotest.NewTestBackup().WithName("backup-1").WithIncludedNamespaces("foo").WithExcludedNamespaces("foo").Backup, backupLocation: defaultBackupLocation, expectedErrs: []string{"Invalid included/excluded namespace lists: excludes list cannot contain an item in the includes list: foo"}, }, { name: "non-existent backup location fails validation", - backup: arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("nonexistent").Backup, - expectedErrs: []string{"a BackupStorageLocation CRD with the name specified in the backup spec needs to be created before this backup can 
be executed. Error: backupstoragelocation.ark.heptio.com \"nonexistent\" not found"}, + backup: velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("nonexistent").Backup, + expectedErrs: []string{"a BackupStorageLocation CRD with the name specified in the backup spec needs to be created before this backup can be executed. Error: backupstoragelocation.velero.io \"nonexistent\" not found"}, }, } @@ -164,26 +164,26 @@ func TestProcessBackupValidationFailures(t *testing.T) { c := &backupController{ genericController: newGenericController("backup-test", logger), - client: clientset.ArkV1(), - lister: sharedInformers.Ark().V1().Backups().Lister(), - backupLocationLister: sharedInformers.Ark().V1().BackupStorageLocations().Lister(), - snapshotLocationLister: sharedInformers.Ark().V1().VolumeSnapshotLocations().Lister(), + client: clientset.VeleroV1(), + lister: sharedInformers.Velero().V1().Backups().Lister(), + backupLocationLister: sharedInformers.Velero().V1().BackupStorageLocations().Lister(), + snapshotLocationLister: sharedInformers.Velero().V1().VolumeSnapshotLocations().Lister(), defaultBackupLocation: defaultBackupLocation.Name, } require.NotNil(t, test.backup) - require.NoError(t, sharedInformers.Ark().V1().Backups().Informer().GetStore().Add(test.backup)) + require.NoError(t, sharedInformers.Velero().V1().Backups().Informer().GetStore().Add(test.backup)) if test.backupLocation != nil { - _, err := clientset.ArkV1().BackupStorageLocations(test.backupLocation.Namespace).Create(test.backupLocation) + _, err := clientset.VeleroV1().BackupStorageLocations(test.backupLocation.Namespace).Create(test.backupLocation) require.NoError(t, err) - require.NoError(t, sharedInformers.Ark().V1().BackupStorageLocations().Informer().GetStore().Add(test.backupLocation)) + require.NoError(t, sharedInformers.Velero().V1().BackupStorageLocations().Informer().GetStore().Add(test.backupLocation)) } require.NoError(t, c.processBackup(fmt.Sprintf("%s/%s", test.backup.Namespace, test.backup.Name))) - res, err := clientset.ArkV1().Backups(test.backup.Namespace).Get(test.backup.Name, metav1.GetOptions{}) + res, err := clientset.VeleroV1().Backups(test.backup.Namespace).Get(test.backup.Name, metav1.GetOptions{}) require.NoError(t, err) assert.Equal(t, v1.BackupPhaseFailedValidation, res.Status.Phase) @@ -198,7 +198,7 @@ func TestProcessBackupValidationFailures(t *testing.T) { } func TestProcessBackupCompletions(t *testing.T) { - defaultBackupLocation := arktest.NewTestBackupStorageLocation().WithName("loc-1").BackupStorageLocation + defaultBackupLocation := velerotest.NewTestBackupStorageLocation().WithName("loc-1").BackupStorageLocation now, err := time.Parse(time.RFC1123Z, time.RFC1123Z) require.NoError(t, err) @@ -212,14 +212,14 @@ func TestProcessBackupCompletions(t *testing.T) { }{ { name: "backup with no backup location gets the default", - backup: arktest.NewTestBackup().WithName("backup-1").Backup, + backup: velerotest.NewTestBackup().WithName("backup-1").Backup, backupLocation: defaultBackupLocation, expectedResult: &v1.Backup{ ObjectMeta: metav1.ObjectMeta{ Namespace: v1.DefaultNamespace, Name: "backup-1", Labels: map[string]string{ - "ark.heptio.com/storage-location": "loc-1", + "velero.io/storage-location": "loc-1", }, }, Spec: v1.BackupSpec{ @@ -235,14 +235,14 @@ func TestProcessBackupCompletions(t *testing.T) { }, { name: "backup with a specific backup location keeps it", - backup: arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("alt-loc").Backup, - backupLocation: 
arktest.NewTestBackupStorageLocation().WithName("alt-loc").BackupStorageLocation, + backup: velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("alt-loc").Backup, + backupLocation: velerotest.NewTestBackupStorageLocation().WithName("alt-loc").BackupStorageLocation, expectedResult: &v1.Backup{ ObjectMeta: metav1.ObjectMeta{ Namespace: v1.DefaultNamespace, Name: "backup-1", Labels: map[string]string{ - "ark.heptio.com/storage-location": "alt-loc", + "velero.io/storage-location": "alt-loc", }, }, Spec: v1.BackupSpec{ @@ -258,14 +258,14 @@ func TestProcessBackupCompletions(t *testing.T) { }, { name: "backup with a TTL has expiration set", - backup: arktest.NewTestBackup().WithName("backup-1").WithTTL(10 * time.Minute).Backup, + backup: velerotest.NewTestBackup().WithName("backup-1").WithTTL(10 * time.Minute).Backup, backupLocation: defaultBackupLocation, expectedResult: &v1.Backup{ ObjectMeta: metav1.ObjectMeta{ Namespace: v1.DefaultNamespace, Name: "backup-1", Labels: map[string]string{ - "ark.heptio.com/storage-location": "loc-1", + "velero.io/storage-location": "loc-1", }, }, Spec: v1.BackupSpec{ @@ -296,10 +296,10 @@ func TestProcessBackupCompletions(t *testing.T) { c := &backupController{ genericController: newGenericController("backup-test", logger), - client: clientset.ArkV1(), - lister: sharedInformers.Ark().V1().Backups().Lister(), - backupLocationLister: sharedInformers.Ark().V1().BackupStorageLocations().Lister(), - snapshotLocationLister: sharedInformers.Ark().V1().VolumeSnapshotLocations().Lister(), + client: clientset.VeleroV1(), + lister: sharedInformers.Velero().V1().Backups().Lister(), + backupLocationLister: sharedInformers.Velero().V1().BackupStorageLocations().Lister(), + snapshotLocationLister: sharedInformers.Velero().V1().VolumeSnapshotLocations().Lister(), defaultBackupLocation: defaultBackupLocation.Name, backupTracker: NewBackupTracker(), metrics: metrics.NewServerMetrics(), @@ -325,26 +325,26 @@ func TestProcessBackupCompletions(t *testing.T) { // add the test's backup to the informer/lister store require.NotNil(t, test.backup) - require.NoError(t, sharedInformers.Ark().V1().Backups().Informer().GetStore().Add(test.backup)) + require.NoError(t, sharedInformers.Velero().V1().Backups().Informer().GetStore().Add(test.backup)) // add the default backup storage location to the clientset and the informer/lister store - _, err := clientset.ArkV1().BackupStorageLocations(defaultBackupLocation.Namespace).Create(defaultBackupLocation) + _, err := clientset.VeleroV1().BackupStorageLocations(defaultBackupLocation.Namespace).Create(defaultBackupLocation) require.NoError(t, err) - require.NoError(t, sharedInformers.Ark().V1().BackupStorageLocations().Informer().GetStore().Add(defaultBackupLocation)) + require.NoError(t, sharedInformers.Velero().V1().BackupStorageLocations().Informer().GetStore().Add(defaultBackupLocation)) // add the test's backup storage location to the clientset and the informer/lister store // if it's different than the default if test.backupLocation != nil && test.backupLocation != defaultBackupLocation { - _, err := clientset.ArkV1().BackupStorageLocations(test.backupLocation.Namespace).Create(test.backupLocation) + _, err := clientset.VeleroV1().BackupStorageLocations(test.backupLocation.Namespace).Create(test.backupLocation) require.NoError(t, err) - require.NoError(t, sharedInformers.Ark().V1().BackupStorageLocations().Informer().GetStore().Add(test.backupLocation)) + require.NoError(t, 
sharedInformers.Velero().V1().BackupStorageLocations().Informer().GetStore().Add(test.backupLocation)) } require.NoError(t, c.processBackup(fmt.Sprintf("%s/%s", test.backup.Namespace, test.backup.Name))) - res, err := clientset.ArkV1().Backups(test.backup.Namespace).Get(test.backup.Name, metav1.GetOptions{}) + res, err := clientset.VeleroV1().Backups(test.backup.Namespace).Get(test.backup.Name, metav1.GetOptions{}) require.NoError(t, err) assert.Equal(t, test.expectedResult, res) @@ -355,8 +355,8 @@ func TestProcessBackupCompletions(t *testing.T) { func TestValidateAndGetSnapshotLocations(t *testing.T) { tests := []struct { name string - backup *arktest.TestBackup - locations []*arktest.TestVolumeSnapshotLocation + backup *velerotest.TestBackup + locations []*velerotest.TestVolumeSnapshotLocation defaultLocations map[string]string expectedVolumeSnapshotLocationNames []string // adding these in the expected order will allow to test with better msgs in case of a test failure expectedErrors string @@ -364,76 +364,76 @@ func TestValidateAndGetSnapshotLocations(t *testing.T) { }{ { name: "location name does not correspond to any existing location", - backup: arktest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithVolumeSnapshotLocations("random-name"), - locations: []*arktest.TestVolumeSnapshotLocation{ - arktest.NewTestVolumeSnapshotLocation().WithProvider("aws").WithName("aws-us-east-1"), - arktest.NewTestVolumeSnapshotLocation().WithProvider("aws").WithName("aws-us-west-1"), - arktest.NewTestVolumeSnapshotLocation().WithProvider("fake-provider").WithName("some-name"), + backup: velerotest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithVolumeSnapshotLocations("random-name"), + locations: []*velerotest.TestVolumeSnapshotLocation{ + velerotest.NewTestVolumeSnapshotLocation().WithProvider("aws").WithName("aws-us-east-1"), + velerotest.NewTestVolumeSnapshotLocation().WithProvider("aws").WithName("aws-us-west-1"), + velerotest.NewTestVolumeSnapshotLocation().WithProvider("fake-provider").WithName("some-name"), }, - expectedErrors: "a VolumeSnapshotLocation CRD for the location random-name with the name specified in the backup spec needs to be created before this snapshot can be executed. Error: volumesnapshotlocation.ark.heptio.com \"random-name\" not found", expectedSuccess: false, + expectedErrors: "a VolumeSnapshotLocation CRD for the location random-name with the name specified in the backup spec needs to be created before this snapshot can be executed. 
Error: volumesnapshotlocation.velero.io \"random-name\" not found", expectedSuccess: false, }, { name: "duplicate locationName per provider: should filter out dups", - backup: arktest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithVolumeSnapshotLocations("aws-us-west-1", "aws-us-west-1"), - locations: []*arktest.TestVolumeSnapshotLocation{ - arktest.NewTestVolumeSnapshotLocation().WithProvider("aws").WithName("aws-us-east-1"), - arktest.NewTestVolumeSnapshotLocation().WithProvider("aws").WithName("aws-us-west-1"), + backup: velerotest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithVolumeSnapshotLocations("aws-us-west-1", "aws-us-west-1"), + locations: []*velerotest.TestVolumeSnapshotLocation{ + velerotest.NewTestVolumeSnapshotLocation().WithProvider("aws").WithName("aws-us-east-1"), + velerotest.NewTestVolumeSnapshotLocation().WithProvider("aws").WithName("aws-us-west-1"), }, expectedVolumeSnapshotLocationNames: []string{"aws-us-west-1"}, expectedSuccess: true, }, { name: "multiple non-dupe location names per provider should error", - backup: arktest.NewTestBackup().WithName("backup1").WithVolumeSnapshotLocations("aws-us-east-1", "aws-us-west-1"), - locations: []*arktest.TestVolumeSnapshotLocation{ - arktest.NewTestVolumeSnapshotLocation().WithProvider("aws").WithName("aws-us-east-1"), - arktest.NewTestVolumeSnapshotLocation().WithProvider("aws").WithName("aws-us-west-1"), - arktest.NewTestVolumeSnapshotLocation().WithProvider("fake-provider").WithName("some-name"), + backup: velerotest.NewTestBackup().WithName("backup1").WithVolumeSnapshotLocations("aws-us-east-1", "aws-us-west-1"), + locations: []*velerotest.TestVolumeSnapshotLocation{ + velerotest.NewTestVolumeSnapshotLocation().WithProvider("aws").WithName("aws-us-east-1"), + velerotest.NewTestVolumeSnapshotLocation().WithProvider("aws").WithName("aws-us-west-1"), + velerotest.NewTestVolumeSnapshotLocation().WithProvider("fake-provider").WithName("some-name"), }, expectedErrors: "more than one VolumeSnapshotLocation name specified for provider aws: aws-us-west-1; unexpected name was aws-us-east-1", expectedSuccess: false, }, { name: "no location name for the provider exists, only one VSL for the provider: use it", - backup: arktest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew), - locations: []*arktest.TestVolumeSnapshotLocation{ - arktest.NewTestVolumeSnapshotLocation().WithProvider("aws").WithName("aws-us-east-1"), + backup: velerotest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew), + locations: []*velerotest.TestVolumeSnapshotLocation{ + velerotest.NewTestVolumeSnapshotLocation().WithProvider("aws").WithName("aws-us-east-1"), }, expectedVolumeSnapshotLocationNames: []string{"aws-us-east-1"}, expectedSuccess: true, }, { name: "no location name for the provider exists, no default, more than one VSL for the provider: error", - backup: arktest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew), - locations: []*arktest.TestVolumeSnapshotLocation{ - arktest.NewTestVolumeSnapshotLocation().WithProvider("aws").WithName("aws-us-east-1"), - arktest.NewTestVolumeSnapshotLocation().WithProvider("aws").WithName("aws-us-west-1"), + backup: velerotest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew), + locations: []*velerotest.TestVolumeSnapshotLocation{ + velerotest.NewTestVolumeSnapshotLocation().WithProvider("aws").WithName("aws-us-east-1"), + velerotest.NewTestVolumeSnapshotLocation().WithProvider("aws").WithName("aws-us-west-1"), }, 
expectedErrors: "provider aws has more than one possible volume snapshot location, and none were specified explicitly or as a default", }, { name: "no location name for the provider exists, more than one VSL for the provider: the provider's default should be added", - backup: arktest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew), + backup: velerotest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew), defaultLocations: map[string]string{"aws": "aws-us-east-1"}, - locations: []*arktest.TestVolumeSnapshotLocation{ - arktest.NewTestVolumeSnapshotLocation().WithName("aws-us-east-1").WithProvider("aws"), - arktest.NewTestVolumeSnapshotLocation().WithName("aws-us-west-1").WithProvider("aws"), + locations: []*velerotest.TestVolumeSnapshotLocation{ + velerotest.NewTestVolumeSnapshotLocation().WithName("aws-us-east-1").WithProvider("aws"), + velerotest.NewTestVolumeSnapshotLocation().WithName("aws-us-west-1").WithProvider("aws"), }, expectedVolumeSnapshotLocationNames: []string{"aws-us-east-1"}, expectedSuccess: true, }, { name: "no existing location name and no default location name given", - backup: arktest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew), + backup: velerotest.NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew), expectedSuccess: true, }, { name: "multiple location names for a provider, default location name for another provider", - backup: arktest.NewTestBackup().WithName("backup1").WithVolumeSnapshotLocations("aws-us-west-1", "aws-us-west-1"), + backup: velerotest.NewTestBackup().WithName("backup1").WithVolumeSnapshotLocations("aws-us-west-1", "aws-us-west-1"), defaultLocations: map[string]string{"fake-provider": "some-name"}, - locations: []*arktest.TestVolumeSnapshotLocation{ - arktest.NewTestVolumeSnapshotLocation().WithProvider("aws").WithName("aws-us-west-1"), - arktest.NewTestVolumeSnapshotLocation().WithProvider("fake-provider").WithName("some-name"), + locations: []*velerotest.TestVolumeSnapshotLocation{ + velerotest.NewTestVolumeSnapshotLocation().WithProvider("aws").WithName("aws-us-west-1"), + velerotest.NewTestVolumeSnapshotLocation().WithProvider("fake-provider").WithName("some-name"), }, expectedVolumeSnapshotLocationNames: []string{"aws-us-west-1", "some-name"}, expectedSuccess: true, @@ -448,7 +448,7 @@ func TestValidateAndGetSnapshotLocations(t *testing.T) { ) c := &backupController{ - snapshotLocationLister: sharedInformers.Ark().V1().VolumeSnapshotLocations().Lister(), + snapshotLocationLister: sharedInformers.Velero().V1().VolumeSnapshotLocations().Lister(), defaultSnapshotLocations: test.defaultLocations, } @@ -456,7 +456,7 @@ func TestValidateAndGetSnapshotLocations(t *testing.T) { backup := test.backup.DeepCopy() backup.Spec.VolumeSnapshotLocations = test.backup.Spec.VolumeSnapshotLocations for _, location := range test.locations { - require.NoError(t, sharedInformers.Ark().V1().VolumeSnapshotLocations().Informer().GetStore().Add(location.VolumeSnapshotLocation)) + require.NoError(t, sharedInformers.Velero().V1().VolumeSnapshotLocations().Informer().GetStore().Add(location.VolumeSnapshotLocation)) } providerLocations, errs := c.validateAndGetSnapshotLocations(backup) diff --git a/pkg/controller/backup_deletion_controller.go b/pkg/controller/backup_deletion_controller.go index 6e4f90f461..00abda099c 100644 --- a/pkg/controller/backup_deletion_controller.go +++ b/pkg/controller/backup_deletion_controller.go @@ -32,16 +32,16 @@ import ( kubeerrs "k8s.io/apimachinery/pkg/util/errors" 
"k8s.io/client-go/tools/cache" - "github.com/heptio/ark/pkg/apis/ark/v1" - pkgbackup "github.com/heptio/ark/pkg/backup" - "github.com/heptio/ark/pkg/cloudprovider" - arkv1client "github.com/heptio/ark/pkg/generated/clientset/versioned/typed/ark/v1" - informers "github.com/heptio/ark/pkg/generated/informers/externalversions/ark/v1" - listers "github.com/heptio/ark/pkg/generated/listers/ark/v1" - "github.com/heptio/ark/pkg/persistence" - "github.com/heptio/ark/pkg/plugin" - "github.com/heptio/ark/pkg/restic" - "github.com/heptio/ark/pkg/util/kube" + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + pkgbackup "github.com/heptio/velero/pkg/backup" + "github.com/heptio/velero/pkg/cloudprovider" + velerov1client "github.com/heptio/velero/pkg/generated/clientset/versioned/typed/velero/v1" + informers "github.com/heptio/velero/pkg/generated/informers/externalversions/velero/v1" + listers "github.com/heptio/velero/pkg/generated/listers/velero/v1" + "github.com/heptio/velero/pkg/persistence" + "github.com/heptio/velero/pkg/plugin" + "github.com/heptio/velero/pkg/restic" + "github.com/heptio/velero/pkg/util/kube" ) const resticTimeout = time.Minute @@ -49,11 +49,11 @@ const resticTimeout = time.Minute type backupDeletionController struct { *genericController - deleteBackupRequestClient arkv1client.DeleteBackupRequestsGetter + deleteBackupRequestClient velerov1client.DeleteBackupRequestsGetter deleteBackupRequestLister listers.DeleteBackupRequestLister - backupClient arkv1client.BackupsGetter + backupClient velerov1client.BackupsGetter restoreLister listers.RestoreLister - restoreClient arkv1client.RestoresGetter + restoreClient velerov1client.RestoresGetter backupTracker BackupTracker resticMgr restic.RepositoryManager podvolumeBackupLister listers.PodVolumeBackupLister @@ -69,10 +69,10 @@ type backupDeletionController struct { func NewBackupDeletionController( logger logrus.FieldLogger, deleteBackupRequestInformer informers.DeleteBackupRequestInformer, - deleteBackupRequestClient arkv1client.DeleteBackupRequestsGetter, - backupClient arkv1client.BackupsGetter, + deleteBackupRequestClient velerov1client.DeleteBackupRequestsGetter, + backupClient velerov1client.BackupsGetter, restoreInformer informers.RestoreInformer, - restoreClient arkv1client.RestoresGetter, + restoreClient velerov1client.RestoresGetter, backupTracker BackupTracker, resticMgr restic.RepositoryManager, podvolumeBackupInformer informers.PodVolumeBackupInformer, diff --git a/pkg/controller/backup_deletion_controller_test.go b/pkg/controller/backup_deletion_controller_test.go index eed72aafa8..64f4cb0ae9 100644 --- a/pkg/controller/backup_deletion_controller_test.go +++ b/pkg/controller/backup_deletion_controller_test.go @@ -32,16 +32,16 @@ import ( "k8s.io/apimachinery/pkg/util/sets" core "k8s.io/client-go/testing" - "github.com/heptio/ark/pkg/apis/ark/v1" - pkgbackup "github.com/heptio/ark/pkg/backup" - "github.com/heptio/ark/pkg/generated/clientset/versioned/fake" - informers "github.com/heptio/ark/pkg/generated/informers/externalversions" - "github.com/heptio/ark/pkg/persistence" - persistencemocks "github.com/heptio/ark/pkg/persistence/mocks" - "github.com/heptio/ark/pkg/plugin" - pluginmocks "github.com/heptio/ark/pkg/plugin/mocks" - arktest "github.com/heptio/ark/pkg/util/test" - "github.com/heptio/ark/pkg/volume" + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + pkgbackup "github.com/heptio/velero/pkg/backup" + "github.com/heptio/velero/pkg/generated/clientset/versioned/fake" + informers 
"github.com/heptio/velero/pkg/generated/informers/externalversions" + "github.com/heptio/velero/pkg/persistence" + persistencemocks "github.com/heptio/velero/pkg/persistence/mocks" + "github.com/heptio/velero/pkg/plugin" + pluginmocks "github.com/heptio/velero/pkg/plugin/mocks" + velerotest "github.com/heptio/velero/pkg/util/test" + "github.com/heptio/velero/pkg/volume" ) func TestBackupDeletionControllerProcessQueueItem(t *testing.T) { @@ -49,17 +49,17 @@ func TestBackupDeletionControllerProcessQueueItem(t *testing.T) { sharedInformers := informers.NewSharedInformerFactory(client, 0) controller := NewBackupDeletionController( - arktest.NewLogger(), - sharedInformers.Ark().V1().DeleteBackupRequests(), - client.ArkV1(), // deleteBackupRequestClient - client.ArkV1(), // backupClient - sharedInformers.Ark().V1().Restores(), - client.ArkV1(), // restoreClient + velerotest.NewLogger(), + sharedInformers.Velero().V1().DeleteBackupRequests(), + client.VeleroV1(), // deleteBackupRequestClient + client.VeleroV1(), // backupClient + sharedInformers.Velero().V1().Restores(), + client.VeleroV1(), // restoreClient NewBackupTracker(), nil, // restic repository manager - sharedInformers.Ark().V1().PodVolumeBackups(), - sharedInformers.Ark().V1().BackupStorageLocations(), - sharedInformers.Ark().V1().VolumeSnapshotLocations(), + sharedInformers.Velero().V1().PodVolumeBackups(), + sharedInformers.Velero().V1().BackupStorageLocations(), + sharedInformers.Velero().V1().VolumeSnapshotLocations(), nil, // new plugin manager func ).(*backupDeletionController) @@ -84,7 +84,7 @@ func TestBackupDeletionControllerProcessQueueItem(t *testing.T) { for _, phase := range []v1.DeleteBackupRequestPhase{"", v1.DeleteBackupRequestPhaseNew, v1.DeleteBackupRequestPhaseInProgress} { t.Run(fmt.Sprintf("phase=%s", phase), func(t *testing.T) { req.Status.Phase = phase - sharedInformers.Ark().V1().DeleteBackupRequests().Informer().GetStore().Add(req) + sharedInformers.Velero().V1().DeleteBackupRequests().Informer().GetStore().Add(req) var errorToReturn error var actual *v1.DeleteBackupRequest @@ -113,7 +113,7 @@ func TestBackupDeletionControllerProcessQueueItem(t *testing.T) { type backupDeletionControllerTestData struct { client *fake.Clientset sharedInformers informers.SharedInformerFactory - blockStore *arktest.FakeBlockStore + blockStore *velerotest.FakeBlockStore backupStore *persistencemocks.BackupStore controller *backupDeletionController req *v1.DeleteBackupRequest @@ -123,7 +123,7 @@ func setupBackupDeletionControllerTest(objects ...runtime.Object) *backupDeletio var ( client = fake.NewSimpleClientset(objects...) 
sharedInformers = informers.NewSharedInformerFactory(client, 0) - blockStore = &arktest.FakeBlockStore{SnapshotsTaken: sets.NewString()} + blockStore = &velerotest.FakeBlockStore{SnapshotsTaken: sets.NewString()} pluginManager = &pluginmocks.Manager{} backupStore = &persistencemocks.BackupStore{} req = pkgbackup.NewDeleteBackupRequest("foo", "uid") @@ -135,17 +135,17 @@ func setupBackupDeletionControllerTest(objects ...runtime.Object) *backupDeletio blockStore: blockStore, backupStore: backupStore, controller: NewBackupDeletionController( - arktest.NewLogger(), - sharedInformers.Ark().V1().DeleteBackupRequests(), - client.ArkV1(), // deleteBackupRequestClient - client.ArkV1(), // backupClient - sharedInformers.Ark().V1().Restores(), - client.ArkV1(), // restoreClient + velerotest.NewLogger(), + sharedInformers.Velero().V1().DeleteBackupRequests(), + client.VeleroV1(), // deleteBackupRequestClient + client.VeleroV1(), // backupClient + sharedInformers.Velero().V1().Restores(), + client.VeleroV1(), // restoreClient NewBackupTracker(), nil, // restic repository manager - sharedInformers.Ark().V1().PodVolumeBackups(), - sharedInformers.Ark().V1().BackupStorageLocations(), - sharedInformers.Ark().V1().VolumeSnapshotLocations(), + sharedInformers.Velero().V1().PodVolumeBackups(), + sharedInformers.Velero().V1().BackupStorageLocations(), + sharedInformers.Velero().V1().VolumeSnapshotLocations(), func(logrus.FieldLogger) plugin.Manager { return pluginManager }, ).(*backupDeletionController), @@ -158,7 +158,7 @@ func setupBackupDeletionControllerTest(objects ...runtime.Object) *backupDeletio pluginManager.On("CleanupClients").Return(nil) - req.Namespace = "heptio-ark" + req.Namespace = "velero" req.Name = "foo-abcde" return data @@ -192,7 +192,7 @@ func TestBackupDeletionControllerProcessRequest(t *testing.T) { // past checking for an in-progress backup. this makes validation easier. 
td.controller.backupTracker.Add(td.req.Namespace, td.req.Spec.BackupName) - require.NoError(t, td.sharedInformers.Ark().V1().DeleteBackupRequests().Informer().GetStore().Add(td.req)) + require.NoError(t, td.sharedInformers.Velero().V1().DeleteBackupRequests().Informer().GetStore().Add(td.req)) existing := &v1.DeleteBackupRequest{ ObjectMeta: metav1.ObjectMeta{ @@ -206,11 +206,11 @@ func TestBackupDeletionControllerProcessRequest(t *testing.T) { BackupName: td.req.Spec.BackupName, }, } - require.NoError(t, td.sharedInformers.Ark().V1().DeleteBackupRequests().Informer().GetStore().Add(existing)) - _, err := td.client.ArkV1().DeleteBackupRequests(td.req.Namespace).Create(existing) + require.NoError(t, td.sharedInformers.Velero().V1().DeleteBackupRequests().Informer().GetStore().Add(existing)) + _, err := td.client.VeleroV1().DeleteBackupRequests(td.req.Namespace).Create(existing) require.NoError(t, err) - require.NoError(t, td.sharedInformers.Ark().V1().DeleteBackupRequests().Informer().GetStore().Add( + require.NoError(t, td.sharedInformers.Velero().V1().DeleteBackupRequests().Informer().GetStore().Add( &v1.DeleteBackupRequest{ ObjectMeta: metav1.ObjectMeta{ Namespace: td.req.Namespace, @@ -272,7 +272,7 @@ func TestBackupDeletionControllerProcessRequest(t *testing.T) { }) t.Run("patching backup to Deleting fails", func(t *testing.T) { - backup := arktest.NewTestBackup().WithName("foo").WithSnapshot("pv-1", "snap-1").Backup + backup := velerotest.NewTestBackup().WithName("foo").WithSnapshot("pv-1", "snap-1").Backup td := setupBackupDeletionControllerTest(backup) td.client.PrependReactor("patch", "deletebackuprequests", func(action core.Action) (bool, runtime.Object, error) { @@ -324,7 +324,7 @@ func TestBackupDeletionControllerProcessRequest(t *testing.T) { }) t.Run("pre-v0.10 backup with snapshots, no errors", func(t *testing.T) { - backup := arktest.NewTestBackup().WithName("foo").Backup + backup := velerotest.NewTestBackup().WithName("foo").Backup backup.UID = "uid" backup.Spec.StorageLocation = "primary" backup.Status.VolumeBackups = map[string]*v1.VolumeBackupInfo{ @@ -333,15 +333,15 @@ func TestBackupDeletionControllerProcessRequest(t *testing.T) { }, } - restore1 := arktest.NewTestRestore("heptio-ark", "restore-1", v1.RestorePhaseCompleted).WithBackup("foo").Restore - restore2 := arktest.NewTestRestore("heptio-ark", "restore-2", v1.RestorePhaseCompleted).WithBackup("foo").Restore - restore3 := arktest.NewTestRestore("heptio-ark", "restore-3", v1.RestorePhaseCompleted).WithBackup("some-other-backup").Restore + restore1 := velerotest.NewTestRestore("velero", "restore-1", v1.RestorePhaseCompleted).WithBackup("foo").Restore + restore2 := velerotest.NewTestRestore("velero", "restore-2", v1.RestorePhaseCompleted).WithBackup("foo").Restore + restore3 := velerotest.NewTestRestore("velero", "restore-3", v1.RestorePhaseCompleted).WithBackup("some-other-backup").Restore td := setupBackupDeletionControllerTest(backup, restore1, restore2, restore3) - td.sharedInformers.Ark().V1().Restores().Informer().GetStore().Add(restore1) - td.sharedInformers.Ark().V1().Restores().Informer().GetStore().Add(restore2) - td.sharedInformers.Ark().V1().Restores().Informer().GetStore().Add(restore3) + td.sharedInformers.Velero().V1().Restores().Informer().GetStore().Add(restore1) + td.sharedInformers.Velero().V1().Restores().Informer().GetStore().Add(restore2) + td.sharedInformers.Velero().V1().Restores().Informer().GetStore().Add(restore3) location := &v1.BackupStorageLocation{ ObjectMeta: metav1.ObjectMeta{ @@ 
-357,7 +357,7 @@ func TestBackupDeletionControllerProcessRequest(t *testing.T) { }, }, } - require.NoError(t, td.sharedInformers.Ark().V1().BackupStorageLocations().Informer().GetStore().Add(location)) + require.NoError(t, td.sharedInformers.Velero().V1().BackupStorageLocations().Informer().GetStore().Add(location)) snapshotLocation := &v1.VolumeSnapshotLocation{ ObjectMeta: metav1.ObjectMeta{ @@ -368,7 +368,7 @@ func TestBackupDeletionControllerProcessRequest(t *testing.T) { Provider: "provider-1", }, } - require.NoError(t, td.sharedInformers.Ark().V1().VolumeSnapshotLocations().Informer().GetStore().Add(snapshotLocation)) + require.NoError(t, td.sharedInformers.Velero().V1().VolumeSnapshotLocations().Informer().GetStore().Add(snapshotLocation)) // Clear out req labels to make sure the controller adds them td.req.Labels = make(map[string]string) @@ -403,7 +403,7 @@ func TestBackupDeletionControllerProcessRequest(t *testing.T) { v1.SchemeGroupVersion.WithResource("deletebackuprequests"), td.req.Namespace, td.req.Name, - []byte(`{"metadata":{"labels":{"ark.heptio.com/backup-name":"foo"}},"status":{"phase":"InProgress"}}`), + []byte(`{"metadata":{"labels":{"velero.io/backup-name":"foo"}},"status":{"phase":"InProgress"}}`), ), core.NewGetAction( v1.SchemeGroupVersion.WithResource("backups"), @@ -414,7 +414,7 @@ func TestBackupDeletionControllerProcessRequest(t *testing.T) { v1.SchemeGroupVersion.WithResource("deletebackuprequests"), td.req.Namespace, td.req.Name, - []byte(`{"metadata":{"labels":{"ark.heptio.com/backup-uid":"uid"}}}`), + []byte(`{"metadata":{"labels":{"velero.io/backup-uid":"uid"}}}`), ), core.NewPatchAction( v1.SchemeGroupVersion.WithResource("backups"), @@ -450,26 +450,26 @@ func TestBackupDeletionControllerProcessRequest(t *testing.T) { ), } - arktest.CompareActions(t, expectedActions, td.client.Actions()) + velerotest.CompareActions(t, expectedActions, td.client.Actions()) // Make sure snapshot was deleted assert.Equal(t, 0, td.blockStore.SnapshotsTaken.Len()) }) t.Run("full delete, no errors", func(t *testing.T) { - backup := arktest.NewTestBackup().WithName("foo").Backup + backup := velerotest.NewTestBackup().WithName("foo").Backup backup.UID = "uid" backup.Spec.StorageLocation = "primary" - restore1 := arktest.NewTestRestore("heptio-ark", "restore-1", v1.RestorePhaseCompleted).WithBackup("foo").Restore - restore2 := arktest.NewTestRestore("heptio-ark", "restore-2", v1.RestorePhaseCompleted).WithBackup("foo").Restore - restore3 := arktest.NewTestRestore("heptio-ark", "restore-3", v1.RestorePhaseCompleted).WithBackup("some-other-backup").Restore + restore1 := velerotest.NewTestRestore("velero", "restore-1", v1.RestorePhaseCompleted).WithBackup("foo").Restore + restore2 := velerotest.NewTestRestore("velero", "restore-2", v1.RestorePhaseCompleted).WithBackup("foo").Restore + restore3 := velerotest.NewTestRestore("velero", "restore-3", v1.RestorePhaseCompleted).WithBackup("some-other-backup").Restore td := setupBackupDeletionControllerTest(backup, restore1, restore2, restore3) - td.sharedInformers.Ark().V1().Restores().Informer().GetStore().Add(restore1) - td.sharedInformers.Ark().V1().Restores().Informer().GetStore().Add(restore2) - td.sharedInformers.Ark().V1().Restores().Informer().GetStore().Add(restore3) + td.sharedInformers.Velero().V1().Restores().Informer().GetStore().Add(restore1) + td.sharedInformers.Velero().V1().Restores().Informer().GetStore().Add(restore2) + td.sharedInformers.Velero().V1().Restores().Informer().GetStore().Add(restore3) location := 
&v1.BackupStorageLocation{ ObjectMeta: metav1.ObjectMeta{ @@ -485,7 +485,7 @@ func TestBackupDeletionControllerProcessRequest(t *testing.T) { }, }, } - require.NoError(t, td.sharedInformers.Ark().V1().BackupStorageLocations().Informer().GetStore().Add(location)) + require.NoError(t, td.sharedInformers.Velero().V1().BackupStorageLocations().Informer().GetStore().Add(location)) snapshotLocation := &v1.VolumeSnapshotLocation{ ObjectMeta: metav1.ObjectMeta{ @@ -496,7 +496,7 @@ func TestBackupDeletionControllerProcessRequest(t *testing.T) { Provider: "provider-1", }, } - require.NoError(t, td.sharedInformers.Ark().V1().VolumeSnapshotLocations().Informer().GetStore().Add(snapshotLocation)) + require.NoError(t, td.sharedInformers.Velero().V1().VolumeSnapshotLocations().Informer().GetStore().Add(snapshotLocation)) // Clear out req labels to make sure the controller adds them td.req.Labels = make(map[string]string) @@ -543,7 +543,7 @@ func TestBackupDeletionControllerProcessRequest(t *testing.T) { v1.SchemeGroupVersion.WithResource("deletebackuprequests"), td.req.Namespace, td.req.Name, - []byte(`{"metadata":{"labels":{"ark.heptio.com/backup-name":"foo"}},"status":{"phase":"InProgress"}}`), + []byte(`{"metadata":{"labels":{"velero.io/backup-name":"foo"}},"status":{"phase":"InProgress"}}`), ), core.NewGetAction( v1.SchemeGroupVersion.WithResource("backups"), @@ -554,7 +554,7 @@ func TestBackupDeletionControllerProcessRequest(t *testing.T) { v1.SchemeGroupVersion.WithResource("deletebackuprequests"), td.req.Namespace, td.req.Name, - []byte(`{"metadata":{"labels":{"ark.heptio.com/backup-uid":"uid"}}}`), + []byte(`{"metadata":{"labels":{"velero.io/backup-uid":"uid"}}}`), ), core.NewPatchAction( v1.SchemeGroupVersion.WithResource("backups"), @@ -590,7 +590,7 @@ func TestBackupDeletionControllerProcessRequest(t *testing.T) { ), } - arktest.CompareActions(t, expectedActions, td.client.Actions()) + velerotest.CompareActions(t, expectedActions, td.client.Actions()) // Make sure snapshot was deleted assert.Equal(t, 0, td.blockStore.SnapshotsTaken.Len()) @@ -711,17 +711,17 @@ func TestBackupDeletionControllerDeleteExpiredRequests(t *testing.T) { sharedInformers := informers.NewSharedInformerFactory(client, 0) controller := NewBackupDeletionController( - arktest.NewLogger(), - sharedInformers.Ark().V1().DeleteBackupRequests(), - client.ArkV1(), // deleteBackupRequestClient - client.ArkV1(), // backupClient - sharedInformers.Ark().V1().Restores(), - client.ArkV1(), // restoreClient + velerotest.NewLogger(), + sharedInformers.Velero().V1().DeleteBackupRequests(), + client.VeleroV1(), // deleteBackupRequestClient + client.VeleroV1(), // backupClient + sharedInformers.Velero().V1().Restores(), + client.VeleroV1(), // restoreClient NewBackupTracker(), nil, - sharedInformers.Ark().V1().PodVolumeBackups(), - sharedInformers.Ark().V1().BackupStorageLocations(), - sharedInformers.Ark().V1().VolumeSnapshotLocations(), + sharedInformers.Velero().V1().PodVolumeBackups(), + sharedInformers.Velero().V1().BackupStorageLocations(), + sharedInformers.Velero().V1().VolumeSnapshotLocations(), nil, // new plugin manager func ).(*backupDeletionController) @@ -730,7 +730,7 @@ func TestBackupDeletionControllerDeleteExpiredRequests(t *testing.T) { controller.clock = fakeClock for i := range test.requests { - sharedInformers.Ark().V1().DeleteBackupRequests().Informer().GetStore().Add(test.requests[i]) + sharedInformers.Velero().V1().DeleteBackupRequests().Informer().GetStore().Add(test.requests[i]) } controller.deleteExpiredRequests() 
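// Illustrative aside (not part of the rename diff): the byte slices asserted in the
// deletion-controller test above are plain JSON merge patches. Below is a minimal,
// standalone sketch of how such a payload could be produced with only the standard
// library; the field names mirror the expectations in the test, but the real
// controller may construct its patch differently, so treat this purely as an example.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Build the same shape the test expects: a backup-name label plus a phase update.
	patch := map[string]interface{}{
		"metadata": map[string]interface{}{
			"labels": map[string]string{
				"velero.io/backup-name": "foo", // label the controller is expected to add
			},
		},
		"status": map[string]interface{}{
			"phase": "InProgress",
		},
	}

	b, err := json.Marshal(patch)
	if err != nil {
		panic(err)
	}

	// json.Marshal sorts map keys, so this prints exactly:
	// {"metadata":{"labels":{"velero.io/backup-name":"foo"}},"status":{"phase":"InProgress"}}
	fmt.Println(string(b))
}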
@@ -740,7 +740,7 @@ func TestBackupDeletionControllerDeleteExpiredRequests(t *testing.T) { expectedActions = append(expectedActions, core.NewDeleteAction(v1.SchemeGroupVersion.WithResource("deletebackuprequests"), "ns", name)) } - arktest.CompareActions(t, expectedActions, client.Actions()) + velerotest.CompareActions(t, expectedActions, client.Actions()) }) } } diff --git a/pkg/controller/backup_sync_controller.go b/pkg/controller/backup_sync_controller.go index 26042b9463..6769fec12b 100644 --- a/pkg/controller/backup_sync_controller.go +++ b/pkg/controller/backup_sync_controller.go @@ -29,31 +29,31 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/tools/cache" - arkv1api "github.com/heptio/ark/pkg/apis/ark/v1" - arkv1client "github.com/heptio/ark/pkg/generated/clientset/versioned/typed/ark/v1" - informers "github.com/heptio/ark/pkg/generated/informers/externalversions/ark/v1" - listers "github.com/heptio/ark/pkg/generated/listers/ark/v1" - "github.com/heptio/ark/pkg/persistence" - "github.com/heptio/ark/pkg/plugin" - "github.com/heptio/ark/pkg/util/stringslice" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + velerov1client "github.com/heptio/velero/pkg/generated/clientset/versioned/typed/velero/v1" + informers "github.com/heptio/velero/pkg/generated/informers/externalversions/velero/v1" + listers "github.com/heptio/velero/pkg/generated/listers/velero/v1" + "github.com/heptio/velero/pkg/persistence" + "github.com/heptio/velero/pkg/plugin" + "github.com/heptio/velero/pkg/util/stringslice" ) type backupSyncController struct { *genericController - backupClient arkv1client.BackupsGetter - backupLocationClient arkv1client.BackupStorageLocationsGetter + backupClient velerov1client.BackupsGetter + backupLocationClient velerov1client.BackupStorageLocationsGetter backupLister listers.BackupLister backupStorageLocationLister listers.BackupStorageLocationLister namespace string defaultBackupLocation string newPluginManager func(logrus.FieldLogger) plugin.Manager - newBackupStore func(*arkv1api.BackupStorageLocation, persistence.ObjectStoreGetter, logrus.FieldLogger) (persistence.BackupStore, error) + newBackupStore func(*velerov1api.BackupStorageLocation, persistence.ObjectStoreGetter, logrus.FieldLogger) (persistence.BackupStore, error) } func NewBackupSyncController( - backupClient arkv1client.BackupsGetter, - backupLocationClient arkv1client.BackupStorageLocationsGetter, + backupClient velerov1client.BackupsGetter, + backupLocationClient velerov1client.BackupStorageLocationsGetter, backupInformer informers.BackupInformer, backupStorageLocationInformer informers.BackupStorageLocationInformer, syncPeriod time.Duration, @@ -92,9 +92,10 @@ func NewBackupSyncController( return c } +// TODO(1.0): remove this const gcFinalizer = "gc.ark.heptio.com" -func shouldSync(location *arkv1api.BackupStorageLocation, now time.Time, backupStore persistence.BackupStore, log logrus.FieldLogger) (bool, string) { +func shouldSync(location *velerov1api.BackupStorageLocation, now time.Time, backupStore persistence.BackupStore, log logrus.FieldLogger) (bool, string) { log = log.WithFields(map[string]interface{}{ "lastSyncedRevision": location.Status.LastSyncedRevision, "lastSyncedTime": location.Status.LastSyncedTime.Time.Format(time.RFC1123Z), @@ -123,8 +124,8 @@ func shouldSync(location *arkv1api.BackupStorageLocation, now time.Time, backupS // orderedBackupLocations returns a new slice with the default backup location first (if it exists), // followed by the rest of the locations in no 
particular order. -func orderedBackupLocations(locations []*arkv1api.BackupStorageLocation, defaultLocationName string) []*arkv1api.BackupStorageLocation { - var result []*arkv1api.BackupStorageLocation +func orderedBackupLocations(locations []*velerov1api.BackupStorageLocation, defaultLocationName string) []*velerov1api.BackupStorageLocation { + var result []*velerov1api.BackupStorageLocation for i := range locations { if locations[i].Name == defaultLocationName { @@ -223,7 +224,7 @@ func (c *backupSyncController) run() { if backup.Labels == nil { backup.Labels = make(map[string]string) } - backup.Labels[arkv1api.StorageLocationLabel] = backup.Spec.StorageLocation + backup.Labels[velerov1api.StorageLocationLabel] = backup.Spec.StorageLocation _, err = c.backupClient.Backups(backup.Namespace).Create(backup) switch { @@ -265,7 +266,7 @@ func (c *backupSyncController) run() { } } -func patchStorageLocation(backup *arkv1api.Backup, client arkv1client.BackupInterface, location string) error { +func patchStorageLocation(backup *velerov1api.Backup, client velerov1client.BackupInterface, location string) error { patch := map[string]interface{}{ "spec": map[string]interface{}{ "storageLocation": location, @@ -288,7 +289,7 @@ func patchStorageLocation(backup *arkv1api.Backup, client arkv1client.BackupInte // and a phase of Completed, but no corresponding backup in object storage. func (c *backupSyncController) deleteOrphanedBackups(locationName string, cloudBackupNames sets.String, log logrus.FieldLogger) { locationSelector := labels.Set(map[string]string{ - arkv1api.StorageLocationLabel: locationName, + velerov1api.StorageLocationLabel: locationName, }).AsSelector() backups, err := c.backupLister.Backups(c.namespace).List(locationSelector) @@ -302,7 +303,7 @@ func (c *backupSyncController) deleteOrphanedBackups(locationName string, cloudB for _, backup := range backups { log = log.WithField("backup", backup.Name) - if backup.Status.Phase != arkv1api.BackupPhaseCompleted || cloudBackupNames.Has(backup.Name) { + if backup.Status.Phase != velerov1api.BackupPhaseCompleted || cloudBackupNames.Has(backup.Name) { continue } diff --git a/pkg/controller/backup_sync_controller_test.go b/pkg/controller/backup_sync_controller_test.go index f6d8688360..ba82cd0ade 100644 --- a/pkg/controller/backup_sync_controller_test.go +++ b/pkg/controller/backup_sync_controller_test.go @@ -30,28 +30,28 @@ import ( "k8s.io/apimachinery/pkg/util/sets" core "k8s.io/client-go/testing" - arkv1api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/generated/clientset/versioned/fake" - informers "github.com/heptio/ark/pkg/generated/informers/externalversions" - "github.com/heptio/ark/pkg/persistence" - persistencemocks "github.com/heptio/ark/pkg/persistence/mocks" - "github.com/heptio/ark/pkg/plugin" - pluginmocks "github.com/heptio/ark/pkg/plugin/mocks" - "github.com/heptio/ark/pkg/util/stringslice" - arktest "github.com/heptio/ark/pkg/util/test" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/generated/clientset/versioned/fake" + informers "github.com/heptio/velero/pkg/generated/informers/externalversions" + "github.com/heptio/velero/pkg/persistence" + persistencemocks "github.com/heptio/velero/pkg/persistence/mocks" + "github.com/heptio/velero/pkg/plugin" + pluginmocks "github.com/heptio/velero/pkg/plugin/mocks" + "github.com/heptio/velero/pkg/util/stringslice" + velerotest "github.com/heptio/velero/pkg/util/test" ) -func defaultLocationsList(namespace string) 
[]*arkv1api.BackupStorageLocation { - return []*arkv1api.BackupStorageLocation{ +func defaultLocationsList(namespace string) []*velerov1api.BackupStorageLocation { + return []*velerov1api.BackupStorageLocation{ { ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, Name: "location-1", }, - Spec: arkv1api.BackupStorageLocationSpec{ + Spec: velerov1api.BackupStorageLocationSpec{ Provider: "objStoreProvider", - StorageType: arkv1api.StorageType{ - ObjectStorage: &arkv1api.ObjectStorageLocation{ + StorageType: velerov1api.StorageType{ + ObjectStorage: &velerov1api.ObjectStorageLocation{ Bucket: "bucket-1", }, }, @@ -62,10 +62,10 @@ func defaultLocationsList(namespace string) []*arkv1api.BackupStorageLocation { Namespace: namespace, Name: "location-2", }, - Spec: arkv1api.BackupStorageLocationSpec{ + Spec: velerov1api.BackupStorageLocationSpec{ Provider: "objStoreProvider", - StorageType: arkv1api.StorageType{ - ObjectStorage: &arkv1api.ObjectStorageLocation{ + StorageType: velerov1api.StorageType{ + ObjectStorage: &velerov1api.ObjectStorageLocation{ Bucket: "bucket-2", }, }, @@ -78,9 +78,9 @@ func TestBackupSyncControllerRun(t *testing.T) { tests := []struct { name string namespace string - locations []*arkv1api.BackupStorageLocation - cloudBackups map[string][]*arkv1api.Backup - existingBackups []*arkv1api.Backup + locations []*velerov1api.BackupStorageLocation + cloudBackups map[string][]*velerov1api.Backup + existingBackups []*velerov1api.Backup }{ { name: "no cloud backups", @@ -89,13 +89,13 @@ func TestBackupSyncControllerRun(t *testing.T) { name: "normal case", namespace: "ns-1", locations: defaultLocationsList("ns-1"), - cloudBackups: map[string][]*arkv1api.Backup{ + cloudBackups: map[string][]*velerov1api.Backup{ "bucket-1": { - arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").Backup, - arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").Backup, + velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").Backup, + velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").Backup, }, "bucket-2": { - arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-3").Backup, + velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-3").Backup, }, }, }, @@ -103,24 +103,24 @@ func TestBackupSyncControllerRun(t *testing.T) { name: "gcFinalizer (only) gets removed on sync", namespace: "ns-1", locations: defaultLocationsList("ns-1"), - cloudBackups: map[string][]*arkv1api.Backup{ + cloudBackups: map[string][]*velerov1api.Backup{ "bucket-1": { - arktest.NewTestBackup().WithNamespace("ns-1").WithFinalizers("a-finalizer", gcFinalizer, "some-other-finalizer").Backup, + velerotest.NewTestBackup().WithNamespace("ns-1").WithFinalizers("a-finalizer", gcFinalizer, "some-other-finalizer").Backup, }, }, }, { - name: "all synced backups get created in Ark server's namespace", - namespace: "heptio-ark", - locations: defaultLocationsList("heptio-ark"), - cloudBackups: map[string][]*arkv1api.Backup{ + name: "all synced backups get created in Velero server's namespace", + namespace: "velero", + locations: defaultLocationsList("velero"), + cloudBackups: map[string][]*velerov1api.Backup{ "bucket-1": { - arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").Backup, - arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").Backup, + velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").Backup, + velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").Backup, }, "bucket-2": { - 
arktest.NewTestBackup().WithNamespace("ns-2").WithName("backup-3").Backup, - arktest.NewTestBackup().WithNamespace("heptio-ark").WithName("backup-4").Backup, + velerotest.NewTestBackup().WithNamespace("ns-2").WithName("backup-3").Backup, + velerotest.NewTestBackup().WithNamespace("velero").WithName("backup-4").Backup, }, }, }, @@ -128,49 +128,49 @@ func TestBackupSyncControllerRun(t *testing.T) { name: "new backups get synced when some cloud backups already exist in the cluster", namespace: "ns-1", locations: defaultLocationsList("ns-1"), - cloudBackups: map[string][]*arkv1api.Backup{ + cloudBackups: map[string][]*velerov1api.Backup{ "bucket-1": { - arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").Backup, - arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").Backup, + velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").Backup, + velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").Backup, }, "bucket-2": { - arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-3").Backup, - arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-4").Backup, + velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-3").Backup, + velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-4").Backup, }, }, - existingBackups: []*arkv1api.Backup{ + existingBackups: []*velerov1api.Backup{ // add a label to each existing backup so we can differentiate it from the cloud // backup during verification - arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").WithLabel("i-exist", "true").WithStorageLocation("location-1").Backup, - arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-3").WithLabel("i-exist", "true").WithStorageLocation("location-2").Backup, + velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").WithLabel("i-exist", "true").WithStorageLocation("location-1").Backup, + velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-3").WithLabel("i-exist", "true").WithStorageLocation("location-2").Backup, }, }, { name: "existing backups without a StorageLocation get it filled in", namespace: "ns-1", locations: defaultLocationsList("ns-1"), - cloudBackups: map[string][]*arkv1api.Backup{ + cloudBackups: map[string][]*velerov1api.Backup{ "bucket-1": { - arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").Backup, + velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").Backup, }, }, - existingBackups: []*arkv1api.Backup{ + existingBackups: []*velerov1api.Backup{ // add a label to each existing backup so we can differentiate it from the cloud // backup during verification - arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").WithLabel("i-exist", "true").Backup, + velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").WithLabel("i-exist", "true").Backup, }, }, { name: "backup storage location names and labels get updated", namespace: "ns-1", locations: defaultLocationsList("ns-1"), - cloudBackups: map[string][]*arkv1api.Backup{ + cloudBackups: map[string][]*velerov1api.Backup{ "bucket-1": { - arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").WithStorageLocation("foo").WithLabel(arkv1api.StorageLocationLabel, "foo").Backup, - arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").Backup, + velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").WithStorageLocation("foo").WithLabel(velerov1api.StorageLocationLabel, "foo").Backup, + 
velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").Backup, }, "bucket-2": { - arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-3").WithStorageLocation("bar").WithLabel(arkv1api.StorageLocationLabel, "bar").Backup, + velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-3").WithStorageLocation("bar").WithLabel(velerov1api.StorageLocationLabel, "bar").Backup, }, }, }, @@ -186,18 +186,18 @@ func TestBackupSyncControllerRun(t *testing.T) { ) c := NewBackupSyncController( - client.ArkV1(), - client.ArkV1(), - sharedInformers.Ark().V1().Backups(), - sharedInformers.Ark().V1().BackupStorageLocations(), + client.VeleroV1(), + client.VeleroV1(), + sharedInformers.Velero().V1().Backups(), + sharedInformers.Velero().V1().BackupStorageLocations(), time.Duration(0), test.namespace, "", func(logrus.FieldLogger) plugin.Manager { return pluginManager }, - arktest.NewLogger(), + velerotest.NewLogger(), ).(*backupSyncController) - c.newBackupStore = func(loc *arkv1api.BackupStorageLocation, _ persistence.ObjectStoreGetter, _ logrus.FieldLogger) (persistence.BackupStore, error) { + c.newBackupStore = func(loc *velerov1api.BackupStorageLocation, _ persistence.ObjectStoreGetter, _ logrus.FieldLogger) (persistence.BackupStore, error) { // this gets populated just below, prior to exercising the method under test return backupStores[loc.Name], nil } @@ -205,7 +205,7 @@ func TestBackupSyncControllerRun(t *testing.T) { pluginManager.On("CleanupClients").Return(nil) for _, location := range test.locations { - require.NoError(t, sharedInformers.Ark().V1().BackupStorageLocations().Informer().GetStore().Add(location)) + require.NoError(t, sharedInformers.Velero().V1().BackupStorageLocations().Informer().GetStore().Add(location)) backupStores[location.Name] = &persistencemocks.BackupStore{} } @@ -224,9 +224,9 @@ func TestBackupSyncControllerRun(t *testing.T) { } for _, existingBackup := range test.existingBackups { - require.NoError(t, sharedInformers.Ark().V1().Backups().Informer().GetStore().Add(existingBackup)) + require.NoError(t, sharedInformers.Velero().V1().Backups().Informer().GetStore().Add(existingBackup)) - _, err := client.ArkV1().Backups(test.namespace).Create(existingBackup) + _, err := client.VeleroV1().Backups(test.namespace).Create(existingBackup) require.NoError(t, err) } client.ClearActions() @@ -236,7 +236,7 @@ func TestBackupSyncControllerRun(t *testing.T) { for bucket, backups := range test.cloudBackups { // figure out which location this bucket is for; we need this for verification // purposes later - var location *arkv1api.BackupStorageLocation + var location *velerov1api.BackupStorageLocation for _, loc := range test.locations { if loc.Spec.ObjectStorage.Bucket == bucket { location = loc @@ -246,11 +246,11 @@ func TestBackupSyncControllerRun(t *testing.T) { require.NotNil(t, location) for _, cloudBackup := range backups { - obj, err := client.ArkV1().Backups(test.namespace).Get(cloudBackup.Name, metav1.GetOptions{}) + obj, err := client.VeleroV1().Backups(test.namespace).Get(cloudBackup.Name, metav1.GetOptions{}) require.NoError(t, err) // did this cloud backup already exist in the cluster? 
- var existing *arkv1api.Backup + var existing *velerov1api.Backup for _, obj := range test.existingBackups { if obj.Name == cloudBackup.Name { existing = obj @@ -273,7 +273,7 @@ func TestBackupSyncControllerRun(t *testing.T) { // verify that the storage location field and label are set properly assert.Equal(t, location.Name, obj.Spec.StorageLocation) - assert.Equal(t, location.Name, obj.Labels[arkv1api.StorageLocationLabel]) + assert.Equal(t, location.Name, obj.Labels[velerov1api.StorageLocationLabel]) } } } @@ -285,7 +285,7 @@ func TestDeleteOrphanedBackups(t *testing.T) { tests := []struct { name string cloudBackups sets.String - k8sBackups []*arktest.TestBackup + k8sBackups []*velerotest.TestBackup namespace string expectedDeletes sets.String }{ @@ -293,10 +293,10 @@ func TestDeleteOrphanedBackups(t *testing.T) { name: "no overlapping backups", namespace: "ns-1", cloudBackups: sets.NewString("backup-1", "backup-2", "backup-3"), - k8sBackups: []*arktest.TestBackup{ - arktest.NewTestBackup().WithNamespace("ns-1").WithName("backupA").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseCompleted), - arktest.NewTestBackup().WithNamespace("ns-1").WithName("backupB").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseCompleted), - arktest.NewTestBackup().WithNamespace("ns-1").WithName("backupC").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseCompleted), + k8sBackups: []*velerotest.TestBackup{ + velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backupA").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseCompleted), + velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backupB").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseCompleted), + velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backupC").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseCompleted), }, expectedDeletes: sets.NewString("backupA", "backupB", "backupC"), }, @@ -304,10 +304,10 @@ func TestDeleteOrphanedBackups(t *testing.T) { name: "some overlapping backups", namespace: "ns-1", cloudBackups: sets.NewString("backup-1", "backup-2", "backup-3"), - k8sBackups: []*arktest.TestBackup{ - arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseCompleted), - arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseCompleted), - arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-C").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseCompleted), + k8sBackups: []*velerotest.TestBackup{ + velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseCompleted), + velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseCompleted), + velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-C").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseCompleted), }, expectedDeletes: sets.NewString("backup-C"), }, @@ -315,10 +315,10 @@ func TestDeleteOrphanedBackups(t *testing.T) { name: "all overlapping backups", namespace: "ns-1", cloudBackups: 
sets.NewString("backup-1", "backup-2", "backup-3"), - k8sBackups: []*arktest.TestBackup{ - arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseCompleted), - arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseCompleted), - arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-3").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseCompleted), + k8sBackups: []*velerotest.TestBackup{ + velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseCompleted), + velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseCompleted), + velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-3").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseCompleted), }, expectedDeletes: sets.NewString(), }, @@ -326,13 +326,13 @@ func TestDeleteOrphanedBackups(t *testing.T) { name: "no overlapping backups but including backups that are not complete", namespace: "ns-1", cloudBackups: sets.NewString("backup-1", "backup-2", "backup-3"), - k8sBackups: []*arktest.TestBackup{ - arktest.NewTestBackup().WithNamespace("ns-1").WithName("backupA").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseCompleted), - arktest.NewTestBackup().WithNamespace("ns-1").WithName("Deleting").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseDeleting), - arktest.NewTestBackup().WithNamespace("ns-1").WithName("Failed").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseFailed), - arktest.NewTestBackup().WithNamespace("ns-1").WithName("FailedValidation").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseFailedValidation), - arktest.NewTestBackup().WithNamespace("ns-1").WithName("InProgress").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseInProgress), - arktest.NewTestBackup().WithNamespace("ns-1").WithName("New").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseNew), + k8sBackups: []*velerotest.TestBackup{ + velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backupA").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseCompleted), + velerotest.NewTestBackup().WithNamespace("ns-1").WithName("Deleting").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseDeleting), + velerotest.NewTestBackup().WithNamespace("ns-1").WithName("Failed").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseFailed), + velerotest.NewTestBackup().WithNamespace("ns-1").WithName("FailedValidation").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseFailedValidation), + velerotest.NewTestBackup().WithNamespace("ns-1").WithName("InProgress").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseInProgress), + velerotest.NewTestBackup().WithNamespace("ns-1").WithName("New").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseNew), }, expectedDeletes: sets.NewString("backupA"), }, @@ -340,10 
+340,10 @@ func TestDeleteOrphanedBackups(t *testing.T) { name: "all overlapping backups and all backups that are not complete", namespace: "ns-1", cloudBackups: sets.NewString("backup-1", "backup-2", "backup-3"), - k8sBackups: []*arktest.TestBackup{ - arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseFailed), - arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseFailedValidation), - arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-3").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseInProgress), + k8sBackups: []*velerotest.TestBackup{ + velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseFailed), + velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseFailedValidation), + velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-3").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseInProgress), }, expectedDeletes: sets.NewString(), }, @@ -351,13 +351,13 @@ func TestDeleteOrphanedBackups(t *testing.T) { name: "no completed backups in other locations are deleted", namespace: "ns-1", cloudBackups: sets.NewString("backup-1", "backup-2", "backup-3"), - k8sBackups: []*arktest.TestBackup{ - arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseCompleted), - arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseCompleted), - arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-C").WithLabel(arkv1api.StorageLocationLabel, "default").WithPhase(arkv1api.BackupPhaseCompleted), - arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-4").WithLabel(arkv1api.StorageLocationLabel, "alternate").WithPhase(arkv1api.BackupPhaseCompleted), - arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-5").WithLabel(arkv1api.StorageLocationLabel, "alternate").WithPhase(arkv1api.BackupPhaseCompleted), - arktest.NewTestBackup().WithNamespace("ns-1").WithName("backup-6").WithLabel(arkv1api.StorageLocationLabel, "alternate").WithPhase(arkv1api.BackupPhaseCompleted), + k8sBackups: []*velerotest.TestBackup{ + velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-1").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseCompleted), + velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-2").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseCompleted), + velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-C").WithLabel(velerov1api.StorageLocationLabel, "default").WithPhase(velerov1api.BackupPhaseCompleted), + velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-4").WithLabel(velerov1api.StorageLocationLabel, "alternate").WithPhase(velerov1api.BackupPhaseCompleted), + velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-5").WithLabel(velerov1api.StorageLocationLabel, "alternate").WithPhase(velerov1api.BackupPhaseCompleted), + 
velerotest.NewTestBackup().WithNamespace("ns-1").WithName("backup-6").WithLabel(velerov1api.StorageLocationLabel, "alternate").WithPhase(velerov1api.BackupPhaseCompleted), }, expectedDeletes: sets.NewString("backup-C"), }, @@ -371,31 +371,31 @@ func TestDeleteOrphanedBackups(t *testing.T) { ) c := NewBackupSyncController( - client.ArkV1(), - client.ArkV1(), - sharedInformers.Ark().V1().Backups(), - sharedInformers.Ark().V1().BackupStorageLocations(), + client.VeleroV1(), + client.VeleroV1(), + sharedInformers.Velero().V1().Backups(), + sharedInformers.Velero().V1().BackupStorageLocations(), time.Duration(0), test.namespace, "", nil, // new plugin manager func - arktest.NewLogger(), + velerotest.NewLogger(), ).(*backupSyncController) expectedDeleteActions := make([]core.Action, 0) for _, backup := range test.k8sBackups { // add test backup to informer - require.NoError(t, sharedInformers.Ark().V1().Backups().Informer().GetStore().Add(backup.Backup), "Error adding backup to informer") + require.NoError(t, sharedInformers.Velero().V1().Backups().Informer().GetStore().Add(backup.Backup), "Error adding backup to informer") // add test backup to client - _, err := client.Ark().Backups(test.namespace).Create(backup.Backup) + _, err := client.VeleroV1().Backups(test.namespace).Create(backup.Backup) require.NoError(t, err, "Error adding backup to clientset") // if we expect this backup to be deleted, set up the expected DeleteAction if test.expectedDeletes.Has(backup.Name) { actionDelete := core.NewDeleteAction( - arkv1api.SchemeGroupVersion.WithResource("backups"), + velerov1api.SchemeGroupVersion.WithResource("backups"), test.namespace, backup.Name, ) @@ -403,7 +403,7 @@ func TestDeleteOrphanedBackups(t *testing.T) { } } - c.deleteOrphanedBackups("default", test.cloudBackups, arktest.NewLogger()) + c.deleteOrphanedBackups("default", test.cloudBackups, velerotest.NewLogger()) numBackups, err := numBackups(t, client, c.namespace) assert.NoError(t, err) @@ -411,7 +411,7 @@ func TestDeleteOrphanedBackups(t *testing.T) { expected := len(test.k8sBackups) - len(test.expectedDeletes) assert.Equal(t, expected, numBackups) - arktest.CompareActions(t, expectedDeleteActions, getDeleteActions(client.Actions())) + velerotest.CompareActions(t, expectedDeleteActions, getDeleteActions(client.Actions())) }) } } @@ -421,7 +421,7 @@ func TestShouldSync(t *testing.T) { tests := []struct { name string - location *arkv1api.BackupStorageLocation + location *velerov1api.BackupStorageLocation backupStoreRevision string now time.Time expectSync bool @@ -429,7 +429,7 @@ func TestShouldSync(t *testing.T) { }{ { name: "BSL with no last-synced metadata should sync", - location: &arkv1api.BackupStorageLocation{}, + location: &velerov1api.BackupStorageLocation{}, backupStoreRevision: "foo", now: c.Now(), expectSync: true, @@ -437,8 +437,8 @@ func TestShouldSync(t *testing.T) { }, { name: "BSL with unchanged revision last synced more than an hour ago should sync", - location: &arkv1api.BackupStorageLocation{ - Status: arkv1api.BackupStorageLocationStatus{ + location: &velerov1api.BackupStorageLocation{ + Status: velerov1api.BackupStorageLocationStatus{ LastSyncedRevision: types.UID("foo"), LastSyncedTime: metav1.Time{Time: c.Now().Add(-61 * time.Minute)}, }, @@ -450,8 +450,8 @@ func TestShouldSync(t *testing.T) { }, { name: "BSL with unchanged revision last synced less than an hour ago should not sync", - location: &arkv1api.BackupStorageLocation{ - Status: arkv1api.BackupStorageLocationStatus{ + location: 
&velerov1api.BackupStorageLocation{ + Status: velerov1api.BackupStorageLocationStatus{ LastSyncedRevision: types.UID("foo"), LastSyncedTime: metav1.Time{Time: c.Now().Add(-59 * time.Minute)}, }, @@ -462,8 +462,8 @@ func TestShouldSync(t *testing.T) { }, { name: "BSL with different revision than backup store last synced less than an hour ago should sync", - location: &arkv1api.BackupStorageLocation{ - Status: arkv1api.BackupStorageLocationStatus{ + location: &velerov1api.BackupStorageLocation{ + Status: velerov1api.BackupStorageLocationStatus{ LastSyncedRevision: types.UID("foo"), LastSyncedTime: metav1.Time{Time: c.Now().Add(-time.Minute)}, }, @@ -475,8 +475,8 @@ func TestShouldSync(t *testing.T) { }, { name: "BSL with different revision than backup store last synced more than an hour ago should sync", - location: &arkv1api.BackupStorageLocation{ - Status: arkv1api.BackupStorageLocationStatus{ + location: &velerov1api.BackupStorageLocation{ + Status: velerov1api.BackupStorageLocationStatus{ LastSyncedRevision: types.UID("foo"), LastSyncedTime: metav1.Time{Time: c.Now().Add(-61 * time.Minute)}, }, @@ -497,7 +497,7 @@ func TestShouldSync(t *testing.T) { backupStore.On("GetRevision").Return("", errors.New("object revision not found")) } - shouldSync, rev := shouldSync(test.location, test.now, backupStore, arktest.NewLogger()) + shouldSync, rev := shouldSync(test.location, test.now, backupStore, velerotest.NewLogger()) assert.Equal(t, test.expectSync, shouldSync) assert.Equal(t, test.expectedRevision, rev) }) @@ -517,7 +517,7 @@ func getDeleteActions(actions []core.Action) []core.Action { func numBackups(t *testing.T, c *fake.Clientset, ns string) (int, error) { t.Helper() - existingK8SBackups, err := c.ArkV1().Backups(ns).List(metav1.ListOptions{}) + existingK8SBackups, err := c.VeleroV1().Backups(ns).List(metav1.ListOptions{}) if err != nil { return 0, err } diff --git a/pkg/controller/download_request_controller.go b/pkg/controller/download_request_controller.go index 9c5ea495cd..698055da5d 100644 --- a/pkg/controller/download_request_controller.go +++ b/pkg/controller/download_request_controller.go @@ -30,19 +30,19 @@ import ( "k8s.io/apimachinery/pkg/util/clock" "k8s.io/client-go/tools/cache" - "github.com/heptio/ark/pkg/apis/ark/v1" - arkv1client "github.com/heptio/ark/pkg/generated/clientset/versioned/typed/ark/v1" - informers "github.com/heptio/ark/pkg/generated/informers/externalversions/ark/v1" - listers "github.com/heptio/ark/pkg/generated/listers/ark/v1" - "github.com/heptio/ark/pkg/persistence" - "github.com/heptio/ark/pkg/plugin" - "github.com/heptio/ark/pkg/util/kube" + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + velerov1client "github.com/heptio/velero/pkg/generated/clientset/versioned/typed/velero/v1" + informers "github.com/heptio/velero/pkg/generated/informers/externalversions/velero/v1" + listers "github.com/heptio/velero/pkg/generated/listers/velero/v1" + "github.com/heptio/velero/pkg/persistence" + "github.com/heptio/velero/pkg/plugin" + "github.com/heptio/velero/pkg/util/kube" ) type downloadRequestController struct { *genericController - downloadRequestClient arkv1client.DownloadRequestsGetter + downloadRequestClient velerov1client.DownloadRequestsGetter downloadRequestLister listers.DownloadRequestLister restoreLister listers.RestoreLister clock clock.Clock @@ -54,7 +54,7 @@ type downloadRequestController struct { // NewDownloadRequestController creates a new DownloadRequestController. 
func NewDownloadRequestController( - downloadRequestClient arkv1client.DownloadRequestsGetter, + downloadRequestClient velerov1client.DownloadRequestsGetter, downloadRequestInformer informers.DownloadRequestInformer, restoreInformer informers.RestoreInformer, backupLocationInformer informers.BackupStorageLocationInformer, @@ -223,7 +223,7 @@ func (c *downloadRequestController) resync() { } } -func patchDownloadRequest(original, updated *v1.DownloadRequest, client arkv1client.DownloadRequestsGetter) (*v1.DownloadRequest, error) { +func patchDownloadRequest(original, updated *v1.DownloadRequest, client velerov1client.DownloadRequestsGetter) (*v1.DownloadRequest, error) { origBytes, err := json.Marshal(original) if err != nil { return nil, errors.Wrap(err, "error marshalling original download request") diff --git a/pkg/controller/download_request_controller_test.go b/pkg/controller/download_request_controller_test.go index 7890e256c1..54a2296562 100644 --- a/pkg/controller/download_request_controller_test.go +++ b/pkg/controller/download_request_controller_test.go @@ -27,15 +27,15 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/clock" - "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/generated/clientset/versioned/fake" - informers "github.com/heptio/ark/pkg/generated/informers/externalversions" - "github.com/heptio/ark/pkg/persistence" - persistencemocks "github.com/heptio/ark/pkg/persistence/mocks" - "github.com/heptio/ark/pkg/plugin" - pluginmocks "github.com/heptio/ark/pkg/plugin/mocks" - kubeutil "github.com/heptio/ark/pkg/util/kube" - arktest "github.com/heptio/ark/pkg/util/test" + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/generated/clientset/versioned/fake" + informers "github.com/heptio/velero/pkg/generated/informers/externalversions" + "github.com/heptio/velero/pkg/persistence" + persistencemocks "github.com/heptio/velero/pkg/persistence/mocks" + "github.com/heptio/velero/pkg/plugin" + pluginmocks "github.com/heptio/velero/pkg/plugin/mocks" + kubeutil "github.com/heptio/velero/pkg/util/kube" + velerotest "github.com/heptio/velero/pkg/util/test" ) type downloadRequestTestHarness struct { @@ -54,13 +54,13 @@ func newDownloadRequestTestHarness(t *testing.T) *downloadRequestTestHarness { pluginManager = new(pluginmocks.Manager) backupStore = new(persistencemocks.BackupStore) controller = NewDownloadRequestController( - client.ArkV1(), - informerFactory.Ark().V1().DownloadRequests(), - informerFactory.Ark().V1().Restores(), - informerFactory.Ark().V1().BackupStorageLocations(), - informerFactory.Ark().V1().Backups(), + client.VeleroV1(), + informerFactory.Velero().V1().DownloadRequests(), + informerFactory.Velero().V1().Restores(), + informerFactory.Velero().V1().BackupStorageLocations(), + informerFactory.Velero().V1().Backups(), func(logrus.FieldLogger) plugin.Manager { return pluginManager }, - arktest.NewLogger(), + velerotest.NewLogger(), ).(*downloadRequestController) ) @@ -145,94 +145,94 @@ func TestProcessDownloadRequest(t *testing.T) { { name: "backup contents request for nonexistent backup returns an error", downloadRequest: newDownloadRequest("", v1.DownloadTargetKindBackupContents, "a-backup"), - backup: arktest.NewTestBackup().WithName("non-matching-backup").WithStorageLocation("a-location").Backup, + backup: velerotest.NewTestBackup().WithName("non-matching-backup").WithStorageLocation("a-location").Backup, backupLocation: newBackupLocation("a-location", "a-provider", 
"a-bucket"), - expectedErr: "backup.ark.heptio.com \"a-backup\" not found", + expectedErr: "backup.velero.io \"a-backup\" not found", }, { name: "restore log request for nonexistent restore returns an error", downloadRequest: newDownloadRequest("", v1.DownloadTargetKindRestoreLog, "a-backup-20170912150214"), - restore: arktest.NewTestRestore(v1.DefaultNamespace, "non-matching-restore", v1.RestorePhaseCompleted).WithBackup("a-backup").Restore, - backup: arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup, + restore: velerotest.NewTestRestore(v1.DefaultNamespace, "non-matching-restore", v1.RestorePhaseCompleted).WithBackup("a-backup").Restore, + backup: velerotest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup, backupLocation: newBackupLocation("a-location", "a-provider", "a-bucket"), - expectedErr: "error getting Restore: restore.ark.heptio.com \"a-backup-20170912150214\" not found", + expectedErr: "error getting Restore: restore.velero.io \"a-backup-20170912150214\" not found", }, { name: "backup contents request for backup with nonexistent location returns an error", downloadRequest: newDownloadRequest("", v1.DownloadTargetKindBackupContents, "a-backup"), - backup: arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup, + backup: velerotest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup, backupLocation: newBackupLocation("non-matching-location", "a-provider", "a-bucket"), - expectedErr: "backupstoragelocation.ark.heptio.com \"a-location\" not found", + expectedErr: "backupstoragelocation.velero.io \"a-location\" not found", }, { name: "backup contents request with phase '' gets a url", downloadRequest: newDownloadRequest("", v1.DownloadTargetKindBackupContents, "a-backup"), - backup: arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup, + backup: velerotest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup, backupLocation: newBackupLocation("a-location", "a-provider", "a-bucket"), expectGetsURL: true, }, { name: "backup contents request with phase 'New' gets a url", downloadRequest: newDownloadRequest(v1.DownloadRequestPhaseNew, v1.DownloadTargetKindBackupContents, "a-backup"), - backup: arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup, + backup: velerotest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup, backupLocation: newBackupLocation("a-location", "a-provider", "a-bucket"), expectGetsURL: true, }, { name: "backup log request with phase '' gets a url", downloadRequest: newDownloadRequest("", v1.DownloadTargetKindBackupLog, "a-backup"), - backup: arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup, + backup: velerotest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup, backupLocation: newBackupLocation("a-location", "a-provider", "a-bucket"), expectGetsURL: true, }, { name: "backup log request with phase 'New' gets a url", downloadRequest: newDownloadRequest(v1.DownloadRequestPhaseNew, v1.DownloadTargetKindBackupLog, "a-backup"), - backup: arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup, + backup: velerotest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup, backupLocation: newBackupLocation("a-location", "a-provider", "a-bucket"), expectGetsURL: true, }, { name: "restore log request with phase '' gets a url", 
downloadRequest: newDownloadRequest("", v1.DownloadTargetKindRestoreLog, "a-backup-20170912150214"), - restore: arktest.NewTestRestore(v1.DefaultNamespace, "a-backup-20170912150214", v1.RestorePhaseCompleted).WithBackup("a-backup").Restore, - backup: arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup, + restore: velerotest.NewTestRestore(v1.DefaultNamespace, "a-backup-20170912150214", v1.RestorePhaseCompleted).WithBackup("a-backup").Restore, + backup: velerotest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup, backupLocation: newBackupLocation("a-location", "a-provider", "a-bucket"), expectGetsURL: true, }, { name: "restore log request with phase 'New' gets a url", downloadRequest: newDownloadRequest(v1.DownloadRequestPhaseNew, v1.DownloadTargetKindRestoreLog, "a-backup-20170912150214"), - restore: arktest.NewTestRestore(v1.DefaultNamespace, "a-backup-20170912150214", v1.RestorePhaseCompleted).WithBackup("a-backup").Restore, - backup: arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup, + restore: velerotest.NewTestRestore(v1.DefaultNamespace, "a-backup-20170912150214", v1.RestorePhaseCompleted).WithBackup("a-backup").Restore, + backup: velerotest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup, backupLocation: newBackupLocation("a-location", "a-provider", "a-bucket"), expectGetsURL: true, }, { name: "restore results request with phase '' gets a url", downloadRequest: newDownloadRequest("", v1.DownloadTargetKindRestoreResults, "a-backup-20170912150214"), - restore: arktest.NewTestRestore(v1.DefaultNamespace, "a-backup-20170912150214", v1.RestorePhaseCompleted).WithBackup("a-backup").Restore, - backup: arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup, + restore: velerotest.NewTestRestore(v1.DefaultNamespace, "a-backup-20170912150214", v1.RestorePhaseCompleted).WithBackup("a-backup").Restore, + backup: velerotest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup, backupLocation: newBackupLocation("a-location", "a-provider", "a-bucket"), expectGetsURL: true, }, { name: "restore results request with phase 'New' gets a url", downloadRequest: newDownloadRequest(v1.DownloadRequestPhaseNew, v1.DownloadTargetKindRestoreResults, "a-backup-20170912150214"), - restore: arktest.NewTestRestore(v1.DefaultNamespace, "a-backup-20170912150214", v1.RestorePhaseCompleted).WithBackup("a-backup").Restore, - backup: arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup, + restore: velerotest.NewTestRestore(v1.DefaultNamespace, "a-backup-20170912150214", v1.RestorePhaseCompleted).WithBackup("a-backup").Restore, + backup: velerotest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup, backupLocation: newBackupLocation("a-location", "a-provider", "a-bucket"), expectGetsURL: true, }, { name: "request with phase 'Processed' is not deleted if not expired", downloadRequest: newDownloadRequest(v1.DownloadRequestPhaseProcessed, v1.DownloadTargetKindBackupLog, "a-backup-20170912150214"), - backup: arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup, + backup: velerotest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup, }, { name: "request with phase 'Processed' is deleted if expired", downloadRequest: newDownloadRequest(v1.DownloadRequestPhaseProcessed, v1.DownloadTargetKindBackupLog, "a-backup-20170912150214"), 
- backup: arktest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup, + backup: velerotest.NewTestBackup().WithName("a-backup").WithStorageLocation("a-location").Backup, expired: true, }, } @@ -254,22 +254,22 @@ func TestProcessDownloadRequest(t *testing.T) { } if tc.downloadRequest != nil { - require.NoError(t, harness.informerFactory.Ark().V1().DownloadRequests().Informer().GetStore().Add(tc.downloadRequest)) + require.NoError(t, harness.informerFactory.Velero().V1().DownloadRequests().Informer().GetStore().Add(tc.downloadRequest)) - _, err := harness.client.ArkV1().DownloadRequests(tc.downloadRequest.Namespace).Create(tc.downloadRequest) + _, err := harness.client.VeleroV1().DownloadRequests(tc.downloadRequest.Namespace).Create(tc.downloadRequest) require.NoError(t, err) } if tc.restore != nil { - require.NoError(t, harness.informerFactory.Ark().V1().Restores().Informer().GetStore().Add(tc.restore)) + require.NoError(t, harness.informerFactory.Velero().V1().Restores().Informer().GetStore().Add(tc.restore)) } if tc.backup != nil { - require.NoError(t, harness.informerFactory.Ark().V1().Backups().Informer().GetStore().Add(tc.backup)) + require.NoError(t, harness.informerFactory.Velero().V1().Backups().Informer().GetStore().Add(tc.backup)) } if tc.backupLocation != nil { - require.NoError(t, harness.informerFactory.Ark().V1().BackupStorageLocations().Informer().GetStore().Add(tc.backupLocation)) + require.NoError(t, harness.informerFactory.Velero().V1().BackupStorageLocations().Informer().GetStore().Add(tc.backupLocation)) } if tc.expectGetsURL { @@ -292,16 +292,16 @@ func TestProcessDownloadRequest(t *testing.T) { } if tc.expectGetsURL { - output, err := harness.client.ArkV1().DownloadRequests(tc.downloadRequest.Namespace).Get(tc.downloadRequest.Name, metav1.GetOptions{}) + output, err := harness.client.VeleroV1().DownloadRequests(tc.downloadRequest.Namespace).Get(tc.downloadRequest.Name, metav1.GetOptions{}) require.NoError(t, err) assert.Equal(t, string(v1.DownloadRequestPhaseProcessed), string(output.Status.Phase)) assert.Equal(t, "a-url", output.Status.DownloadURL) - assert.True(t, arktest.TimesAreEqual(harness.controller.clock.Now().Add(signedURLTTL), output.Status.Expiration.Time), "expiration does not match") + assert.True(t, velerotest.TimesAreEqual(harness.controller.clock.Now().Add(signedURLTTL), output.Status.Expiration.Time), "expiration does not match") } if tc.downloadRequest != nil && tc.downloadRequest.Status.Phase == v1.DownloadRequestPhaseProcessed { - res, err := harness.client.ArkV1().DownloadRequests(tc.downloadRequest.Namespace).Get(tc.downloadRequest.Name, metav1.GetOptions{}) + res, err := harness.client.VeleroV1().DownloadRequests(tc.downloadRequest.Namespace).Get(tc.downloadRequest.Name, metav1.GetOptions{}) if tc.expired { assert.True(t, apierrors.IsNotFound(err)) diff --git a/pkg/controller/gc_controller.go b/pkg/controller/gc_controller.go index 823b587a3f..71854b306e 100644 --- a/pkg/controller/gc_controller.go +++ b/pkg/controller/gc_controller.go @@ -26,11 +26,11 @@ import ( "k8s.io/apimachinery/pkg/util/clock" "k8s.io/client-go/tools/cache" - arkv1api "github.com/heptio/ark/pkg/apis/ark/v1" - pkgbackup "github.com/heptio/ark/pkg/backup" - arkv1client "github.com/heptio/ark/pkg/generated/clientset/versioned/typed/ark/v1" - informers "github.com/heptio/ark/pkg/generated/informers/externalversions/ark/v1" - listers "github.com/heptio/ark/pkg/generated/listers/ark/v1" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + 
pkgbackup "github.com/heptio/velero/pkg/backup" + velerov1client "github.com/heptio/velero/pkg/generated/clientset/versioned/typed/velero/v1" + informers "github.com/heptio/velero/pkg/generated/informers/externalversions/velero/v1" + listers "github.com/heptio/velero/pkg/generated/listers/velero/v1" ) const ( @@ -43,7 +43,7 @@ type gcController struct { backupLister listers.BackupLister deleteBackupRequestLister listers.DeleteBackupRequestLister - deleteBackupRequestClient arkv1client.DeleteBackupRequestsGetter + deleteBackupRequestClient velerov1client.DeleteBackupRequestsGetter clock clock.Clock } @@ -53,7 +53,7 @@ func NewGCController( logger logrus.FieldLogger, backupInformer informers.BackupInformer, deleteBackupRequestInformer informers.DeleteBackupRequestInformer, - deleteBackupRequestClient arkv1client.DeleteBackupRequestsGetter, + deleteBackupRequestClient velerov1client.DeleteBackupRequestsGetter, ) Interface { c := &gcController{ genericController: newGenericController("gc-controller", logger), @@ -133,8 +133,8 @@ func (c *gcController) processQueueItem(key string) error { log.Info("Backup has expired") selector := labels.SelectorFromSet(labels.Set(map[string]string{ - arkv1api.BackupNameLabel: backup.Name, - arkv1api.BackupUIDLabel: string(backup.UID), + velerov1api.BackupNameLabel: backup.Name, + velerov1api.BackupUIDLabel: string(backup.UID), })) dbrs, err := c.deleteBackupRequestLister.DeleteBackupRequests(ns).List(selector) @@ -146,7 +146,7 @@ func (c *gcController) processQueueItem(key string) error { // another one for _, dbr := range dbrs { switch dbr.Status.Phase { - case "", arkv1api.DeleteBackupRequestPhaseNew, arkv1api.DeleteBackupRequestPhaseInProgress: + case "", velerov1api.DeleteBackupRequestPhaseNew, velerov1api.DeleteBackupRequestPhaseInProgress: log.Info("Backup already has a pending deletion request") return nil } diff --git a/pkg/controller/gc_controller_test.go b/pkg/controller/gc_controller_test.go index 8f90e29b6b..b43f2b5860 100644 --- a/pkg/controller/gc_controller_test.go +++ b/pkg/controller/gc_controller_test.go @@ -32,11 +32,11 @@ import ( "k8s.io/apimachinery/pkg/watch" core "k8s.io/client-go/testing" - api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/generated/clientset/versioned/fake" - informers "github.com/heptio/ark/pkg/generated/informers/externalversions" - "github.com/heptio/ark/pkg/util/kube" - arktest "github.com/heptio/ark/pkg/util/test" + api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/generated/clientset/versioned/fake" + informers "github.com/heptio/velero/pkg/generated/informers/externalversions" + "github.com/heptio/velero/pkg/util/kube" + velerotest "github.com/heptio/velero/pkg/util/test" ) func TestGCControllerEnqueueAllBackups(t *testing.T) { @@ -45,10 +45,10 @@ func TestGCControllerEnqueueAllBackups(t *testing.T) { sharedInformers = informers.NewSharedInformerFactory(client, 0) controller = NewGCController( - arktest.NewLogger(), - sharedInformers.Ark().V1().Backups(), - sharedInformers.Ark().V1().DeleteBackupRequests(), - client.ArkV1(), + velerotest.NewLogger(), + sharedInformers.Velero().V1().Backups(), + sharedInformers.Velero().V1().DeleteBackupRequests(), + client.VeleroV1(), ).(*gcController) ) @@ -65,8 +65,8 @@ func TestGCControllerEnqueueAllBackups(t *testing.T) { var expected []string for i := 0; i < 3; i++ { - backup := arktest.NewTestBackup().WithName(fmt.Sprintf("backup-%d", i)).Backup - sharedInformers.Ark().V1().Backups().Informer().GetStore().Add(backup) 
+ backup := velerotest.NewTestBackup().WithName(fmt.Sprintf("backup-%d", i)).Backup + sharedInformers.Velero().V1().Backups().Informer().GetStore().Add(backup) expected = append(expected, kube.NamespaceAndName(backup)) } @@ -96,7 +96,7 @@ Loop: } func TestGCControllerHasUpdateFunc(t *testing.T) { - backup := arktest.NewTestBackup().WithName("backup").Backup + backup := velerotest.NewTestBackup().WithName("backup").Backup expected := kube.NamespaceAndName(backup) client := fake.NewSimpleClientset(backup) @@ -108,10 +108,10 @@ func TestGCControllerHasUpdateFunc(t *testing.T) { sharedInformers := informers.NewSharedInformerFactory(client, 0) controller := NewGCController( - arktest.NewLogger(), - sharedInformers.Ark().V1().Backups(), - sharedInformers.Ark().V1().DeleteBackupRequests(), - client.ArkV1(), + velerotest.NewLogger(), + sharedInformers.Velero().V1().Backups(), + sharedInformers.Velero().V1().DeleteBackupRequests(), + client.VeleroV1(), ).(*gcController) keys := make(chan string) @@ -163,21 +163,21 @@ func TestGCControllerProcessQueueItem(t *testing.T) { }, { name: "unexpired backup is not deleted", - backup: arktest.NewTestBackup().WithName("backup-1"). + backup: velerotest.NewTestBackup().WithName("backup-1"). WithExpiration(fakeClock.Now().Add(1 * time.Minute)). Backup, expectDeletion: false, }, { name: "expired backup with no pending deletion requests is deleted", - backup: arktest.NewTestBackup().WithName("backup-1"). + backup: velerotest.NewTestBackup().WithName("backup-1"). WithExpiration(fakeClock.Now().Add(-1 * time.Second)). Backup, expectDeletion: true, }, { name: "expired backup with a pending deletion request is not deleted", - backup: arktest.NewTestBackup().WithName("backup-1"). + backup: velerotest.NewTestBackup().WithName("backup-1"). WithExpiration(fakeClock.Now().Add(-1 * time.Second)). Backup, deleteBackupRequests: []*api.DeleteBackupRequest{ @@ -199,7 +199,7 @@ func TestGCControllerProcessQueueItem(t *testing.T) { }, { name: "expired backup with only processed deletion requests is deleted", - backup: arktest.NewTestBackup().WithName("backup-1"). + backup: velerotest.NewTestBackup().WithName("backup-1"). WithExpiration(fakeClock.Now().Add(-1 * time.Second)). Backup, deleteBackupRequests: []*api.DeleteBackupRequest{ @@ -221,7 +221,7 @@ func TestGCControllerProcessQueueItem(t *testing.T) { }, { name: "create DeleteBackupRequest error returns an error", - backup: arktest.NewTestBackup().WithName("backup-1"). + backup: velerotest.NewTestBackup().WithName("backup-1"). WithExpiration(fakeClock.Now().Add(-1 * time.Second)). 
Backup, expectDeletion: true, @@ -238,21 +238,21 @@ func TestGCControllerProcessQueueItem(t *testing.T) { ) controller := NewGCController( - arktest.NewLogger(), - sharedInformers.Ark().V1().Backups(), - sharedInformers.Ark().V1().DeleteBackupRequests(), - client.ArkV1(), + velerotest.NewLogger(), + sharedInformers.Velero().V1().Backups(), + sharedInformers.Velero().V1().DeleteBackupRequests(), + client.VeleroV1(), ).(*gcController) controller.clock = fakeClock var key string if test.backup != nil { key = kube.NamespaceAndName(test.backup) - sharedInformers.Ark().V1().Backups().Informer().GetStore().Add(test.backup) + sharedInformers.Velero().V1().Backups().Informer().GetStore().Add(test.backup) } for _, dbr := range test.deleteBackupRequests { - sharedInformers.Ark().V1().DeleteBackupRequests().Informer().GetStore().Add(dbr) + sharedInformers.Velero().V1().DeleteBackupRequests().Informer().GetStore().Add(dbr) } if test.createDeleteBackupRequestError { diff --git a/pkg/controller/pod_volume_backup_controller.go b/pkg/controller/pod_volume_backup_controller.go index 8ef684549c..78d8e0cff1 100644 --- a/pkg/controller/pod_volume_backup_controller.go +++ b/pkg/controller/pod_volume_backup_controller.go @@ -32,20 +32,20 @@ import ( corev1listers "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" - arkv1api "github.com/heptio/ark/pkg/apis/ark/v1" - arkv1client "github.com/heptio/ark/pkg/generated/clientset/versioned/typed/ark/v1" - informers "github.com/heptio/ark/pkg/generated/informers/externalversions/ark/v1" - listers "github.com/heptio/ark/pkg/generated/listers/ark/v1" - "github.com/heptio/ark/pkg/restic" - arkexec "github.com/heptio/ark/pkg/util/exec" - "github.com/heptio/ark/pkg/util/filesystem" - "github.com/heptio/ark/pkg/util/kube" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + velerov1client "github.com/heptio/velero/pkg/generated/clientset/versioned/typed/velero/v1" + informers "github.com/heptio/velero/pkg/generated/informers/externalversions/velero/v1" + listers "github.com/heptio/velero/pkg/generated/listers/velero/v1" + "github.com/heptio/velero/pkg/restic" + veleroexec "github.com/heptio/velero/pkg/util/exec" + "github.com/heptio/velero/pkg/util/filesystem" + "github.com/heptio/velero/pkg/util/kube" ) type podVolumeBackupController struct { *genericController - podVolumeBackupClient arkv1client.PodVolumeBackupsGetter + podVolumeBackupClient velerov1client.PodVolumeBackupsGetter podVolumeBackupLister listers.PodVolumeBackupLister secretLister corev1listers.SecretLister podLister corev1listers.PodLister @@ -53,7 +53,7 @@ type podVolumeBackupController struct { backupLocationLister listers.BackupStorageLocationLister nodeName string - processBackupFunc func(*arkv1api.PodVolumeBackup) error + processBackupFunc func(*velerov1api.PodVolumeBackup) error fileSystem filesystem.Interface } @@ -61,7 +61,7 @@ type podVolumeBackupController struct { func NewPodVolumeBackupController( logger logrus.FieldLogger, podVolumeBackupInformer informers.PodVolumeBackupInformer, - podVolumeBackupClient arkv1client.PodVolumeBackupsGetter, + podVolumeBackupClient velerov1client.PodVolumeBackupsGetter, podInformer cache.SharedIndexInformer, secretInformer cache.SharedIndexInformer, pvcInformer corev1informers.PersistentVolumeClaimInformer, @@ -103,7 +103,7 @@ func NewPodVolumeBackupController( } func (c *podVolumeBackupController) pvbHandler(obj interface{}) { - req := obj.(*arkv1api.PodVolumeBackup) + req := obj.(*velerov1api.PodVolumeBackup) // only enqueue items for this 
node if req.Spec.Node != c.nodeName { @@ -112,7 +112,7 @@ func (c *podVolumeBackupController) pvbHandler(obj interface{}) { log := loggerForPodVolumeBackup(c.logger, req) - if req.Status.Phase != "" && req.Status.Phase != arkv1api.PodVolumeBackupPhaseNew { + if req.Status.Phase != "" && req.Status.Phase != velerov1api.PodVolumeBackupPhaseNew { log.Debug("Backup is not new, not enqueuing") return } @@ -142,7 +142,7 @@ func (c *podVolumeBackupController) processQueueItem(key string) error { // only process new items switch req.Status.Phase { - case "", arkv1api.PodVolumeBackupPhaseNew: + case "", velerov1api.PodVolumeBackupPhaseNew: default: return nil } @@ -152,7 +152,7 @@ func (c *podVolumeBackupController) processQueueItem(key string) error { return c.processBackupFunc(reqCopy) } -func loggerForPodVolumeBackup(baseLogger logrus.FieldLogger, req *arkv1api.PodVolumeBackup) logrus.FieldLogger { +func loggerForPodVolumeBackup(baseLogger logrus.FieldLogger, req *velerov1api.PodVolumeBackup) logrus.FieldLogger { log := baseLogger.WithFields(logrus.Fields{ "namespace": req.Namespace, "name": req.Name, @@ -165,7 +165,7 @@ func loggerForPodVolumeBackup(baseLogger logrus.FieldLogger, req *arkv1api.PodVo return log } -func (c *podVolumeBackupController) processBackup(req *arkv1api.PodVolumeBackup) error { +func (c *podVolumeBackupController) processBackup(req *velerov1api.PodVolumeBackup) error { log := loggerForPodVolumeBackup(c.logger, req) log.Info("Backup starting") @@ -173,7 +173,7 @@ func (c *podVolumeBackupController) processBackup(req *arkv1api.PodVolumeBackup) var err error // update status to InProgress - req, err = c.patchPodVolumeBackup(req, updatePhaseFunc(arkv1api.PodVolumeBackupPhaseInProgress)) + req, err = c.patchPodVolumeBackup(req, updatePhaseFunc(velerov1api.PodVolumeBackupPhaseInProgress)) if err != nil { log.WithError(err).Error("Error setting phase to InProgress") return errors.WithStack(err) @@ -228,7 +228,7 @@ func (c *podVolumeBackupController) processBackup(req *arkv1api.PodVolumeBackup) var stdout, stderr string - if stdout, stderr, err = arkexec.RunCommand(resticCmd.Cmd()); err != nil { + if stdout, stderr, err = veleroexec.RunCommand(resticCmd.Cmd()); err != nil { log.WithError(errors.WithStack(err)).Errorf("Error running command=%s, stdout=%s, stderr=%s", resticCmd.String(), stdout, stderr) return c.fail(req, fmt.Sprintf("error running restic backup, stderr=%s: %s", stderr, err.Error()), log) } @@ -241,10 +241,10 @@ func (c *podVolumeBackupController) processBackup(req *arkv1api.PodVolumeBackup) } // update status to Completed with path & snapshot id - req, err = c.patchPodVolumeBackup(req, func(r *arkv1api.PodVolumeBackup) { + req, err = c.patchPodVolumeBackup(req, func(r *velerov1api.PodVolumeBackup) { r.Status.Path = path r.Status.SnapshotID = snapshotID - r.Status.Phase = arkv1api.PodVolumeBackupPhaseCompleted + r.Status.Phase = velerov1api.PodVolumeBackupPhaseCompleted }) if err != nil { log.WithError(err).Error("Error setting phase to Completed") @@ -256,7 +256,7 @@ func (c *podVolumeBackupController) processBackup(req *arkv1api.PodVolumeBackup) return nil } -func (c *podVolumeBackupController) patchPodVolumeBackup(req *arkv1api.PodVolumeBackup, mutate func(*arkv1api.PodVolumeBackup)) (*arkv1api.PodVolumeBackup, error) { +func (c *podVolumeBackupController) patchPodVolumeBackup(req *velerov1api.PodVolumeBackup, mutate func(*velerov1api.PodVolumeBackup)) (*velerov1api.PodVolumeBackup, error) { // Record original json oldData, err := json.Marshal(req) if err != nil 
{ @@ -285,9 +285,9 @@ func (c *podVolumeBackupController) patchPodVolumeBackup(req *arkv1api.PodVolume return req, nil } -func (c *podVolumeBackupController) fail(req *arkv1api.PodVolumeBackup, msg string, log logrus.FieldLogger) error { - if _, err := c.patchPodVolumeBackup(req, func(r *arkv1api.PodVolumeBackup) { - r.Status.Phase = arkv1api.PodVolumeBackupPhaseFailed +func (c *podVolumeBackupController) fail(req *velerov1api.PodVolumeBackup, msg string, log logrus.FieldLogger) error { + if _, err := c.patchPodVolumeBackup(req, func(r *velerov1api.PodVolumeBackup) { + r.Status.Phase = velerov1api.PodVolumeBackupPhaseFailed r.Status.Message = msg }); err != nil { log.WithError(err).Error("Error setting phase to Failed") @@ -296,8 +296,8 @@ func (c *podVolumeBackupController) fail(req *arkv1api.PodVolumeBackup, msg stri return nil } -func updatePhaseFunc(phase arkv1api.PodVolumeBackupPhase) func(r *arkv1api.PodVolumeBackup) { - return func(r *arkv1api.PodVolumeBackup) { +func updatePhaseFunc(phase velerov1api.PodVolumeBackupPhase) func(r *velerov1api.PodVolumeBackup) { + return func(r *velerov1api.PodVolumeBackup) { r.Status.Phase = phase } } diff --git a/pkg/controller/pod_volume_backup_controller_test.go b/pkg/controller/pod_volume_backup_controller_test.go index a2cf392ba3..5020eee67e 100644 --- a/pkg/controller/pod_volume_backup_controller_test.go +++ b/pkg/controller/pod_volume_backup_controller_test.go @@ -22,8 +22,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - arkv1api "github.com/heptio/ark/pkg/apis/ark/v1" - arktest "github.com/heptio/ark/pkg/util/test" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + velerotest "github.com/heptio/velero/pkg/util/test" ) func TestPVBHandler(t *testing.T) { @@ -31,13 +31,13 @@ func TestPVBHandler(t *testing.T) { tests := []struct { name string - obj *arkv1api.PodVolumeBackup + obj *velerov1api.PodVolumeBackup shouldEnqueue bool }{ { name: "Empty phase pvb on same node should be enqueued", - obj: &arkv1api.PodVolumeBackup{ - Spec: arkv1api.PodVolumeBackupSpec{ + obj: &velerov1api.PodVolumeBackup{ + Spec: velerov1api.PodVolumeBackupSpec{ Node: controllerNode, }, }, @@ -45,48 +45,48 @@ func TestPVBHandler(t *testing.T) { }, { name: "New phase pvb on same node should be enqueued", - obj: &arkv1api.PodVolumeBackup{ - Spec: arkv1api.PodVolumeBackupSpec{ + obj: &velerov1api.PodVolumeBackup{ + Spec: velerov1api.PodVolumeBackupSpec{ Node: controllerNode, }, - Status: arkv1api.PodVolumeBackupStatus{ - Phase: arkv1api.PodVolumeBackupPhaseNew, + Status: velerov1api.PodVolumeBackupStatus{ + Phase: velerov1api.PodVolumeBackupPhaseNew, }, }, shouldEnqueue: true, }, { name: "InProgress phase pvb on same node should not be enqueued", - obj: &arkv1api.PodVolumeBackup{ - Spec: arkv1api.PodVolumeBackupSpec{ + obj: &velerov1api.PodVolumeBackup{ + Spec: velerov1api.PodVolumeBackupSpec{ Node: controllerNode, }, - Status: arkv1api.PodVolumeBackupStatus{ - Phase: arkv1api.PodVolumeBackupPhaseInProgress, + Status: velerov1api.PodVolumeBackupStatus{ + Phase: velerov1api.PodVolumeBackupPhaseInProgress, }, }, shouldEnqueue: false, }, { name: "Completed phase pvb on same node should not be enqueued", - obj: &arkv1api.PodVolumeBackup{ - Spec: arkv1api.PodVolumeBackupSpec{ + obj: &velerov1api.PodVolumeBackup{ + Spec: velerov1api.PodVolumeBackupSpec{ Node: controllerNode, }, - Status: arkv1api.PodVolumeBackupStatus{ - Phase: arkv1api.PodVolumeBackupPhaseCompleted, + Status: velerov1api.PodVolumeBackupStatus{ + Phase: 
velerov1api.PodVolumeBackupPhaseCompleted, }, }, shouldEnqueue: false, }, { name: "Failed phase pvb on same node should not be enqueued", - obj: &arkv1api.PodVolumeBackup{ - Spec: arkv1api.PodVolumeBackupSpec{ + obj: &velerov1api.PodVolumeBackup{ + Spec: velerov1api.PodVolumeBackupSpec{ Node: controllerNode, }, - Status: arkv1api.PodVolumeBackupStatus{ - Phase: arkv1api.PodVolumeBackupPhaseFailed, + Status: velerov1api.PodVolumeBackupStatus{ + Phase: velerov1api.PodVolumeBackupPhaseFailed, }, }, shouldEnqueue: false, @@ -94,8 +94,8 @@ func TestPVBHandler(t *testing.T) { { name: "Empty phase pvb on different node should not be enqueued", - obj: &arkv1api.PodVolumeBackup{ - Spec: arkv1api.PodVolumeBackupSpec{ + obj: &velerov1api.PodVolumeBackup{ + Spec: velerov1api.PodVolumeBackupSpec{ Node: "some-other-node", }, }, @@ -103,48 +103,48 @@ func TestPVBHandler(t *testing.T) { }, { name: "New phase pvb on different node should not be enqueued", - obj: &arkv1api.PodVolumeBackup{ - Spec: arkv1api.PodVolumeBackupSpec{ + obj: &velerov1api.PodVolumeBackup{ + Spec: velerov1api.PodVolumeBackupSpec{ Node: "some-other-node", }, - Status: arkv1api.PodVolumeBackupStatus{ - Phase: arkv1api.PodVolumeBackupPhaseNew, + Status: velerov1api.PodVolumeBackupStatus{ + Phase: velerov1api.PodVolumeBackupPhaseNew, }, }, shouldEnqueue: false, }, { name: "InProgress phase pvb on different node should not be enqueued", - obj: &arkv1api.PodVolumeBackup{ - Spec: arkv1api.PodVolumeBackupSpec{ + obj: &velerov1api.PodVolumeBackup{ + Spec: velerov1api.PodVolumeBackupSpec{ Node: "some-other-node", }, - Status: arkv1api.PodVolumeBackupStatus{ - Phase: arkv1api.PodVolumeBackupPhaseInProgress, + Status: velerov1api.PodVolumeBackupStatus{ + Phase: velerov1api.PodVolumeBackupPhaseInProgress, }, }, shouldEnqueue: false, }, { name: "Completed phase pvb on different node should not be enqueued", - obj: &arkv1api.PodVolumeBackup{ - Spec: arkv1api.PodVolumeBackupSpec{ + obj: &velerov1api.PodVolumeBackup{ + Spec: velerov1api.PodVolumeBackupSpec{ Node: "some-other-node", }, - Status: arkv1api.PodVolumeBackupStatus{ - Phase: arkv1api.PodVolumeBackupPhaseCompleted, + Status: velerov1api.PodVolumeBackupStatus{ + Phase: velerov1api.PodVolumeBackupPhaseCompleted, }, }, shouldEnqueue: false, }, { name: "Failed phase pvb on different node should not be enqueued", - obj: &arkv1api.PodVolumeBackup{ - Spec: arkv1api.PodVolumeBackupSpec{ + obj: &velerov1api.PodVolumeBackup{ + Spec: velerov1api.PodVolumeBackupSpec{ Node: "some-other-node", }, - Status: arkv1api.PodVolumeBackupStatus{ - Phase: arkv1api.PodVolumeBackupPhaseFailed, + Status: velerov1api.PodVolumeBackupStatus{ + Phase: velerov1api.PodVolumeBackupPhaseFailed, }, }, shouldEnqueue: false, @@ -154,7 +154,7 @@ func TestPVBHandler(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { c := &podVolumeBackupController{ - genericController: newGenericController("pod-volume-backup", arktest.NewLogger()), + genericController: newGenericController("pod-volume-backup", velerotest.NewLogger()), nodeName: controllerNode, } diff --git a/pkg/controller/pod_volume_restore_controller.go b/pkg/controller/pod_volume_restore_controller.go index 046ac31c84..47b88802a4 100644 --- a/pkg/controller/pod_volume_restore_controller.go +++ b/pkg/controller/pod_volume_restore_controller.go @@ -35,21 +35,21 @@ import ( corev1listers "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" - arkv1api "github.com/heptio/ark/pkg/apis/ark/v1" - arkv1client 
"github.com/heptio/ark/pkg/generated/clientset/versioned/typed/ark/v1" - informers "github.com/heptio/ark/pkg/generated/informers/externalversions/ark/v1" - listers "github.com/heptio/ark/pkg/generated/listers/ark/v1" - "github.com/heptio/ark/pkg/restic" - "github.com/heptio/ark/pkg/util/boolptr" - arkexec "github.com/heptio/ark/pkg/util/exec" - "github.com/heptio/ark/pkg/util/filesystem" - "github.com/heptio/ark/pkg/util/kube" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + velerov1client "github.com/heptio/velero/pkg/generated/clientset/versioned/typed/velero/v1" + informers "github.com/heptio/velero/pkg/generated/informers/externalversions/velero/v1" + listers "github.com/heptio/velero/pkg/generated/listers/velero/v1" + "github.com/heptio/velero/pkg/restic" + "github.com/heptio/velero/pkg/util/boolptr" + veleroexec "github.com/heptio/velero/pkg/util/exec" + "github.com/heptio/velero/pkg/util/filesystem" + "github.com/heptio/velero/pkg/util/kube" ) type podVolumeRestoreController struct { *genericController - podVolumeRestoreClient arkv1client.PodVolumeRestoresGetter + podVolumeRestoreClient velerov1client.PodVolumeRestoresGetter podVolumeRestoreLister listers.PodVolumeRestoreLister podLister corev1listers.PodLister secretLister corev1listers.SecretLister @@ -57,7 +57,7 @@ type podVolumeRestoreController struct { backupLocationLister listers.BackupStorageLocationLister nodeName string - processRestoreFunc func(*arkv1api.PodVolumeRestore) error + processRestoreFunc func(*velerov1api.PodVolumeRestore) error fileSystem filesystem.Interface } @@ -65,7 +65,7 @@ type podVolumeRestoreController struct { func NewPodVolumeRestoreController( logger logrus.FieldLogger, podVolumeRestoreInformer informers.PodVolumeRestoreInformer, - podVolumeRestoreClient arkv1client.PodVolumeRestoresGetter, + podVolumeRestoreClient velerov1client.PodVolumeRestoresGetter, podInformer cache.SharedIndexInformer, secretInformer cache.SharedIndexInformer, pvcInformer corev1informers.PersistentVolumeClaimInformer, @@ -118,7 +118,7 @@ func NewPodVolumeRestoreController( } func (c *podVolumeRestoreController) pvrHandler(obj interface{}) { - pvr := obj.(*arkv1api.PodVolumeRestore) + pvr := obj.(*velerov1api.PodVolumeRestore) log := loggerForPodVolumeRestore(c.logger, pvr) if !isPVRNew(pvr) { @@ -166,7 +166,7 @@ func (c *podVolumeRestoreController) podHandler(obj interface{}) { } selector := labels.Set(map[string]string{ - arkv1api.PodUIDLabel: string(pod.UID), + velerov1api.PodUIDLabel: string(pod.UID), }).AsSelector() pvrs, err := c.podVolumeRestoreLister.List(selector) @@ -190,8 +190,8 @@ func (c *podVolumeRestoreController) podHandler(obj interface{}) { } } -func isPVRNew(pvr *arkv1api.PodVolumeRestore) bool { - return pvr.Status.Phase == "" || pvr.Status.Phase == arkv1api.PodVolumeRestorePhaseNew +func isPVRNew(pvr *velerov1api.PodVolumeRestore) bool { + return pvr.Status.Phase == "" || pvr.Status.Phase == velerov1api.PodVolumeRestorePhaseNew } func isPodOnNode(pod *corev1api.Pod, node string) bool { @@ -199,7 +199,7 @@ func isPodOnNode(pod *corev1api.Pod, node string) bool { } func isResticInitContainerRunning(pod *corev1api.Pod) bool { - // no init containers, or the first one is not the ark restic one: return false + // no init containers, or the first one is not the velero restic one: return false if len(pod.Spec.InitContainers) == 0 || pod.Spec.InitContainers[0].Name != restic.InitContainer { return false } @@ -237,7 +237,7 @@ func (c *podVolumeRestoreController) processQueueItem(key string) error { 
return c.processRestoreFunc(reqCopy) } -func loggerForPodVolumeRestore(baseLogger logrus.FieldLogger, req *arkv1api.PodVolumeRestore) logrus.FieldLogger { +func loggerForPodVolumeRestore(baseLogger logrus.FieldLogger, req *velerov1api.PodVolumeRestore) logrus.FieldLogger { log := baseLogger.WithFields(logrus.Fields{ "namespace": req.Namespace, "name": req.Name, @@ -250,7 +250,7 @@ func loggerForPodVolumeRestore(baseLogger logrus.FieldLogger, req *arkv1api.PodV return log } -func (c *podVolumeRestoreController) processRestore(req *arkv1api.PodVolumeRestore) error { +func (c *podVolumeRestoreController) processRestore(req *velerov1api.PodVolumeRestore) error { log := loggerForPodVolumeRestore(c.logger, req) log.Info("Restore starting") @@ -258,7 +258,7 @@ func (c *podVolumeRestoreController) processRestore(req *arkv1api.PodVolumeResto var err error // update status to InProgress - req, err = c.patchPodVolumeRestore(req, updatePodVolumeRestorePhaseFunc(arkv1api.PodVolumeRestorePhaseInProgress)) + req, err = c.patchPodVolumeRestore(req, updatePodVolumeRestorePhaseFunc(velerov1api.PodVolumeRestorePhaseInProgress)) if err != nil { log.WithError(err).Error("Error setting phase to InProgress") return errors.WithStack(err) @@ -291,7 +291,7 @@ func (c *podVolumeRestoreController) processRestore(req *arkv1api.PodVolumeResto } // update status to Completed - if _, err = c.patchPodVolumeRestore(req, updatePodVolumeRestorePhaseFunc(arkv1api.PodVolumeRestorePhaseCompleted)); err != nil { + if _, err = c.patchPodVolumeRestore(req, updatePodVolumeRestorePhaseFunc(velerov1api.PodVolumeRestorePhaseCompleted)); err != nil { log.WithError(err).Error("Error setting phase to Completed") return err } @@ -301,7 +301,7 @@ func (c *podVolumeRestoreController) processRestore(req *arkv1api.PodVolumeResto return nil } -func (c *podVolumeRestoreController) restorePodVolume(req *arkv1api.PodVolumeRestore, credsFile, volumeDir string, log logrus.FieldLogger) error { +func (c *podVolumeRestoreController) restorePodVolume(req *velerov1api.PodVolumeRestore, credsFile, volumeDir string, log logrus.FieldLogger) error { // Get the full path of the new volume's directory as mounted in the daemonset pod, which // will look like: /host_pods//volumes// volumePath, err := singlePathMatch(fmt.Sprintf("/host_pods/%s/volumes/*/%s", string(req.Spec.Pod.UID), volumeDir)) @@ -327,17 +327,17 @@ func (c *podVolumeRestoreController) restorePodVolume(req *arkv1api.PodVolumeRes var stdout, stderr string - if stdout, stderr, err = arkexec.RunCommand(resticCmd.Cmd()); err != nil { + if stdout, stderr, err = veleroexec.RunCommand(resticCmd.Cmd()); err != nil { return errors.Wrapf(err, "error running restic restore, cmd=%s, stdout=%s, stderr=%s", resticCmd.String(), stdout, stderr) } log.Debugf("Ran command=%s, stdout=%s, stderr=%s", resticCmd.String(), stdout, stderr) - // Remove the .ark directory from the restored volume (it may contain done files from previous restores + // Remove the .velero directory from the restored volume (it may contain done files from previous restores // of this volume, which we don't want to carry over). If this fails for any reason, log and continue, since // this is non-essential cleanup (the done files are named based on restore UID and the init container looks // for the one specific to the restore being executed). 
- if err := os.RemoveAll(filepath.Join(volumePath, ".ark")); err != nil { - log.WithError(err).Warnf("error removing .ark directory from directory %s", volumePath) + if err := os.RemoveAll(filepath.Join(volumePath, ".velero")); err != nil { + log.WithError(err).Warnf("error removing .velero directory from directory %s", volumePath) } var restoreUID types.UID @@ -348,23 +348,23 @@ func (c *podVolumeRestoreController) restorePodVolume(req *arkv1api.PodVolumeRes } } - // Create the .ark directory within the volume dir so we can write a done file + // Create the .velero directory within the volume dir so we can write a done file // for this restore. - if err := os.MkdirAll(filepath.Join(volumePath, ".ark"), 0755); err != nil { - return errors.Wrap(err, "error creating .ark directory for done file") + if err := os.MkdirAll(filepath.Join(volumePath, ".velero"), 0755); err != nil { + return errors.Wrap(err, "error creating .velero directory for done file") } - // Write a done file with name= into the just-created .ark dir - // within the volume. The ark restic init container on the pod is waiting + // Write a done file with name= into the just-created .velero dir + // within the volume. The velero restic init container on the pod is waiting // for this file to exist in each restored volume before completing. - if err := ioutil.WriteFile(filepath.Join(volumePath, ".ark", string(restoreUID)), nil, 0644); err != nil { + if err := ioutil.WriteFile(filepath.Join(volumePath, ".velero", string(restoreUID)), nil, 0644); err != nil { return errors.Wrap(err, "error writing done file") } return nil } -func (c *podVolumeRestoreController) patchPodVolumeRestore(req *arkv1api.PodVolumeRestore, mutate func(*arkv1api.PodVolumeRestore)) (*arkv1api.PodVolumeRestore, error) { +func (c *podVolumeRestoreController) patchPodVolumeRestore(req *velerov1api.PodVolumeRestore, mutate func(*velerov1api.PodVolumeRestore)) (*velerov1api.PodVolumeRestore, error) { // Record original json oldData, err := json.Marshal(req) if err != nil { @@ -393,9 +393,9 @@ func (c *podVolumeRestoreController) patchPodVolumeRestore(req *arkv1api.PodVolu return req, nil } -func (c *podVolumeRestoreController) failRestore(req *arkv1api.PodVolumeRestore, msg string, log logrus.FieldLogger) error { - if _, err := c.patchPodVolumeRestore(req, func(pvr *arkv1api.PodVolumeRestore) { - pvr.Status.Phase = arkv1api.PodVolumeRestorePhaseFailed +func (c *podVolumeRestoreController) failRestore(req *velerov1api.PodVolumeRestore, msg string, log logrus.FieldLogger) error { + if _, err := c.patchPodVolumeRestore(req, func(pvr *velerov1api.PodVolumeRestore) { + pvr.Status.Phase = velerov1api.PodVolumeRestorePhaseFailed pvr.Status.Message = msg }); err != nil { log.WithError(err).Error("Error setting phase to Failed") @@ -404,8 +404,8 @@ func (c *podVolumeRestoreController) failRestore(req *arkv1api.PodVolumeRestore, return nil } -func updatePodVolumeRestorePhaseFunc(phase arkv1api.PodVolumeRestorePhase) func(r *arkv1api.PodVolumeRestore) { - return func(r *arkv1api.PodVolumeRestore) { +func updatePodVolumeRestorePhaseFunc(phase velerov1api.PodVolumeRestorePhase) func(r *velerov1api.PodVolumeRestore) { + return func(r *velerov1api.PodVolumeRestore) { r.Status.Phase = phase } } diff --git a/pkg/controller/pod_volume_restore_controller_test.go b/pkg/controller/pod_volume_restore_controller_test.go index 9a3ee3a19d..c129bd0487 100644 --- a/pkg/controller/pod_volume_restore_controller_test.go +++ b/pkg/controller/pod_volume_restore_controller_test.go @@ -29,12 
+29,12 @@ import ( corev1listers "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" - arkv1api "github.com/heptio/ark/pkg/apis/ark/v1" - arkfake "github.com/heptio/ark/pkg/generated/clientset/versioned/fake" - arkinformers "github.com/heptio/ark/pkg/generated/informers/externalversions" - arkv1listers "github.com/heptio/ark/pkg/generated/listers/ark/v1" - "github.com/heptio/ark/pkg/restic" - arktest "github.com/heptio/ark/pkg/util/test" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + velerofake "github.com/heptio/velero/pkg/generated/clientset/versioned/fake" + veleroinformers "github.com/heptio/velero/pkg/generated/informers/externalversions" + velerov1listers "github.com/heptio/velero/pkg/generated/listers/velero/v1" + "github.com/heptio/velero/pkg/restic" + velerotest "github.com/heptio/velero/pkg/util/test" ) func TestPVRHandler(t *testing.T) { @@ -42,47 +42,47 @@ func TestPVRHandler(t *testing.T) { tests := []struct { name string - obj *arkv1api.PodVolumeRestore + obj *velerov1api.PodVolumeRestore pod *corev1api.Pod shouldEnqueue bool }{ { name: "InProgress phase pvr should not be enqueued", - obj: &arkv1api.PodVolumeRestore{ - Status: arkv1api.PodVolumeRestoreStatus{ - Phase: arkv1api.PodVolumeRestorePhaseInProgress, + obj: &velerov1api.PodVolumeRestore{ + Status: velerov1api.PodVolumeRestoreStatus{ + Phase: velerov1api.PodVolumeRestorePhaseInProgress, }, }, shouldEnqueue: false, }, { name: "Completed phase pvr should not be enqueued", - obj: &arkv1api.PodVolumeRestore{ - Status: arkv1api.PodVolumeRestoreStatus{ - Phase: arkv1api.PodVolumeRestorePhaseCompleted, + obj: &velerov1api.PodVolumeRestore{ + Status: velerov1api.PodVolumeRestoreStatus{ + Phase: velerov1api.PodVolumeRestorePhaseCompleted, }, }, shouldEnqueue: false, }, { name: "Failed phase pvr should not be enqueued", - obj: &arkv1api.PodVolumeRestore{ - Status: arkv1api.PodVolumeRestoreStatus{ - Phase: arkv1api.PodVolumeRestorePhaseFailed, + obj: &velerov1api.PodVolumeRestore{ + Status: velerov1api.PodVolumeRestoreStatus{ + Phase: velerov1api.PodVolumeRestorePhaseFailed, }, }, shouldEnqueue: false, }, { name: "Unable to get pvr's pod should not be enqueued", - obj: &arkv1api.PodVolumeRestore{ - Spec: arkv1api.PodVolumeRestoreSpec{ + obj: &velerov1api.PodVolumeRestore{ + Spec: velerov1api.PodVolumeRestoreSpec{ Pod: corev1api.ObjectReference{ Namespace: "ns-1", Name: "pod-1", }, }, - Status: arkv1api.PodVolumeRestoreStatus{ + Status: velerov1api.PodVolumeRestoreStatus{ Phase: "", }, }, @@ -90,14 +90,14 @@ func TestPVRHandler(t *testing.T) { }, { name: "Empty phase pvr with pod not on node running init container should not be enqueued", - obj: &arkv1api.PodVolumeRestore{ - Spec: arkv1api.PodVolumeRestoreSpec{ + obj: &velerov1api.PodVolumeRestore{ + Spec: velerov1api.PodVolumeRestoreSpec{ Pod: corev1api.ObjectReference{ Namespace: "ns-1", Name: "pod-1", }, }, - Status: arkv1api.PodVolumeRestoreStatus{ + Status: velerov1api.PodVolumeRestoreStatus{ Phase: "", }, }, @@ -130,14 +130,14 @@ func TestPVRHandler(t *testing.T) { }, { name: "Empty phase pvr with pod on node not running init container should not be enqueued", - obj: &arkv1api.PodVolumeRestore{ - Spec: arkv1api.PodVolumeRestoreSpec{ + obj: &velerov1api.PodVolumeRestore{ + Spec: velerov1api.PodVolumeRestoreSpec{ Pod: corev1api.ObjectReference{ Namespace: "ns-1", Name: "pod-1", }, }, - Status: arkv1api.PodVolumeRestoreStatus{ + Status: velerov1api.PodVolumeRestoreStatus{ Phase: "", }, }, @@ -166,14 +166,14 @@ func TestPVRHandler(t *testing.T) { }, 
{ name: "Empty phase pvr with pod on node running init container should be enqueued", - obj: &arkv1api.PodVolumeRestore{ - Spec: arkv1api.PodVolumeRestoreSpec{ + obj: &velerov1api.PodVolumeRestore{ + Spec: velerov1api.PodVolumeRestoreSpec{ Pod: corev1api.ObjectReference{ Namespace: "ns-1", Name: "pod-1", }, }, - Status: arkv1api.PodVolumeRestoreStatus{ + Status: velerov1api.PodVolumeRestoreStatus{ Phase: "", }, }, @@ -211,7 +211,7 @@ func TestPVRHandler(t *testing.T) { var ( podInformer = cache.NewSharedIndexInformer(nil, new(corev1api.Pod), 0, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) c = &podVolumeRestoreController{ - genericController: newGenericController("pod-volume-restore", arktest.NewLogger()), + genericController: newGenericController("pod-volume-restore", velerotest.NewLogger()), podLister: corev1listers.NewPodLister(podInformer.GetIndexer()), nodeName: controllerNode, } @@ -239,7 +239,7 @@ func TestPodHandler(t *testing.T) { tests := []struct { name string pod *corev1api.Pod - podVolumeRestores []*arkv1api.PodVolumeRestore + podVolumeRestores []*velerov1api.PodVolumeRestore expectedEnqueues sets.String }{ { @@ -268,13 +268,13 @@ func TestPodHandler(t *testing.T) { }, }, }, - podVolumeRestores: []*arkv1api.PodVolumeRestore{ + podVolumeRestores: []*velerov1api.PodVolumeRestore{ { ObjectMeta: metav1.ObjectMeta{ Namespace: "ns-1", Name: "pvr-1", Labels: map[string]string{ - arkv1api.PodUIDLabel: "uid", + velerov1api.PodUIDLabel: "uid", }, }, }, @@ -283,7 +283,7 @@ func TestPodHandler(t *testing.T) { Namespace: "ns-1", Name: "pvr-2", Labels: map[string]string{ - arkv1api.PodUIDLabel: "uid", + velerov1api.PodUIDLabel: "uid", }, }, }, @@ -292,11 +292,11 @@ func TestPodHandler(t *testing.T) { Namespace: "ns-1", Name: "pvr-3", Labels: map[string]string{ - arkv1api.PodUIDLabel: "uid", + velerov1api.PodUIDLabel: "uid", }, }, - Status: arkv1api.PodVolumeRestoreStatus{ - Phase: arkv1api.PodVolumeRestorePhaseInProgress, + Status: velerov1api.PodVolumeRestoreStatus{ + Phase: velerov1api.PodVolumeRestorePhaseInProgress, }, }, { @@ -304,7 +304,7 @@ func TestPodHandler(t *testing.T) { Namespace: "ns-1", Name: "pvr-4", Labels: map[string]string{ - arkv1api.PodUIDLabel: "some-other-pod", + velerov1api.PodUIDLabel: "some-other-pod", }, }, }, @@ -335,13 +335,13 @@ func TestPodHandler(t *testing.T) { }, }, }, - podVolumeRestores: []*arkv1api.PodVolumeRestore{ + podVolumeRestores: []*velerov1api.PodVolumeRestore{ { ObjectMeta: metav1.ObjectMeta{ Namespace: "ns-1", Name: "pvr-1", Labels: map[string]string{ - arkv1api.PodUIDLabel: "uid", + velerov1api.PodUIDLabel: "uid", }, }, }, @@ -373,13 +373,13 @@ func TestPodHandler(t *testing.T) { }, }, }, - podVolumeRestores: []*arkv1api.PodVolumeRestore{ + podVolumeRestores: []*velerov1api.PodVolumeRestore{ { ObjectMeta: metav1.ObjectMeta{ Namespace: "ns-1", Name: "pvr-1", Labels: map[string]string{ - arkv1api.PodUIDLabel: "uid", + velerov1api.PodUIDLabel: "uid", }, }, }, @@ -390,12 +390,12 @@ func TestPodHandler(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { var ( - client = arkfake.NewSimpleClientset() - informers = arkinformers.NewSharedInformerFactory(client, 0) - pvrInformer = informers.Ark().V1().PodVolumeRestores() + client = velerofake.NewSimpleClientset() + informers = veleroinformers.NewSharedInformerFactory(client, 0) + pvrInformer = informers.Velero().V1().PodVolumeRestores() c = &podVolumeRestoreController{ - genericController: newGenericController("pod-volume-restore", 
arktest.NewLogger()), - podVolumeRestoreLister: arkv1listers.NewPodVolumeRestoreLister(pvrInformer.Informer().GetIndexer()), + genericController: newGenericController("pod-volume-restore", velerotest.NewLogger()), + podVolumeRestoreLister: velerov1listers.NewPodVolumeRestoreLister(pvrInformer.Informer().GetIndexer()), nodeName: controllerNode, } ) @@ -421,14 +421,14 @@ func TestPodHandler(t *testing.T) { } func TestIsPVRNew(t *testing.T) { - pvr := &arkv1api.PodVolumeRestore{} + pvr := &velerov1api.PodVolumeRestore{} - expectationByStatus := map[arkv1api.PodVolumeRestorePhase]bool{ - "": true, - arkv1api.PodVolumeRestorePhaseNew: true, - arkv1api.PodVolumeRestorePhaseInProgress: false, - arkv1api.PodVolumeRestorePhaseCompleted: false, - arkv1api.PodVolumeRestorePhaseFailed: false, + expectationByStatus := map[velerov1api.PodVolumeRestorePhase]bool{ + "": true, + velerov1api.PodVolumeRestorePhaseNew: true, + velerov1api.PodVolumeRestorePhaseInProgress: false, + velerov1api.PodVolumeRestorePhaseCompleted: false, + velerov1api.PodVolumeRestorePhaseFailed: false, } for phase, expected := range expectationByStatus { diff --git a/pkg/controller/restic_repository_controller.go b/pkg/controller/restic_repository_controller.go index 9f550c8fa8..7b721a1520 100644 --- a/pkg/controller/restic_repository_controller.go +++ b/pkg/controller/restic_repository_controller.go @@ -30,17 +30,17 @@ import ( "k8s.io/apimachinery/pkg/util/clock" "k8s.io/client-go/tools/cache" - "github.com/heptio/ark/pkg/apis/ark/v1" - arkv1client "github.com/heptio/ark/pkg/generated/clientset/versioned/typed/ark/v1" - informers "github.com/heptio/ark/pkg/generated/informers/externalversions/ark/v1" - listers "github.com/heptio/ark/pkg/generated/listers/ark/v1" - "github.com/heptio/ark/pkg/restic" + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + velerov1client "github.com/heptio/velero/pkg/generated/clientset/versioned/typed/velero/v1" + informers "github.com/heptio/velero/pkg/generated/informers/externalversions/velero/v1" + listers "github.com/heptio/velero/pkg/generated/listers/velero/v1" + "github.com/heptio/velero/pkg/restic" ) type resticRepositoryController struct { *genericController - resticRepositoryClient arkv1client.ResticRepositoriesGetter + resticRepositoryClient velerov1client.ResticRepositoriesGetter resticRepositoryLister listers.ResticRepositoryLister backupLocationLister listers.BackupStorageLocationLister repositoryManager restic.RepositoryManager @@ -52,7 +52,7 @@ type resticRepositoryController struct { func NewResticRepositoryController( logger logrus.FieldLogger, resticRepositoryInformer informers.ResticRepositoryInformer, - resticRepositoryClient arkv1client.ResticRepositoriesGetter, + resticRepositoryClient velerov1client.ResticRepositoriesGetter, backupLocationInformer informers.BackupStorageLocationInformer, repositoryManager restic.RepositoryManager, ) Interface { diff --git a/pkg/controller/restore_controller.go b/pkg/controller/restore_controller.go index b7826c432a..b860e29bad 100644 --- a/pkg/controller/restore_controller.go +++ b/pkg/controller/restore_controller.go @@ -33,17 +33,18 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/tools/cache" - api "github.com/heptio/ark/pkg/apis/ark/v1" - arkv1client "github.com/heptio/ark/pkg/generated/clientset/versioned/typed/ark/v1" - informers "github.com/heptio/ark/pkg/generated/informers/externalversions/ark/v1" - listers "github.com/heptio/ark/pkg/generated/listers/ark/v1" - "github.com/heptio/ark/pkg/metrics" - 
"github.com/heptio/ark/pkg/persistence" - "github.com/heptio/ark/pkg/plugin" - "github.com/heptio/ark/pkg/restore" - "github.com/heptio/ark/pkg/util/collections" - kubeutil "github.com/heptio/ark/pkg/util/kube" - "github.com/heptio/ark/pkg/util/logging" + api "github.com/heptio/velero/pkg/apis/velero/v1" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + velerov1client "github.com/heptio/velero/pkg/generated/clientset/versioned/typed/velero/v1" + informers "github.com/heptio/velero/pkg/generated/informers/externalversions/velero/v1" + listers "github.com/heptio/velero/pkg/generated/listers/velero/v1" + "github.com/heptio/velero/pkg/metrics" + "github.com/heptio/velero/pkg/persistence" + "github.com/heptio/velero/pkg/plugin" + "github.com/heptio/velero/pkg/restore" + "github.com/heptio/velero/pkg/util/collections" + kubeutil "github.com/heptio/velero/pkg/util/kube" + "github.com/heptio/velero/pkg/util/logging" ) // nonRestorableResources is a blacklist for the restoration process. Any resources @@ -54,20 +55,22 @@ var nonRestorableResources = []string{ "events.events.k8s.io", // Don't ever restore backups - if appropriate, they'll be synced in from object storage. - // https://github.com/heptio/ark/issues/622 + // https://github.com/heptio/velero/issues/622 "backups.ark.heptio.com", + "backups.velero.io", // Restores are cluster-specific, and don't have value moving across clusters. - // https://github.com/heptio/ark/issues/622 + // https://github.com/heptio/velero/issues/622 "restores.ark.heptio.com", + "restores.velero.io", } type restoreController struct { *genericController namespace string - restoreClient arkv1client.RestoresGetter - backupClient arkv1client.BackupsGetter + restoreClient velerov1client.RestoresGetter + backupClient velerov1client.BackupsGetter restorer restore.Restorer backupLister listers.BackupLister restoreLister listers.RestoreLister @@ -88,8 +91,8 @@ type restoreResult struct { func NewRestoreController( namespace string, restoreInformer informers.RestoreInformer, - restoreClient arkv1client.RestoresGetter, - backupClient arkv1client.BackupsGetter, + restoreClient velerov1client.RestoresGetter, + backupClient velerov1client.BackupsGetter, restorer restore.Restorer, backupInformer informers.BackupInformer, backupLocationInformer informers.BackupStorageLocationInformer, @@ -238,12 +241,14 @@ func (c *restoreController) processRestore(key string) error { pluginManager, ) - restore.Status.Warnings = len(restoreRes.warnings.Ark) + len(restoreRes.warnings.Cluster) + //TODO(1.0): Remove warnings.Ark + restore.Status.Warnings = len(restoreRes.warnings.Velero) + len(restoreRes.warnings.Cluster) + len(restoreRes.warnings.Ark) for _, w := range restoreRes.warnings.Namespaces { restore.Status.Warnings += len(w) } - restore.Status.Errors = len(restoreRes.errors.Ark) + len(restoreRes.errors.Cluster) + //TODO (1.0): Remove errors.Ark + restore.Status.Errors = len(restoreRes.errors.Velero) + len(restoreRes.errors.Cluster) + len(restoreRes.errors.Ark) for _, e := range restoreRes.errors.Namespaces { restore.Status.Errors += len(e) } @@ -310,7 +315,7 @@ func (c *restoreController) validateAndComplete(restore *api.Restore, pluginMana // the schedule if restore.Spec.ScheduleName != "" { selector := labels.SelectorFromSet(labels.Set(map[string]string{ - "ark-schedule": restore.Spec.ScheduleName, + velerov1api.ScheduleNameLabel: restore.Spec.ScheduleName, })) backups, err := c.backupLister.Backups(c.namespace).List(selector) @@ -358,7 +363,7 @@ func (c *restoreController) 
validateAndComplete(restore *api.Restore, pluginMana // Fill in the ScheduleName so it's easier to consume for metrics. if restore.Spec.ScheduleName == "" { - restore.Spec.ScheduleName = info.backup.GetLabels()["ark-schedule"] + restore.Spec.ScheduleName = info.backup.GetLabels()[velerov1api.ScheduleNameLabel] } return info @@ -419,33 +424,6 @@ func (c *restoreController) fetchBackupInfo(backupName string, pluginManager plu }, nil } -func (c *restoreController) backupInfoForLocation(location *api.BackupStorageLocation, backupName string, pluginManager plugin.Manager) (backupInfo, error) { - backupStore, err := persistence.NewObjectBackupStore(location, pluginManager, c.logger) - if err != nil { - return backupInfo{}, err - } - - backup, err := backupStore.GetBackupMetadata(backupName) - if err != nil { - return backupInfo{}, err - } - - // ResourceVersion needs to be cleared in order to create the object in the API - backup.ResourceVersion = "" - // Clear out the namespace, in case the backup was made in a different cluster, with a different namespace - backup.Namespace = "" - - backupCreated, err := c.backupClient.Backups(c.namespace).Create(backup) - if err != nil { - return backupInfo{}, errors.WithStack(err) - } - - return backupInfo{ - backup: backupCreated, - backupStore: backupStore, - }, nil -} - func (c *restoreController) runRestore( restore *api.Restore, actions []restore.ItemAction, @@ -465,7 +443,7 @@ func (c *restoreController) runRestore( ). WithError(errors.WithStack(err)). Error("Error creating log temp file") - restoreErrors.Ark = append(restoreErrors.Ark, err.Error()) + restoreErrors.Velero = append(restoreErrors.Velero, err.Error()) return restoreResult{warnings: restoreWarnings, errors: restoreErrors}, restoreFailure } gzippedLogFile := gzip.NewWriter(logFile) @@ -487,7 +465,7 @@ func (c *restoreController) runRestore( backupFile, err := downloadToTempFile(restore.Spec.BackupName, info.backupStore, c.logger) if err != nil { log.WithError(err).Error("Error downloading backup") - restoreErrors.Ark = append(restoreErrors.Ark, err.Error()) + restoreErrors.Velero = append(restoreErrors.Velero, err.Error()) restoreFailure = err return restoreResult{warnings: restoreWarnings, errors: restoreErrors}, restoreFailure } @@ -496,7 +474,7 @@ func (c *restoreController) runRestore( resultsFile, err := ioutil.TempFile("", "") if err != nil { log.WithError(errors.WithStack(err)).Error("Error creating results temp file") - restoreErrors.Ark = append(restoreErrors.Ark, err.Error()) + restoreErrors.Velero = append(restoreErrors.Velero, err.Error()) restoreFailure = err return restoreResult{warnings: restoreWarnings, errors: restoreErrors}, restoreFailure } @@ -505,7 +483,7 @@ func (c *restoreController) runRestore( volumeSnapshots, err := info.backupStore.GetBackupVolumeSnapshots(restore.Spec.BackupName) if err != nil { log.WithError(errors.WithStack(err)).Error("Error fetching volume snapshots") - restoreErrors.Ark = append(restoreErrors.Ark, err.Error()) + restoreErrors.Velero = append(restoreErrors.Velero, err.Error()) restoreFailure = err return restoreResult{warnings: restoreWarnings, errors: restoreErrors}, restoreFailure } @@ -516,13 +494,13 @@ func (c *restoreController) runRestore( restoreWarnings, restoreErrors = c.restorer.Restore(log, restore, info.backup, volumeSnapshots, backupFile, actions, c.snapshotLocationLister, pluginManager) log.Info("restore completed") - // Try to upload the log file. This is best-effort. If we fail, we'll add to the ark errors. 
+ // Try to upload the log file. This is best-effort. If we fail, we'll add to the velero errors. if err := gzippedLogFile.Close(); err != nil { c.logger.WithError(err).Error("error closing gzippedLogFile") } // Reset the offset to 0 for reading if _, err = logFile.Seek(0, 0); err != nil { - restoreErrors.Ark = append(restoreErrors.Ark, fmt.Sprintf("error resetting log file offset to 0: %v", err)) + restoreErrors.Velero = append(restoreErrors.Velero, fmt.Sprintf("error resetting log file offset to 0: %v", err)) return restoreResult{warnings: restoreWarnings, errors: restoreErrors}, restoreFailure } @@ -589,7 +567,7 @@ func downloadToTempFile( return file, nil } -func patchRestore(original, updated *api.Restore, client arkv1client.RestoresGetter) (*api.Restore, error) { +func patchRestore(original, updated *api.Restore, client velerov1client.RestoresGetter) (*api.Restore, error) { origBytes, err := json.Marshal(original) if err != nil { return nil, errors.Wrap(err, "error marshalling original restore") diff --git a/pkg/controller/restore_controller_test.go b/pkg/controller/restore_controller_test.go index f51c5627ce..2042f294d4 100644 --- a/pkg/controller/restore_controller_test.go +++ b/pkg/controller/restore_controller_test.go @@ -34,19 +34,20 @@ import ( core "k8s.io/client-go/testing" "k8s.io/client-go/tools/cache" - api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/generated/clientset/versioned/fake" - informers "github.com/heptio/ark/pkg/generated/informers/externalversions" - listers "github.com/heptio/ark/pkg/generated/listers/ark/v1" - "github.com/heptio/ark/pkg/metrics" - "github.com/heptio/ark/pkg/persistence" - persistencemocks "github.com/heptio/ark/pkg/persistence/mocks" - "github.com/heptio/ark/pkg/plugin" - pluginmocks "github.com/heptio/ark/pkg/plugin/mocks" - "github.com/heptio/ark/pkg/restore" - "github.com/heptio/ark/pkg/util/collections" - arktest "github.com/heptio/ark/pkg/util/test" - "github.com/heptio/ark/pkg/volume" + api "github.com/heptio/velero/pkg/apis/velero/v1" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/generated/clientset/versioned/fake" + informers "github.com/heptio/velero/pkg/generated/informers/externalversions" + listers "github.com/heptio/velero/pkg/generated/listers/velero/v1" + "github.com/heptio/velero/pkg/metrics" + "github.com/heptio/velero/pkg/persistence" + persistencemocks "github.com/heptio/velero/pkg/persistence/mocks" + "github.com/heptio/velero/pkg/plugin" + pluginmocks "github.com/heptio/velero/pkg/plugin/mocks" + "github.com/heptio/velero/pkg/restore" + "github.com/heptio/velero/pkg/util/collections" + velerotest "github.com/heptio/velero/pkg/util/test" + "github.com/heptio/velero/pkg/volume" ) func TestFetchBackupInfo(t *testing.T) { @@ -63,17 +64,17 @@ func TestFetchBackupInfo(t *testing.T) { { name: "lister has backup", backupName: "backup-1", - informerLocations: []*api.BackupStorageLocation{arktest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation}, - informerBackups: []*api.Backup{arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup}, - expectedRes: arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, + informerLocations: []*api.BackupStorageLocation{velerotest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation}, + informerBackups: 
[]*api.Backup{velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup}, + expectedRes: velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, }, { name: "lister does not have a backup, but backupSvc does", backupName: "backup-1", - backupStoreBackup: arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, - informerLocations: []*api.BackupStorageLocation{arktest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation}, - informerBackups: []*api.Backup{arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup}, - expectedRes: arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, + backupStoreBackup: velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, + informerLocations: []*api.BackupStorageLocation{velerotest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation}, + informerBackups: []*api.Backup{velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup}, + expectedRes: velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, }, { name: "no backup", @@ -89,7 +90,7 @@ func TestFetchBackupInfo(t *testing.T) { client = fake.NewSimpleClientset() restorer = &fakeRestorer{} sharedInformers = informers.NewSharedInformerFactory(client, 0) - logger = arktest.NewLogger() + logger = velerotest.NewLogger() pluginManager = &pluginmocks.Manager{} backupStore = &persistencemocks.BackupStore{} ) @@ -99,13 +100,13 @@ func TestFetchBackupInfo(t *testing.T) { c := NewRestoreController( api.DefaultNamespace, - sharedInformers.Ark().V1().Restores(), - client.ArkV1(), - client.ArkV1(), + sharedInformers.Velero().V1().Restores(), + client.VeleroV1(), + client.VeleroV1(), restorer, - sharedInformers.Ark().V1().Backups(), - sharedInformers.Ark().V1().BackupStorageLocations(), - sharedInformers.Ark().V1().VolumeSnapshotLocations(), + sharedInformers.Velero().V1().Backups(), + sharedInformers.Velero().V1().BackupStorageLocations(), + sharedInformers.Velero().V1().VolumeSnapshotLocations(), logger, logrus.InfoLevel, func(logrus.FieldLogger) plugin.Manager { return pluginManager }, @@ -119,11 +120,11 @@ func TestFetchBackupInfo(t *testing.T) { if test.backupStoreError == nil { for _, itm := range test.informerLocations { - sharedInformers.Ark().V1().BackupStorageLocations().Informer().GetStore().Add(itm) + sharedInformers.Velero().V1().BackupStorageLocations().Informer().GetStore().Add(itm) } for _, itm := range test.informerBackups { - sharedInformers.Ark().V1().Backups().Informer().GetStore().Add(itm) + sharedInformers.Velero().V1().Backups().Informer().GetStore().Add(itm) } } @@ -167,17 +168,17 @@ func TestProcessRestoreSkips(t *testing.T) { { name: "restore with phase InProgress does not get processed", restoreKey: "foo/bar", - restore: arktest.NewTestRestore("foo", "bar", api.RestorePhaseInProgress).Restore, + restore: velerotest.NewTestRestore("foo", "bar", api.RestorePhaseInProgress).Restore, }, { name: "restore with phase Completed does not get processed", restoreKey: "foo/bar", - restore: arktest.NewTestRestore("foo", "bar", api.RestorePhaseCompleted).Restore, + restore: velerotest.NewTestRestore("foo", "bar", api.RestorePhaseCompleted).Restore, }, { name: "restore with phase FailedValidation does not get processed", restoreKey: 
"foo/bar", - restore: arktest.NewTestRestore("foo", "bar", api.RestorePhaseFailedValidation).Restore, + restore: velerotest.NewTestRestore("foo", "bar", api.RestorePhaseFailedValidation).Restore, }, } @@ -187,18 +188,18 @@ func TestProcessRestoreSkips(t *testing.T) { client = fake.NewSimpleClientset() restorer = &fakeRestorer{} sharedInformers = informers.NewSharedInformerFactory(client, 0) - logger = arktest.NewLogger() + logger = velerotest.NewLogger() ) c := NewRestoreController( api.DefaultNamespace, - sharedInformers.Ark().V1().Restores(), - client.ArkV1(), - client.ArkV1(), + sharedInformers.Velero().V1().Restores(), + client.VeleroV1(), + client.VeleroV1(), restorer, - sharedInformers.Ark().V1().Backups(), - sharedInformers.Ark().V1().BackupStorageLocations(), - sharedInformers.Ark().V1().VolumeSnapshotLocations(), + sharedInformers.Velero().V1().Backups(), + sharedInformers.Velero().V1().BackupStorageLocations(), + sharedInformers.Velero().V1().VolumeSnapshotLocations(), logger, logrus.InfoLevel, nil, @@ -207,7 +208,7 @@ func TestProcessRestoreSkips(t *testing.T) { ).(*restoreController) if test.restore != nil { - sharedInformers.Ark().V1().Restores().Informer().GetStore().Add(test.restore) + sharedInformers.Velero().V1().Restores().Informer().GetStore().Add(test.restore) } err := c.processRestore(test.restoreKey) @@ -237,18 +238,18 @@ func TestProcessRestore(t *testing.T) { }{ { name: "restore with both namespace in both includedNamespaces and excludedNamespaces fails validation", - location: arktest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation, + location: velerotest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation, restore: NewRestore("foo", "bar", "backup-1", "another-1", "*", api.RestorePhaseNew).WithExcludedNamespace("another-1").Restore, - backup: arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, + backup: velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, expectedErr: false, expectedPhase: string(api.RestorePhaseFailedValidation), expectedValidationErrors: []string{"Invalid included/excluded namespace lists: excludes list cannot contain an item in the includes list: another-1"}, }, { name: "restore with resource in both includedResources and excludedResources fails validation", - location: arktest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation, + location: velerotest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation, restore: NewRestore("foo", "bar", "backup-1", "*", "a-resource", api.RestorePhaseNew).WithExcludedResource("a-resource").Restore, - backup: arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, + backup: velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, expectedErr: false, expectedPhase: string(api.RestorePhaseFailedValidation), expectedValidationErrors: []string{"Invalid included/excluded resource lists: excludes list cannot contain an item in the includes list: a-resource"}, @@ -269,13 +270,13 @@ func TestProcessRestore(t *testing.T) { }, { name: "valid restore with schedule name gets executed", - location: 
arktest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation, + location: velerotest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation, restore: NewRestore("foo", "bar", "", "ns-1", "", api.RestorePhaseNew).WithSchedule("sched-1").Restore, - backup: arktest. + backup: velerotest. NewTestBackup(). WithName("backup-1"). WithStorageLocation("default"). - WithLabel("ark-schedule", "sched-1"). + WithLabel(velerov1api.ScheduleNameLabel, "sched-1"). WithPhase(api.BackupPhaseCompleted). Backup, expectedErr: false, @@ -287,14 +288,14 @@ func TestProcessRestore(t *testing.T) { restore: NewRestore("foo", "bar", "backup-1", "ns-1", "*", api.RestorePhaseNew).Restore, expectedErr: false, expectedPhase: string(api.RestorePhaseFailedValidation), - expectedValidationErrors: []string{"Error retrieving backup: backup.ark.heptio.com \"backup-1\" not found"}, + expectedValidationErrors: []string{"Error retrieving backup: backup.velero.io \"backup-1\" not found"}, backupStoreGetBackupMetadataErr: errors.New("no backup here"), }, { name: "restorer throwing an error causes the restore to fail", - location: arktest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation, + location: velerotest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation, restore: NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseNew).Restore, - backup: arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, + backup: velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, restorerError: errors.New("blarg"), expectedErr: false, expectedPhase: string(api.RestorePhaseInProgress), @@ -303,18 +304,18 @@ func TestProcessRestore(t *testing.T) { }, { name: "valid restore gets executed", - location: arktest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation, + location: velerotest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation, restore: NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseNew).Restore, - backup: arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, + backup: velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, expectedErr: false, expectedPhase: string(api.RestorePhaseInProgress), expectedRestorerCall: NewRestore("foo", "bar", "backup-1", "ns-1", "", api.RestorePhaseInProgress).Restore, }, { name: "restoration of nodes is not supported", - location: arktest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation, + location: velerotest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation, restore: NewRestore("foo", "bar", "backup-1", "ns-1", "nodes", api.RestorePhaseNew).Restore, - backup: arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, + backup: velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, expectedErr: false, expectedPhase: string(api.RestorePhaseFailedValidation), expectedValidationErrors: []string{ @@ -324,9 +325,9 @@ func 
TestProcessRestore(t *testing.T) { }, { name: "restoration of events is not supported", - location: arktest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation, + location: velerotest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation, restore: NewRestore("foo", "bar", "backup-1", "ns-1", "events", api.RestorePhaseNew).Restore, - backup: arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, + backup: velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, expectedErr: false, expectedPhase: string(api.RestorePhaseFailedValidation), expectedValidationErrors: []string{ @@ -336,9 +337,9 @@ func TestProcessRestore(t *testing.T) { }, { name: "restoration of events.events.k8s.io is not supported", - location: arktest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation, + location: velerotest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation, restore: NewRestore("foo", "bar", "backup-1", "ns-1", "events.events.k8s.io", api.RestorePhaseNew).Restore, - backup: arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, + backup: velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, expectedErr: false, expectedPhase: string(api.RestorePhaseFailedValidation), expectedValidationErrors: []string{ @@ -347,37 +348,37 @@ func TestProcessRestore(t *testing.T) { }, }, { - name: "restoration of backups.ark.heptio.com is not supported", - location: arktest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation, - restore: NewRestore("foo", "bar", "backup-1", "ns-1", "backups.ark.heptio.com", api.RestorePhaseNew).Restore, - backup: arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, + name: "restoration of backups.velero.io is not supported", + location: velerotest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation, + restore: NewRestore("foo", "bar", "backup-1", "ns-1", "backups.velero.io", api.RestorePhaseNew).Restore, + backup: velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, expectedErr: false, expectedPhase: string(api.RestorePhaseFailedValidation), expectedValidationErrors: []string{ - "backups.ark.heptio.com are non-restorable resources", - "Invalid included/excluded resource lists: excludes list cannot contain an item in the includes list: backups.ark.heptio.com", + "backups.velero.io are non-restorable resources", + "Invalid included/excluded resource lists: excludes list cannot contain an item in the includes list: backups.velero.io", }, }, { - name: "restoration of restores.ark.heptio.com is not supported", - location: arktest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation, - restore: NewRestore("foo", "bar", "backup-1", "ns-1", "restores.ark.heptio.com", api.RestorePhaseNew).Restore, - backup: arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, + name: "restoration of restores.velero.io is not supported", + location: 
velerotest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation, + restore: NewRestore("foo", "bar", "backup-1", "ns-1", "restores.velero.io", api.RestorePhaseNew).Restore, + backup: velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, expectedErr: false, expectedPhase: string(api.RestorePhaseFailedValidation), expectedValidationErrors: []string{ - "restores.ark.heptio.com are non-restorable resources", - "Invalid included/excluded resource lists: excludes list cannot contain an item in the includes list: restores.ark.heptio.com", + "restores.velero.io are non-restorable resources", + "Invalid included/excluded resource lists: excludes list cannot contain an item in the includes list: restores.velero.io", }, }, { name: "backup download error results in failed restore", - location: arktest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation, + location: velerotest.NewTestBackupStorageLocation().WithName("default").WithProvider("myCloud").WithObjectStorage("bucket").BackupStorageLocation, restore: NewRestore(api.DefaultNamespace, "bar", "backup-1", "ns-1", "", api.RestorePhaseNew).Restore, expectedPhase: string(api.RestorePhaseInProgress), expectedFinalPhase: string(api.RestorePhaseFailed), backupStoreGetBackupContentsErr: errors.New("Couldn't download backup"), - backup: arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, + backup: velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("default").Backup, }, } @@ -387,7 +388,7 @@ func TestProcessRestore(t *testing.T) { client = fake.NewSimpleClientset() restorer = &fakeRestorer{} sharedInformers = informers.NewSharedInformerFactory(client, 0) - logger = arktest.NewLogger() + logger = velerotest.NewLogger() pluginManager = &pluginmocks.Manager{} backupStore = &persistencemocks.BackupStore{} ) @@ -397,13 +398,13 @@ func TestProcessRestore(t *testing.T) { c := NewRestoreController( api.DefaultNamespace, - sharedInformers.Ark().V1().Restores(), - client.ArkV1(), - client.ArkV1(), + sharedInformers.Velero().V1().Restores(), + client.VeleroV1(), + client.VeleroV1(), restorer, - sharedInformers.Ark().V1().Backups(), - sharedInformers.Ark().V1().BackupStorageLocations(), - sharedInformers.Ark().V1().VolumeSnapshotLocations(), + sharedInformers.Velero().V1().Backups(), + sharedInformers.Velero().V1().BackupStorageLocations(), + sharedInformers.Velero().V1().VolumeSnapshotLocations(), logger, logrus.InfoLevel, func(logrus.FieldLogger) plugin.Manager { return pluginManager }, @@ -416,14 +417,14 @@ func TestProcessRestore(t *testing.T) { } if test.location != nil { - sharedInformers.Ark().V1().BackupStorageLocations().Informer().GetStore().Add(test.location) + sharedInformers.Velero().V1().BackupStorageLocations().Informer().GetStore().Add(test.location) } if test.backup != nil { - sharedInformers.Ark().V1().Backups().Informer().GetStore().Add(test.backup) + sharedInformers.Velero().V1().Backups().Informer().GetStore().Add(test.backup) } if test.restore != nil { - sharedInformers.Ark().V1().Restores().Informer().GetStore().Add(test.restore) + sharedInformers.Velero().V1().Restores().Informer().GetStore().Add(test.restore) // this is necessary so the Patch() call returns the appropriate object client.PrependReactor("patch", "restores", func(action core.Action) (bool, runtime.Object, error) { @@ -461,7 +462,7 @@ func 
TestProcessRestore(t *testing.T) { } if test.backup != nil { - sharedInformers.Ark().V1().Backups().Informer().GetStore().Add(test.backup) + sharedInformers.Velero().V1().Backups().Informer().GetStore().Add(test.backup) } var warnings, errors api.RestoreResult @@ -469,7 +470,7 @@ func TestProcessRestore(t *testing.T) { errors.Namespaces = map[string][]string{"ns-1": {test.restorerError.Error()}} } if test.putRestoreLogErr != nil { - errors.Ark = append(errors.Ark, "error uploading log file to object storage: "+test.putRestoreLogErr.Error()) + errors.Velero = append(errors.Velero, "error uploading log file to object storage: "+test.putRestoreLogErr.Error()) } if test.expectedRestorerCall != nil { backupStore.On("GetBackupContents", test.backup.Name).Return(ioutil.NopCloser(bytes.NewReader([]byte("hello world"))), nil) @@ -566,7 +567,7 @@ func TestProcessRestore(t *testing.T) { } } - arktest.ValidatePatch(t, actions[0], expected, decode) + velerotest.ValidatePatch(t, actions[0], expected, decode) // if we don't expect a restore, validate it wasn't called and exit the test if test.expectedRestorerCall == nil { @@ -594,7 +595,7 @@ func TestProcessRestore(t *testing.T) { } } - arktest.ValidatePatch(t, actions[1], expected, decode) + velerotest.ValidatePatch(t, actions[1], expected, decode) // explicitly capturing the argument passed to Restore myself because // I want to validate the called arg as of the time of calling, but @@ -616,31 +617,31 @@ func TestValidateAndComplete(t *testing.T) { }{ { name: "backup with .status.volumeBackups and no volumesnapshots.json file does not error", - storageLocation: arktest.NewTestBackupStorageLocation().WithName("loc-1").BackupStorageLocation, - backup: arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("loc-1").WithSnapshot("pv-1", "snap-1").Backup, + storageLocation: velerotest.NewTestBackupStorageLocation().WithName("loc-1").BackupStorageLocation, + backup: velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("loc-1").WithSnapshot("pv-1", "snap-1").Backup, volumeSnapshots: nil, - restore: arktest.NewDefaultTestRestore().WithBackup("backup-1").Restore, + restore: velerotest.NewDefaultTestRestore().WithBackup("backup-1").Restore, expectedErrs: nil, }, { name: "backup with no .status.volumeBackups and volumesnapshots.json file does not error", - storageLocation: arktest.NewTestBackupStorageLocation().WithName("loc-1").BackupStorageLocation, - backup: arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("loc-1").Backup, + storageLocation: velerotest.NewTestBackupStorageLocation().WithName("loc-1").BackupStorageLocation, + backup: velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("loc-1").Backup, volumeSnapshots: []*volume.Snapshot{{}}, - restore: arktest.NewDefaultTestRestore().WithBackup("backup-1").Restore, + restore: velerotest.NewDefaultTestRestore().WithBackup("backup-1").Restore, expectedErrs: nil, }, { name: "backup with both .status.volumeBackups and volumesnapshots.json file errors", - storageLocation: arktest.NewTestBackupStorageLocation().WithName("loc-1").BackupStorageLocation, - backup: arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("loc-1").WithSnapshot("pv-1", "snap-1").Backup, + storageLocation: velerotest.NewTestBackupStorageLocation().WithName("loc-1").BackupStorageLocation, + backup: velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("loc-1").WithSnapshot("pv-1", "snap-1").Backup, volumeSnapshots: []*volume.Snapshot{{}}, - restore: 
arktest.NewDefaultTestRestore().WithBackup("backup-1").Restore, + restore: velerotest.NewDefaultTestRestore().WithBackup("backup-1").Restore, expectedErrs: []string{"Backup must not have both .status.volumeBackups and a volumesnapshots.json.gz file in object storage"}, }, { name: "backup with .status.volumeBackups, and >1 volume snapshot locations exist, errors", - storageLocation: arktest.NewTestBackupStorageLocation().WithName("loc-1").BackupStorageLocation, + storageLocation: velerotest.NewTestBackupStorageLocation().WithName("loc-1").BackupStorageLocation, snapshotLocations: []*api.VolumeSnapshotLocation{ { ObjectMeta: metav1.ObjectMeta{ @@ -655,13 +656,13 @@ func TestValidateAndComplete(t *testing.T) { }, }, }, - backup: arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("loc-1").WithSnapshot("pv-1", "snap-1").Backup, - restore: arktest.NewDefaultTestRestore().WithBackup("backup-1").Restore, + backup: velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("loc-1").WithSnapshot("pv-1", "snap-1").Backup, + restore: velerotest.NewDefaultTestRestore().WithBackup("backup-1").Restore, expectedErrs: []string{"Cannot restore backup with .status.volumeBackups when more than one volume snapshot location exists"}, }, { name: "backup with .status.volumeBackups, and 1 volume snapshot location exists, does not error", - storageLocation: arktest.NewTestBackupStorageLocation().WithName("loc-1").BackupStorageLocation, + storageLocation: velerotest.NewTestBackupStorageLocation().WithName("loc-1").BackupStorageLocation, snapshotLocations: []*api.VolumeSnapshotLocation{ { ObjectMeta: metav1.ObjectMeta{ @@ -670,8 +671,8 @@ func TestValidateAndComplete(t *testing.T) { }, }, }, - backup: arktest.NewTestBackup().WithName("backup-1").WithStorageLocation("loc-1").WithSnapshot("pv-1", "snap-1").Backup, - restore: arktest.NewDefaultTestRestore().WithBackup("backup-1").Restore, + backup: velerotest.NewTestBackup().WithName("backup-1").WithStorageLocation("loc-1").WithSnapshot("pv-1", "snap-1").Backup, + restore: velerotest.NewDefaultTestRestore().WithBackup("backup-1").Restore, expectedErrs: nil, }, } @@ -681,26 +682,26 @@ func TestValidateAndComplete(t *testing.T) { var ( clientset = fake.NewSimpleClientset() sharedInformers = informers.NewSharedInformerFactory(clientset, 0) - logger = arktest.NewLogger() + logger = velerotest.NewLogger() backupStore = &persistencemocks.BackupStore{} controller = &restoreController{ genericController: &genericController{ logger: logger, }, namespace: api.DefaultNamespace, - backupLister: sharedInformers.Ark().V1().Backups().Lister(), - backupLocationLister: sharedInformers.Ark().V1().BackupStorageLocations().Lister(), - snapshotLocationLister: sharedInformers.Ark().V1().VolumeSnapshotLocations().Lister(), + backupLister: sharedInformers.Velero().V1().Backups().Lister(), + backupLocationLister: sharedInformers.Velero().V1().BackupStorageLocations().Lister(), + snapshotLocationLister: sharedInformers.Velero().V1().VolumeSnapshotLocations().Lister(), newBackupStore: func(*api.BackupStorageLocation, persistence.ObjectStoreGetter, logrus.FieldLogger) (persistence.BackupStore, error) { return backupStore, nil }, } ) - require.NoError(t, sharedInformers.Ark().V1().BackupStorageLocations().Informer().GetStore().Add(tc.storageLocation)) - require.NoError(t, sharedInformers.Ark().V1().Backups().Informer().GetStore().Add(tc.backup)) + require.NoError(t, sharedInformers.Velero().V1().BackupStorageLocations().Informer().GetStore().Add(tc.storageLocation)) + 
require.NoError(t, sharedInformers.Velero().V1().Backups().Informer().GetStore().Add(tc.backup)) for _, loc := range tc.snapshotLocations { - require.NoError(t, sharedInformers.Ark().V1().VolumeSnapshotLocations().Informer().GetStore().Add(loc)) + require.NoError(t, sharedInformers.Velero().V1().VolumeSnapshotLocations().Informer().GetStore().Add(loc)) } backupStore.On("GetBackupVolumeSnapshots", tc.backup.Name).Return(tc.volumeSnapshots, nil) @@ -716,19 +717,19 @@ func TestvalidateAndCompleteWhenScheduleNameSpecified(t *testing.T) { var ( client = fake.NewSimpleClientset() sharedInformers = informers.NewSharedInformerFactory(client, 0) - logger = arktest.NewLogger() + logger = velerotest.NewLogger() pluginManager = &pluginmocks.Manager{} ) c := NewRestoreController( api.DefaultNamespace, - sharedInformers.Ark().V1().Restores(), - client.ArkV1(), - client.ArkV1(), + sharedInformers.Velero().V1().Restores(), + client.VeleroV1(), + client.VeleroV1(), nil, - sharedInformers.Ark().V1().Backups(), - sharedInformers.Ark().V1().BackupStorageLocations(), - sharedInformers.Ark().V1().VolumeSnapshotLocations(), + sharedInformers.Velero().V1().Backups(), + sharedInformers.Velero().V1().BackupStorageLocations(), + sharedInformers.Velero().V1().VolumeSnapshotLocations(), logger, logrus.DebugLevel, nil, @@ -747,10 +748,10 @@ func TestvalidateAndCompleteWhenScheduleNameSpecified(t *testing.T) { } // no backups created from the schedule: fail validation - require.NoError(t, sharedInformers.Ark().V1().Backups().Informer().GetStore().Add(arktest. + require.NoError(t, sharedInformers.Velero().V1().Backups().Informer().GetStore().Add(velerotest. NewTestBackup(). WithName("backup-1"). - WithLabel("ark-schedule", "non-matching-schedule"). + WithLabel(velerov1api.ScheduleNameLabel, "non-matching-schedule"). WithPhase(api.BackupPhaseCompleted). Backup, )) @@ -760,10 +761,10 @@ func TestvalidateAndCompleteWhenScheduleNameSpecified(t *testing.T) { assert.Empty(t, restore.Spec.BackupName) // no completed backups created from the schedule: fail validation - require.NoError(t, sharedInformers.Ark().V1().Backups().Informer().GetStore().Add(arktest. + require.NoError(t, sharedInformers.Velero().V1().Backups().Informer().GetStore().Add(velerotest. NewTestBackup(). WithName("backup-2"). - WithLabel("ark-schedule", "schedule-1"). + WithLabel(velerov1api.ScheduleNameLabel, "schedule-1"). WithPhase(api.BackupPhaseInProgress). Backup, )) @@ -775,18 +776,18 @@ func TestvalidateAndCompleteWhenScheduleNameSpecified(t *testing.T) { // multiple completed backups created from the schedule: use most recent now := time.Now() - require.NoError(t, sharedInformers.Ark().V1().Backups().Informer().GetStore().Add(arktest. + require.NoError(t, sharedInformers.Velero().V1().Backups().Informer().GetStore().Add(velerotest. NewTestBackup(). WithName("foo"). - WithLabel("ark-schedule", "schedule-1"). + WithLabel(velerov1api.ScheduleNameLabel, "schedule-1"). WithPhase(api.BackupPhaseCompleted). WithStartTimestamp(now). Backup, )) - require.NoError(t, sharedInformers.Ark().V1().Backups().Informer().GetStore().Add(arktest. + require.NoError(t, sharedInformers.Velero().V1().Backups().Informer().GetStore().Add(velerotest. NewTestBackup(). WithName("bar"). - WithLabel("ark-schedule", "schedule-1"). + WithLabel(velerov1api.ScheduleNameLabel, "schedule-1"). WithPhase(api.BackupPhaseCompleted). WithStartTimestamp(now.Add(time.Second)). 
Backup, @@ -887,8 +888,8 @@ func TestMostRecentCompletedBackup(t *testing.T) { assert.Equal(t, expected, mostRecentCompletedBackup(backups)) } -func NewRestore(ns, name, backup, includeNS, includeResource string, phase api.RestorePhase) *arktest.TestRestore { - restore := arktest.NewTestRestore(ns, name, phase).WithBackup(backup) +func NewRestore(ns, name, backup, includeNS, includeResource string, phase api.RestorePhase) *velerotest.TestRestore { + restore := velerotest.NewTestRestore(ns, name, phase).WithBackup(backup) if includeNS != "" { restore = restore.WithIncludedNamespace(includeNS) diff --git a/pkg/controller/schedule_controller.go b/pkg/controller/schedule_controller.go index d0aaac19f2..316705f714 100644 --- a/pkg/controller/schedule_controller.go +++ b/pkg/controller/schedule_controller.go @@ -32,12 +32,13 @@ import ( "k8s.io/apimachinery/pkg/util/clock" "k8s.io/client-go/tools/cache" - api "github.com/heptio/ark/pkg/apis/ark/v1" - arkv1client "github.com/heptio/ark/pkg/generated/clientset/versioned/typed/ark/v1" - informers "github.com/heptio/ark/pkg/generated/informers/externalversions/ark/v1" - listers "github.com/heptio/ark/pkg/generated/listers/ark/v1" - "github.com/heptio/ark/pkg/metrics" - kubeutil "github.com/heptio/ark/pkg/util/kube" + api "github.com/heptio/velero/pkg/apis/velero/v1" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + velerov1client "github.com/heptio/velero/pkg/generated/clientset/versioned/typed/velero/v1" + informers "github.com/heptio/velero/pkg/generated/informers/externalversions/velero/v1" + listers "github.com/heptio/velero/pkg/generated/listers/velero/v1" + "github.com/heptio/velero/pkg/metrics" + kubeutil "github.com/heptio/velero/pkg/util/kube" ) const ( @@ -48,8 +49,8 @@ type scheduleController struct { *genericController namespace string - schedulesClient arkv1client.SchedulesGetter - backupsClient arkv1client.BackupsGetter + schedulesClient velerov1client.SchedulesGetter + backupsClient velerov1client.BackupsGetter schedulesLister listers.ScheduleLister clock clock.Clock metrics *metrics.ServerMetrics @@ -57,8 +58,8 @@ type scheduleController struct { func NewScheduleController( namespace string, - schedulesClient arkv1client.SchedulesGetter, - backupsClient arkv1client.BackupsGetter, + schedulesClient velerov1client.SchedulesGetter, + backupsClient velerov1client.BackupsGetter, schedulesInformer informers.ScheduleInformer, logger logrus.FieldLogger, metrics *metrics.ServerMetrics, @@ -293,7 +294,6 @@ func getBackup(item *api.Schedule, timestamp time.Time) *api.Backup { }, } - // add schedule labels and 'ark-schedule' label to the backup addLabelsToBackup(item, backup) return backup @@ -304,12 +304,12 @@ func addLabelsToBackup(item *api.Schedule, backup *api.Backup) { if labels == nil { labels = make(map[string]string) } - labels["ark-schedule"] = item.Name + labels[velerov1api.ScheduleNameLabel] = item.Name backup.Labels = labels } -func patchSchedule(original, updated *api.Schedule, client arkv1client.SchedulesGetter) (*api.Schedule, error) { +func patchSchedule(original, updated *api.Schedule, client velerov1client.SchedulesGetter) (*api.Schedule, error) { origBytes, err := json.Marshal(original) if err != nil { return nil, errors.Wrap(err, "error marshalling original schedule") diff --git a/pkg/controller/schedule_controller_test.go b/pkg/controller/schedule_controller_test.go index aa7ac13f37..d490e53652 100644 --- a/pkg/controller/schedule_controller_test.go +++ b/pkg/controller/schedule_controller_test.go @@ -30,12 
+30,13 @@ import ( core "k8s.io/client-go/testing" "k8s.io/client-go/tools/cache" - api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/generated/clientset/versioned/fake" - informers "github.com/heptio/ark/pkg/generated/informers/externalversions" - "github.com/heptio/ark/pkg/metrics" - "github.com/heptio/ark/pkg/util/collections" - arktest "github.com/heptio/ark/pkg/util/test" + api "github.com/heptio/velero/pkg/apis/velero/v1" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/generated/clientset/versioned/fake" + informers "github.com/heptio/velero/pkg/generated/informers/externalversions" + "github.com/heptio/velero/pkg/metrics" + "github.com/heptio/velero/pkg/util/collections" + velerotest "github.com/heptio/velero/pkg/util/test" ) func TestProcessSchedule(t *testing.T) { @@ -62,54 +63,54 @@ func TestProcessSchedule(t *testing.T) { }, { name: "schedule with phase FailedValidation does not get processed", - schedule: arktest.NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseFailedValidation).Schedule, + schedule: velerotest.NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseFailedValidation).Schedule, expectedErr: false, }, { name: "schedule with phase New gets validated and failed if invalid", - schedule: arktest.NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseNew).Schedule, + schedule: velerotest.NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseNew).Schedule, expectedErr: false, expectedPhase: string(api.SchedulePhaseFailedValidation), expectedValidationErrors: []string{"Schedule must be a non-empty valid Cron expression"}, }, { name: "schedule with phase gets validated and failed if invalid", - schedule: arktest.NewTestSchedule("ns", "name").Schedule, + schedule: velerotest.NewTestSchedule("ns", "name").Schedule, expectedErr: false, expectedPhase: string(api.SchedulePhaseFailedValidation), expectedValidationErrors: []string{"Schedule must be a non-empty valid Cron expression"}, }, { name: "schedule with phase Enabled gets re-validated and failed if invalid", - schedule: arktest.NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseEnabled).Schedule, + schedule: velerotest.NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseEnabled).Schedule, expectedErr: false, expectedPhase: string(api.SchedulePhaseFailedValidation), expectedValidationErrors: []string{"Schedule must be a non-empty valid Cron expression"}, }, { name: "schedule with phase New gets validated and triggers a backup", - schedule: arktest.NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseNew).WithCronSchedule("@every 5m").Schedule, + schedule: velerotest.NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseNew).WithCronSchedule("@every 5m").Schedule, fakeClockTime: "2017-01-01 12:00:00", expectedErr: false, expectedPhase: string(api.SchedulePhaseEnabled), - expectedBackupCreate: arktest.NewTestBackup().WithNamespace("ns").WithName("name-20170101120000").WithLabel("ark-schedule", "name").Backup, + expectedBackupCreate: velerotest.NewTestBackup().WithNamespace("ns").WithName("name-20170101120000").WithLabel(velerov1api.ScheduleNameLabel, "name").Backup, expectedLastBackup: "2017-01-01 12:00:00", }, { name: "schedule with phase Enabled gets re-validated and triggers a backup if valid", - schedule: arktest.NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseEnabled).WithCronSchedule("@every 5m").Schedule, + schedule: velerotest.NewTestSchedule("ns", 
"name").WithPhase(api.SchedulePhaseEnabled).WithCronSchedule("@every 5m").Schedule, fakeClockTime: "2017-01-01 12:00:00", expectedErr: false, - expectedBackupCreate: arktest.NewTestBackup().WithNamespace("ns").WithName("name-20170101120000").WithLabel("ark-schedule", "name").Backup, + expectedBackupCreate: velerotest.NewTestBackup().WithNamespace("ns").WithName("name-20170101120000").WithLabel(velerov1api.ScheduleNameLabel, "name").Backup, expectedLastBackup: "2017-01-01 12:00:00", }, { name: "schedule that's already run gets LastBackup updated", - schedule: arktest.NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseEnabled). + schedule: velerotest.NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseEnabled). WithCronSchedule("@every 5m").WithLastBackupTime("2000-01-01 00:00:00").Schedule, fakeClockTime: "2017-01-01 12:00:00", expectedErr: false, - expectedBackupCreate: arktest.NewTestBackup().WithNamespace("ns").WithName("name-20170101120000").WithLabel("ark-schedule", "name").Backup, + expectedBackupCreate: velerotest.NewTestBackup().WithNamespace("ns").WithName("name-20170101120000").WithLabel(velerov1api.ScheduleNameLabel, "name").Backup, expectedLastBackup: "2017-01-01 12:00:00", }, } @@ -119,14 +120,14 @@ func TestProcessSchedule(t *testing.T) { var ( client = fake.NewSimpleClientset() sharedInformers = informers.NewSharedInformerFactory(client, 0) - logger = arktest.NewLogger() + logger = velerotest.NewLogger() ) c := NewScheduleController( "namespace", - client.ArkV1(), - client.ArkV1(), - sharedInformers.Ark().V1().Schedules(), + client.VeleroV1(), + client.VeleroV1(), + sharedInformers.Velero().V1().Schedules(), logger, metrics.NewServerMetrics(), ) @@ -142,7 +143,7 @@ func TestProcessSchedule(t *testing.T) { c.clock = clock.NewFakeClock(testTime) if test.schedule != nil { - sharedInformers.Ark().V1().Schedules().Informer().GetStore().Add(test.schedule) + sharedInformers.Velero().V1().Schedules().Informer().GetStore().Add(test.schedule) // this is necessary so the Patch() call returns the appropriate object client.PrependReactor("patch", "schedules", func(action core.Action) (bool, runtime.Object, error) { @@ -217,7 +218,7 @@ func TestProcessSchedule(t *testing.T) { }, } - arktest.ValidatePatch(t, actions[index], expected, decode) + velerotest.ValidatePatch(t, actions[index], expected, decode) index++ } @@ -244,7 +245,7 @@ func TestProcessSchedule(t *testing.T) { }, } - arktest.ValidatePatch(t, actions[index], expected, decode) + velerotest.ValidatePatch(t, actions[index], expected, decode) } }) } @@ -329,7 +330,7 @@ func TestGetNextRunTime(t *testing.T) { } func TestParseCronSchedule(t *testing.T) { - // From https://github.com/heptio/ark/issues/30, where we originally were using cron.Parse(), + // From https://github.com/heptio/velero/issues/30, where we originally were using cron.Parse(), // which treats the first field as seconds, and not minutes. We want to use cron.ParseStandard() // instead, which has the first field as minutes. 
@@ -347,7 +348,7 @@ func TestParseCronSchedule(t *testing.T) { }, } - logger := arktest.NewLogger() + logger := velerotest.NewLogger() c, errs := parseCronSchedule(s, logger) require.Empty(t, errs) @@ -403,7 +404,7 @@ func TestGetBackup(t *testing.T) { Namespace: "foo", Name: "bar-20170725091500", Labels: map[string]string{ - "ark-schedule": "bar", + velerov1api.ScheduleNameLabel: "bar", }, }, Spec: api.BackupSpec{}, @@ -426,7 +427,7 @@ func TestGetBackup(t *testing.T) { Namespace: "foo", Name: "bar-20170725141500", Labels: map[string]string{ - "ark-schedule": "bar", + velerov1api.ScheduleNameLabel: "bar", }, }, Spec: api.BackupSpec{}, @@ -456,7 +457,7 @@ func TestGetBackup(t *testing.T) { Namespace: "foo", Name: "bar-20170725091500", Labels: map[string]string{ - "ark-schedule": "bar", + velerov1api.ScheduleNameLabel: "bar", }, }, Spec: api.BackupSpec{ @@ -490,9 +491,9 @@ func TestGetBackup(t *testing.T) { Namespace: "foo", Name: "bar-20170725141500", Labels: map[string]string{ - "ark-schedule": "bar", - "bar": "baz", - "foo": "bar", + velerov1api.ScheduleNameLabel: "bar", + "bar": "baz", + "foo": "bar", }, }, Spec: api.BackupSpec{}, diff --git a/pkg/controller/server_status_request_controller.go b/pkg/controller/server_status_request_controller.go index be3150d078..9c908d7069 100644 --- a/pkg/controller/server_status_request_controller.go +++ b/pkg/controller/server_status_request_controller.go @@ -26,12 +26,12 @@ import ( "k8s.io/apimachinery/pkg/util/clock" "k8s.io/client-go/tools/cache" - arkv1api "github.com/heptio/ark/pkg/apis/ark/v1" - arkv1client "github.com/heptio/ark/pkg/generated/clientset/versioned/typed/ark/v1" - arkv1informers "github.com/heptio/ark/pkg/generated/informers/externalversions/ark/v1" - arkv1listers "github.com/heptio/ark/pkg/generated/listers/ark/v1" - "github.com/heptio/ark/pkg/serverstatusrequest" - kubeutil "github.com/heptio/ark/pkg/util/kube" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + velerov1client "github.com/heptio/velero/pkg/generated/clientset/versioned/typed/velero/v1" + velerov1informers "github.com/heptio/velero/pkg/generated/informers/externalversions/velero/v1" + velerov1listers "github.com/heptio/velero/pkg/generated/listers/velero/v1" + "github.com/heptio/velero/pkg/serverstatusrequest" + kubeutil "github.com/heptio/velero/pkg/util/kube" ) const statusRequestResyncPeriod = 5 * time.Minute @@ -39,15 +39,15 @@ const statusRequestResyncPeriod = 5 * time.Minute type statusRequestController struct { *genericController - client arkv1client.ServerStatusRequestsGetter - lister arkv1listers.ServerStatusRequestLister + client velerov1client.ServerStatusRequestsGetter + lister velerov1listers.ServerStatusRequestLister clock clock.Clock } func NewServerStatusRequestController( logger logrus.FieldLogger, - client arkv1client.ServerStatusRequestsGetter, - informer arkv1informers.ServerStatusRequestInformer, + client velerov1client.ServerStatusRequestsGetter, + informer velerov1informers.ServerStatusRequestInformer, ) *statusRequestController { c := &statusRequestController{ genericController: newGenericController("serverstatusrequest", logger), @@ -66,7 +66,7 @@ func NewServerStatusRequestController( informer.Informer().AddEventHandler( cache.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { - req := obj.(*arkv1api.ServerStatusRequest) + req := obj.(*velerov1api.ServerStatusRequest) key := kubeutil.NamespaceAndName(req) c.logger.WithFields(logrus.Fields{ diff --git a/pkg/discovery/helper.go b/pkg/discovery/helper.go index 
ca055a4761..c85e50c1ae 100644 --- a/pkg/discovery/helper.go +++ b/pkg/discovery/helper.go @@ -28,21 +28,21 @@ import ( "k8s.io/client-go/discovery" "k8s.io/client-go/restmapper" - kcmdutil "github.com/heptio/ark/third_party/kubernetes/pkg/kubectl/cmd/util" + kcmdutil "github.com/heptio/velero/third_party/kubernetes/pkg/kubectl/cmd/util" ) // Helper exposes functions for interacting with the Kubernetes discovery // API. type Helper interface { // Resources gets the current set of resources retrieved from discovery - // that are backuppable by Ark. + // that are backuppable by Velero. Resources() []*metav1.APIResourceList // ResourceFor gets a fully-resolved GroupVersionResource and an // APIResource for the provided partially-specified GroupVersionResource. ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, metav1.APIResource, error) - // Refresh pulls an updated set of Ark-backuppable resources from the + // Refresh pulls an updated set of Velero-backuppable resources from the // discovery API. Refresh() error diff --git a/pkg/generated/clientset/versioned/clientset.go b/pkg/generated/clientset/versioned/clientset.go index 2dd376820b..5395ab2925 100644 --- a/pkg/generated/clientset/versioned/clientset.go +++ b/pkg/generated/clientset/versioned/clientset.go @@ -19,7 +19,8 @@ limitations under the License. package versioned import ( - arkv1 "github.com/heptio/ark/pkg/generated/clientset/versioned/typed/ark/v1" + arkv1 "github.com/heptio/velero/pkg/generated/clientset/versioned/typed/ark/v1" + velerov1 "github.com/heptio/velero/pkg/generated/clientset/versioned/typed/velero/v1" discovery "k8s.io/client-go/discovery" rest "k8s.io/client-go/rest" flowcontrol "k8s.io/client-go/util/flowcontrol" @@ -30,13 +31,17 @@ type Interface interface { ArkV1() arkv1.ArkV1Interface // Deprecated: please explicitly pick a version if possible. Ark() arkv1.ArkV1Interface + VeleroV1() velerov1.VeleroV1Interface + // Deprecated: please explicitly pick a version if possible. + Velero() velerov1.VeleroV1Interface } // Clientset contains the clients for groups. Each group has exactly one // version included in a Clientset. type Clientset struct { *discovery.DiscoveryClient - arkV1 *arkv1.ArkV1Client + arkV1 *arkv1.ArkV1Client + veleroV1 *velerov1.VeleroV1Client } // ArkV1 retrieves the ArkV1Client @@ -50,6 +55,17 @@ func (c *Clientset) Ark() arkv1.ArkV1Interface { return c.arkV1 } +// VeleroV1 retrieves the VeleroV1Client +func (c *Clientset) VeleroV1() velerov1.VeleroV1Interface { + return c.veleroV1 +} + +// Deprecated: Velero retrieves the default version of VeleroClient. +// Please explicitly pick a version. 
+func (c *Clientset) Velero() velerov1.VeleroV1Interface { + return c.veleroV1 +} + // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() discovery.DiscoveryInterface { if c == nil { @@ -70,6 +86,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.veleroV1, err = velerov1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) if err != nil { @@ -83,6 +103,7 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { func NewForConfigOrDie(c *rest.Config) *Clientset { var cs Clientset cs.arkV1 = arkv1.NewForConfigOrDie(c) + cs.veleroV1 = velerov1.NewForConfigOrDie(c) cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) return &cs @@ -92,6 +113,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { func New(c rest.Interface) *Clientset { var cs Clientset cs.arkV1 = arkv1.New(c) + cs.veleroV1 = velerov1.New(c) cs.DiscoveryClient = discovery.NewDiscoveryClient(c) return &cs diff --git a/pkg/generated/clientset/versioned/fake/clientset_generated.go b/pkg/generated/clientset/versioned/fake/clientset_generated.go index d9024de0f0..86c5428e48 100644 --- a/pkg/generated/clientset/versioned/fake/clientset_generated.go +++ b/pkg/generated/clientset/versioned/fake/clientset_generated.go @@ -19,9 +19,11 @@ limitations under the License. package fake import ( - clientset "github.com/heptio/ark/pkg/generated/clientset/versioned" - arkv1 "github.com/heptio/ark/pkg/generated/clientset/versioned/typed/ark/v1" - fakearkv1 "github.com/heptio/ark/pkg/generated/clientset/versioned/typed/ark/v1/fake" + clientset "github.com/heptio/velero/pkg/generated/clientset/versioned" + arkv1 "github.com/heptio/velero/pkg/generated/clientset/versioned/typed/ark/v1" + fakearkv1 "github.com/heptio/velero/pkg/generated/clientset/versioned/typed/ark/v1/fake" + velerov1 "github.com/heptio/velero/pkg/generated/clientset/versioned/typed/velero/v1" + fakevelerov1 "github.com/heptio/velero/pkg/generated/clientset/versioned/typed/velero/v1/fake" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/discovery" @@ -80,3 +82,13 @@ func (c *Clientset) ArkV1() arkv1.ArkV1Interface { func (c *Clientset) Ark() arkv1.ArkV1Interface { return &fakearkv1.FakeArkV1{Fake: &c.Fake} } + +// VeleroV1 retrieves the VeleroV1Client +func (c *Clientset) VeleroV1() velerov1.VeleroV1Interface { + return &fakevelerov1.FakeVeleroV1{Fake: &c.Fake} +} + +// Velero retrieves the VeleroV1Client +func (c *Clientset) Velero() velerov1.VeleroV1Interface { + return &fakevelerov1.FakeVeleroV1{Fake: &c.Fake} +} diff --git a/pkg/generated/clientset/versioned/fake/register.go b/pkg/generated/clientset/versioned/fake/register.go index 9ea2f718e4..ad24eceaec 100644 --- a/pkg/generated/clientset/versioned/fake/register.go +++ b/pkg/generated/clientset/versioned/fake/register.go @@ -19,7 +19,8 @@ limitations under the License. package fake import ( - arkv1 "github.com/heptio/ark/pkg/apis/ark/v1" + arkv1 "github.com/heptio/velero/pkg/apis/ark/v1" + velerov1 "github.com/heptio/velero/pkg/apis/velero/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -51,4 +52,5 @@ func init() { // correctly. 
func AddToScheme(scheme *runtime.Scheme) { arkv1.AddToScheme(scheme) + velerov1.AddToScheme(scheme) } diff --git a/pkg/generated/clientset/versioned/scheme/register.go b/pkg/generated/clientset/versioned/scheme/register.go index 6663d78e36..aa98223923 100644 --- a/pkg/generated/clientset/versioned/scheme/register.go +++ b/pkg/generated/clientset/versioned/scheme/register.go @@ -19,7 +19,8 @@ limitations under the License. package scheme import ( - arkv1 "github.com/heptio/ark/pkg/apis/ark/v1" + arkv1 "github.com/heptio/velero/pkg/apis/ark/v1" + velerov1 "github.com/heptio/velero/pkg/apis/velero/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -51,4 +52,5 @@ func init() { // correctly. func AddToScheme(scheme *runtime.Scheme) { arkv1.AddToScheme(scheme) + velerov1.AddToScheme(scheme) } diff --git a/pkg/generated/clientset/versioned/typed/ark/v1/ark_client.go b/pkg/generated/clientset/versioned/typed/ark/v1/ark_client.go index b638a900ae..5e8f78bc4e 100644 --- a/pkg/generated/clientset/versioned/typed/ark/v1/ark_client.go +++ b/pkg/generated/clientset/versioned/typed/ark/v1/ark_client.go @@ -19,8 +19,8 @@ limitations under the License. package v1 import ( - v1 "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/generated/clientset/versioned/scheme" + v1 "github.com/heptio/velero/pkg/apis/ark/v1" + "github.com/heptio/velero/pkg/generated/clientset/versioned/scheme" serializer "k8s.io/apimachinery/pkg/runtime/serializer" rest "k8s.io/client-go/rest" ) diff --git a/pkg/generated/clientset/versioned/typed/ark/v1/backup.go b/pkg/generated/clientset/versioned/typed/ark/v1/backup.go index d7c4d77386..eac0352c34 100644 --- a/pkg/generated/clientset/versioned/typed/ark/v1/backup.go +++ b/pkg/generated/clientset/versioned/typed/ark/v1/backup.go @@ -19,8 +19,8 @@ limitations under the License. package v1 import ( - v1 "github.com/heptio/ark/pkg/apis/ark/v1" - scheme "github.com/heptio/ark/pkg/generated/clientset/versioned/scheme" + v1 "github.com/heptio/velero/pkg/apis/ark/v1" + scheme "github.com/heptio/velero/pkg/generated/clientset/versioned/scheme" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" diff --git a/pkg/generated/clientset/versioned/typed/ark/v1/backupstoragelocation.go b/pkg/generated/clientset/versioned/typed/ark/v1/backupstoragelocation.go index ca8608a654..f282049d5e 100644 --- a/pkg/generated/clientset/versioned/typed/ark/v1/backupstoragelocation.go +++ b/pkg/generated/clientset/versioned/typed/ark/v1/backupstoragelocation.go @@ -19,8 +19,8 @@ limitations under the License. package v1 import ( - v1 "github.com/heptio/ark/pkg/apis/ark/v1" - scheme "github.com/heptio/ark/pkg/generated/clientset/versioned/scheme" + v1 "github.com/heptio/velero/pkg/apis/ark/v1" + scheme "github.com/heptio/velero/pkg/generated/clientset/versioned/scheme" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" diff --git a/pkg/generated/clientset/versioned/typed/ark/v1/deletebackuprequest.go b/pkg/generated/clientset/versioned/typed/ark/v1/deletebackuprequest.go index 0fdd902620..b743f6bba5 100644 --- a/pkg/generated/clientset/versioned/typed/ark/v1/deletebackuprequest.go +++ b/pkg/generated/clientset/versioned/typed/ark/v1/deletebackuprequest.go @@ -19,8 +19,8 @@ limitations under the License. 
package v1 import ( - v1 "github.com/heptio/ark/pkg/apis/ark/v1" - scheme "github.com/heptio/ark/pkg/generated/clientset/versioned/scheme" + v1 "github.com/heptio/velero/pkg/apis/ark/v1" + scheme "github.com/heptio/velero/pkg/generated/clientset/versioned/scheme" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" diff --git a/pkg/generated/clientset/versioned/typed/ark/v1/downloadrequest.go b/pkg/generated/clientset/versioned/typed/ark/v1/downloadrequest.go index f608933b32..e549fac2f8 100644 --- a/pkg/generated/clientset/versioned/typed/ark/v1/downloadrequest.go +++ b/pkg/generated/clientset/versioned/typed/ark/v1/downloadrequest.go @@ -19,8 +19,8 @@ limitations under the License. package v1 import ( - v1 "github.com/heptio/ark/pkg/apis/ark/v1" - scheme "github.com/heptio/ark/pkg/generated/clientset/versioned/scheme" + v1 "github.com/heptio/velero/pkg/apis/ark/v1" + scheme "github.com/heptio/velero/pkg/generated/clientset/versioned/scheme" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" diff --git a/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_ark_client.go b/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_ark_client.go index ebf5461dbf..73ec88a8d3 100644 --- a/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_ark_client.go +++ b/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_ark_client.go @@ -19,7 +19,7 @@ limitations under the License. package fake import ( - v1 "github.com/heptio/ark/pkg/generated/clientset/versioned/typed/ark/v1" + v1 "github.com/heptio/velero/pkg/generated/clientset/versioned/typed/ark/v1" rest "k8s.io/client-go/rest" testing "k8s.io/client-go/testing" ) diff --git a/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_backup.go b/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_backup.go index 2ddd9b6a9b..cba17ac1d5 100644 --- a/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_backup.go +++ b/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_backup.go @@ -19,7 +19,7 @@ limitations under the License. package fake import ( - ark_v1 "github.com/heptio/ark/pkg/apis/ark/v1" + ark_v1 "github.com/heptio/velero/pkg/apis/ark/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_backupstoragelocation.go b/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_backupstoragelocation.go index f63fe07cb5..f2b149ff13 100644 --- a/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_backupstoragelocation.go +++ b/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_backupstoragelocation.go @@ -19,7 +19,7 @@ limitations under the License. package fake import ( - ark_v1 "github.com/heptio/ark/pkg/apis/ark/v1" + ark_v1 "github.com/heptio/velero/pkg/apis/ark/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_deletebackuprequest.go b/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_deletebackuprequest.go index 31488643a1..e6cdf958fd 100644 --- a/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_deletebackuprequest.go +++ b/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_deletebackuprequest.go @@ -19,7 +19,7 @@ limitations under the License. 
package fake import ( - ark_v1 "github.com/heptio/ark/pkg/apis/ark/v1" + ark_v1 "github.com/heptio/velero/pkg/apis/ark/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_downloadrequest.go b/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_downloadrequest.go index 593131d578..fa1f9d221d 100644 --- a/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_downloadrequest.go +++ b/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_downloadrequest.go @@ -19,7 +19,7 @@ limitations under the License. package fake import ( - ark_v1 "github.com/heptio/ark/pkg/apis/ark/v1" + ark_v1 "github.com/heptio/velero/pkg/apis/ark/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_podvolumebackup.go b/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_podvolumebackup.go index 182342ac5e..34dfb2251a 100644 --- a/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_podvolumebackup.go +++ b/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_podvolumebackup.go @@ -19,7 +19,7 @@ limitations under the License. package fake import ( - ark_v1 "github.com/heptio/ark/pkg/apis/ark/v1" + ark_v1 "github.com/heptio/velero/pkg/apis/ark/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_podvolumerestore.go b/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_podvolumerestore.go index 79a32e4fb6..2f2e137174 100644 --- a/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_podvolumerestore.go +++ b/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_podvolumerestore.go @@ -19,7 +19,7 @@ limitations under the License. package fake import ( - ark_v1 "github.com/heptio/ark/pkg/apis/ark/v1" + ark_v1 "github.com/heptio/velero/pkg/apis/ark/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_resticrepository.go b/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_resticrepository.go index a2f041e07e..bf34ff6342 100644 --- a/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_resticrepository.go +++ b/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_resticrepository.go @@ -19,7 +19,7 @@ limitations under the License. package fake import ( - ark_v1 "github.com/heptio/ark/pkg/apis/ark/v1" + ark_v1 "github.com/heptio/velero/pkg/apis/ark/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_restore.go b/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_restore.go index af10680229..a90e30e5d4 100644 --- a/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_restore.go +++ b/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_restore.go @@ -19,7 +19,7 @@ limitations under the License. 
package fake import ( - ark_v1 "github.com/heptio/ark/pkg/apis/ark/v1" + ark_v1 "github.com/heptio/velero/pkg/apis/ark/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_schedule.go b/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_schedule.go index 88f59320d4..3f759e6668 100644 --- a/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_schedule.go +++ b/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_schedule.go @@ -19,7 +19,7 @@ limitations under the License. package fake import ( - ark_v1 "github.com/heptio/ark/pkg/apis/ark/v1" + ark_v1 "github.com/heptio/velero/pkg/apis/ark/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_serverstatusrequest.go b/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_serverstatusrequest.go index a4c3caacb9..f5046d9720 100644 --- a/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_serverstatusrequest.go +++ b/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_serverstatusrequest.go @@ -19,7 +19,7 @@ limitations under the License. package fake import ( - ark_v1 "github.com/heptio/ark/pkg/apis/ark/v1" + ark_v1 "github.com/heptio/velero/pkg/apis/ark/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_volumesnapshotlocation.go b/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_volumesnapshotlocation.go index c6b614361e..4f6867d002 100644 --- a/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_volumesnapshotlocation.go +++ b/pkg/generated/clientset/versioned/typed/ark/v1/fake/fake_volumesnapshotlocation.go @@ -19,7 +19,7 @@ limitations under the License. package fake import ( - ark_v1 "github.com/heptio/ark/pkg/apis/ark/v1" + ark_v1 "github.com/heptio/velero/pkg/apis/ark/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" labels "k8s.io/apimachinery/pkg/labels" schema "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/pkg/generated/clientset/versioned/typed/ark/v1/podvolumebackup.go b/pkg/generated/clientset/versioned/typed/ark/v1/podvolumebackup.go index 240b6da779..90aa0fd4e0 100644 --- a/pkg/generated/clientset/versioned/typed/ark/v1/podvolumebackup.go +++ b/pkg/generated/clientset/versioned/typed/ark/v1/podvolumebackup.go @@ -19,8 +19,8 @@ limitations under the License. package v1 import ( - v1 "github.com/heptio/ark/pkg/apis/ark/v1" - scheme "github.com/heptio/ark/pkg/generated/clientset/versioned/scheme" + v1 "github.com/heptio/velero/pkg/apis/ark/v1" + scheme "github.com/heptio/velero/pkg/generated/clientset/versioned/scheme" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" diff --git a/pkg/generated/clientset/versioned/typed/ark/v1/podvolumerestore.go b/pkg/generated/clientset/versioned/typed/ark/v1/podvolumerestore.go index b1d64e67f6..92e1bd7890 100644 --- a/pkg/generated/clientset/versioned/typed/ark/v1/podvolumerestore.go +++ b/pkg/generated/clientset/versioned/typed/ark/v1/podvolumerestore.go @@ -19,8 +19,8 @@ limitations under the License. 
package v1 import ( - v1 "github.com/heptio/ark/pkg/apis/ark/v1" - scheme "github.com/heptio/ark/pkg/generated/clientset/versioned/scheme" + v1 "github.com/heptio/velero/pkg/apis/ark/v1" + scheme "github.com/heptio/velero/pkg/generated/clientset/versioned/scheme" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" diff --git a/pkg/generated/clientset/versioned/typed/ark/v1/resticrepository.go b/pkg/generated/clientset/versioned/typed/ark/v1/resticrepository.go index 14e334e678..800b7978ab 100644 --- a/pkg/generated/clientset/versioned/typed/ark/v1/resticrepository.go +++ b/pkg/generated/clientset/versioned/typed/ark/v1/resticrepository.go @@ -19,8 +19,8 @@ limitations under the License. package v1 import ( - v1 "github.com/heptio/ark/pkg/apis/ark/v1" - scheme "github.com/heptio/ark/pkg/generated/clientset/versioned/scheme" + v1 "github.com/heptio/velero/pkg/apis/ark/v1" + scheme "github.com/heptio/velero/pkg/generated/clientset/versioned/scheme" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" diff --git a/pkg/generated/clientset/versioned/typed/ark/v1/restore.go b/pkg/generated/clientset/versioned/typed/ark/v1/restore.go index dbc7ab0e70..8b158ef264 100644 --- a/pkg/generated/clientset/versioned/typed/ark/v1/restore.go +++ b/pkg/generated/clientset/versioned/typed/ark/v1/restore.go @@ -19,8 +19,8 @@ limitations under the License. package v1 import ( - v1 "github.com/heptio/ark/pkg/apis/ark/v1" - scheme "github.com/heptio/ark/pkg/generated/clientset/versioned/scheme" + v1 "github.com/heptio/velero/pkg/apis/ark/v1" + scheme "github.com/heptio/velero/pkg/generated/clientset/versioned/scheme" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" diff --git a/pkg/generated/clientset/versioned/typed/ark/v1/schedule.go b/pkg/generated/clientset/versioned/typed/ark/v1/schedule.go index 58e0c50f2e..35f462c3f1 100644 --- a/pkg/generated/clientset/versioned/typed/ark/v1/schedule.go +++ b/pkg/generated/clientset/versioned/typed/ark/v1/schedule.go @@ -19,8 +19,8 @@ limitations under the License. package v1 import ( - v1 "github.com/heptio/ark/pkg/apis/ark/v1" - scheme "github.com/heptio/ark/pkg/generated/clientset/versioned/scheme" + v1 "github.com/heptio/velero/pkg/apis/ark/v1" + scheme "github.com/heptio/velero/pkg/generated/clientset/versioned/scheme" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" diff --git a/pkg/generated/clientset/versioned/typed/ark/v1/serverstatusrequest.go b/pkg/generated/clientset/versioned/typed/ark/v1/serverstatusrequest.go index c232952c09..1a6d9ae8fc 100644 --- a/pkg/generated/clientset/versioned/typed/ark/v1/serverstatusrequest.go +++ b/pkg/generated/clientset/versioned/typed/ark/v1/serverstatusrequest.go @@ -19,8 +19,8 @@ limitations under the License. 
package v1 import ( - v1 "github.com/heptio/ark/pkg/apis/ark/v1" - scheme "github.com/heptio/ark/pkg/generated/clientset/versioned/scheme" + v1 "github.com/heptio/velero/pkg/apis/ark/v1" + scheme "github.com/heptio/velero/pkg/generated/clientset/versioned/scheme" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" diff --git a/pkg/generated/clientset/versioned/typed/ark/v1/volumesnapshotlocation.go b/pkg/generated/clientset/versioned/typed/ark/v1/volumesnapshotlocation.go index f539f02aa6..5c474f5c9f 100644 --- a/pkg/generated/clientset/versioned/typed/ark/v1/volumesnapshotlocation.go +++ b/pkg/generated/clientset/versioned/typed/ark/v1/volumesnapshotlocation.go @@ -19,8 +19,8 @@ limitations under the License. package v1 import ( - v1 "github.com/heptio/ark/pkg/apis/ark/v1" - scheme "github.com/heptio/ark/pkg/generated/clientset/versioned/scheme" + v1 "github.com/heptio/velero/pkg/apis/ark/v1" + scheme "github.com/heptio/velero/pkg/generated/clientset/versioned/scheme" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" diff --git a/pkg/generated/clientset/versioned/typed/velero/v1/backup.go b/pkg/generated/clientset/versioned/typed/velero/v1/backup.go new file mode 100644 index 0000000000..9ab6764463 --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/velero/v1/backup.go @@ -0,0 +1,174 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + scheme "github.com/heptio/velero/pkg/generated/clientset/versioned/scheme" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// BackupsGetter has a method to return a BackupInterface. +// A group's client should implement this interface. +type BackupsGetter interface { + Backups(namespace string) BackupInterface +} + +// BackupInterface has methods to work with Backup resources. 
+type BackupInterface interface { + Create(*v1.Backup) (*v1.Backup, error) + Update(*v1.Backup) (*v1.Backup, error) + UpdateStatus(*v1.Backup) (*v1.Backup, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.Backup, error) + List(opts meta_v1.ListOptions) (*v1.BackupList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Backup, err error) + BackupExpansion +} + +// backups implements BackupInterface +type backups struct { + client rest.Interface + ns string +} + +// newBackups returns a Backups +func newBackups(c *VeleroV1Client, namespace string) *backups { + return &backups{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the backup, and returns the corresponding backup object, and an error if there is any. +func (c *backups) Get(name string, options meta_v1.GetOptions) (result *v1.Backup, err error) { + result = &v1.Backup{} + err = c.client.Get(). + Namespace(c.ns). + Resource("backups"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Backups that match those selectors. +func (c *backups) List(opts meta_v1.ListOptions) (result *v1.BackupList, err error) { + result = &v1.BackupList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("backups"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested backups. +func (c *backups) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("backups"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a backup and creates it. Returns the server's representation of the backup, and an error, if there is any. +func (c *backups) Create(backup *v1.Backup) (result *v1.Backup, err error) { + result = &v1.Backup{} + err = c.client.Post(). + Namespace(c.ns). + Resource("backups"). + Body(backup). + Do(). + Into(result) + return +} + +// Update takes the representation of a backup and updates it. Returns the server's representation of the backup, and an error, if there is any. +func (c *backups) Update(backup *v1.Backup) (result *v1.Backup, err error) { + result = &v1.Backup{} + err = c.client.Put(). + Namespace(c.ns). + Resource("backups"). + Name(backup.Name). + Body(backup). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *backups) UpdateStatus(backup *v1.Backup) (result *v1.Backup, err error) { + result = &v1.Backup{} + err = c.client.Put(). + Namespace(c.ns). + Resource("backups"). + Name(backup.Name). + SubResource("status"). + Body(backup). + Do(). + Into(result) + return +} + +// Delete takes name of the backup and deletes it. Returns an error if one occurs. +func (c *backups) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("backups"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *backups) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("backups"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched backup. +func (c *backups) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Backup, err error) { + result = &v1.Backup{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("backups"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/pkg/generated/clientset/versioned/typed/velero/v1/backupstoragelocation.go b/pkg/generated/clientset/versioned/typed/velero/v1/backupstoragelocation.go new file mode 100644 index 0000000000..b5ed16e2a3 --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/velero/v1/backupstoragelocation.go @@ -0,0 +1,174 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + scheme "github.com/heptio/velero/pkg/generated/clientset/versioned/scheme" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// BackupStorageLocationsGetter has a method to return a BackupStorageLocationInterface. +// A group's client should implement this interface. +type BackupStorageLocationsGetter interface { + BackupStorageLocations(namespace string) BackupStorageLocationInterface +} + +// BackupStorageLocationInterface has methods to work with BackupStorageLocation resources. 
+type BackupStorageLocationInterface interface { + Create(*v1.BackupStorageLocation) (*v1.BackupStorageLocation, error) + Update(*v1.BackupStorageLocation) (*v1.BackupStorageLocation, error) + UpdateStatus(*v1.BackupStorageLocation) (*v1.BackupStorageLocation, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.BackupStorageLocation, error) + List(opts meta_v1.ListOptions) (*v1.BackupStorageLocationList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.BackupStorageLocation, err error) + BackupStorageLocationExpansion +} + +// backupStorageLocations implements BackupStorageLocationInterface +type backupStorageLocations struct { + client rest.Interface + ns string +} + +// newBackupStorageLocations returns a BackupStorageLocations +func newBackupStorageLocations(c *VeleroV1Client, namespace string) *backupStorageLocations { + return &backupStorageLocations{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the backupStorageLocation, and returns the corresponding backupStorageLocation object, and an error if there is any. +func (c *backupStorageLocations) Get(name string, options meta_v1.GetOptions) (result *v1.BackupStorageLocation, err error) { + result = &v1.BackupStorageLocation{} + err = c.client.Get(). + Namespace(c.ns). + Resource("backupstoragelocations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of BackupStorageLocations that match those selectors. +func (c *backupStorageLocations) List(opts meta_v1.ListOptions) (result *v1.BackupStorageLocationList, err error) { + result = &v1.BackupStorageLocationList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("backupstoragelocations"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested backupStorageLocations. +func (c *backupStorageLocations) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("backupstoragelocations"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a backupStorageLocation and creates it. Returns the server's representation of the backupStorageLocation, and an error, if there is any. +func (c *backupStorageLocations) Create(backupStorageLocation *v1.BackupStorageLocation) (result *v1.BackupStorageLocation, err error) { + result = &v1.BackupStorageLocation{} + err = c.client.Post(). + Namespace(c.ns). + Resource("backupstoragelocations"). + Body(backupStorageLocation). + Do(). + Into(result) + return +} + +// Update takes the representation of a backupStorageLocation and updates it. Returns the server's representation of the backupStorageLocation, and an error, if there is any. +func (c *backupStorageLocations) Update(backupStorageLocation *v1.BackupStorageLocation) (result *v1.BackupStorageLocation, err error) { + result = &v1.BackupStorageLocation{} + err = c.client.Put(). + Namespace(c.ns). + Resource("backupstoragelocations"). + Name(backupStorageLocation.Name). + Body(backupStorageLocation). + Do(). 
+ Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *backupStorageLocations) UpdateStatus(backupStorageLocation *v1.BackupStorageLocation) (result *v1.BackupStorageLocation, err error) { + result = &v1.BackupStorageLocation{} + err = c.client.Put(). + Namespace(c.ns). + Resource("backupstoragelocations"). + Name(backupStorageLocation.Name). + SubResource("status"). + Body(backupStorageLocation). + Do(). + Into(result) + return +} + +// Delete takes name of the backupStorageLocation and deletes it. Returns an error if one occurs. +func (c *backupStorageLocations) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("backupstoragelocations"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *backupStorageLocations) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("backupstoragelocations"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched backupStorageLocation. +func (c *backupStorageLocations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.BackupStorageLocation, err error) { + result = &v1.BackupStorageLocation{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("backupstoragelocations"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/pkg/generated/clientset/versioned/typed/velero/v1/deletebackuprequest.go b/pkg/generated/clientset/versioned/typed/velero/v1/deletebackuprequest.go new file mode 100644 index 0000000000..9d6b6597ba --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/velero/v1/deletebackuprequest.go @@ -0,0 +1,174 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + scheme "github.com/heptio/velero/pkg/generated/clientset/versioned/scheme" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// DeleteBackupRequestsGetter has a method to return a DeleteBackupRequestInterface. +// A group's client should implement this interface. +type DeleteBackupRequestsGetter interface { + DeleteBackupRequests(namespace string) DeleteBackupRequestInterface +} + +// DeleteBackupRequestInterface has methods to work with DeleteBackupRequest resources. 
+type DeleteBackupRequestInterface interface { + Create(*v1.DeleteBackupRequest) (*v1.DeleteBackupRequest, error) + Update(*v1.DeleteBackupRequest) (*v1.DeleteBackupRequest, error) + UpdateStatus(*v1.DeleteBackupRequest) (*v1.DeleteBackupRequest, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.DeleteBackupRequest, error) + List(opts meta_v1.ListOptions) (*v1.DeleteBackupRequestList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.DeleteBackupRequest, err error) + DeleteBackupRequestExpansion +} + +// deleteBackupRequests implements DeleteBackupRequestInterface +type deleteBackupRequests struct { + client rest.Interface + ns string +} + +// newDeleteBackupRequests returns a DeleteBackupRequests +func newDeleteBackupRequests(c *VeleroV1Client, namespace string) *deleteBackupRequests { + return &deleteBackupRequests{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the deleteBackupRequest, and returns the corresponding deleteBackupRequest object, and an error if there is any. +func (c *deleteBackupRequests) Get(name string, options meta_v1.GetOptions) (result *v1.DeleteBackupRequest, err error) { + result = &v1.DeleteBackupRequest{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deletebackuprequests"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of DeleteBackupRequests that match those selectors. +func (c *deleteBackupRequests) List(opts meta_v1.ListOptions) (result *v1.DeleteBackupRequestList, err error) { + result = &v1.DeleteBackupRequestList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deletebackuprequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested deleteBackupRequests. +func (c *deleteBackupRequests) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("deletebackuprequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a deleteBackupRequest and creates it. Returns the server's representation of the deleteBackupRequest, and an error, if there is any. +func (c *deleteBackupRequests) Create(deleteBackupRequest *v1.DeleteBackupRequest) (result *v1.DeleteBackupRequest, err error) { + result = &v1.DeleteBackupRequest{} + err = c.client.Post(). + Namespace(c.ns). + Resource("deletebackuprequests"). + Body(deleteBackupRequest). + Do(). + Into(result) + return +} + +// Update takes the representation of a deleteBackupRequest and updates it. Returns the server's representation of the deleteBackupRequest, and an error, if there is any. +func (c *deleteBackupRequests) Update(deleteBackupRequest *v1.DeleteBackupRequest) (result *v1.DeleteBackupRequest, err error) { + result = &v1.DeleteBackupRequest{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deletebackuprequests"). + Name(deleteBackupRequest.Name). + Body(deleteBackupRequest). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. 
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *deleteBackupRequests) UpdateStatus(deleteBackupRequest *v1.DeleteBackupRequest) (result *v1.DeleteBackupRequest, err error) { + result = &v1.DeleteBackupRequest{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deletebackuprequests"). + Name(deleteBackupRequest.Name). + SubResource("status"). + Body(deleteBackupRequest). + Do(). + Into(result) + return +} + +// Delete takes name of the deleteBackupRequest and deletes it. Returns an error if one occurs. +func (c *deleteBackupRequests) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deletebackuprequests"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *deleteBackupRequests) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deletebackuprequests"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched deleteBackupRequest. +func (c *deleteBackupRequests) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.DeleteBackupRequest, err error) { + result = &v1.DeleteBackupRequest{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("deletebackuprequests"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/pkg/generated/clientset/versioned/typed/velero/v1/doc.go b/pkg/generated/clientset/versioned/typed/velero/v1/doc.go new file mode 100644 index 0000000000..5c4c68f87a --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/velero/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/pkg/generated/clientset/versioned/typed/velero/v1/downloadrequest.go b/pkg/generated/clientset/versioned/typed/velero/v1/downloadrequest.go new file mode 100644 index 0000000000..5170f9e36c --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/velero/v1/downloadrequest.go @@ -0,0 +1,174 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + scheme "github.com/heptio/velero/pkg/generated/clientset/versioned/scheme" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// DownloadRequestsGetter has a method to return a DownloadRequestInterface. +// A group's client should implement this interface. +type DownloadRequestsGetter interface { + DownloadRequests(namespace string) DownloadRequestInterface +} + +// DownloadRequestInterface has methods to work with DownloadRequest resources. +type DownloadRequestInterface interface { + Create(*v1.DownloadRequest) (*v1.DownloadRequest, error) + Update(*v1.DownloadRequest) (*v1.DownloadRequest, error) + UpdateStatus(*v1.DownloadRequest) (*v1.DownloadRequest, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.DownloadRequest, error) + List(opts meta_v1.ListOptions) (*v1.DownloadRequestList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.DownloadRequest, err error) + DownloadRequestExpansion +} + +// downloadRequests implements DownloadRequestInterface +type downloadRequests struct { + client rest.Interface + ns string +} + +// newDownloadRequests returns a DownloadRequests +func newDownloadRequests(c *VeleroV1Client, namespace string) *downloadRequests { + return &downloadRequests{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the downloadRequest, and returns the corresponding downloadRequest object, and an error if there is any. +func (c *downloadRequests) Get(name string, options meta_v1.GetOptions) (result *v1.DownloadRequest, err error) { + result = &v1.DownloadRequest{} + err = c.client.Get(). + Namespace(c.ns). + Resource("downloadrequests"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of DownloadRequests that match those selectors. +func (c *downloadRequests) List(opts meta_v1.ListOptions) (result *v1.DownloadRequestList, err error) { + result = &v1.DownloadRequestList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("downloadrequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested downloadRequests. +func (c *downloadRequests) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("downloadrequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a downloadRequest and creates it. Returns the server's representation of the downloadRequest, and an error, if there is any. +func (c *downloadRequests) Create(downloadRequest *v1.DownloadRequest) (result *v1.DownloadRequest, err error) { + result = &v1.DownloadRequest{} + err = c.client.Post(). + Namespace(c.ns). + Resource("downloadrequests"). + Body(downloadRequest). + Do(). + Into(result) + return +} + +// Update takes the representation of a downloadRequest and updates it. Returns the server's representation of the downloadRequest, and an error, if there is any. 
+func (c *downloadRequests) Update(downloadRequest *v1.DownloadRequest) (result *v1.DownloadRequest, err error) { + result = &v1.DownloadRequest{} + err = c.client.Put(). + Namespace(c.ns). + Resource("downloadrequests"). + Name(downloadRequest.Name). + Body(downloadRequest). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *downloadRequests) UpdateStatus(downloadRequest *v1.DownloadRequest) (result *v1.DownloadRequest, err error) { + result = &v1.DownloadRequest{} + err = c.client.Put(). + Namespace(c.ns). + Resource("downloadrequests"). + Name(downloadRequest.Name). + SubResource("status"). + Body(downloadRequest). + Do(). + Into(result) + return +} + +// Delete takes name of the downloadRequest and deletes it. Returns an error if one occurs. +func (c *downloadRequests) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("downloadrequests"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *downloadRequests) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("downloadrequests"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched downloadRequest. +func (c *downloadRequests) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.DownloadRequest, err error) { + result = &v1.DownloadRequest{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("downloadrequests"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/pkg/generated/clientset/versioned/typed/velero/v1/fake/doc.go b/pkg/generated/clientset/versioned/typed/velero/v1/fake/doc.go new file mode 100644 index 0000000000..109f99501e --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/velero/v1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_backup.go b/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_backup.go new file mode 100644 index 0000000000..bda0094dfb --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_backup.go @@ -0,0 +1,140 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + velero_v1 "github.com/heptio/velero/pkg/apis/velero/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeBackups implements BackupInterface +type FakeBackups struct { + Fake *FakeVeleroV1 + ns string +} + +var backupsResource = schema.GroupVersionResource{Group: "velero.io", Version: "v1", Resource: "backups"} + +var backupsKind = schema.GroupVersionKind{Group: "velero.io", Version: "v1", Kind: "Backup"} + +// Get takes name of the backup, and returns the corresponding backup object, and an error if there is any. +func (c *FakeBackups) Get(name string, options v1.GetOptions) (result *velero_v1.Backup, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(backupsResource, c.ns, name), &velero_v1.Backup{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.Backup), err +} + +// List takes label and field selectors, and returns the list of Backups that match those selectors. +func (c *FakeBackups) List(opts v1.ListOptions) (result *velero_v1.BackupList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(backupsResource, backupsKind, c.ns, opts), &velero_v1.BackupList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &velero_v1.BackupList{ListMeta: obj.(*velero_v1.BackupList).ListMeta} + for _, item := range obj.(*velero_v1.BackupList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested backups. +func (c *FakeBackups) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(backupsResource, c.ns, opts)) + +} + +// Create takes the representation of a backup and creates it. Returns the server's representation of the backup, and an error, if there is any. +func (c *FakeBackups) Create(backup *velero_v1.Backup) (result *velero_v1.Backup, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(backupsResource, c.ns, backup), &velero_v1.Backup{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.Backup), err +} + +// Update takes the representation of a backup and updates it. Returns the server's representation of the backup, and an error, if there is any. +func (c *FakeBackups) Update(backup *velero_v1.Backup) (result *velero_v1.Backup, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(backupsResource, c.ns, backup), &velero_v1.Backup{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.Backup), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+func (c *FakeBackups) UpdateStatus(backup *velero_v1.Backup) (*velero_v1.Backup, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(backupsResource, "status", c.ns, backup), &velero_v1.Backup{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.Backup), err +} + +// Delete takes name of the backup and deletes it. Returns an error if one occurs. +func (c *FakeBackups) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(backupsResource, c.ns, name), &velero_v1.Backup{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeBackups) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(backupsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &velero_v1.BackupList{}) + return err +} + +// Patch applies the patch and returns the patched backup. +func (c *FakeBackups) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *velero_v1.Backup, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(backupsResource, c.ns, name, data, subresources...), &velero_v1.Backup{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.Backup), err +} diff --git a/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_backupstoragelocation.go b/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_backupstoragelocation.go new file mode 100644 index 0000000000..d80639b479 --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_backupstoragelocation.go @@ -0,0 +1,140 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + velero_v1 "github.com/heptio/velero/pkg/apis/velero/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeBackupStorageLocations implements BackupStorageLocationInterface +type FakeBackupStorageLocations struct { + Fake *FakeVeleroV1 + ns string +} + +var backupstoragelocationsResource = schema.GroupVersionResource{Group: "velero.io", Version: "v1", Resource: "backupstoragelocations"} + +var backupstoragelocationsKind = schema.GroupVersionKind{Group: "velero.io", Version: "v1", Kind: "BackupStorageLocation"} + +// Get takes name of the backupStorageLocation, and returns the corresponding backupStorageLocation object, and an error if there is any. +func (c *FakeBackupStorageLocations) Get(name string, options v1.GetOptions) (result *velero_v1.BackupStorageLocation, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewGetAction(backupstoragelocationsResource, c.ns, name), &velero_v1.BackupStorageLocation{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.BackupStorageLocation), err +} + +// List takes label and field selectors, and returns the list of BackupStorageLocations that match those selectors. +func (c *FakeBackupStorageLocations) List(opts v1.ListOptions) (result *velero_v1.BackupStorageLocationList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(backupstoragelocationsResource, backupstoragelocationsKind, c.ns, opts), &velero_v1.BackupStorageLocationList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &velero_v1.BackupStorageLocationList{ListMeta: obj.(*velero_v1.BackupStorageLocationList).ListMeta} + for _, item := range obj.(*velero_v1.BackupStorageLocationList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested backupStorageLocations. +func (c *FakeBackupStorageLocations) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(backupstoragelocationsResource, c.ns, opts)) + +} + +// Create takes the representation of a backupStorageLocation and creates it. Returns the server's representation of the backupStorageLocation, and an error, if there is any. +func (c *FakeBackupStorageLocations) Create(backupStorageLocation *velero_v1.BackupStorageLocation) (result *velero_v1.BackupStorageLocation, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(backupstoragelocationsResource, c.ns, backupStorageLocation), &velero_v1.BackupStorageLocation{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.BackupStorageLocation), err +} + +// Update takes the representation of a backupStorageLocation and updates it. Returns the server's representation of the backupStorageLocation, and an error, if there is any. +func (c *FakeBackupStorageLocations) Update(backupStorageLocation *velero_v1.BackupStorageLocation) (result *velero_v1.BackupStorageLocation, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(backupstoragelocationsResource, c.ns, backupStorageLocation), &velero_v1.BackupStorageLocation{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.BackupStorageLocation), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeBackupStorageLocations) UpdateStatus(backupStorageLocation *velero_v1.BackupStorageLocation) (*velero_v1.BackupStorageLocation, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(backupstoragelocationsResource, "status", c.ns, backupStorageLocation), &velero_v1.BackupStorageLocation{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.BackupStorageLocation), err +} + +// Delete takes name of the backupStorageLocation and deletes it. Returns an error if one occurs. +func (c *FakeBackupStorageLocations) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(backupstoragelocationsResource, c.ns, name), &velero_v1.BackupStorageLocation{}) + + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakeBackupStorageLocations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(backupstoragelocationsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &velero_v1.BackupStorageLocationList{}) + return err +} + +// Patch applies the patch and returns the patched backupStorageLocation. +func (c *FakeBackupStorageLocations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *velero_v1.BackupStorageLocation, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(backupstoragelocationsResource, c.ns, name, data, subresources...), &velero_v1.BackupStorageLocation{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.BackupStorageLocation), err +} diff --git a/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_deletebackuprequest.go b/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_deletebackuprequest.go new file mode 100644 index 0000000000..7cc4096382 --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_deletebackuprequest.go @@ -0,0 +1,140 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + velero_v1 "github.com/heptio/velero/pkg/apis/velero/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeDeleteBackupRequests implements DeleteBackupRequestInterface +type FakeDeleteBackupRequests struct { + Fake *FakeVeleroV1 + ns string +} + +var deletebackuprequestsResource = schema.GroupVersionResource{Group: "velero.io", Version: "v1", Resource: "deletebackuprequests"} + +var deletebackuprequestsKind = schema.GroupVersionKind{Group: "velero.io", Version: "v1", Kind: "DeleteBackupRequest"} + +// Get takes name of the deleteBackupRequest, and returns the corresponding deleteBackupRequest object, and an error if there is any. +func (c *FakeDeleteBackupRequests) Get(name string, options v1.GetOptions) (result *velero_v1.DeleteBackupRequest, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(deletebackuprequestsResource, c.ns, name), &velero_v1.DeleteBackupRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.DeleteBackupRequest), err +} + +// List takes label and field selectors, and returns the list of DeleteBackupRequests that match those selectors. +func (c *FakeDeleteBackupRequests) List(opts v1.ListOptions) (result *velero_v1.DeleteBackupRequestList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(deletebackuprequestsResource, deletebackuprequestsKind, c.ns, opts), &velero_v1.DeleteBackupRequestList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &velero_v1.DeleteBackupRequestList{ListMeta: obj.(*velero_v1.DeleteBackupRequestList).ListMeta} + for _, item := range obj.(*velero_v1.DeleteBackupRequestList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested deleteBackupRequests. +func (c *FakeDeleteBackupRequests) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(deletebackuprequestsResource, c.ns, opts)) + +} + +// Create takes the representation of a deleteBackupRequest and creates it. Returns the server's representation of the deleteBackupRequest, and an error, if there is any. +func (c *FakeDeleteBackupRequests) Create(deleteBackupRequest *velero_v1.DeleteBackupRequest) (result *velero_v1.DeleteBackupRequest, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(deletebackuprequestsResource, c.ns, deleteBackupRequest), &velero_v1.DeleteBackupRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.DeleteBackupRequest), err +} + +// Update takes the representation of a deleteBackupRequest and updates it. Returns the server's representation of the deleteBackupRequest, and an error, if there is any. +func (c *FakeDeleteBackupRequests) Update(deleteBackupRequest *velero_v1.DeleteBackupRequest) (result *velero_v1.DeleteBackupRequest, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(deletebackuprequestsResource, c.ns, deleteBackupRequest), &velero_v1.DeleteBackupRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.DeleteBackupRequest), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeDeleteBackupRequests) UpdateStatus(deleteBackupRequest *velero_v1.DeleteBackupRequest) (*velero_v1.DeleteBackupRequest, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(deletebackuprequestsResource, "status", c.ns, deleteBackupRequest), &velero_v1.DeleteBackupRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.DeleteBackupRequest), err +} + +// Delete takes name of the deleteBackupRequest and deletes it. Returns an error if one occurs. +func (c *FakeDeleteBackupRequests) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(deletebackuprequestsResource, c.ns, name), &velero_v1.DeleteBackupRequest{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeDeleteBackupRequests) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(deletebackuprequestsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &velero_v1.DeleteBackupRequestList{}) + return err +} + +// Patch applies the patch and returns the patched deleteBackupRequest. +func (c *FakeDeleteBackupRequests) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *velero_v1.DeleteBackupRequest, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(deletebackuprequestsResource, c.ns, name, data, subresources...), &velero_v1.DeleteBackupRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.DeleteBackupRequest), err +} diff --git a/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_downloadrequest.go b/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_downloadrequest.go new file mode 100644 index 0000000000..52e3ef577c --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_downloadrequest.go @@ -0,0 +1,140 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + velero_v1 "github.com/heptio/velero/pkg/apis/velero/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeDownloadRequests implements DownloadRequestInterface +type FakeDownloadRequests struct { + Fake *FakeVeleroV1 + ns string +} + +var downloadrequestsResource = schema.GroupVersionResource{Group: "velero.io", Version: "v1", Resource: "downloadrequests"} + +var downloadrequestsKind = schema.GroupVersionKind{Group: "velero.io", Version: "v1", Kind: "DownloadRequest"} + +// Get takes name of the downloadRequest, and returns the corresponding downloadRequest object, and an error if there is any. +func (c *FakeDownloadRequests) Get(name string, options v1.GetOptions) (result *velero_v1.DownloadRequest, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(downloadrequestsResource, c.ns, name), &velero_v1.DownloadRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.DownloadRequest), err +} + +// List takes label and field selectors, and returns the list of DownloadRequests that match those selectors. +func (c *FakeDownloadRequests) List(opts v1.ListOptions) (result *velero_v1.DownloadRequestList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(downloadrequestsResource, downloadrequestsKind, c.ns, opts), &velero_v1.DownloadRequestList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &velero_v1.DownloadRequestList{ListMeta: obj.(*velero_v1.DownloadRequestList).ListMeta} + for _, item := range obj.(*velero_v1.DownloadRequestList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested downloadRequests. +func (c *FakeDownloadRequests) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(downloadrequestsResource, c.ns, opts)) + +} + +// Create takes the representation of a downloadRequest and creates it. 
Returns the server's representation of the downloadRequest, and an error, if there is any. +func (c *FakeDownloadRequests) Create(downloadRequest *velero_v1.DownloadRequest) (result *velero_v1.DownloadRequest, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(downloadrequestsResource, c.ns, downloadRequest), &velero_v1.DownloadRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.DownloadRequest), err +} + +// Update takes the representation of a downloadRequest and updates it. Returns the server's representation of the downloadRequest, and an error, if there is any. +func (c *FakeDownloadRequests) Update(downloadRequest *velero_v1.DownloadRequest) (result *velero_v1.DownloadRequest, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(downloadrequestsResource, c.ns, downloadRequest), &velero_v1.DownloadRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.DownloadRequest), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeDownloadRequests) UpdateStatus(downloadRequest *velero_v1.DownloadRequest) (*velero_v1.DownloadRequest, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(downloadrequestsResource, "status", c.ns, downloadRequest), &velero_v1.DownloadRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.DownloadRequest), err +} + +// Delete takes name of the downloadRequest and deletes it. Returns an error if one occurs. +func (c *FakeDownloadRequests) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(downloadrequestsResource, c.ns, name), &velero_v1.DownloadRequest{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeDownloadRequests) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(downloadrequestsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &velero_v1.DownloadRequestList{}) + return err +} + +// Patch applies the patch and returns the patched downloadRequest. +func (c *FakeDownloadRequests) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *velero_v1.DownloadRequest, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(downloadrequestsResource, c.ns, name, data, subresources...), &velero_v1.DownloadRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.DownloadRequest), err +} diff --git a/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_podvolumebackup.go b/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_podvolumebackup.go new file mode 100644 index 0000000000..c4fa35d572 --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_podvolumebackup.go @@ -0,0 +1,140 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + velero_v1 "github.com/heptio/velero/pkg/apis/velero/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakePodVolumeBackups implements PodVolumeBackupInterface +type FakePodVolumeBackups struct { + Fake *FakeVeleroV1 + ns string +} + +var podvolumebackupsResource = schema.GroupVersionResource{Group: "velero.io", Version: "v1", Resource: "podvolumebackups"} + +var podvolumebackupsKind = schema.GroupVersionKind{Group: "velero.io", Version: "v1", Kind: "PodVolumeBackup"} + +// Get takes name of the podVolumeBackup, and returns the corresponding podVolumeBackup object, and an error if there is any. +func (c *FakePodVolumeBackups) Get(name string, options v1.GetOptions) (result *velero_v1.PodVolumeBackup, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(podvolumebackupsResource, c.ns, name), &velero_v1.PodVolumeBackup{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.PodVolumeBackup), err +} + +// List takes label and field selectors, and returns the list of PodVolumeBackups that match those selectors. +func (c *FakePodVolumeBackups) List(opts v1.ListOptions) (result *velero_v1.PodVolumeBackupList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(podvolumebackupsResource, podvolumebackupsKind, c.ns, opts), &velero_v1.PodVolumeBackupList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &velero_v1.PodVolumeBackupList{ListMeta: obj.(*velero_v1.PodVolumeBackupList).ListMeta} + for _, item := range obj.(*velero_v1.PodVolumeBackupList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested podVolumeBackups. +func (c *FakePodVolumeBackups) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(podvolumebackupsResource, c.ns, opts)) + +} + +// Create takes the representation of a podVolumeBackup and creates it. Returns the server's representation of the podVolumeBackup, and an error, if there is any. +func (c *FakePodVolumeBackups) Create(podVolumeBackup *velero_v1.PodVolumeBackup) (result *velero_v1.PodVolumeBackup, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(podvolumebackupsResource, c.ns, podVolumeBackup), &velero_v1.PodVolumeBackup{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.PodVolumeBackup), err +} + +// Update takes the representation of a podVolumeBackup and updates it. Returns the server's representation of the podVolumeBackup, and an error, if there is any. +func (c *FakePodVolumeBackups) Update(podVolumeBackup *velero_v1.PodVolumeBackup) (result *velero_v1.PodVolumeBackup, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(podvolumebackupsResource, c.ns, podVolumeBackup), &velero_v1.PodVolumeBackup{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.PodVolumeBackup), err +} + +// UpdateStatus was generated because the type contains a Status member. 
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakePodVolumeBackups) UpdateStatus(podVolumeBackup *velero_v1.PodVolumeBackup) (*velero_v1.PodVolumeBackup, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(podvolumebackupsResource, "status", c.ns, podVolumeBackup), &velero_v1.PodVolumeBackup{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.PodVolumeBackup), err +} + +// Delete takes name of the podVolumeBackup and deletes it. Returns an error if one occurs. +func (c *FakePodVolumeBackups) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(podvolumebackupsResource, c.ns, name), &velero_v1.PodVolumeBackup{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakePodVolumeBackups) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(podvolumebackupsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &velero_v1.PodVolumeBackupList{}) + return err +} + +// Patch applies the patch and returns the patched podVolumeBackup. +func (c *FakePodVolumeBackups) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *velero_v1.PodVolumeBackup, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(podvolumebackupsResource, c.ns, name, data, subresources...), &velero_v1.PodVolumeBackup{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.PodVolumeBackup), err +} diff --git a/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_podvolumerestore.go b/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_podvolumerestore.go new file mode 100644 index 0000000000..a4c215f6c3 --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_podvolumerestore.go @@ -0,0 +1,140 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + velero_v1 "github.com/heptio/velero/pkg/apis/velero/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakePodVolumeRestores implements PodVolumeRestoreInterface +type FakePodVolumeRestores struct { + Fake *FakeVeleroV1 + ns string +} + +var podvolumerestoresResource = schema.GroupVersionResource{Group: "velero.io", Version: "v1", Resource: "podvolumerestores"} + +var podvolumerestoresKind = schema.GroupVersionKind{Group: "velero.io", Version: "v1", Kind: "PodVolumeRestore"} + +// Get takes name of the podVolumeRestore, and returns the corresponding podVolumeRestore object, and an error if there is any. 
+func (c *FakePodVolumeRestores) Get(name string, options v1.GetOptions) (result *velero_v1.PodVolumeRestore, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(podvolumerestoresResource, c.ns, name), &velero_v1.PodVolumeRestore{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.PodVolumeRestore), err +} + +// List takes label and field selectors, and returns the list of PodVolumeRestores that match those selectors. +func (c *FakePodVolumeRestores) List(opts v1.ListOptions) (result *velero_v1.PodVolumeRestoreList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(podvolumerestoresResource, podvolumerestoresKind, c.ns, opts), &velero_v1.PodVolumeRestoreList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &velero_v1.PodVolumeRestoreList{ListMeta: obj.(*velero_v1.PodVolumeRestoreList).ListMeta} + for _, item := range obj.(*velero_v1.PodVolumeRestoreList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested podVolumeRestores. +func (c *FakePodVolumeRestores) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(podvolumerestoresResource, c.ns, opts)) + +} + +// Create takes the representation of a podVolumeRestore and creates it. Returns the server's representation of the podVolumeRestore, and an error, if there is any. +func (c *FakePodVolumeRestores) Create(podVolumeRestore *velero_v1.PodVolumeRestore) (result *velero_v1.PodVolumeRestore, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(podvolumerestoresResource, c.ns, podVolumeRestore), &velero_v1.PodVolumeRestore{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.PodVolumeRestore), err +} + +// Update takes the representation of a podVolumeRestore and updates it. Returns the server's representation of the podVolumeRestore, and an error, if there is any. +func (c *FakePodVolumeRestores) Update(podVolumeRestore *velero_v1.PodVolumeRestore) (result *velero_v1.PodVolumeRestore, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(podvolumerestoresResource, c.ns, podVolumeRestore), &velero_v1.PodVolumeRestore{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.PodVolumeRestore), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakePodVolumeRestores) UpdateStatus(podVolumeRestore *velero_v1.PodVolumeRestore) (*velero_v1.PodVolumeRestore, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(podvolumerestoresResource, "status", c.ns, podVolumeRestore), &velero_v1.PodVolumeRestore{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.PodVolumeRestore), err +} + +// Delete takes name of the podVolumeRestore and deletes it. Returns an error if one occurs. +func (c *FakePodVolumeRestores) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(podvolumerestoresResource, c.ns, name), &velero_v1.PodVolumeRestore{}) + + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakePodVolumeRestores) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(podvolumerestoresResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &velero_v1.PodVolumeRestoreList{}) + return err +} + +// Patch applies the patch and returns the patched podVolumeRestore. +func (c *FakePodVolumeRestores) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *velero_v1.PodVolumeRestore, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(podvolumerestoresResource, c.ns, name, data, subresources...), &velero_v1.PodVolumeRestore{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.PodVolumeRestore), err +} diff --git a/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_resticrepository.go b/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_resticrepository.go new file mode 100644 index 0000000000..02cf48078a --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_resticrepository.go @@ -0,0 +1,140 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + velero_v1 "github.com/heptio/velero/pkg/apis/velero/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeResticRepositories implements ResticRepositoryInterface +type FakeResticRepositories struct { + Fake *FakeVeleroV1 + ns string +} + +var resticrepositoriesResource = schema.GroupVersionResource{Group: "velero.io", Version: "v1", Resource: "resticrepositories"} + +var resticrepositoriesKind = schema.GroupVersionKind{Group: "velero.io", Version: "v1", Kind: "ResticRepository"} + +// Get takes name of the resticRepository, and returns the corresponding resticRepository object, and an error if there is any. +func (c *FakeResticRepositories) Get(name string, options v1.GetOptions) (result *velero_v1.ResticRepository, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(resticrepositoriesResource, c.ns, name), &velero_v1.ResticRepository{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.ResticRepository), err +} + +// List takes label and field selectors, and returns the list of ResticRepositories that match those selectors. +func (c *FakeResticRepositories) List(opts v1.ListOptions) (result *velero_v1.ResticRepositoryList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(resticrepositoriesResource, resticrepositoriesKind, c.ns, opts), &velero_v1.ResticRepositoryList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &velero_v1.ResticRepositoryList{ListMeta: obj.(*velero_v1.ResticRepositoryList).ListMeta} + for _, item := range obj.(*velero_v1.ResticRepositoryList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested resticRepositories. +func (c *FakeResticRepositories) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(resticrepositoriesResource, c.ns, opts)) + +} + +// Create takes the representation of a resticRepository and creates it. Returns the server's representation of the resticRepository, and an error, if there is any. +func (c *FakeResticRepositories) Create(resticRepository *velero_v1.ResticRepository) (result *velero_v1.ResticRepository, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(resticrepositoriesResource, c.ns, resticRepository), &velero_v1.ResticRepository{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.ResticRepository), err +} + +// Update takes the representation of a resticRepository and updates it. Returns the server's representation of the resticRepository, and an error, if there is any. +func (c *FakeResticRepositories) Update(resticRepository *velero_v1.ResticRepository) (result *velero_v1.ResticRepository, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(resticrepositoriesResource, c.ns, resticRepository), &velero_v1.ResticRepository{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.ResticRepository), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeResticRepositories) UpdateStatus(resticRepository *velero_v1.ResticRepository) (*velero_v1.ResticRepository, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(resticrepositoriesResource, "status", c.ns, resticRepository), &velero_v1.ResticRepository{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.ResticRepository), err +} + +// Delete takes name of the resticRepository and deletes it. Returns an error if one occurs. +func (c *FakeResticRepositories) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(resticrepositoriesResource, c.ns, name), &velero_v1.ResticRepository{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeResticRepositories) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(resticrepositoriesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &velero_v1.ResticRepositoryList{}) + return err +} + +// Patch applies the patch and returns the patched resticRepository. +func (c *FakeResticRepositories) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *velero_v1.ResticRepository, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(resticrepositoriesResource, c.ns, name, data, subresources...), &velero_v1.ResticRepository{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.ResticRepository), err +} diff --git a/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_restore.go b/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_restore.go new file mode 100644 index 0000000000..ce83c635ea --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_restore.go @@ -0,0 +1,140 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + velero_v1 "github.com/heptio/velero/pkg/apis/velero/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeRestores implements RestoreInterface +type FakeRestores struct { + Fake *FakeVeleroV1 + ns string +} + +var restoresResource = schema.GroupVersionResource{Group: "velero.io", Version: "v1", Resource: "restores"} + +var restoresKind = schema.GroupVersionKind{Group: "velero.io", Version: "v1", Kind: "Restore"} + +// Get takes name of the restore, and returns the corresponding restore object, and an error if there is any. +func (c *FakeRestores) Get(name string, options v1.GetOptions) (result *velero_v1.Restore, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(restoresResource, c.ns, name), &velero_v1.Restore{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.Restore), err +} + +// List takes label and field selectors, and returns the list of Restores that match those selectors. +func (c *FakeRestores) List(opts v1.ListOptions) (result *velero_v1.RestoreList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(restoresResource, restoresKind, c.ns, opts), &velero_v1.RestoreList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &velero_v1.RestoreList{ListMeta: obj.(*velero_v1.RestoreList).ListMeta} + for _, item := range obj.(*velero_v1.RestoreList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested restores. +func (c *FakeRestores) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(restoresResource, c.ns, opts)) + +} + +// Create takes the representation of a restore and creates it. Returns the server's representation of the restore, and an error, if there is any. +func (c *FakeRestores) Create(restore *velero_v1.Restore) (result *velero_v1.Restore, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewCreateAction(restoresResource, c.ns, restore), &velero_v1.Restore{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.Restore), err +} + +// Update takes the representation of a restore and updates it. Returns the server's representation of the restore, and an error, if there is any. +func (c *FakeRestores) Update(restore *velero_v1.Restore) (result *velero_v1.Restore, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(restoresResource, c.ns, restore), &velero_v1.Restore{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.Restore), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeRestores) UpdateStatus(restore *velero_v1.Restore) (*velero_v1.Restore, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(restoresResource, "status", c.ns, restore), &velero_v1.Restore{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.Restore), err +} + +// Delete takes name of the restore and deletes it. Returns an error if one occurs. +func (c *FakeRestores) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(restoresResource, c.ns, name), &velero_v1.Restore{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeRestores) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(restoresResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &velero_v1.RestoreList{}) + return err +} + +// Patch applies the patch and returns the patched restore. +func (c *FakeRestores) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *velero_v1.Restore, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(restoresResource, c.ns, name, data, subresources...), &velero_v1.Restore{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.Restore), err +} diff --git a/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_schedule.go b/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_schedule.go new file mode 100644 index 0000000000..de9d34d6ae --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_schedule.go @@ -0,0 +1,140 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + velero_v1 "github.com/heptio/velero/pkg/apis/velero/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeSchedules implements ScheduleInterface +type FakeSchedules struct { + Fake *FakeVeleroV1 + ns string +} + +var schedulesResource = schema.GroupVersionResource{Group: "velero.io", Version: "v1", Resource: "schedules"} + +var schedulesKind = schema.GroupVersionKind{Group: "velero.io", Version: "v1", Kind: "Schedule"} + +// Get takes name of the schedule, and returns the corresponding schedule object, and an error if there is any. +func (c *FakeSchedules) Get(name string, options v1.GetOptions) (result *velero_v1.Schedule, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(schedulesResource, c.ns, name), &velero_v1.Schedule{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.Schedule), err +} + +// List takes label and field selectors, and returns the list of Schedules that match those selectors. +func (c *FakeSchedules) List(opts v1.ListOptions) (result *velero_v1.ScheduleList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(schedulesResource, schedulesKind, c.ns, opts), &velero_v1.ScheduleList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &velero_v1.ScheduleList{ListMeta: obj.(*velero_v1.ScheduleList).ListMeta} + for _, item := range obj.(*velero_v1.ScheduleList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested schedules. +func (c *FakeSchedules) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(schedulesResource, c.ns, opts)) + +} + +// Create takes the representation of a schedule and creates it. Returns the server's representation of the schedule, and an error, if there is any. +func (c *FakeSchedules) Create(schedule *velero_v1.Schedule) (result *velero_v1.Schedule, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(schedulesResource, c.ns, schedule), &velero_v1.Schedule{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.Schedule), err +} + +// Update takes the representation of a schedule and updates it. Returns the server's representation of the schedule, and an error, if there is any. +func (c *FakeSchedules) Update(schedule *velero_v1.Schedule) (result *velero_v1.Schedule, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(schedulesResource, c.ns, schedule), &velero_v1.Schedule{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.Schedule), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeSchedules) UpdateStatus(schedule *velero_v1.Schedule) (*velero_v1.Schedule, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(schedulesResource, "status", c.ns, schedule), &velero_v1.Schedule{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.Schedule), err +} + +// Delete takes name of the schedule and deletes it. 
Returns an error if one occurs. +func (c *FakeSchedules) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(schedulesResource, c.ns, name), &velero_v1.Schedule{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeSchedules) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(schedulesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &velero_v1.ScheduleList{}) + return err +} + +// Patch applies the patch and returns the patched schedule. +func (c *FakeSchedules) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *velero_v1.Schedule, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(schedulesResource, c.ns, name, data, subresources...), &velero_v1.Schedule{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.Schedule), err +} diff --git a/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_serverstatusrequest.go b/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_serverstatusrequest.go new file mode 100644 index 0000000000..9e3f95d729 --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_serverstatusrequest.go @@ -0,0 +1,140 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + velero_v1 "github.com/heptio/velero/pkg/apis/velero/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeServerStatusRequests implements ServerStatusRequestInterface +type FakeServerStatusRequests struct { + Fake *FakeVeleroV1 + ns string +} + +var serverstatusrequestsResource = schema.GroupVersionResource{Group: "velero.io", Version: "v1", Resource: "serverstatusrequests"} + +var serverstatusrequestsKind = schema.GroupVersionKind{Group: "velero.io", Version: "v1", Kind: "ServerStatusRequest"} + +// Get takes name of the serverStatusRequest, and returns the corresponding serverStatusRequest object, and an error if there is any. +func (c *FakeServerStatusRequests) Get(name string, options v1.GetOptions) (result *velero_v1.ServerStatusRequest, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(serverstatusrequestsResource, c.ns, name), &velero_v1.ServerStatusRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.ServerStatusRequest), err +} + +// List takes label and field selectors, and returns the list of ServerStatusRequests that match those selectors. +func (c *FakeServerStatusRequests) List(opts v1.ListOptions) (result *velero_v1.ServerStatusRequestList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(serverstatusrequestsResource, serverstatusrequestsKind, c.ns, opts), &velero_v1.ServerStatusRequestList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &velero_v1.ServerStatusRequestList{ListMeta: obj.(*velero_v1.ServerStatusRequestList).ListMeta} + for _, item := range obj.(*velero_v1.ServerStatusRequestList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested serverStatusRequests. +func (c *FakeServerStatusRequests) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(serverstatusrequestsResource, c.ns, opts)) + +} + +// Create takes the representation of a serverStatusRequest and creates it. Returns the server's representation of the serverStatusRequest, and an error, if there is any. +func (c *FakeServerStatusRequests) Create(serverStatusRequest *velero_v1.ServerStatusRequest) (result *velero_v1.ServerStatusRequest, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(serverstatusrequestsResource, c.ns, serverStatusRequest), &velero_v1.ServerStatusRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.ServerStatusRequest), err +} + +// Update takes the representation of a serverStatusRequest and updates it. Returns the server's representation of the serverStatusRequest, and an error, if there is any. +func (c *FakeServerStatusRequests) Update(serverStatusRequest *velero_v1.ServerStatusRequest) (result *velero_v1.ServerStatusRequest, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(serverstatusrequestsResource, c.ns, serverStatusRequest), &velero_v1.ServerStatusRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.ServerStatusRequest), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeServerStatusRequests) UpdateStatus(serverStatusRequest *velero_v1.ServerStatusRequest) (*velero_v1.ServerStatusRequest, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(serverstatusrequestsResource, "status", c.ns, serverStatusRequest), &velero_v1.ServerStatusRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.ServerStatusRequest), err +} + +// Delete takes name of the serverStatusRequest and deletes it. Returns an error if one occurs. +func (c *FakeServerStatusRequests) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(serverstatusrequestsResource, c.ns, name), &velero_v1.ServerStatusRequest{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeServerStatusRequests) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(serverstatusrequestsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &velero_v1.ServerStatusRequestList{}) + return err +} + +// Patch applies the patch and returns the patched serverStatusRequest. +func (c *FakeServerStatusRequests) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *velero_v1.ServerStatusRequest, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewPatchSubresourceAction(serverstatusrequestsResource, c.ns, name, data, subresources...), &velero_v1.ServerStatusRequest{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.ServerStatusRequest), err +} diff --git a/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_velero_client.go b/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_velero_client.go new file mode 100644 index 0000000000..a94ab05709 --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_velero_client.go @@ -0,0 +1,80 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "github.com/heptio/velero/pkg/generated/clientset/versioned/typed/velero/v1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeVeleroV1 struct { + *testing.Fake +} + +func (c *FakeVeleroV1) Backups(namespace string) v1.BackupInterface { + return &FakeBackups{c, namespace} +} + +func (c *FakeVeleroV1) BackupStorageLocations(namespace string) v1.BackupStorageLocationInterface { + return &FakeBackupStorageLocations{c, namespace} +} + +func (c *FakeVeleroV1) DeleteBackupRequests(namespace string) v1.DeleteBackupRequestInterface { + return &FakeDeleteBackupRequests{c, namespace} +} + +func (c *FakeVeleroV1) DownloadRequests(namespace string) v1.DownloadRequestInterface { + return &FakeDownloadRequests{c, namespace} +} + +func (c *FakeVeleroV1) PodVolumeBackups(namespace string) v1.PodVolumeBackupInterface { + return &FakePodVolumeBackups{c, namespace} +} + +func (c *FakeVeleroV1) PodVolumeRestores(namespace string) v1.PodVolumeRestoreInterface { + return &FakePodVolumeRestores{c, namespace} +} + +func (c *FakeVeleroV1) ResticRepositories(namespace string) v1.ResticRepositoryInterface { + return &FakeResticRepositories{c, namespace} +} + +func (c *FakeVeleroV1) Restores(namespace string) v1.RestoreInterface { + return &FakeRestores{c, namespace} +} + +func (c *FakeVeleroV1) Schedules(namespace string) v1.ScheduleInterface { + return &FakeSchedules{c, namespace} +} + +func (c *FakeVeleroV1) ServerStatusRequests(namespace string) v1.ServerStatusRequestInterface { + return &FakeServerStatusRequests{c, namespace} +} + +func (c *FakeVeleroV1) VolumeSnapshotLocations(namespace string) v1.VolumeSnapshotLocationInterface { + return &FakeVolumeSnapshotLocations{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *FakeVeleroV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_volumesnapshotlocation.go b/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_volumesnapshotlocation.go new file mode 100644 index 0000000000..bc0fd00bc0 --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/velero/v1/fake/fake_volumesnapshotlocation.go @@ -0,0 +1,140 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + velero_v1 "github.com/heptio/velero/pkg/apis/velero/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" +) + +// FakeVolumeSnapshotLocations implements VolumeSnapshotLocationInterface +type FakeVolumeSnapshotLocations struct { + Fake *FakeVeleroV1 + ns string +} + +var volumesnapshotlocationsResource = schema.GroupVersionResource{Group: "velero.io", Version: "v1", Resource: "volumesnapshotlocations"} + +var volumesnapshotlocationsKind = schema.GroupVersionKind{Group: "velero.io", Version: "v1", Kind: "VolumeSnapshotLocation"} + +// Get takes name of the volumeSnapshotLocation, and returns the corresponding volumeSnapshotLocation object, and an error if there is any. +func (c *FakeVolumeSnapshotLocations) Get(name string, options v1.GetOptions) (result *velero_v1.VolumeSnapshotLocation, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(volumesnapshotlocationsResource, c.ns, name), &velero_v1.VolumeSnapshotLocation{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.VolumeSnapshotLocation), err +} + +// List takes label and field selectors, and returns the list of VolumeSnapshotLocations that match those selectors. +func (c *FakeVolumeSnapshotLocations) List(opts v1.ListOptions) (result *velero_v1.VolumeSnapshotLocationList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(volumesnapshotlocationsResource, volumesnapshotlocationsKind, c.ns, opts), &velero_v1.VolumeSnapshotLocationList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &velero_v1.VolumeSnapshotLocationList{ListMeta: obj.(*velero_v1.VolumeSnapshotLocationList).ListMeta} + for _, item := range obj.(*velero_v1.VolumeSnapshotLocationList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested volumeSnapshotLocations. +func (c *FakeVolumeSnapshotLocations) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. 
+ InvokesWatch(testing.NewWatchAction(volumesnapshotlocationsResource, c.ns, opts)) + +} + +// Create takes the representation of a volumeSnapshotLocation and creates it. Returns the server's representation of the volumeSnapshotLocation, and an error, if there is any. +func (c *FakeVolumeSnapshotLocations) Create(volumeSnapshotLocation *velero_v1.VolumeSnapshotLocation) (result *velero_v1.VolumeSnapshotLocation, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(volumesnapshotlocationsResource, c.ns, volumeSnapshotLocation), &velero_v1.VolumeSnapshotLocation{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.VolumeSnapshotLocation), err +} + +// Update takes the representation of a volumeSnapshotLocation and updates it. Returns the server's representation of the volumeSnapshotLocation, and an error, if there is any. +func (c *FakeVolumeSnapshotLocations) Update(volumeSnapshotLocation *velero_v1.VolumeSnapshotLocation) (result *velero_v1.VolumeSnapshotLocation, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(volumesnapshotlocationsResource, c.ns, volumeSnapshotLocation), &velero_v1.VolumeSnapshotLocation{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.VolumeSnapshotLocation), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeVolumeSnapshotLocations) UpdateStatus(volumeSnapshotLocation *velero_v1.VolumeSnapshotLocation) (*velero_v1.VolumeSnapshotLocation, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(volumesnapshotlocationsResource, "status", c.ns, volumeSnapshotLocation), &velero_v1.VolumeSnapshotLocation{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.VolumeSnapshotLocation), err +} + +// Delete takes name of the volumeSnapshotLocation and deletes it. Returns an error if one occurs. +func (c *FakeVolumeSnapshotLocations) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(volumesnapshotlocationsResource, c.ns, name), &velero_v1.VolumeSnapshotLocation{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeVolumeSnapshotLocations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(volumesnapshotlocationsResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &velero_v1.VolumeSnapshotLocationList{}) + return err +} + +// Patch applies the patch and returns the patched volumeSnapshotLocation. +func (c *FakeVolumeSnapshotLocations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *velero_v1.VolumeSnapshotLocation, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(volumesnapshotlocationsResource, c.ns, name, data, subresources...), &velero_v1.VolumeSnapshotLocation{}) + + if obj == nil { + return nil, err + } + return obj.(*velero_v1.VolumeSnapshotLocation), err +} diff --git a/pkg/generated/clientset/versioned/typed/velero/v1/generated_expansion.go b/pkg/generated/clientset/versioned/typed/velero/v1/generated_expansion.go new file mode 100644 index 0000000000..9c0a18a9d7 --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/velero/v1/generated_expansion.go @@ -0,0 +1,41 @@ +/* +Copyright the Heptio Ark contributors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type BackupExpansion interface{} + +type BackupStorageLocationExpansion interface{} + +type DeleteBackupRequestExpansion interface{} + +type DownloadRequestExpansion interface{} + +type PodVolumeBackupExpansion interface{} + +type PodVolumeRestoreExpansion interface{} + +type ResticRepositoryExpansion interface{} + +type RestoreExpansion interface{} + +type ScheduleExpansion interface{} + +type ServerStatusRequestExpansion interface{} + +type VolumeSnapshotLocationExpansion interface{} diff --git a/pkg/generated/clientset/versioned/typed/velero/v1/podvolumebackup.go b/pkg/generated/clientset/versioned/typed/velero/v1/podvolumebackup.go new file mode 100644 index 0000000000..6ce585bad7 --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/velero/v1/podvolumebackup.go @@ -0,0 +1,174 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + scheme "github.com/heptio/velero/pkg/generated/clientset/versioned/scheme" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// PodVolumeBackupsGetter has a method to return a PodVolumeBackupInterface. +// A group's client should implement this interface. +type PodVolumeBackupsGetter interface { + PodVolumeBackups(namespace string) PodVolumeBackupInterface +} + +// PodVolumeBackupInterface has methods to work with PodVolumeBackup resources. 
+type PodVolumeBackupInterface interface { + Create(*v1.PodVolumeBackup) (*v1.PodVolumeBackup, error) + Update(*v1.PodVolumeBackup) (*v1.PodVolumeBackup, error) + UpdateStatus(*v1.PodVolumeBackup) (*v1.PodVolumeBackup, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.PodVolumeBackup, error) + List(opts meta_v1.ListOptions) (*v1.PodVolumeBackupList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PodVolumeBackup, err error) + PodVolumeBackupExpansion +} + +// podVolumeBackups implements PodVolumeBackupInterface +type podVolumeBackups struct { + client rest.Interface + ns string +} + +// newPodVolumeBackups returns a PodVolumeBackups +func newPodVolumeBackups(c *VeleroV1Client, namespace string) *podVolumeBackups { + return &podVolumeBackups{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the podVolumeBackup, and returns the corresponding podVolumeBackup object, and an error if there is any. +func (c *podVolumeBackups) Get(name string, options meta_v1.GetOptions) (result *v1.PodVolumeBackup, err error) { + result = &v1.PodVolumeBackup{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podvolumebackups"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodVolumeBackups that match those selectors. +func (c *podVolumeBackups) List(opts meta_v1.ListOptions) (result *v1.PodVolumeBackupList, err error) { + result = &v1.PodVolumeBackupList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podvolumebackups"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podVolumeBackups. +func (c *podVolumeBackups) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("podvolumebackups"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a podVolumeBackup and creates it. Returns the server's representation of the podVolumeBackup, and an error, if there is any. +func (c *podVolumeBackups) Create(podVolumeBackup *v1.PodVolumeBackup) (result *v1.PodVolumeBackup, err error) { + result = &v1.PodVolumeBackup{} + err = c.client.Post(). + Namespace(c.ns). + Resource("podvolumebackups"). + Body(podVolumeBackup). + Do(). + Into(result) + return +} + +// Update takes the representation of a podVolumeBackup and updates it. Returns the server's representation of the podVolumeBackup, and an error, if there is any. +func (c *podVolumeBackups) Update(podVolumeBackup *v1.PodVolumeBackup) (result *v1.PodVolumeBackup, err error) { + result = &v1.PodVolumeBackup{} + err = c.client.Put(). + Namespace(c.ns). + Resource("podvolumebackups"). + Name(podVolumeBackup.Name). + Body(podVolumeBackup). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *podVolumeBackups) UpdateStatus(podVolumeBackup *v1.PodVolumeBackup) (result *v1.PodVolumeBackup, err error) { + result = &v1.PodVolumeBackup{} + err = c.client.Put(). + Namespace(c.ns). 
+ Resource("podvolumebackups"). + Name(podVolumeBackup.Name). + SubResource("status"). + Body(podVolumeBackup). + Do(). + Into(result) + return +} + +// Delete takes name of the podVolumeBackup and deletes it. Returns an error if one occurs. +func (c *podVolumeBackups) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("podvolumebackups"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *podVolumeBackups) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("podvolumebackups"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched podVolumeBackup. +func (c *podVolumeBackups) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PodVolumeBackup, err error) { + result = &v1.PodVolumeBackup{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("podvolumebackups"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/pkg/generated/clientset/versioned/typed/velero/v1/podvolumerestore.go b/pkg/generated/clientset/versioned/typed/velero/v1/podvolumerestore.go new file mode 100644 index 0000000000..ef70373f9f --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/velero/v1/podvolumerestore.go @@ -0,0 +1,174 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + scheme "github.com/heptio/velero/pkg/generated/clientset/versioned/scheme" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// PodVolumeRestoresGetter has a method to return a PodVolumeRestoreInterface. +// A group's client should implement this interface. +type PodVolumeRestoresGetter interface { + PodVolumeRestores(namespace string) PodVolumeRestoreInterface +} + +// PodVolumeRestoreInterface has methods to work with PodVolumeRestore resources. 
+type PodVolumeRestoreInterface interface { + Create(*v1.PodVolumeRestore) (*v1.PodVolumeRestore, error) + Update(*v1.PodVolumeRestore) (*v1.PodVolumeRestore, error) + UpdateStatus(*v1.PodVolumeRestore) (*v1.PodVolumeRestore, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.PodVolumeRestore, error) + List(opts meta_v1.ListOptions) (*v1.PodVolumeRestoreList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PodVolumeRestore, err error) + PodVolumeRestoreExpansion +} + +// podVolumeRestores implements PodVolumeRestoreInterface +type podVolumeRestores struct { + client rest.Interface + ns string +} + +// newPodVolumeRestores returns a PodVolumeRestores +func newPodVolumeRestores(c *VeleroV1Client, namespace string) *podVolumeRestores { + return &podVolumeRestores{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the podVolumeRestore, and returns the corresponding podVolumeRestore object, and an error if there is any. +func (c *podVolumeRestores) Get(name string, options meta_v1.GetOptions) (result *v1.PodVolumeRestore, err error) { + result = &v1.PodVolumeRestore{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podvolumerestores"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodVolumeRestores that match those selectors. +func (c *podVolumeRestores) List(opts meta_v1.ListOptions) (result *v1.PodVolumeRestoreList, err error) { + result = &v1.PodVolumeRestoreList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podvolumerestores"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podVolumeRestores. +func (c *podVolumeRestores) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("podvolumerestores"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a podVolumeRestore and creates it. Returns the server's representation of the podVolumeRestore, and an error, if there is any. +func (c *podVolumeRestores) Create(podVolumeRestore *v1.PodVolumeRestore) (result *v1.PodVolumeRestore, err error) { + result = &v1.PodVolumeRestore{} + err = c.client.Post(). + Namespace(c.ns). + Resource("podvolumerestores"). + Body(podVolumeRestore). + Do(). + Into(result) + return +} + +// Update takes the representation of a podVolumeRestore and updates it. Returns the server's representation of the podVolumeRestore, and an error, if there is any. +func (c *podVolumeRestores) Update(podVolumeRestore *v1.PodVolumeRestore) (result *v1.PodVolumeRestore, err error) { + result = &v1.PodVolumeRestore{} + err = c.client.Put(). + Namespace(c.ns). + Resource("podvolumerestores"). + Name(podVolumeRestore.Name). + Body(podVolumeRestore). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ +func (c *podVolumeRestores) UpdateStatus(podVolumeRestore *v1.PodVolumeRestore) (result *v1.PodVolumeRestore, err error) { + result = &v1.PodVolumeRestore{} + err = c.client.Put(). + Namespace(c.ns). + Resource("podvolumerestores"). + Name(podVolumeRestore.Name). + SubResource("status"). + Body(podVolumeRestore). + Do(). + Into(result) + return +} + +// Delete takes name of the podVolumeRestore and deletes it. Returns an error if one occurs. +func (c *podVolumeRestores) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("podvolumerestores"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *podVolumeRestores) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("podvolumerestores"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched podVolumeRestore. +func (c *podVolumeRestores) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PodVolumeRestore, err error) { + result = &v1.PodVolumeRestore{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("podvolumerestores"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/pkg/generated/clientset/versioned/typed/velero/v1/resticrepository.go b/pkg/generated/clientset/versioned/typed/velero/v1/resticrepository.go new file mode 100644 index 0000000000..934e94a307 --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/velero/v1/resticrepository.go @@ -0,0 +1,174 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + scheme "github.com/heptio/velero/pkg/generated/clientset/versioned/scheme" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ResticRepositoriesGetter has a method to return a ResticRepositoryInterface. +// A group's client should implement this interface. +type ResticRepositoriesGetter interface { + ResticRepositories(namespace string) ResticRepositoryInterface +} + +// ResticRepositoryInterface has methods to work with ResticRepository resources. 
+type ResticRepositoryInterface interface { + Create(*v1.ResticRepository) (*v1.ResticRepository, error) + Update(*v1.ResticRepository) (*v1.ResticRepository, error) + UpdateStatus(*v1.ResticRepository) (*v1.ResticRepository, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.ResticRepository, error) + List(opts meta_v1.ListOptions) (*v1.ResticRepositoryList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ResticRepository, err error) + ResticRepositoryExpansion +} + +// resticRepositories implements ResticRepositoryInterface +type resticRepositories struct { + client rest.Interface + ns string +} + +// newResticRepositories returns a ResticRepositories +func newResticRepositories(c *VeleroV1Client, namespace string) *resticRepositories { + return &resticRepositories{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the resticRepository, and returns the corresponding resticRepository object, and an error if there is any. +func (c *resticRepositories) Get(name string, options meta_v1.GetOptions) (result *v1.ResticRepository, err error) { + result = &v1.ResticRepository{} + err = c.client.Get(). + Namespace(c.ns). + Resource("resticrepositories"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ResticRepositories that match those selectors. +func (c *resticRepositories) List(opts meta_v1.ListOptions) (result *v1.ResticRepositoryList, err error) { + result = &v1.ResticRepositoryList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("resticrepositories"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested resticRepositories. +func (c *resticRepositories) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("resticrepositories"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a resticRepository and creates it. Returns the server's representation of the resticRepository, and an error, if there is any. +func (c *resticRepositories) Create(resticRepository *v1.ResticRepository) (result *v1.ResticRepository, err error) { + result = &v1.ResticRepository{} + err = c.client.Post(). + Namespace(c.ns). + Resource("resticrepositories"). + Body(resticRepository). + Do(). + Into(result) + return +} + +// Update takes the representation of a resticRepository and updates it. Returns the server's representation of the resticRepository, and an error, if there is any. +func (c *resticRepositories) Update(resticRepository *v1.ResticRepository) (result *v1.ResticRepository, err error) { + result = &v1.ResticRepository{} + err = c.client.Put(). + Namespace(c.ns). + Resource("resticrepositories"). + Name(resticRepository.Name). + Body(resticRepository). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ +func (c *resticRepositories) UpdateStatus(resticRepository *v1.ResticRepository) (result *v1.ResticRepository, err error) { + result = &v1.ResticRepository{} + err = c.client.Put(). + Namespace(c.ns). + Resource("resticrepositories"). + Name(resticRepository.Name). + SubResource("status"). + Body(resticRepository). + Do(). + Into(result) + return +} + +// Delete takes name of the resticRepository and deletes it. Returns an error if one occurs. +func (c *resticRepositories) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("resticrepositories"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *resticRepositories) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("resticrepositories"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched resticRepository. +func (c *resticRepositories) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ResticRepository, err error) { + result = &v1.ResticRepository{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("resticrepositories"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/pkg/generated/clientset/versioned/typed/velero/v1/restore.go b/pkg/generated/clientset/versioned/typed/velero/v1/restore.go new file mode 100644 index 0000000000..bc71ad97d7 --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/velero/v1/restore.go @@ -0,0 +1,174 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + scheme "github.com/heptio/velero/pkg/generated/clientset/versioned/scheme" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// RestoresGetter has a method to return a RestoreInterface. +// A group's client should implement this interface. +type RestoresGetter interface { + Restores(namespace string) RestoreInterface +} + +// RestoreInterface has methods to work with Restore resources. 
+type RestoreInterface interface { + Create(*v1.Restore) (*v1.Restore, error) + Update(*v1.Restore) (*v1.Restore, error) + UpdateStatus(*v1.Restore) (*v1.Restore, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.Restore, error) + List(opts meta_v1.ListOptions) (*v1.RestoreList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Restore, err error) + RestoreExpansion +} + +// restores implements RestoreInterface +type restores struct { + client rest.Interface + ns string +} + +// newRestores returns a Restores +func newRestores(c *VeleroV1Client, namespace string) *restores { + return &restores{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the restore, and returns the corresponding restore object, and an error if there is any. +func (c *restores) Get(name string, options meta_v1.GetOptions) (result *v1.Restore, err error) { + result = &v1.Restore{} + err = c.client.Get(). + Namespace(c.ns). + Resource("restores"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Restores that match those selectors. +func (c *restores) List(opts meta_v1.ListOptions) (result *v1.RestoreList, err error) { + result = &v1.RestoreList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("restores"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested restores. +func (c *restores) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("restores"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a restore and creates it. Returns the server's representation of the restore, and an error, if there is any. +func (c *restores) Create(restore *v1.Restore) (result *v1.Restore, err error) { + result = &v1.Restore{} + err = c.client.Post(). + Namespace(c.ns). + Resource("restores"). + Body(restore). + Do(). + Into(result) + return +} + +// Update takes the representation of a restore and updates it. Returns the server's representation of the restore, and an error, if there is any. +func (c *restores) Update(restore *v1.Restore) (result *v1.Restore, err error) { + result = &v1.Restore{} + err = c.client.Put(). + Namespace(c.ns). + Resource("restores"). + Name(restore.Name). + Body(restore). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *restores) UpdateStatus(restore *v1.Restore) (result *v1.Restore, err error) { + result = &v1.Restore{} + err = c.client.Put(). + Namespace(c.ns). + Resource("restores"). + Name(restore.Name). + SubResource("status"). + Body(restore). + Do(). + Into(result) + return +} + +// Delete takes name of the restore and deletes it. Returns an error if one occurs. +func (c *restores) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("restores"). + Name(name). + Body(options). + Do(). 
+ Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *restores) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("restores"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched restore. +func (c *restores) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Restore, err error) { + result = &v1.Restore{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("restores"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/pkg/generated/clientset/versioned/typed/velero/v1/schedule.go b/pkg/generated/clientset/versioned/typed/velero/v1/schedule.go new file mode 100644 index 0000000000..462cbc0fb9 --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/velero/v1/schedule.go @@ -0,0 +1,174 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + scheme "github.com/heptio/velero/pkg/generated/clientset/versioned/scheme" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// SchedulesGetter has a method to return a ScheduleInterface. +// A group's client should implement this interface. +type SchedulesGetter interface { + Schedules(namespace string) ScheduleInterface +} + +// ScheduleInterface has methods to work with Schedule resources. +type ScheduleInterface interface { + Create(*v1.Schedule) (*v1.Schedule, error) + Update(*v1.Schedule) (*v1.Schedule, error) + UpdateStatus(*v1.Schedule) (*v1.Schedule, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.Schedule, error) + List(opts meta_v1.ListOptions) (*v1.ScheduleList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Schedule, err error) + ScheduleExpansion +} + +// schedules implements ScheduleInterface +type schedules struct { + client rest.Interface + ns string +} + +// newSchedules returns a Schedules +func newSchedules(c *VeleroV1Client, namespace string) *schedules { + return &schedules{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the schedule, and returns the corresponding schedule object, and an error if there is any. +func (c *schedules) Get(name string, options meta_v1.GetOptions) (result *v1.Schedule, err error) { + result = &v1.Schedule{} + err = c.client.Get(). + Namespace(c.ns). + Resource("schedules"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). 
+ Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Schedules that match those selectors. +func (c *schedules) List(opts meta_v1.ListOptions) (result *v1.ScheduleList, err error) { + result = &v1.ScheduleList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("schedules"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested schedules. +func (c *schedules) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("schedules"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a schedule and creates it. Returns the server's representation of the schedule, and an error, if there is any. +func (c *schedules) Create(schedule *v1.Schedule) (result *v1.Schedule, err error) { + result = &v1.Schedule{} + err = c.client.Post(). + Namespace(c.ns). + Resource("schedules"). + Body(schedule). + Do(). + Into(result) + return +} + +// Update takes the representation of a schedule and updates it. Returns the server's representation of the schedule, and an error, if there is any. +func (c *schedules) Update(schedule *v1.Schedule) (result *v1.Schedule, err error) { + result = &v1.Schedule{} + err = c.client.Put(). + Namespace(c.ns). + Resource("schedules"). + Name(schedule.Name). + Body(schedule). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *schedules) UpdateStatus(schedule *v1.Schedule) (result *v1.Schedule, err error) { + result = &v1.Schedule{} + err = c.client.Put(). + Namespace(c.ns). + Resource("schedules"). + Name(schedule.Name). + SubResource("status"). + Body(schedule). + Do(). + Into(result) + return +} + +// Delete takes name of the schedule and deletes it. Returns an error if one occurs. +func (c *schedules) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("schedules"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *schedules) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("schedules"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched schedule. +func (c *schedules) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Schedule, err error) { + result = &v1.Schedule{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("schedules"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/pkg/generated/clientset/versioned/typed/velero/v1/serverstatusrequest.go b/pkg/generated/clientset/versioned/typed/velero/v1/serverstatusrequest.go new file mode 100644 index 0000000000..4b892dbd85 --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/velero/v1/serverstatusrequest.go @@ -0,0 +1,174 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + scheme "github.com/heptio/velero/pkg/generated/clientset/versioned/scheme" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ServerStatusRequestsGetter has a method to return a ServerStatusRequestInterface. +// A group's client should implement this interface. +type ServerStatusRequestsGetter interface { + ServerStatusRequests(namespace string) ServerStatusRequestInterface +} + +// ServerStatusRequestInterface has methods to work with ServerStatusRequest resources. +type ServerStatusRequestInterface interface { + Create(*v1.ServerStatusRequest) (*v1.ServerStatusRequest, error) + Update(*v1.ServerStatusRequest) (*v1.ServerStatusRequest, error) + UpdateStatus(*v1.ServerStatusRequest) (*v1.ServerStatusRequest, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.ServerStatusRequest, error) + List(opts meta_v1.ListOptions) (*v1.ServerStatusRequestList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ServerStatusRequest, err error) + ServerStatusRequestExpansion +} + +// serverStatusRequests implements ServerStatusRequestInterface +type serverStatusRequests struct { + client rest.Interface + ns string +} + +// newServerStatusRequests returns a ServerStatusRequests +func newServerStatusRequests(c *VeleroV1Client, namespace string) *serverStatusRequests { + return &serverStatusRequests{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the serverStatusRequest, and returns the corresponding serverStatusRequest object, and an error if there is any. +func (c *serverStatusRequests) Get(name string, options meta_v1.GetOptions) (result *v1.ServerStatusRequest, err error) { + result = &v1.ServerStatusRequest{} + err = c.client.Get(). + Namespace(c.ns). + Resource("serverstatusrequests"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ServerStatusRequests that match those selectors. +func (c *serverStatusRequests) List(opts meta_v1.ListOptions) (result *v1.ServerStatusRequestList, err error) { + result = &v1.ServerStatusRequestList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("serverstatusrequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested serverStatusRequests. +func (c *serverStatusRequests) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("serverstatusrequests"). + VersionedParams(&opts, scheme.ParameterCodec). 
+ Watch() +} + +// Create takes the representation of a serverStatusRequest and creates it. Returns the server's representation of the serverStatusRequest, and an error, if there is any. +func (c *serverStatusRequests) Create(serverStatusRequest *v1.ServerStatusRequest) (result *v1.ServerStatusRequest, err error) { + result = &v1.ServerStatusRequest{} + err = c.client.Post(). + Namespace(c.ns). + Resource("serverstatusrequests"). + Body(serverStatusRequest). + Do(). + Into(result) + return +} + +// Update takes the representation of a serverStatusRequest and updates it. Returns the server's representation of the serverStatusRequest, and an error, if there is any. +func (c *serverStatusRequests) Update(serverStatusRequest *v1.ServerStatusRequest) (result *v1.ServerStatusRequest, err error) { + result = &v1.ServerStatusRequest{} + err = c.client.Put(). + Namespace(c.ns). + Resource("serverstatusrequests"). + Name(serverStatusRequest.Name). + Body(serverStatusRequest). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *serverStatusRequests) UpdateStatus(serverStatusRequest *v1.ServerStatusRequest) (result *v1.ServerStatusRequest, err error) { + result = &v1.ServerStatusRequest{} + err = c.client.Put(). + Namespace(c.ns). + Resource("serverstatusrequests"). + Name(serverStatusRequest.Name). + SubResource("status"). + Body(serverStatusRequest). + Do(). + Into(result) + return +} + +// Delete takes name of the serverStatusRequest and deletes it. Returns an error if one occurs. +func (c *serverStatusRequests) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("serverstatusrequests"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *serverStatusRequests) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("serverstatusrequests"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched serverStatusRequest. +func (c *serverStatusRequests) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ServerStatusRequest, err error) { + result = &v1.ServerStatusRequest{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("serverstatusrequests"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/pkg/generated/clientset/versioned/typed/velero/v1/velero_client.go b/pkg/generated/clientset/versioned/typed/velero/v1/velero_client.go new file mode 100644 index 0000000000..ecb018198a --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/velero/v1/velero_client.go @@ -0,0 +1,140 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/generated/clientset/versioned/scheme" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + rest "k8s.io/client-go/rest" +) + +type VeleroV1Interface interface { + RESTClient() rest.Interface + BackupsGetter + BackupStorageLocationsGetter + DeleteBackupRequestsGetter + DownloadRequestsGetter + PodVolumeBackupsGetter + PodVolumeRestoresGetter + ResticRepositoriesGetter + RestoresGetter + SchedulesGetter + ServerStatusRequestsGetter + VolumeSnapshotLocationsGetter +} + +// VeleroV1Client is used to interact with features provided by the velero.io group. +type VeleroV1Client struct { + restClient rest.Interface +} + +func (c *VeleroV1Client) Backups(namespace string) BackupInterface { + return newBackups(c, namespace) +} + +func (c *VeleroV1Client) BackupStorageLocations(namespace string) BackupStorageLocationInterface { + return newBackupStorageLocations(c, namespace) +} + +func (c *VeleroV1Client) DeleteBackupRequests(namespace string) DeleteBackupRequestInterface { + return newDeleteBackupRequests(c, namespace) +} + +func (c *VeleroV1Client) DownloadRequests(namespace string) DownloadRequestInterface { + return newDownloadRequests(c, namespace) +} + +func (c *VeleroV1Client) PodVolumeBackups(namespace string) PodVolumeBackupInterface { + return newPodVolumeBackups(c, namespace) +} + +func (c *VeleroV1Client) PodVolumeRestores(namespace string) PodVolumeRestoreInterface { + return newPodVolumeRestores(c, namespace) +} + +func (c *VeleroV1Client) ResticRepositories(namespace string) ResticRepositoryInterface { + return newResticRepositories(c, namespace) +} + +func (c *VeleroV1Client) Restores(namespace string) RestoreInterface { + return newRestores(c, namespace) +} + +func (c *VeleroV1Client) Schedules(namespace string) ScheduleInterface { + return newSchedules(c, namespace) +} + +func (c *VeleroV1Client) ServerStatusRequests(namespace string) ServerStatusRequestInterface { + return newServerStatusRequests(c, namespace) +} + +func (c *VeleroV1Client) VolumeSnapshotLocations(namespace string) VolumeSnapshotLocationInterface { + return newVolumeSnapshotLocations(c, namespace) +} + +// NewForConfig creates a new VeleroV1Client for the given config. +func NewForConfig(c *rest.Config) (*VeleroV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &VeleroV1Client{client}, nil +} + +// NewForConfigOrDie creates a new VeleroV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *VeleroV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new VeleroV1Client for the given RESTClient. 
+func New(c rest.Interface) *VeleroV1Client { + return &VeleroV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *VeleroV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/pkg/generated/clientset/versioned/typed/velero/v1/volumesnapshotlocation.go b/pkg/generated/clientset/versioned/typed/velero/v1/volumesnapshotlocation.go new file mode 100644 index 0000000000..099f955ff1 --- /dev/null +++ b/pkg/generated/clientset/versioned/typed/velero/v1/volumesnapshotlocation.go @@ -0,0 +1,174 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + scheme "github.com/heptio/velero/pkg/generated/clientset/versioned/scheme" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// VolumeSnapshotLocationsGetter has a method to return a VolumeSnapshotLocationInterface. +// A group's client should implement this interface. +type VolumeSnapshotLocationsGetter interface { + VolumeSnapshotLocations(namespace string) VolumeSnapshotLocationInterface +} + +// VolumeSnapshotLocationInterface has methods to work with VolumeSnapshotLocation resources. 
+type VolumeSnapshotLocationInterface interface { + Create(*v1.VolumeSnapshotLocation) (*v1.VolumeSnapshotLocation, error) + Update(*v1.VolumeSnapshotLocation) (*v1.VolumeSnapshotLocation, error) + UpdateStatus(*v1.VolumeSnapshotLocation) (*v1.VolumeSnapshotLocation, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.VolumeSnapshotLocation, error) + List(opts meta_v1.ListOptions) (*v1.VolumeSnapshotLocationList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.VolumeSnapshotLocation, err error) + VolumeSnapshotLocationExpansion +} + +// volumeSnapshotLocations implements VolumeSnapshotLocationInterface +type volumeSnapshotLocations struct { + client rest.Interface + ns string +} + +// newVolumeSnapshotLocations returns a VolumeSnapshotLocations +func newVolumeSnapshotLocations(c *VeleroV1Client, namespace string) *volumeSnapshotLocations { + return &volumeSnapshotLocations{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the volumeSnapshotLocation, and returns the corresponding volumeSnapshotLocation object, and an error if there is any. +func (c *volumeSnapshotLocations) Get(name string, options meta_v1.GetOptions) (result *v1.VolumeSnapshotLocation, err error) { + result = &v1.VolumeSnapshotLocation{} + err = c.client.Get(). + Namespace(c.ns). + Resource("volumesnapshotlocations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of VolumeSnapshotLocations that match those selectors. +func (c *volumeSnapshotLocations) List(opts meta_v1.ListOptions) (result *v1.VolumeSnapshotLocationList, err error) { + result = &v1.VolumeSnapshotLocationList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("volumesnapshotlocations"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested volumeSnapshotLocations. +func (c *volumeSnapshotLocations) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("volumesnapshotlocations"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a volumeSnapshotLocation and creates it. Returns the server's representation of the volumeSnapshotLocation, and an error, if there is any. +func (c *volumeSnapshotLocations) Create(volumeSnapshotLocation *v1.VolumeSnapshotLocation) (result *v1.VolumeSnapshotLocation, err error) { + result = &v1.VolumeSnapshotLocation{} + err = c.client.Post(). + Namespace(c.ns). + Resource("volumesnapshotlocations"). + Body(volumeSnapshotLocation). + Do(). + Into(result) + return +} + +// Update takes the representation of a volumeSnapshotLocation and updates it. Returns the server's representation of the volumeSnapshotLocation, and an error, if there is any. +func (c *volumeSnapshotLocations) Update(volumeSnapshotLocation *v1.VolumeSnapshotLocation) (result *v1.VolumeSnapshotLocation, err error) { + result = &v1.VolumeSnapshotLocation{} + err = c.client.Put(). + Namespace(c.ns). + Resource("volumesnapshotlocations"). + Name(volumeSnapshotLocation.Name). + Body(volumeSnapshotLocation). + Do(). 
+ Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *volumeSnapshotLocations) UpdateStatus(volumeSnapshotLocation *v1.VolumeSnapshotLocation) (result *v1.VolumeSnapshotLocation, err error) { + result = &v1.VolumeSnapshotLocation{} + err = c.client.Put(). + Namespace(c.ns). + Resource("volumesnapshotlocations"). + Name(volumeSnapshotLocation.Name). + SubResource("status"). + Body(volumeSnapshotLocation). + Do(). + Into(result) + return +} + +// Delete takes name of the volumeSnapshotLocation and deletes it. Returns an error if one occurs. +func (c *volumeSnapshotLocations) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("volumesnapshotlocations"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *volumeSnapshotLocations) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("volumesnapshotlocations"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched volumeSnapshotLocation. +func (c *volumeSnapshotLocations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.VolumeSnapshotLocation, err error) { + result = &v1.VolumeSnapshotLocation{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("volumesnapshotlocations"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/pkg/generated/informers/externalversions/ark/interface.go b/pkg/generated/informers/externalversions/ark/interface.go index 654933dae4..f2640327e1 100644 --- a/pkg/generated/informers/externalversions/ark/interface.go +++ b/pkg/generated/informers/externalversions/ark/interface.go @@ -19,8 +19,8 @@ limitations under the License. package ark import ( - v1 "github.com/heptio/ark/pkg/generated/informers/externalversions/ark/v1" - internalinterfaces "github.com/heptio/ark/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/heptio/velero/pkg/generated/informers/externalversions/ark/v1" + internalinterfaces "github.com/heptio/velero/pkg/generated/informers/externalversions/internalinterfaces" ) // Interface provides access to each of this group's versions. 
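
For reference, the generated VeleroV1 typed client added above is consumed like any other client-gen clientset. The snippet below is a minimal, illustrative sketch and is not part of the generated code or of this patch; the kubeconfig path and the "velero" namespace are placeholders, and it relies only on the constructors and method signatures shown in the generated files above (NewForConfig in velero_client.go, and the pre-context List(meta_v1.ListOptions) form used by the typed clients in this change).

    package main

    import (
    	"fmt"

    	velerov1 "github.com/heptio/velero/pkg/generated/clientset/versioned/typed/velero/v1"
    	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/tools/clientcmd"
    )

    func main() {
    	// Build a rest.Config from a kubeconfig file (path is a placeholder).
    	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
    	if err != nil {
    		panic(err)
    	}

    	// NewForConfig is the constructor generated in velero_client.go above.
    	client, err := velerov1.NewForConfig(config)
    	if err != nil {
    		panic(err)
    	}

    	// List Backups in the (assumed) "velero" namespace via the generated typed client.
    	backups, err := client.Backups("velero").List(meta_v1.ListOptions{})
    	if err != nil {
    		panic(err)
    	}
    	for _, b := range backups.Items {
    		fmt.Println(b.Name)
    	}
    }
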
diff --git a/pkg/generated/informers/externalversions/ark/v1/backup.go b/pkg/generated/informers/externalversions/ark/v1/backup.go index f4a005658d..b6d94c36b2 100644 --- a/pkg/generated/informers/externalversions/ark/v1/backup.go +++ b/pkg/generated/informers/externalversions/ark/v1/backup.go @@ -21,10 +21,10 @@ package v1 import ( time "time" - ark_v1 "github.com/heptio/ark/pkg/apis/ark/v1" - versioned "github.com/heptio/ark/pkg/generated/clientset/versioned" - internalinterfaces "github.com/heptio/ark/pkg/generated/informers/externalversions/internalinterfaces" - v1 "github.com/heptio/ark/pkg/generated/listers/ark/v1" + ark_v1 "github.com/heptio/velero/pkg/apis/ark/v1" + versioned "github.com/heptio/velero/pkg/generated/clientset/versioned" + internalinterfaces "github.com/heptio/velero/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/heptio/velero/pkg/generated/listers/ark/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" diff --git a/pkg/generated/informers/externalversions/ark/v1/backupstoragelocation.go b/pkg/generated/informers/externalversions/ark/v1/backupstoragelocation.go index 2bb2f8f94e..d321f0b7d6 100644 --- a/pkg/generated/informers/externalversions/ark/v1/backupstoragelocation.go +++ b/pkg/generated/informers/externalversions/ark/v1/backupstoragelocation.go @@ -21,10 +21,10 @@ package v1 import ( time "time" - ark_v1 "github.com/heptio/ark/pkg/apis/ark/v1" - versioned "github.com/heptio/ark/pkg/generated/clientset/versioned" - internalinterfaces "github.com/heptio/ark/pkg/generated/informers/externalversions/internalinterfaces" - v1 "github.com/heptio/ark/pkg/generated/listers/ark/v1" + ark_v1 "github.com/heptio/velero/pkg/apis/ark/v1" + versioned "github.com/heptio/velero/pkg/generated/clientset/versioned" + internalinterfaces "github.com/heptio/velero/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/heptio/velero/pkg/generated/listers/ark/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" diff --git a/pkg/generated/informers/externalversions/ark/v1/deletebackuprequest.go b/pkg/generated/informers/externalversions/ark/v1/deletebackuprequest.go index bcca5d2464..acb8bb0f42 100644 --- a/pkg/generated/informers/externalversions/ark/v1/deletebackuprequest.go +++ b/pkg/generated/informers/externalversions/ark/v1/deletebackuprequest.go @@ -21,10 +21,10 @@ package v1 import ( time "time" - ark_v1 "github.com/heptio/ark/pkg/apis/ark/v1" - versioned "github.com/heptio/ark/pkg/generated/clientset/versioned" - internalinterfaces "github.com/heptio/ark/pkg/generated/informers/externalversions/internalinterfaces" - v1 "github.com/heptio/ark/pkg/generated/listers/ark/v1" + ark_v1 "github.com/heptio/velero/pkg/apis/ark/v1" + versioned "github.com/heptio/velero/pkg/generated/clientset/versioned" + internalinterfaces "github.com/heptio/velero/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/heptio/velero/pkg/generated/listers/ark/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" diff --git a/pkg/generated/informers/externalversions/ark/v1/downloadrequest.go b/pkg/generated/informers/externalversions/ark/v1/downloadrequest.go index 3f072ca8fb..02d7f1d23b 100644 --- a/pkg/generated/informers/externalversions/ark/v1/downloadrequest.go +++ 
b/pkg/generated/informers/externalversions/ark/v1/downloadrequest.go @@ -21,10 +21,10 @@ package v1 import ( time "time" - ark_v1 "github.com/heptio/ark/pkg/apis/ark/v1" - versioned "github.com/heptio/ark/pkg/generated/clientset/versioned" - internalinterfaces "github.com/heptio/ark/pkg/generated/informers/externalversions/internalinterfaces" - v1 "github.com/heptio/ark/pkg/generated/listers/ark/v1" + ark_v1 "github.com/heptio/velero/pkg/apis/ark/v1" + versioned "github.com/heptio/velero/pkg/generated/clientset/versioned" + internalinterfaces "github.com/heptio/velero/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/heptio/velero/pkg/generated/listers/ark/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" diff --git a/pkg/generated/informers/externalversions/ark/v1/interface.go b/pkg/generated/informers/externalversions/ark/v1/interface.go index 8c569d145f..3436d9bdd3 100644 --- a/pkg/generated/informers/externalversions/ark/v1/interface.go +++ b/pkg/generated/informers/externalversions/ark/v1/interface.go @@ -19,7 +19,7 @@ limitations under the License. package v1 import ( - internalinterfaces "github.com/heptio/ark/pkg/generated/informers/externalversions/internalinterfaces" + internalinterfaces "github.com/heptio/velero/pkg/generated/informers/externalversions/internalinterfaces" ) // Interface provides access to all the informers in this group version. diff --git a/pkg/generated/informers/externalversions/ark/v1/podvolumebackup.go b/pkg/generated/informers/externalversions/ark/v1/podvolumebackup.go index 83c3909d58..d3c0705d49 100644 --- a/pkg/generated/informers/externalversions/ark/v1/podvolumebackup.go +++ b/pkg/generated/informers/externalversions/ark/v1/podvolumebackup.go @@ -21,10 +21,10 @@ package v1 import ( time "time" - ark_v1 "github.com/heptio/ark/pkg/apis/ark/v1" - versioned "github.com/heptio/ark/pkg/generated/clientset/versioned" - internalinterfaces "github.com/heptio/ark/pkg/generated/informers/externalversions/internalinterfaces" - v1 "github.com/heptio/ark/pkg/generated/listers/ark/v1" + ark_v1 "github.com/heptio/velero/pkg/apis/ark/v1" + versioned "github.com/heptio/velero/pkg/generated/clientset/versioned" + internalinterfaces "github.com/heptio/velero/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/heptio/velero/pkg/generated/listers/ark/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" diff --git a/pkg/generated/informers/externalversions/ark/v1/podvolumerestore.go b/pkg/generated/informers/externalversions/ark/v1/podvolumerestore.go index d21149db00..fb554c6deb 100644 --- a/pkg/generated/informers/externalversions/ark/v1/podvolumerestore.go +++ b/pkg/generated/informers/externalversions/ark/v1/podvolumerestore.go @@ -21,10 +21,10 @@ package v1 import ( time "time" - ark_v1 "github.com/heptio/ark/pkg/apis/ark/v1" - versioned "github.com/heptio/ark/pkg/generated/clientset/versioned" - internalinterfaces "github.com/heptio/ark/pkg/generated/informers/externalversions/internalinterfaces" - v1 "github.com/heptio/ark/pkg/generated/listers/ark/v1" + ark_v1 "github.com/heptio/velero/pkg/apis/ark/v1" + versioned "github.com/heptio/velero/pkg/generated/clientset/versioned" + internalinterfaces "github.com/heptio/velero/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/heptio/velero/pkg/generated/listers/ark/v1" meta_v1 
"k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" diff --git a/pkg/generated/informers/externalversions/ark/v1/resticrepository.go b/pkg/generated/informers/externalversions/ark/v1/resticrepository.go index d47dee1586..684879105d 100644 --- a/pkg/generated/informers/externalversions/ark/v1/resticrepository.go +++ b/pkg/generated/informers/externalversions/ark/v1/resticrepository.go @@ -21,10 +21,10 @@ package v1 import ( time "time" - ark_v1 "github.com/heptio/ark/pkg/apis/ark/v1" - versioned "github.com/heptio/ark/pkg/generated/clientset/versioned" - internalinterfaces "github.com/heptio/ark/pkg/generated/informers/externalversions/internalinterfaces" - v1 "github.com/heptio/ark/pkg/generated/listers/ark/v1" + ark_v1 "github.com/heptio/velero/pkg/apis/ark/v1" + versioned "github.com/heptio/velero/pkg/generated/clientset/versioned" + internalinterfaces "github.com/heptio/velero/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/heptio/velero/pkg/generated/listers/ark/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" diff --git a/pkg/generated/informers/externalversions/ark/v1/restore.go b/pkg/generated/informers/externalversions/ark/v1/restore.go index 10072ebe76..27363180cc 100644 --- a/pkg/generated/informers/externalversions/ark/v1/restore.go +++ b/pkg/generated/informers/externalversions/ark/v1/restore.go @@ -21,10 +21,10 @@ package v1 import ( time "time" - ark_v1 "github.com/heptio/ark/pkg/apis/ark/v1" - versioned "github.com/heptio/ark/pkg/generated/clientset/versioned" - internalinterfaces "github.com/heptio/ark/pkg/generated/informers/externalversions/internalinterfaces" - v1 "github.com/heptio/ark/pkg/generated/listers/ark/v1" + ark_v1 "github.com/heptio/velero/pkg/apis/ark/v1" + versioned "github.com/heptio/velero/pkg/generated/clientset/versioned" + internalinterfaces "github.com/heptio/velero/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/heptio/velero/pkg/generated/listers/ark/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" diff --git a/pkg/generated/informers/externalversions/ark/v1/schedule.go b/pkg/generated/informers/externalversions/ark/v1/schedule.go index 11c7cb098f..edc6b49218 100644 --- a/pkg/generated/informers/externalversions/ark/v1/schedule.go +++ b/pkg/generated/informers/externalversions/ark/v1/schedule.go @@ -21,10 +21,10 @@ package v1 import ( time "time" - ark_v1 "github.com/heptio/ark/pkg/apis/ark/v1" - versioned "github.com/heptio/ark/pkg/generated/clientset/versioned" - internalinterfaces "github.com/heptio/ark/pkg/generated/informers/externalversions/internalinterfaces" - v1 "github.com/heptio/ark/pkg/generated/listers/ark/v1" + ark_v1 "github.com/heptio/velero/pkg/apis/ark/v1" + versioned "github.com/heptio/velero/pkg/generated/clientset/versioned" + internalinterfaces "github.com/heptio/velero/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/heptio/velero/pkg/generated/listers/ark/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" diff --git a/pkg/generated/informers/externalversions/ark/v1/serverstatusrequest.go b/pkg/generated/informers/externalversions/ark/v1/serverstatusrequest.go index 7e8f1529aa..025474a42b 100644 --- 
a/pkg/generated/informers/externalversions/ark/v1/serverstatusrequest.go +++ b/pkg/generated/informers/externalversions/ark/v1/serverstatusrequest.go @@ -21,10 +21,10 @@ package v1 import ( time "time" - ark_v1 "github.com/heptio/ark/pkg/apis/ark/v1" - versioned "github.com/heptio/ark/pkg/generated/clientset/versioned" - internalinterfaces "github.com/heptio/ark/pkg/generated/informers/externalversions/internalinterfaces" - v1 "github.com/heptio/ark/pkg/generated/listers/ark/v1" + ark_v1 "github.com/heptio/velero/pkg/apis/ark/v1" + versioned "github.com/heptio/velero/pkg/generated/clientset/versioned" + internalinterfaces "github.com/heptio/velero/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/heptio/velero/pkg/generated/listers/ark/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" diff --git a/pkg/generated/informers/externalversions/ark/v1/volumesnapshotlocation.go b/pkg/generated/informers/externalversions/ark/v1/volumesnapshotlocation.go index f9c5d7d08b..c800736a93 100644 --- a/pkg/generated/informers/externalversions/ark/v1/volumesnapshotlocation.go +++ b/pkg/generated/informers/externalversions/ark/v1/volumesnapshotlocation.go @@ -21,10 +21,10 @@ package v1 import ( time "time" - ark_v1 "github.com/heptio/ark/pkg/apis/ark/v1" - versioned "github.com/heptio/ark/pkg/generated/clientset/versioned" - internalinterfaces "github.com/heptio/ark/pkg/generated/informers/externalversions/internalinterfaces" - v1 "github.com/heptio/ark/pkg/generated/listers/ark/v1" + ark_v1 "github.com/heptio/velero/pkg/apis/ark/v1" + versioned "github.com/heptio/velero/pkg/generated/clientset/versioned" + internalinterfaces "github.com/heptio/velero/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/heptio/velero/pkg/generated/listers/ark/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" diff --git a/pkg/generated/informers/externalversions/factory.go b/pkg/generated/informers/externalversions/factory.go index 41af7bdcbd..597c499f30 100644 --- a/pkg/generated/informers/externalversions/factory.go +++ b/pkg/generated/informers/externalversions/factory.go @@ -23,9 +23,10 @@ import ( sync "sync" time "time" - versioned "github.com/heptio/ark/pkg/generated/clientset/versioned" - ark "github.com/heptio/ark/pkg/generated/informers/externalversions/ark" - internalinterfaces "github.com/heptio/ark/pkg/generated/informers/externalversions/internalinterfaces" + versioned "github.com/heptio/velero/pkg/generated/clientset/versioned" + ark "github.com/heptio/velero/pkg/generated/informers/externalversions/ark" + internalinterfaces "github.com/heptio/velero/pkg/generated/informers/externalversions/internalinterfaces" + velero "github.com/heptio/velero/pkg/generated/informers/externalversions/velero" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -173,8 +174,13 @@ type SharedInformerFactory interface { WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool Ark() ark.Interface + Velero() velero.Interface } func (f *sharedInformerFactory) Ark() ark.Interface { return ark.New(f, f.namespace, f.tweakListOptions) } + +func (f *sharedInformerFactory) Velero() velero.Interface { + return velero.New(f, f.namespace, f.tweakListOptions) +} diff --git a/pkg/generated/informers/externalversions/generic.go 
b/pkg/generated/informers/externalversions/generic.go index 349cc61e52..d4d836d7de 100644 --- a/pkg/generated/informers/externalversions/generic.go +++ b/pkg/generated/informers/externalversions/generic.go @@ -21,7 +21,8 @@ package externalversions import ( "fmt" - v1 "github.com/heptio/ark/pkg/apis/ark/v1" + v1 "github.com/heptio/velero/pkg/apis/ark/v1" + velero_v1 "github.com/heptio/velero/pkg/apis/velero/v1" schema "k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" ) @@ -76,6 +77,30 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case v1.SchemeGroupVersion.WithResource("volumesnapshotlocations"): return &genericInformer{resource: resource.GroupResource(), informer: f.Ark().V1().VolumeSnapshotLocations().Informer()}, nil + // Group=velero.io, Version=v1 + case velero_v1.SchemeGroupVersion.WithResource("backups"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Velero().V1().Backups().Informer()}, nil + case velero_v1.SchemeGroupVersion.WithResource("backupstoragelocations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Velero().V1().BackupStorageLocations().Informer()}, nil + case velero_v1.SchemeGroupVersion.WithResource("deletebackuprequests"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Velero().V1().DeleteBackupRequests().Informer()}, nil + case velero_v1.SchemeGroupVersion.WithResource("downloadrequests"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Velero().V1().DownloadRequests().Informer()}, nil + case velero_v1.SchemeGroupVersion.WithResource("podvolumebackups"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Velero().V1().PodVolumeBackups().Informer()}, nil + case velero_v1.SchemeGroupVersion.WithResource("podvolumerestores"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Velero().V1().PodVolumeRestores().Informer()}, nil + case velero_v1.SchemeGroupVersion.WithResource("resticrepositories"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Velero().V1().ResticRepositories().Informer()}, nil + case velero_v1.SchemeGroupVersion.WithResource("restores"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Velero().V1().Restores().Informer()}, nil + case velero_v1.SchemeGroupVersion.WithResource("schedules"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Velero().V1().Schedules().Informer()}, nil + case velero_v1.SchemeGroupVersion.WithResource("serverstatusrequests"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Velero().V1().ServerStatusRequests().Informer()}, nil + case velero_v1.SchemeGroupVersion.WithResource("volumesnapshotlocations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Velero().V1().VolumeSnapshotLocations().Informer()}, nil + } return nil, fmt.Errorf("no informer found for %v", resource) diff --git a/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go b/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go index 1d4dfc5339..6390642038 100644 --- a/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go +++ b/pkg/generated/informers/externalversions/internalinterfaces/factory_interfaces.go @@ -21,7 +21,7 @@ package internalinterfaces import ( time "time" - versioned "github.com/heptio/ark/pkg/generated/clientset/versioned" 
+ versioned "github.com/heptio/velero/pkg/generated/clientset/versioned" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" cache "k8s.io/client-go/tools/cache" diff --git a/pkg/generated/informers/externalversions/velero/interface.go b/pkg/generated/informers/externalversions/velero/interface.go new file mode 100644 index 0000000000..f887ef541f --- /dev/null +++ b/pkg/generated/informers/externalversions/velero/interface.go @@ -0,0 +1,46 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package velero + +import ( + internalinterfaces "github.com/heptio/velero/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/heptio/velero/pkg/generated/informers/externalversions/velero/v1" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/pkg/generated/informers/externalversions/velero/v1/backup.go b/pkg/generated/informers/externalversions/velero/v1/backup.go new file mode 100644 index 0000000000..380328265b --- /dev/null +++ b/pkg/generated/informers/externalversions/velero/v1/backup.go @@ -0,0 +1,89 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1 + +import ( + time "time" + + velero_v1 "github.com/heptio/velero/pkg/apis/velero/v1" + versioned "github.com/heptio/velero/pkg/generated/clientset/versioned" + internalinterfaces "github.com/heptio/velero/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/heptio/velero/pkg/generated/listers/velero/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// BackupInformer provides access to a shared informer and lister for +// Backups. +type BackupInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.BackupLister +} + +type backupInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewBackupInformer constructs a new informer for Backup type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewBackupInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredBackupInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredBackupInformer constructs a new informer for Backup type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredBackupInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().Backups(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().Backups(namespace).Watch(options) + }, + }, + &velero_v1.Backup{}, + resyncPeriod, + indexers, + ) +} + +func (f *backupInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredBackupInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *backupInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&velero_v1.Backup{}, f.defaultInformer) +} + +func (f *backupInformer) Lister() v1.BackupLister { + return v1.NewBackupLister(f.Informer().GetIndexer()) +} diff --git a/pkg/generated/informers/externalversions/velero/v1/backupstoragelocation.go b/pkg/generated/informers/externalversions/velero/v1/backupstoragelocation.go new file mode 100644 index 0000000000..e7529d3f89 --- /dev/null +++ b/pkg/generated/informers/externalversions/velero/v1/backupstoragelocation.go @@ -0,0 +1,89 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + velero_v1 "github.com/heptio/velero/pkg/apis/velero/v1" + versioned "github.com/heptio/velero/pkg/generated/clientset/versioned" + internalinterfaces "github.com/heptio/velero/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/heptio/velero/pkg/generated/listers/velero/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// BackupStorageLocationInformer provides access to a shared informer and lister for +// BackupStorageLocations. +type BackupStorageLocationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.BackupStorageLocationLister +} + +type backupStorageLocationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewBackupStorageLocationInformer constructs a new informer for BackupStorageLocation type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewBackupStorageLocationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredBackupStorageLocationInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredBackupStorageLocationInformer constructs a new informer for BackupStorageLocation type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredBackupStorageLocationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().BackupStorageLocations(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().BackupStorageLocations(namespace).Watch(options) + }, + }, + &velero_v1.BackupStorageLocation{}, + resyncPeriod, + indexers, + ) +} + +func (f *backupStorageLocationInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredBackupStorageLocationInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *backupStorageLocationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&velero_v1.BackupStorageLocation{}, f.defaultInformer) +} + +func (f *backupStorageLocationInformer) Lister() v1.BackupStorageLocationLister { + return v1.NewBackupStorageLocationLister(f.Informer().GetIndexer()) +} diff --git a/pkg/generated/informers/externalversions/velero/v1/deletebackuprequest.go b/pkg/generated/informers/externalversions/velero/v1/deletebackuprequest.go new file mode 100644 index 0000000000..15c47d061e --- /dev/null +++ b/pkg/generated/informers/externalversions/velero/v1/deletebackuprequest.go @@ -0,0 +1,89 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + velero_v1 "github.com/heptio/velero/pkg/apis/velero/v1" + versioned "github.com/heptio/velero/pkg/generated/clientset/versioned" + internalinterfaces "github.com/heptio/velero/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/heptio/velero/pkg/generated/listers/velero/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// DeleteBackupRequestInformer provides access to a shared informer and lister for +// DeleteBackupRequests. +type DeleteBackupRequestInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.DeleteBackupRequestLister +} + +type deleteBackupRequestInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewDeleteBackupRequestInformer constructs a new informer for DeleteBackupRequest type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. 
This reduces memory footprint and number of connections to the server. +func NewDeleteBackupRequestInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredDeleteBackupRequestInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredDeleteBackupRequestInformer constructs a new informer for DeleteBackupRequest type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredDeleteBackupRequestInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().DeleteBackupRequests(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().DeleteBackupRequests(namespace).Watch(options) + }, + }, + &velero_v1.DeleteBackupRequest{}, + resyncPeriod, + indexers, + ) +} + +func (f *deleteBackupRequestInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredDeleteBackupRequestInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *deleteBackupRequestInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&velero_v1.DeleteBackupRequest{}, f.defaultInformer) +} + +func (f *deleteBackupRequestInformer) Lister() v1.DeleteBackupRequestLister { + return v1.NewDeleteBackupRequestLister(f.Informer().GetIndexer()) +} diff --git a/pkg/generated/informers/externalversions/velero/v1/downloadrequest.go b/pkg/generated/informers/externalversions/velero/v1/downloadrequest.go new file mode 100644 index 0000000000..9777622a9d --- /dev/null +++ b/pkg/generated/informers/externalversions/velero/v1/downloadrequest.go @@ -0,0 +1,89 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1 + +import ( + time "time" + + velero_v1 "github.com/heptio/velero/pkg/apis/velero/v1" + versioned "github.com/heptio/velero/pkg/generated/clientset/versioned" + internalinterfaces "github.com/heptio/velero/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/heptio/velero/pkg/generated/listers/velero/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// DownloadRequestInformer provides access to a shared informer and lister for +// DownloadRequests. +type DownloadRequestInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.DownloadRequestLister +} + +type downloadRequestInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewDownloadRequestInformer constructs a new informer for DownloadRequest type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewDownloadRequestInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredDownloadRequestInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredDownloadRequestInformer constructs a new informer for DownloadRequest type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredDownloadRequestInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().DownloadRequests(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().DownloadRequests(namespace).Watch(options) + }, + }, + &velero_v1.DownloadRequest{}, + resyncPeriod, + indexers, + ) +} + +func (f *downloadRequestInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredDownloadRequestInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *downloadRequestInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&velero_v1.DownloadRequest{}, f.defaultInformer) +} + +func (f *downloadRequestInformer) Lister() v1.DownloadRequestLister { + return v1.NewDownloadRequestLister(f.Informer().GetIndexer()) +} diff --git a/pkg/generated/informers/externalversions/velero/v1/interface.go b/pkg/generated/informers/externalversions/velero/v1/interface.go new file mode 100644 index 0000000000..3436d9bdd3 --- /dev/null +++ b/pkg/generated/informers/externalversions/velero/v1/interface.go @@ -0,0 +1,115 @@ +/* +Copyright the Heptio Ark contributors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "github.com/heptio/velero/pkg/generated/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // Backups returns a BackupInformer. + Backups() BackupInformer + // BackupStorageLocations returns a BackupStorageLocationInformer. + BackupStorageLocations() BackupStorageLocationInformer + // DeleteBackupRequests returns a DeleteBackupRequestInformer. + DeleteBackupRequests() DeleteBackupRequestInformer + // DownloadRequests returns a DownloadRequestInformer. + DownloadRequests() DownloadRequestInformer + // PodVolumeBackups returns a PodVolumeBackupInformer. + PodVolumeBackups() PodVolumeBackupInformer + // PodVolumeRestores returns a PodVolumeRestoreInformer. + PodVolumeRestores() PodVolumeRestoreInformer + // ResticRepositories returns a ResticRepositoryInformer. + ResticRepositories() ResticRepositoryInformer + // Restores returns a RestoreInformer. + Restores() RestoreInformer + // Schedules returns a ScheduleInformer. + Schedules() ScheduleInformer + // ServerStatusRequests returns a ServerStatusRequestInformer. + ServerStatusRequests() ServerStatusRequestInformer + // VolumeSnapshotLocations returns a VolumeSnapshotLocationInformer. + VolumeSnapshotLocations() VolumeSnapshotLocationInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// Backups returns a BackupInformer. +func (v *version) Backups() BackupInformer { + return &backupInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// BackupStorageLocations returns a BackupStorageLocationInformer. +func (v *version) BackupStorageLocations() BackupStorageLocationInformer { + return &backupStorageLocationInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// DeleteBackupRequests returns a DeleteBackupRequestInformer. +func (v *version) DeleteBackupRequests() DeleteBackupRequestInformer { + return &deleteBackupRequestInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// DownloadRequests returns a DownloadRequestInformer. +func (v *version) DownloadRequests() DownloadRequestInformer { + return &downloadRequestInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// PodVolumeBackups returns a PodVolumeBackupInformer. 
+func (v *version) PodVolumeBackups() PodVolumeBackupInformer { + return &podVolumeBackupInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// PodVolumeRestores returns a PodVolumeRestoreInformer. +func (v *version) PodVolumeRestores() PodVolumeRestoreInformer { + return &podVolumeRestoreInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// ResticRepositories returns a ResticRepositoryInformer. +func (v *version) ResticRepositories() ResticRepositoryInformer { + return &resticRepositoryInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Restores returns a RestoreInformer. +func (v *version) Restores() RestoreInformer { + return &restoreInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Schedules returns a ScheduleInformer. +func (v *version) Schedules() ScheduleInformer { + return &scheduleInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// ServerStatusRequests returns a ServerStatusRequestInformer. +func (v *version) ServerStatusRequests() ServerStatusRequestInformer { + return &serverStatusRequestInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// VolumeSnapshotLocations returns a VolumeSnapshotLocationInformer. +func (v *version) VolumeSnapshotLocations() VolumeSnapshotLocationInformer { + return &volumeSnapshotLocationInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/pkg/generated/informers/externalversions/velero/v1/podvolumebackup.go b/pkg/generated/informers/externalversions/velero/v1/podvolumebackup.go new file mode 100644 index 0000000000..77d79e5dcc --- /dev/null +++ b/pkg/generated/informers/externalversions/velero/v1/podvolumebackup.go @@ -0,0 +1,89 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + velero_v1 "github.com/heptio/velero/pkg/apis/velero/v1" + versioned "github.com/heptio/velero/pkg/generated/clientset/versioned" + internalinterfaces "github.com/heptio/velero/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/heptio/velero/pkg/generated/listers/velero/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// PodVolumeBackupInformer provides access to a shared informer and lister for +// PodVolumeBackups. 
+type PodVolumeBackupInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.PodVolumeBackupLister +} + +type podVolumeBackupInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewPodVolumeBackupInformer constructs a new informer for PodVolumeBackup type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPodVolumeBackupInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPodVolumeBackupInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPodVolumeBackupInformer constructs a new informer for PodVolumeBackup type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredPodVolumeBackupInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().PodVolumeBackups(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().PodVolumeBackups(namespace).Watch(options) + }, + }, + &velero_v1.PodVolumeBackup{}, + resyncPeriod, + indexers, + ) +} + +func (f *podVolumeBackupInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPodVolumeBackupInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *podVolumeBackupInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&velero_v1.PodVolumeBackup{}, f.defaultInformer) +} + +func (f *podVolumeBackupInformer) Lister() v1.PodVolumeBackupLister { + return v1.NewPodVolumeBackupLister(f.Informer().GetIndexer()) +} diff --git a/pkg/generated/informers/externalversions/velero/v1/podvolumerestore.go b/pkg/generated/informers/externalversions/velero/v1/podvolumerestore.go new file mode 100644 index 0000000000..48b77852fd --- /dev/null +++ b/pkg/generated/informers/externalversions/velero/v1/podvolumerestore.go @@ -0,0 +1,89 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1 + +import ( + time "time" + + velero_v1 "github.com/heptio/velero/pkg/apis/velero/v1" + versioned "github.com/heptio/velero/pkg/generated/clientset/versioned" + internalinterfaces "github.com/heptio/velero/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/heptio/velero/pkg/generated/listers/velero/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// PodVolumeRestoreInformer provides access to a shared informer and lister for +// PodVolumeRestores. +type PodVolumeRestoreInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.PodVolumeRestoreLister +} + +type podVolumeRestoreInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewPodVolumeRestoreInformer constructs a new informer for PodVolumeRestore type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPodVolumeRestoreInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPodVolumeRestoreInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPodVolumeRestoreInformer constructs a new informer for PodVolumeRestore type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredPodVolumeRestoreInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().PodVolumeRestores(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().PodVolumeRestores(namespace).Watch(options) + }, + }, + &velero_v1.PodVolumeRestore{}, + resyncPeriod, + indexers, + ) +} + +func (f *podVolumeRestoreInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPodVolumeRestoreInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *podVolumeRestoreInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&velero_v1.PodVolumeRestore{}, f.defaultInformer) +} + +func (f *podVolumeRestoreInformer) Lister() v1.PodVolumeRestoreLister { + return v1.NewPodVolumeRestoreLister(f.Informer().GetIndexer()) +} diff --git a/pkg/generated/informers/externalversions/velero/v1/resticrepository.go b/pkg/generated/informers/externalversions/velero/v1/resticrepository.go new file mode 100644 index 0000000000..524cbd66d9 --- /dev/null +++ b/pkg/generated/informers/externalversions/velero/v1/resticrepository.go @@ -0,0 +1,89 @@ +/* +Copyright the Heptio Ark contributors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + velero_v1 "github.com/heptio/velero/pkg/apis/velero/v1" + versioned "github.com/heptio/velero/pkg/generated/clientset/versioned" + internalinterfaces "github.com/heptio/velero/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/heptio/velero/pkg/generated/listers/velero/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ResticRepositoryInformer provides access to a shared informer and lister for +// ResticRepositories. +type ResticRepositoryInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ResticRepositoryLister +} + +type resticRepositoryInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewResticRepositoryInformer constructs a new informer for ResticRepository type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewResticRepositoryInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredResticRepositoryInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredResticRepositoryInformer constructs a new informer for ResticRepository type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredResticRepositoryInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().ResticRepositories(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().ResticRepositories(namespace).Watch(options) + }, + }, + &velero_v1.ResticRepository{}, + resyncPeriod, + indexers, + ) +} + +func (f *resticRepositoryInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredResticRepositoryInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *resticRepositoryInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&velero_v1.ResticRepository{}, f.defaultInformer) +} + +func (f *resticRepositoryInformer) Lister() v1.ResticRepositoryLister { + return v1.NewResticRepositoryLister(f.Informer().GetIndexer()) +} diff --git a/pkg/generated/informers/externalversions/velero/v1/restore.go b/pkg/generated/informers/externalversions/velero/v1/restore.go new file mode 100644 index 0000000000..5f614a131d --- /dev/null +++ b/pkg/generated/informers/externalversions/velero/v1/restore.go @@ -0,0 +1,89 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + velero_v1 "github.com/heptio/velero/pkg/apis/velero/v1" + versioned "github.com/heptio/velero/pkg/generated/clientset/versioned" + internalinterfaces "github.com/heptio/velero/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/heptio/velero/pkg/generated/listers/velero/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// RestoreInformer provides access to a shared informer and lister for +// Restores. +type RestoreInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.RestoreLister +} + +type restoreInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewRestoreInformer constructs a new informer for Restore type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewRestoreInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRestoreInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredRestoreInformer constructs a new informer for Restore type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredRestoreInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().Restores(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().Restores(namespace).Watch(options) + }, + }, + &velero_v1.Restore{}, + resyncPeriod, + indexers, + ) +} + +func (f *restoreInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRestoreInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *restoreInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&velero_v1.Restore{}, f.defaultInformer) +} + +func (f *restoreInformer) Lister() v1.RestoreLister { + return v1.NewRestoreLister(f.Informer().GetIndexer()) +} diff --git a/pkg/generated/informers/externalversions/velero/v1/schedule.go b/pkg/generated/informers/externalversions/velero/v1/schedule.go new file mode 100644 index 0000000000..8f898586bc --- /dev/null +++ b/pkg/generated/informers/externalversions/velero/v1/schedule.go @@ -0,0 +1,89 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + velero_v1 "github.com/heptio/velero/pkg/apis/velero/v1" + versioned "github.com/heptio/velero/pkg/generated/clientset/versioned" + internalinterfaces "github.com/heptio/velero/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/heptio/velero/pkg/generated/listers/velero/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ScheduleInformer provides access to a shared informer and lister for +// Schedules. 
+type ScheduleInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ScheduleLister +} + +type scheduleInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewScheduleInformer constructs a new informer for Schedule type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewScheduleInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredScheduleInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredScheduleInformer constructs a new informer for Schedule type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredScheduleInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().Schedules(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().Schedules(namespace).Watch(options) + }, + }, + &velero_v1.Schedule{}, + resyncPeriod, + indexers, + ) +} + +func (f *scheduleInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredScheduleInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *scheduleInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&velero_v1.Schedule{}, f.defaultInformer) +} + +func (f *scheduleInformer) Lister() v1.ScheduleLister { + return v1.NewScheduleLister(f.Informer().GetIndexer()) +} diff --git a/pkg/generated/informers/externalversions/velero/v1/serverstatusrequest.go b/pkg/generated/informers/externalversions/velero/v1/serverstatusrequest.go new file mode 100644 index 0000000000..a4d7651108 --- /dev/null +++ b/pkg/generated/informers/externalversions/velero/v1/serverstatusrequest.go @@ -0,0 +1,89 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1 + +import ( + time "time" + + velero_v1 "github.com/heptio/velero/pkg/apis/velero/v1" + versioned "github.com/heptio/velero/pkg/generated/clientset/versioned" + internalinterfaces "github.com/heptio/velero/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/heptio/velero/pkg/generated/listers/velero/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// ServerStatusRequestInformer provides access to a shared informer and lister for +// ServerStatusRequests. +type ServerStatusRequestInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ServerStatusRequestLister +} + +type serverStatusRequestInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewServerStatusRequestInformer constructs a new informer for ServerStatusRequest type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewServerStatusRequestInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredServerStatusRequestInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredServerStatusRequestInformer constructs a new informer for ServerStatusRequest type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredServerStatusRequestInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().ServerStatusRequests(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().ServerStatusRequests(namespace).Watch(options) + }, + }, + &velero_v1.ServerStatusRequest{}, + resyncPeriod, + indexers, + ) +} + +func (f *serverStatusRequestInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredServerStatusRequestInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *serverStatusRequestInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&velero_v1.ServerStatusRequest{}, f.defaultInformer) +} + +func (f *serverStatusRequestInformer) Lister() v1.ServerStatusRequestLister { + return v1.NewServerStatusRequestLister(f.Informer().GetIndexer()) +} diff --git a/pkg/generated/informers/externalversions/velero/v1/volumesnapshotlocation.go b/pkg/generated/informers/externalversions/velero/v1/volumesnapshotlocation.go new file mode 100644 index 0000000000..842e467a17 --- /dev/null +++ b/pkg/generated/informers/externalversions/velero/v1/volumesnapshotlocation.go @@ -0,0 +1,89 @@ +/* +Copyright the Heptio Ark 
contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + velero_v1 "github.com/heptio/velero/pkg/apis/velero/v1" + versioned "github.com/heptio/velero/pkg/generated/clientset/versioned" + internalinterfaces "github.com/heptio/velero/pkg/generated/informers/externalversions/internalinterfaces" + v1 "github.com/heptio/velero/pkg/generated/listers/velero/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// VolumeSnapshotLocationInformer provides access to a shared informer and lister for +// VolumeSnapshotLocations. +type VolumeSnapshotLocationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.VolumeSnapshotLocationLister +} + +type volumeSnapshotLocationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewVolumeSnapshotLocationInformer constructs a new informer for VolumeSnapshotLocation type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewVolumeSnapshotLocationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredVolumeSnapshotLocationInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredVolumeSnapshotLocationInformer constructs a new informer for VolumeSnapshotLocation type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredVolumeSnapshotLocationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().VolumeSnapshotLocations(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.VeleroV1().VolumeSnapshotLocations(namespace).Watch(options) + }, + }, + &velero_v1.VolumeSnapshotLocation{}, + resyncPeriod, + indexers, + ) +} + +func (f *volumeSnapshotLocationInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredVolumeSnapshotLocationInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *volumeSnapshotLocationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&velero_v1.VolumeSnapshotLocation{}, f.defaultInformer) +} + +func (f *volumeSnapshotLocationInformer) Lister() v1.VolumeSnapshotLocationLister { + return v1.NewVolumeSnapshotLocationLister(f.Informer().GetIndexer()) +} diff --git a/pkg/generated/listers/ark/v1/backup.go b/pkg/generated/listers/ark/v1/backup.go index 7fc7ed7336..eb781200a5 100644 --- a/pkg/generated/listers/ark/v1/backup.go +++ b/pkg/generated/listers/ark/v1/backup.go @@ -19,7 +19,7 @@ limitations under the License. package v1 import ( - v1 "github.com/heptio/ark/pkg/apis/ark/v1" + v1 "github.com/heptio/velero/pkg/apis/ark/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" diff --git a/pkg/generated/listers/ark/v1/backupstoragelocation.go b/pkg/generated/listers/ark/v1/backupstoragelocation.go index 81cd509e54..cc322aa262 100644 --- a/pkg/generated/listers/ark/v1/backupstoragelocation.go +++ b/pkg/generated/listers/ark/v1/backupstoragelocation.go @@ -19,7 +19,7 @@ limitations under the License. package v1 import ( - v1 "github.com/heptio/ark/pkg/apis/ark/v1" + v1 "github.com/heptio/velero/pkg/apis/ark/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" diff --git a/pkg/generated/listers/ark/v1/deletebackuprequest.go b/pkg/generated/listers/ark/v1/deletebackuprequest.go index dfed02d929..1a2b6de45f 100644 --- a/pkg/generated/listers/ark/v1/deletebackuprequest.go +++ b/pkg/generated/listers/ark/v1/deletebackuprequest.go @@ -19,7 +19,7 @@ limitations under the License. package v1 import ( - v1 "github.com/heptio/ark/pkg/apis/ark/v1" + v1 "github.com/heptio/velero/pkg/apis/ark/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" diff --git a/pkg/generated/listers/ark/v1/downloadrequest.go b/pkg/generated/listers/ark/v1/downloadrequest.go index 6663fd3015..06564fe2d0 100644 --- a/pkg/generated/listers/ark/v1/downloadrequest.go +++ b/pkg/generated/listers/ark/v1/downloadrequest.go @@ -19,7 +19,7 @@ limitations under the License. 
package v1 import ( - v1 "github.com/heptio/ark/pkg/apis/ark/v1" + v1 "github.com/heptio/velero/pkg/apis/ark/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" diff --git a/pkg/generated/listers/ark/v1/podvolumebackup.go b/pkg/generated/listers/ark/v1/podvolumebackup.go index 75f52bbe03..e3264b955e 100644 --- a/pkg/generated/listers/ark/v1/podvolumebackup.go +++ b/pkg/generated/listers/ark/v1/podvolumebackup.go @@ -19,7 +19,7 @@ limitations under the License. package v1 import ( - v1 "github.com/heptio/ark/pkg/apis/ark/v1" + v1 "github.com/heptio/velero/pkg/apis/ark/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" diff --git a/pkg/generated/listers/ark/v1/podvolumerestore.go b/pkg/generated/listers/ark/v1/podvolumerestore.go index 952d6dee6f..319bc840e1 100644 --- a/pkg/generated/listers/ark/v1/podvolumerestore.go +++ b/pkg/generated/listers/ark/v1/podvolumerestore.go @@ -19,7 +19,7 @@ limitations under the License. package v1 import ( - v1 "github.com/heptio/ark/pkg/apis/ark/v1" + v1 "github.com/heptio/velero/pkg/apis/ark/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" diff --git a/pkg/generated/listers/ark/v1/resticrepository.go b/pkg/generated/listers/ark/v1/resticrepository.go index bed3330132..a933c6167d 100644 --- a/pkg/generated/listers/ark/v1/resticrepository.go +++ b/pkg/generated/listers/ark/v1/resticrepository.go @@ -19,7 +19,7 @@ limitations under the License. package v1 import ( - v1 "github.com/heptio/ark/pkg/apis/ark/v1" + v1 "github.com/heptio/velero/pkg/apis/ark/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" diff --git a/pkg/generated/listers/ark/v1/restore.go b/pkg/generated/listers/ark/v1/restore.go index be5e6be1be..bb1c0b7954 100644 --- a/pkg/generated/listers/ark/v1/restore.go +++ b/pkg/generated/listers/ark/v1/restore.go @@ -19,7 +19,7 @@ limitations under the License. package v1 import ( - v1 "github.com/heptio/ark/pkg/apis/ark/v1" + v1 "github.com/heptio/velero/pkg/apis/ark/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" diff --git a/pkg/generated/listers/ark/v1/schedule.go b/pkg/generated/listers/ark/v1/schedule.go index 25ab0ac45e..819f2b952b 100644 --- a/pkg/generated/listers/ark/v1/schedule.go +++ b/pkg/generated/listers/ark/v1/schedule.go @@ -19,7 +19,7 @@ limitations under the License. package v1 import ( - v1 "github.com/heptio/ark/pkg/apis/ark/v1" + v1 "github.com/heptio/velero/pkg/apis/ark/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" diff --git a/pkg/generated/listers/ark/v1/serverstatusrequest.go b/pkg/generated/listers/ark/v1/serverstatusrequest.go index ec2851acde..d4399249f7 100644 --- a/pkg/generated/listers/ark/v1/serverstatusrequest.go +++ b/pkg/generated/listers/ark/v1/serverstatusrequest.go @@ -19,7 +19,7 @@ limitations under the License. 
package v1 import ( - v1 "github.com/heptio/ark/pkg/apis/ark/v1" + v1 "github.com/heptio/velero/pkg/apis/ark/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" diff --git a/pkg/generated/listers/ark/v1/volumesnapshotlocation.go b/pkg/generated/listers/ark/v1/volumesnapshotlocation.go index e7b2227cf5..2f1cecacad 100644 --- a/pkg/generated/listers/ark/v1/volumesnapshotlocation.go +++ b/pkg/generated/listers/ark/v1/volumesnapshotlocation.go @@ -19,7 +19,7 @@ limitations under the License. package v1 import ( - v1 "github.com/heptio/ark/pkg/apis/ark/v1" + v1 "github.com/heptio/velero/pkg/apis/ark/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" diff --git a/pkg/generated/listers/velero/v1/backup.go b/pkg/generated/listers/velero/v1/backup.go new file mode 100644 index 0000000000..b4b623c14b --- /dev/null +++ b/pkg/generated/listers/velero/v1/backup.go @@ -0,0 +1,94 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// BackupLister helps list Backups. +type BackupLister interface { + // List lists all Backups in the indexer. + List(selector labels.Selector) (ret []*v1.Backup, err error) + // Backups returns an object that can list and get Backups. + Backups(namespace string) BackupNamespaceLister + BackupListerExpansion +} + +// backupLister implements the BackupLister interface. +type backupLister struct { + indexer cache.Indexer +} + +// NewBackupLister returns a new BackupLister. +func NewBackupLister(indexer cache.Indexer) BackupLister { + return &backupLister{indexer: indexer} +} + +// List lists all Backups in the indexer. +func (s *backupLister) List(selector labels.Selector) (ret []*v1.Backup, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Backup)) + }) + return ret, err +} + +// Backups returns an object that can list and get Backups. +func (s *backupLister) Backups(namespace string) BackupNamespaceLister { + return backupNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// BackupNamespaceLister helps list and get Backups. +type BackupNamespaceLister interface { + // List lists all Backups in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.Backup, err error) + // Get retrieves the Backup from the indexer for a given namespace and name. + Get(name string) (*v1.Backup, error) + BackupNamespaceListerExpansion +} + +// backupNamespaceLister implements the BackupNamespaceLister +// interface. +type backupNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Backups in the indexer for a given namespace. 
+func (s backupNamespaceLister) List(selector labels.Selector) (ret []*v1.Backup, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Backup)) + }) + return ret, err +} + +// Get retrieves the Backup from the indexer for a given namespace and name. +func (s backupNamespaceLister) Get(name string) (*v1.Backup, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("backup"), name) + } + return obj.(*v1.Backup), nil +} diff --git a/pkg/generated/listers/velero/v1/backupstoragelocation.go b/pkg/generated/listers/velero/v1/backupstoragelocation.go new file mode 100644 index 0000000000..9a727ee2be --- /dev/null +++ b/pkg/generated/listers/velero/v1/backupstoragelocation.go @@ -0,0 +1,94 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// BackupStorageLocationLister helps list BackupStorageLocations. +type BackupStorageLocationLister interface { + // List lists all BackupStorageLocations in the indexer. + List(selector labels.Selector) (ret []*v1.BackupStorageLocation, err error) + // BackupStorageLocations returns an object that can list and get BackupStorageLocations. + BackupStorageLocations(namespace string) BackupStorageLocationNamespaceLister + BackupStorageLocationListerExpansion +} + +// backupStorageLocationLister implements the BackupStorageLocationLister interface. +type backupStorageLocationLister struct { + indexer cache.Indexer +} + +// NewBackupStorageLocationLister returns a new BackupStorageLocationLister. +func NewBackupStorageLocationLister(indexer cache.Indexer) BackupStorageLocationLister { + return &backupStorageLocationLister{indexer: indexer} +} + +// List lists all BackupStorageLocations in the indexer. +func (s *backupStorageLocationLister) List(selector labels.Selector) (ret []*v1.BackupStorageLocation, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.BackupStorageLocation)) + }) + return ret, err +} + +// BackupStorageLocations returns an object that can list and get BackupStorageLocations. +func (s *backupStorageLocationLister) BackupStorageLocations(namespace string) BackupStorageLocationNamespaceLister { + return backupStorageLocationNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// BackupStorageLocationNamespaceLister helps list and get BackupStorageLocations. +type BackupStorageLocationNamespaceLister interface { + // List lists all BackupStorageLocations in the indexer for a given namespace. 
+ List(selector labels.Selector) (ret []*v1.BackupStorageLocation, err error) + // Get retrieves the BackupStorageLocation from the indexer for a given namespace and name. + Get(name string) (*v1.BackupStorageLocation, error) + BackupStorageLocationNamespaceListerExpansion +} + +// backupStorageLocationNamespaceLister implements the BackupStorageLocationNamespaceLister +// interface. +type backupStorageLocationNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all BackupStorageLocations in the indexer for a given namespace. +func (s backupStorageLocationNamespaceLister) List(selector labels.Selector) (ret []*v1.BackupStorageLocation, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.BackupStorageLocation)) + }) + return ret, err +} + +// Get retrieves the BackupStorageLocation from the indexer for a given namespace and name. +func (s backupStorageLocationNamespaceLister) Get(name string) (*v1.BackupStorageLocation, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("backupstoragelocation"), name) + } + return obj.(*v1.BackupStorageLocation), nil +} diff --git a/pkg/generated/listers/velero/v1/deletebackuprequest.go b/pkg/generated/listers/velero/v1/deletebackuprequest.go new file mode 100644 index 0000000000..48e4d52854 --- /dev/null +++ b/pkg/generated/listers/velero/v1/deletebackuprequest.go @@ -0,0 +1,94 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// DeleteBackupRequestLister helps list DeleteBackupRequests. +type DeleteBackupRequestLister interface { + // List lists all DeleteBackupRequests in the indexer. + List(selector labels.Selector) (ret []*v1.DeleteBackupRequest, err error) + // DeleteBackupRequests returns an object that can list and get DeleteBackupRequests. + DeleteBackupRequests(namespace string) DeleteBackupRequestNamespaceLister + DeleteBackupRequestListerExpansion +} + +// deleteBackupRequestLister implements the DeleteBackupRequestLister interface. +type deleteBackupRequestLister struct { + indexer cache.Indexer +} + +// NewDeleteBackupRequestLister returns a new DeleteBackupRequestLister. +func NewDeleteBackupRequestLister(indexer cache.Indexer) DeleteBackupRequestLister { + return &deleteBackupRequestLister{indexer: indexer} +} + +// List lists all DeleteBackupRequests in the indexer. 
+func (s *deleteBackupRequestLister) List(selector labels.Selector) (ret []*v1.DeleteBackupRequest, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.DeleteBackupRequest)) + }) + return ret, err +} + +// DeleteBackupRequests returns an object that can list and get DeleteBackupRequests. +func (s *deleteBackupRequestLister) DeleteBackupRequests(namespace string) DeleteBackupRequestNamespaceLister { + return deleteBackupRequestNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// DeleteBackupRequestNamespaceLister helps list and get DeleteBackupRequests. +type DeleteBackupRequestNamespaceLister interface { + // List lists all DeleteBackupRequests in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.DeleteBackupRequest, err error) + // Get retrieves the DeleteBackupRequest from the indexer for a given namespace and name. + Get(name string) (*v1.DeleteBackupRequest, error) + DeleteBackupRequestNamespaceListerExpansion +} + +// deleteBackupRequestNamespaceLister implements the DeleteBackupRequestNamespaceLister +// interface. +type deleteBackupRequestNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all DeleteBackupRequests in the indexer for a given namespace. +func (s deleteBackupRequestNamespaceLister) List(selector labels.Selector) (ret []*v1.DeleteBackupRequest, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.DeleteBackupRequest)) + }) + return ret, err +} + +// Get retrieves the DeleteBackupRequest from the indexer for a given namespace and name. +func (s deleteBackupRequestNamespaceLister) Get(name string) (*v1.DeleteBackupRequest, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("deletebackuprequest"), name) + } + return obj.(*v1.DeleteBackupRequest), nil +} diff --git a/pkg/generated/listers/velero/v1/downloadrequest.go b/pkg/generated/listers/velero/v1/downloadrequest.go new file mode 100644 index 0000000000..bc64dcb020 --- /dev/null +++ b/pkg/generated/listers/velero/v1/downloadrequest.go @@ -0,0 +1,94 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// DownloadRequestLister helps list DownloadRequests. +type DownloadRequestLister interface { + // List lists all DownloadRequests in the indexer. + List(selector labels.Selector) (ret []*v1.DownloadRequest, err error) + // DownloadRequests returns an object that can list and get DownloadRequests. 
+ DownloadRequests(namespace string) DownloadRequestNamespaceLister + DownloadRequestListerExpansion +} + +// downloadRequestLister implements the DownloadRequestLister interface. +type downloadRequestLister struct { + indexer cache.Indexer +} + +// NewDownloadRequestLister returns a new DownloadRequestLister. +func NewDownloadRequestLister(indexer cache.Indexer) DownloadRequestLister { + return &downloadRequestLister{indexer: indexer} +} + +// List lists all DownloadRequests in the indexer. +func (s *downloadRequestLister) List(selector labels.Selector) (ret []*v1.DownloadRequest, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.DownloadRequest)) + }) + return ret, err +} + +// DownloadRequests returns an object that can list and get DownloadRequests. +func (s *downloadRequestLister) DownloadRequests(namespace string) DownloadRequestNamespaceLister { + return downloadRequestNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// DownloadRequestNamespaceLister helps list and get DownloadRequests. +type DownloadRequestNamespaceLister interface { + // List lists all DownloadRequests in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.DownloadRequest, err error) + // Get retrieves the DownloadRequest from the indexer for a given namespace and name. + Get(name string) (*v1.DownloadRequest, error) + DownloadRequestNamespaceListerExpansion +} + +// downloadRequestNamespaceLister implements the DownloadRequestNamespaceLister +// interface. +type downloadRequestNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all DownloadRequests in the indexer for a given namespace. +func (s downloadRequestNamespaceLister) List(selector labels.Selector) (ret []*v1.DownloadRequest, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.DownloadRequest)) + }) + return ret, err +} + +// Get retrieves the DownloadRequest from the indexer for a given namespace and name. +func (s downloadRequestNamespaceLister) Get(name string) (*v1.DownloadRequest, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("downloadrequest"), name) + } + return obj.(*v1.DownloadRequest), nil +} diff --git a/pkg/generated/listers/velero/v1/expansion_generated.go b/pkg/generated/listers/velero/v1/expansion_generated.go new file mode 100644 index 0000000000..018228095e --- /dev/null +++ b/pkg/generated/listers/velero/v1/expansion_generated.go @@ -0,0 +1,107 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +// BackupListerExpansion allows custom methods to be added to +// BackupLister. +type BackupListerExpansion interface{} + +// BackupNamespaceListerExpansion allows custom methods to be added to +// BackupNamespaceLister. 
+type BackupNamespaceListerExpansion interface{} + +// BackupStorageLocationListerExpansion allows custom methods to be added to +// BackupStorageLocationLister. +type BackupStorageLocationListerExpansion interface{} + +// BackupStorageLocationNamespaceListerExpansion allows custom methods to be added to +// BackupStorageLocationNamespaceLister. +type BackupStorageLocationNamespaceListerExpansion interface{} + +// DeleteBackupRequestListerExpansion allows custom methods to be added to +// DeleteBackupRequestLister. +type DeleteBackupRequestListerExpansion interface{} + +// DeleteBackupRequestNamespaceListerExpansion allows custom methods to be added to +// DeleteBackupRequestNamespaceLister. +type DeleteBackupRequestNamespaceListerExpansion interface{} + +// DownloadRequestListerExpansion allows custom methods to be added to +// DownloadRequestLister. +type DownloadRequestListerExpansion interface{} + +// DownloadRequestNamespaceListerExpansion allows custom methods to be added to +// DownloadRequestNamespaceLister. +type DownloadRequestNamespaceListerExpansion interface{} + +// PodVolumeBackupListerExpansion allows custom methods to be added to +// PodVolumeBackupLister. +type PodVolumeBackupListerExpansion interface{} + +// PodVolumeBackupNamespaceListerExpansion allows custom methods to be added to +// PodVolumeBackupNamespaceLister. +type PodVolumeBackupNamespaceListerExpansion interface{} + +// PodVolumeRestoreListerExpansion allows custom methods to be added to +// PodVolumeRestoreLister. +type PodVolumeRestoreListerExpansion interface{} + +// PodVolumeRestoreNamespaceListerExpansion allows custom methods to be added to +// PodVolumeRestoreNamespaceLister. +type PodVolumeRestoreNamespaceListerExpansion interface{} + +// ResticRepositoryListerExpansion allows custom methods to be added to +// ResticRepositoryLister. +type ResticRepositoryListerExpansion interface{} + +// ResticRepositoryNamespaceListerExpansion allows custom methods to be added to +// ResticRepositoryNamespaceLister. +type ResticRepositoryNamespaceListerExpansion interface{} + +// RestoreListerExpansion allows custom methods to be added to +// RestoreLister. +type RestoreListerExpansion interface{} + +// RestoreNamespaceListerExpansion allows custom methods to be added to +// RestoreNamespaceLister. +type RestoreNamespaceListerExpansion interface{} + +// ScheduleListerExpansion allows custom methods to be added to +// ScheduleLister. +type ScheduleListerExpansion interface{} + +// ScheduleNamespaceListerExpansion allows custom methods to be added to +// ScheduleNamespaceLister. +type ScheduleNamespaceListerExpansion interface{} + +// ServerStatusRequestListerExpansion allows custom methods to be added to +// ServerStatusRequestLister. +type ServerStatusRequestListerExpansion interface{} + +// ServerStatusRequestNamespaceListerExpansion allows custom methods to be added to +// ServerStatusRequestNamespaceLister. +type ServerStatusRequestNamespaceListerExpansion interface{} + +// VolumeSnapshotLocationListerExpansion allows custom methods to be added to +// VolumeSnapshotLocationLister. +type VolumeSnapshotLocationListerExpansion interface{} + +// VolumeSnapshotLocationNamespaceListerExpansion allows custom methods to be added to +// VolumeSnapshotLocationNamespaceLister. 
+type VolumeSnapshotLocationNamespaceListerExpansion interface{} diff --git a/pkg/generated/listers/velero/v1/podvolumebackup.go b/pkg/generated/listers/velero/v1/podvolumebackup.go new file mode 100644 index 0000000000..8599016b3c --- /dev/null +++ b/pkg/generated/listers/velero/v1/podvolumebackup.go @@ -0,0 +1,94 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PodVolumeBackupLister helps list PodVolumeBackups. +type PodVolumeBackupLister interface { + // List lists all PodVolumeBackups in the indexer. + List(selector labels.Selector) (ret []*v1.PodVolumeBackup, err error) + // PodVolumeBackups returns an object that can list and get PodVolumeBackups. + PodVolumeBackups(namespace string) PodVolumeBackupNamespaceLister + PodVolumeBackupListerExpansion +} + +// podVolumeBackupLister implements the PodVolumeBackupLister interface. +type podVolumeBackupLister struct { + indexer cache.Indexer +} + +// NewPodVolumeBackupLister returns a new PodVolumeBackupLister. +func NewPodVolumeBackupLister(indexer cache.Indexer) PodVolumeBackupLister { + return &podVolumeBackupLister{indexer: indexer} +} + +// List lists all PodVolumeBackups in the indexer. +func (s *podVolumeBackupLister) List(selector labels.Selector) (ret []*v1.PodVolumeBackup, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PodVolumeBackup)) + }) + return ret, err +} + +// PodVolumeBackups returns an object that can list and get PodVolumeBackups. +func (s *podVolumeBackupLister) PodVolumeBackups(namespace string) PodVolumeBackupNamespaceLister { + return podVolumeBackupNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// PodVolumeBackupNamespaceLister helps list and get PodVolumeBackups. +type PodVolumeBackupNamespaceLister interface { + // List lists all PodVolumeBackups in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.PodVolumeBackup, err error) + // Get retrieves the PodVolumeBackup from the indexer for a given namespace and name. + Get(name string) (*v1.PodVolumeBackup, error) + PodVolumeBackupNamespaceListerExpansion +} + +// podVolumeBackupNamespaceLister implements the PodVolumeBackupNamespaceLister +// interface. +type podVolumeBackupNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all PodVolumeBackups in the indexer for a given namespace. +func (s podVolumeBackupNamespaceLister) List(selector labels.Selector) (ret []*v1.PodVolumeBackup, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PodVolumeBackup)) + }) + return ret, err +} + +// Get retrieves the PodVolumeBackup from the indexer for a given namespace and name. 
+func (s podVolumeBackupNamespaceLister) Get(name string) (*v1.PodVolumeBackup, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("podvolumebackup"), name) + } + return obj.(*v1.PodVolumeBackup), nil +} diff --git a/pkg/generated/listers/velero/v1/podvolumerestore.go b/pkg/generated/listers/velero/v1/podvolumerestore.go new file mode 100644 index 0000000000..f5ec053e8f --- /dev/null +++ b/pkg/generated/listers/velero/v1/podvolumerestore.go @@ -0,0 +1,94 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PodVolumeRestoreLister helps list PodVolumeRestores. +type PodVolumeRestoreLister interface { + // List lists all PodVolumeRestores in the indexer. + List(selector labels.Selector) (ret []*v1.PodVolumeRestore, err error) + // PodVolumeRestores returns an object that can list and get PodVolumeRestores. + PodVolumeRestores(namespace string) PodVolumeRestoreNamespaceLister + PodVolumeRestoreListerExpansion +} + +// podVolumeRestoreLister implements the PodVolumeRestoreLister interface. +type podVolumeRestoreLister struct { + indexer cache.Indexer +} + +// NewPodVolumeRestoreLister returns a new PodVolumeRestoreLister. +func NewPodVolumeRestoreLister(indexer cache.Indexer) PodVolumeRestoreLister { + return &podVolumeRestoreLister{indexer: indexer} +} + +// List lists all PodVolumeRestores in the indexer. +func (s *podVolumeRestoreLister) List(selector labels.Selector) (ret []*v1.PodVolumeRestore, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PodVolumeRestore)) + }) + return ret, err +} + +// PodVolumeRestores returns an object that can list and get PodVolumeRestores. +func (s *podVolumeRestoreLister) PodVolumeRestores(namespace string) PodVolumeRestoreNamespaceLister { + return podVolumeRestoreNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// PodVolumeRestoreNamespaceLister helps list and get PodVolumeRestores. +type PodVolumeRestoreNamespaceLister interface { + // List lists all PodVolumeRestores in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.PodVolumeRestore, err error) + // Get retrieves the PodVolumeRestore from the indexer for a given namespace and name. + Get(name string) (*v1.PodVolumeRestore, error) + PodVolumeRestoreNamespaceListerExpansion +} + +// podVolumeRestoreNamespaceLister implements the PodVolumeRestoreNamespaceLister +// interface. +type podVolumeRestoreNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all PodVolumeRestores in the indexer for a given namespace. 
+func (s podVolumeRestoreNamespaceLister) List(selector labels.Selector) (ret []*v1.PodVolumeRestore, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PodVolumeRestore)) + }) + return ret, err +} + +// Get retrieves the PodVolumeRestore from the indexer for a given namespace and name. +func (s podVolumeRestoreNamespaceLister) Get(name string) (*v1.PodVolumeRestore, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("podvolumerestore"), name) + } + return obj.(*v1.PodVolumeRestore), nil +} diff --git a/pkg/generated/listers/velero/v1/resticrepository.go b/pkg/generated/listers/velero/v1/resticrepository.go new file mode 100644 index 0000000000..926537c55a --- /dev/null +++ b/pkg/generated/listers/velero/v1/resticrepository.go @@ -0,0 +1,94 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ResticRepositoryLister helps list ResticRepositories. +type ResticRepositoryLister interface { + // List lists all ResticRepositories in the indexer. + List(selector labels.Selector) (ret []*v1.ResticRepository, err error) + // ResticRepositories returns an object that can list and get ResticRepositories. + ResticRepositories(namespace string) ResticRepositoryNamespaceLister + ResticRepositoryListerExpansion +} + +// resticRepositoryLister implements the ResticRepositoryLister interface. +type resticRepositoryLister struct { + indexer cache.Indexer +} + +// NewResticRepositoryLister returns a new ResticRepositoryLister. +func NewResticRepositoryLister(indexer cache.Indexer) ResticRepositoryLister { + return &resticRepositoryLister{indexer: indexer} +} + +// List lists all ResticRepositories in the indexer. +func (s *resticRepositoryLister) List(selector labels.Selector) (ret []*v1.ResticRepository, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ResticRepository)) + }) + return ret, err +} + +// ResticRepositories returns an object that can list and get ResticRepositories. +func (s *resticRepositoryLister) ResticRepositories(namespace string) ResticRepositoryNamespaceLister { + return resticRepositoryNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ResticRepositoryNamespaceLister helps list and get ResticRepositories. +type ResticRepositoryNamespaceLister interface { + // List lists all ResticRepositories in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.ResticRepository, err error) + // Get retrieves the ResticRepository from the indexer for a given namespace and name. 
+ Get(name string) (*v1.ResticRepository, error) + ResticRepositoryNamespaceListerExpansion +} + +// resticRepositoryNamespaceLister implements the ResticRepositoryNamespaceLister +// interface. +type resticRepositoryNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ResticRepositories in the indexer for a given namespace. +func (s resticRepositoryNamespaceLister) List(selector labels.Selector) (ret []*v1.ResticRepository, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ResticRepository)) + }) + return ret, err +} + +// Get retrieves the ResticRepository from the indexer for a given namespace and name. +func (s resticRepositoryNamespaceLister) Get(name string) (*v1.ResticRepository, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("resticrepository"), name) + } + return obj.(*v1.ResticRepository), nil +} diff --git a/pkg/generated/listers/velero/v1/restore.go b/pkg/generated/listers/velero/v1/restore.go new file mode 100644 index 0000000000..641576851a --- /dev/null +++ b/pkg/generated/listers/velero/v1/restore.go @@ -0,0 +1,94 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// RestoreLister helps list Restores. +type RestoreLister interface { + // List lists all Restores in the indexer. + List(selector labels.Selector) (ret []*v1.Restore, err error) + // Restores returns an object that can list and get Restores. + Restores(namespace string) RestoreNamespaceLister + RestoreListerExpansion +} + +// restoreLister implements the RestoreLister interface. +type restoreLister struct { + indexer cache.Indexer +} + +// NewRestoreLister returns a new RestoreLister. +func NewRestoreLister(indexer cache.Indexer) RestoreLister { + return &restoreLister{indexer: indexer} +} + +// List lists all Restores in the indexer. +func (s *restoreLister) List(selector labels.Selector) (ret []*v1.Restore, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Restore)) + }) + return ret, err +} + +// Restores returns an object that can list and get Restores. +func (s *restoreLister) Restores(namespace string) RestoreNamespaceLister { + return restoreNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// RestoreNamespaceLister helps list and get Restores. +type RestoreNamespaceLister interface { + // List lists all Restores in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.Restore, err error) + // Get retrieves the Restore from the indexer for a given namespace and name. 
+ Get(name string) (*v1.Restore, error) + RestoreNamespaceListerExpansion +} + +// restoreNamespaceLister implements the RestoreNamespaceLister +// interface. +type restoreNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Restores in the indexer for a given namespace. +func (s restoreNamespaceLister) List(selector labels.Selector) (ret []*v1.Restore, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Restore)) + }) + return ret, err +} + +// Get retrieves the Restore from the indexer for a given namespace and name. +func (s restoreNamespaceLister) Get(name string) (*v1.Restore, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("restore"), name) + } + return obj.(*v1.Restore), nil +} diff --git a/pkg/generated/listers/velero/v1/schedule.go b/pkg/generated/listers/velero/v1/schedule.go new file mode 100644 index 0000000000..ce2a5af27a --- /dev/null +++ b/pkg/generated/listers/velero/v1/schedule.go @@ -0,0 +1,94 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ScheduleLister helps list Schedules. +type ScheduleLister interface { + // List lists all Schedules in the indexer. + List(selector labels.Selector) (ret []*v1.Schedule, err error) + // Schedules returns an object that can list and get Schedules. + Schedules(namespace string) ScheduleNamespaceLister + ScheduleListerExpansion +} + +// scheduleLister implements the ScheduleLister interface. +type scheduleLister struct { + indexer cache.Indexer +} + +// NewScheduleLister returns a new ScheduleLister. +func NewScheduleLister(indexer cache.Indexer) ScheduleLister { + return &scheduleLister{indexer: indexer} +} + +// List lists all Schedules in the indexer. +func (s *scheduleLister) List(selector labels.Selector) (ret []*v1.Schedule, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Schedule)) + }) + return ret, err +} + +// Schedules returns an object that can list and get Schedules. +func (s *scheduleLister) Schedules(namespace string) ScheduleNamespaceLister { + return scheduleNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ScheduleNamespaceLister helps list and get Schedules. +type ScheduleNamespaceLister interface { + // List lists all Schedules in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.Schedule, err error) + // Get retrieves the Schedule from the indexer for a given namespace and name. 
+ Get(name string) (*v1.Schedule, error) + ScheduleNamespaceListerExpansion +} + +// scheduleNamespaceLister implements the ScheduleNamespaceLister +// interface. +type scheduleNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Schedules in the indexer for a given namespace. +func (s scheduleNamespaceLister) List(selector labels.Selector) (ret []*v1.Schedule, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Schedule)) + }) + return ret, err +} + +// Get retrieves the Schedule from the indexer for a given namespace and name. +func (s scheduleNamespaceLister) Get(name string) (*v1.Schedule, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("schedule"), name) + } + return obj.(*v1.Schedule), nil +} diff --git a/pkg/generated/listers/velero/v1/serverstatusrequest.go b/pkg/generated/listers/velero/v1/serverstatusrequest.go new file mode 100644 index 0000000000..e58f58367e --- /dev/null +++ b/pkg/generated/listers/velero/v1/serverstatusrequest.go @@ -0,0 +1,94 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ServerStatusRequestLister helps list ServerStatusRequests. +type ServerStatusRequestLister interface { + // List lists all ServerStatusRequests in the indexer. + List(selector labels.Selector) (ret []*v1.ServerStatusRequest, err error) + // ServerStatusRequests returns an object that can list and get ServerStatusRequests. + ServerStatusRequests(namespace string) ServerStatusRequestNamespaceLister + ServerStatusRequestListerExpansion +} + +// serverStatusRequestLister implements the ServerStatusRequestLister interface. +type serverStatusRequestLister struct { + indexer cache.Indexer +} + +// NewServerStatusRequestLister returns a new ServerStatusRequestLister. +func NewServerStatusRequestLister(indexer cache.Indexer) ServerStatusRequestLister { + return &serverStatusRequestLister{indexer: indexer} +} + +// List lists all ServerStatusRequests in the indexer. +func (s *serverStatusRequestLister) List(selector labels.Selector) (ret []*v1.ServerStatusRequest, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ServerStatusRequest)) + }) + return ret, err +} + +// ServerStatusRequests returns an object that can list and get ServerStatusRequests. +func (s *serverStatusRequestLister) ServerStatusRequests(namespace string) ServerStatusRequestNamespaceLister { + return serverStatusRequestNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ServerStatusRequestNamespaceLister helps list and get ServerStatusRequests. 
+type ServerStatusRequestNamespaceLister interface { + // List lists all ServerStatusRequests in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.ServerStatusRequest, err error) + // Get retrieves the ServerStatusRequest from the indexer for a given namespace and name. + Get(name string) (*v1.ServerStatusRequest, error) + ServerStatusRequestNamespaceListerExpansion +} + +// serverStatusRequestNamespaceLister implements the ServerStatusRequestNamespaceLister +// interface. +type serverStatusRequestNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ServerStatusRequests in the indexer for a given namespace. +func (s serverStatusRequestNamespaceLister) List(selector labels.Selector) (ret []*v1.ServerStatusRequest, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ServerStatusRequest)) + }) + return ret, err +} + +// Get retrieves the ServerStatusRequest from the indexer for a given namespace and name. +func (s serverStatusRequestNamespaceLister) Get(name string) (*v1.ServerStatusRequest, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("serverstatusrequest"), name) + } + return obj.(*v1.ServerStatusRequest), nil +} diff --git a/pkg/generated/listers/velero/v1/volumesnapshotlocation.go b/pkg/generated/listers/velero/v1/volumesnapshotlocation.go new file mode 100644 index 0000000000..912f10eb64 --- /dev/null +++ b/pkg/generated/listers/velero/v1/volumesnapshotlocation.go @@ -0,0 +1,94 @@ +/* +Copyright the Heptio Ark contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// VolumeSnapshotLocationLister helps list VolumeSnapshotLocations. +type VolumeSnapshotLocationLister interface { + // List lists all VolumeSnapshotLocations in the indexer. + List(selector labels.Selector) (ret []*v1.VolumeSnapshotLocation, err error) + // VolumeSnapshotLocations returns an object that can list and get VolumeSnapshotLocations. + VolumeSnapshotLocations(namespace string) VolumeSnapshotLocationNamespaceLister + VolumeSnapshotLocationListerExpansion +} + +// volumeSnapshotLocationLister implements the VolumeSnapshotLocationLister interface. +type volumeSnapshotLocationLister struct { + indexer cache.Indexer +} + +// NewVolumeSnapshotLocationLister returns a new VolumeSnapshotLocationLister. +func NewVolumeSnapshotLocationLister(indexer cache.Indexer) VolumeSnapshotLocationLister { + return &volumeSnapshotLocationLister{indexer: indexer} +} + +// List lists all VolumeSnapshotLocations in the indexer. 
+func (s *volumeSnapshotLocationLister) List(selector labels.Selector) (ret []*v1.VolumeSnapshotLocation, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.VolumeSnapshotLocation)) + }) + return ret, err +} + +// VolumeSnapshotLocations returns an object that can list and get VolumeSnapshotLocations. +func (s *volumeSnapshotLocationLister) VolumeSnapshotLocations(namespace string) VolumeSnapshotLocationNamespaceLister { + return volumeSnapshotLocationNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// VolumeSnapshotLocationNamespaceLister helps list and get VolumeSnapshotLocations. +type VolumeSnapshotLocationNamespaceLister interface { + // List lists all VolumeSnapshotLocations in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.VolumeSnapshotLocation, err error) + // Get retrieves the VolumeSnapshotLocation from the indexer for a given namespace and name. + Get(name string) (*v1.VolumeSnapshotLocation, error) + VolumeSnapshotLocationNamespaceListerExpansion +} + +// volumeSnapshotLocationNamespaceLister implements the VolumeSnapshotLocationNamespaceLister +// interface. +type volumeSnapshotLocationNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all VolumeSnapshotLocations in the indexer for a given namespace. +func (s volumeSnapshotLocationNamespaceLister) List(selector labels.Selector) (ret []*v1.VolumeSnapshotLocation, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.VolumeSnapshotLocation)) + }) + return ret, err +} + +// Get retrieves the VolumeSnapshotLocation from the indexer for a given namespace and name. +func (s volumeSnapshotLocationNamespaceLister) Get(name string) (*v1.VolumeSnapshotLocation, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("volumesnapshotlocation"), name) + } + return obj.(*v1.VolumeSnapshotLocation), nil +} diff --git a/pkg/install/crd.go b/pkg/install/crd.go index 1ed643ce24..3c77601f03 100644 --- a/pkg/install/crd.go +++ b/pkg/install/crd.go @@ -22,14 +22,14 @@ import ( apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - arkv1 "github.com/heptio/ark/pkg/apis/ark/v1" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" ) -// CRDs returns a list of the CRD types for all of the required Ark CRDs +// CRDs returns a list of the CRD types for all of the required Velero CRDs func CRDs() []*apiextv1beta1.CustomResourceDefinition { var crds []*apiextv1beta1.CustomResourceDefinition - for kind, typeInfo := range arkv1.CustomResources() { + for kind, typeInfo := range velerov1api.CustomResources() { crds = append(crds, crd(kind, typeInfo.PluralName)) } @@ -39,11 +39,11 @@ func CRDs() []*apiextv1beta1.CustomResourceDefinition { func crd(kind, plural string) *apiextv1beta1.CustomResourceDefinition { return &apiextv1beta1.CustomResourceDefinition{ ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s.%s", plural, arkv1.GroupName), + Name: fmt.Sprintf("%s.%s", plural, velerov1api.GroupName), }, Spec: apiextv1beta1.CustomResourceDefinitionSpec{ - Group: arkv1.GroupName, - Version: arkv1.SchemeGroupVersion.Version, + Group: velerov1api.GroupName, + Version: velerov1api.SchemeGroupVersion.Version, Scope: apiextv1beta1.NamespaceScoped, Names: 
apiextv1beta1.CustomResourceDefinitionNames{ Plural: plural, diff --git a/pkg/install/daemonset.go b/pkg/install/daemonset.go index 9b42a51e4f..539e62e773 100644 --- a/pkg/install/daemonset.go +++ b/pkg/install/daemonset.go @@ -26,7 +26,7 @@ import ( func DaemonSet(namespace string, opts ...podTemplateOption) *appsv1.DaemonSet { c := &podTemplateConfig{ - image: "gcr.io/heptio-images/ark:latest", + image: "gcr.io/heptio-images/velero:latest", } for _, opt := range opts { @@ -55,7 +55,7 @@ func DaemonSet(namespace string, opts ...podTemplateOption) *appsv1.DaemonSet { }, }, Spec: corev1.PodSpec{ - ServiceAccountName: "ark", + ServiceAccountName: "velero", Volumes: []corev1.Volume{ { Name: "host-pods", @@ -87,7 +87,7 @@ func DaemonSet(namespace string, opts ...podTemplateOption) *appsv1.DaemonSet { }, }, { - Name: "HEPTIO_ARK_NAMESPACE", + Name: "VELERO_NAMESPACE", ValueFrom: &corev1.EnvVarSource{ FieldRef: &corev1.ObjectFieldSelector{ FieldPath: "metadata.namespace", diff --git a/pkg/install/deployment.go b/pkg/install/deployment.go index fd0907cb4c..54eb27539d 100644 --- a/pkg/install/deployment.go +++ b/pkg/install/deployment.go @@ -62,7 +62,7 @@ func WithEnvFromSecretKey(varName, secret, key string) podTemplateOption { func Deployment(namespace string, opts ...podTemplateOption) *appsv1beta1.Deployment { c := &podTemplateConfig{ - image: "gcr.io/heptio-images/ark:latest", + image: "gcr.io/heptio-images/velero:latest", } for _, opt := range opts { @@ -77,7 +77,7 @@ func Deployment(namespace string, opts ...podTemplateOption) *appsv1beta1.Deploy } deployment := &appsv1beta1.Deployment{ - ObjectMeta: objectMeta(namespace, "ark"), + ObjectMeta: objectMeta(namespace, "velero"), Spec: appsv1beta1.DeploymentSpec{ Template: corev1.PodTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ @@ -86,15 +86,15 @@ func Deployment(namespace string, opts ...podTemplateOption) *appsv1beta1.Deploy }, Spec: corev1.PodSpec{ RestartPolicy: corev1.RestartPolicyAlways, - ServiceAccountName: "ark", + ServiceAccountName: "velero", Containers: []corev1.Container{ { - Name: "ark", + Name: "velero", Image: c.image, Ports: containerPorts(), ImagePullPolicy: pullPolicy, Command: []string{ - "/ark", + "/velero", }, Args: []string{ "server", diff --git a/pkg/install/resources.go b/pkg/install/resources.go index eb18a3d8b1..d9e56a1c35 100644 --- a/pkg/install/resources.go +++ b/pkg/install/resources.go @@ -24,7 +24,7 @@ import ( func labels() map[string]string { return map[string]string{ - "component": "ark", + "component": "velero", } } @@ -55,20 +55,20 @@ func objectMeta(namespace, name string) metav1.ObjectMeta { func ServiceAccount(namespace string) *corev1.ServiceAccount { return &corev1.ServiceAccount{ - ObjectMeta: objectMeta(namespace, "ark"), + ObjectMeta: objectMeta(namespace, "velero"), } } func ClusterRoleBinding(namespace string) *rbacv1beta1.ClusterRoleBinding { return &rbacv1beta1.ClusterRoleBinding{ ObjectMeta: metav1.ObjectMeta{ - Name: "ark", + Name: "velero", }, Subjects: []rbacv1beta1.Subject{ { Kind: "ServiceAccount", Namespace: namespace, - Name: "ark", + Name: "velero", }, }, RoleRef: rbacv1beta1.RoleRef{ diff --git a/pkg/metrics/metrics.go b/pkg/metrics/metrics.go index 516efc6ff2..b2b081faa9 100644 --- a/pkg/metrics/metrics.go +++ b/pkg/metrics/metrics.go @@ -22,13 +22,13 @@ import ( "github.com/prometheus/client_golang/prometheus" ) -// ServerMetrics contains Prometheus metrics for the Ark server. +// ServerMetrics contains Prometheus metrics for the Velero server. 
type ServerMetrics struct { metrics map[string]prometheus.Collector } const ( - metricNamespace = "ark" + metricNamespace = "velero" backupTarballSizeBytesGauge = "backup_tarball_size_bytes" backupAttemptTotal = "backup_attempt_total" backupSuccessTotal = "backup_success_total" @@ -46,6 +46,27 @@ const ( backupNameLabel = "backupName" secondsInMinute = 60.0 + + // ------------------------------------------------------------------- + // TODO: remove this code to remove the ark-namespaced metrics + legacyMetricNamespace = "ark" + + // These variables are used only as the keys into a map; the standard variable names above are what is rendered for Prometheus + // The Prometheus metric types themselves will take a namespace (ark or velero) and the metric name to construct the output that is scraped. + legacyBackupTarballSizeBytesGauge = "ark-backup_tarball_size_bytes" + legacyBackupAttemptTotal = "ark-backup_attempt_total" + legacyBackupSuccessTotal = "ark-backup_success_total" + legacyBackupFailureTotal = "ark-backup_failure_total" + legacyBackupDurationSeconds = "ark-backup_duration_seconds" + legacyRestoreAttemptTotal = "ark-restore_attempt_total" + legacyRestoreValidationFailedTotal = "ark-restore_validation_failed_total" + legacyRestoreSuccessTotal = "ark-restore_success_total" + legacyRestoreFailedTotal = "ark-restore_failed_total" + legacyVolumeSnapshotAttemptTotal = "ark-volume_snapshot_attempt_total" + legacyVolumeSnapshotSuccessTotal = "ark-volume_snapshot_success_total" + legacyVolumeSnapshotFailureTotal = "ark-volume_snapshot_failure_total" + // TODO: remove code above this comment + // ------------------------------------------------------------------- ) // NewServerMetrics returns new ServerMetrics @@ -159,6 +180,118 @@ func NewServerMetrics() *ServerMetrics { }, []string{scheduleLabel}, ), + // ------------------------------------------------------------------- + // Ark backwards compatibility code + // TODO: remove this code to drop the ark-namespaced metrics. 
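An editorial illustration of what the legacy block below accomplishes (not part of the generated change): every collector is declared a second time under the old "ark" namespace, so both metric series remain visible to Prometheus while users migrate. Assuming a hypothetical schedule named "daily" and a ServerMetrics value named m, a single server-side call such as

    m.RegisterBackupAttempt("daily")

is then rendered in the scraped output under both names, since Prometheus joins the namespace and metric name with an underscore:

    velero_backup_attempt_total
    ark_backup_attempt_total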
+ legacyBackupTarballSizeBytesGauge: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: legacyMetricNamespace, + Name: backupTarballSizeBytesGauge, + Help: "Size, in bytes, of a backup", + }, + []string{scheduleLabel}, + ), + legacyBackupAttemptTotal: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: legacyMetricNamespace, + Name: backupAttemptTotal, + Help: "Total number of attempted backups", + }, + []string{scheduleLabel}, + ), + legacyBackupSuccessTotal: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: legacyMetricNamespace, + Name: backupSuccessTotal, + Help: "Total number of successful backups", + }, + []string{scheduleLabel}, + ), + legacyBackupFailureTotal: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: legacyMetricNamespace, + Name: backupFailureTotal, + Help: "Total number of failed backups", + }, + []string{scheduleLabel}, + ), + legacyBackupDurationSeconds: prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Namespace: legacyMetricNamespace, + Name: backupDurationSeconds, + Help: "Time taken to complete backup, in seconds", + Buckets: []float64{ + toSeconds(1 * time.Minute), + toSeconds(5 * time.Minute), + toSeconds(10 * time.Minute), + toSeconds(15 * time.Minute), + toSeconds(30 * time.Minute), + toSeconds(1 * time.Hour), + toSeconds(2 * time.Hour), + toSeconds(3 * time.Hour), + toSeconds(4 * time.Hour), + }, + }, + []string{scheduleLabel}, + ), + legacyRestoreAttemptTotal: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: legacyMetricNamespace, + Name: restoreAttemptTotal, + Help: "Total number of attempted restores", + }, + []string{scheduleLabel}, + ), + legacyRestoreSuccessTotal: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: legacyMetricNamespace, + Name: restoreSuccessTotal, + Help: "Total number of successful restores", + }, + []string{scheduleLabel}, + ), + legacyRestoreFailedTotal: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: legacyMetricNamespace, + Name: restoreFailedTotal, + Help: "Total number of failed restores", + }, + []string{scheduleLabel}, + ), + legacyRestoreValidationFailedTotal: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: legacyMetricNamespace, + Name: restoreValidationFailedTotal, + Help: "Total number of failed restores failing validations", + }, + []string{scheduleLabel}, + ), + legacyVolumeSnapshotAttemptTotal: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: legacyMetricNamespace, + Name: volumeSnapshotAttemptTotal, + Help: "Total number of attempted volume snapshots", + }, + []string{scheduleLabel}, + ), + legacyVolumeSnapshotSuccessTotal: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: legacyMetricNamespace, + Name: volumeSnapshotSuccessTotal, + Help: "Total number of successful volume snapshots", + }, + []string{scheduleLabel}, + ), + legacyVolumeSnapshotFailureTotal: prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: legacyMetricNamespace, + Name: volumeSnapshotFailureTotal, + Help: "Total number of failed volume snapshots", + }, + []string{scheduleLabel}, + ), + // TODO: remove code above this comment + // ------------------------------------------------------------------- }, } } @@ -202,6 +335,41 @@ func (m *ServerMetrics) InitSchedule(scheduleName string) { if c, ok := m.metrics[volumeSnapshotFailureTotal].(*prometheus.CounterVec); ok { c.WithLabelValues(scheduleName).Set(0) } + + // ------------------------------------------------------------------- + // TODO: 
remove this code to remove the ark-namespaced metrics + if c, ok := m.metrics[legacyBackupAttemptTotal].(*prometheus.CounterVec); ok { + c.WithLabelValues(scheduleName).Set(0) + } + if c, ok := m.metrics[legacyBackupSuccessTotal].(*prometheus.CounterVec); ok { + c.WithLabelValues(scheduleName).Set(0) + } + if c, ok := m.metrics[legacyBackupFailureTotal].(*prometheus.CounterVec); ok { + c.WithLabelValues(scheduleName).Set(0) + } + if c, ok := m.metrics[legacyRestoreAttemptTotal].(*prometheus.CounterVec); ok { + c.WithLabelValues(scheduleName).Set(0) + } + if c, ok := m.metrics[legacyRestoreFailedTotal].(*prometheus.CounterVec); ok { + c.WithLabelValues(scheduleName).Set(0) + } + if c, ok := m.metrics[legacyRestoreSuccessTotal].(*prometheus.CounterVec); ok { + c.WithLabelValues(scheduleName).Set(0) + } + if c, ok := m.metrics[legacyRestoreValidationFailedTotal].(*prometheus.CounterVec); ok { + c.WithLabelValues(scheduleName).Set(0) + } + if c, ok := m.metrics[legacyVolumeSnapshotSuccessTotal].(*prometheus.CounterVec); ok { + c.WithLabelValues(scheduleName).Set(0) + } + if c, ok := m.metrics[legacyVolumeSnapshotAttemptTotal].(*prometheus.CounterVec); ok { + c.WithLabelValues(scheduleName).Set(0) + } + if c, ok := m.metrics[legacyVolumeSnapshotFailureTotal].(*prometheus.CounterVec); ok { + c.WithLabelValues(scheduleName).Set(0) + } + // TODO: remove code above this comment + // ------------------------------------------------------------------- } // SetBackupTarballSizeBytesGauge records the size, in bytes, of a backup tarball. @@ -209,6 +377,13 @@ func (m *ServerMetrics) SetBackupTarballSizeBytesGauge(backupSchedule string, si if g, ok := m.metrics[backupTarballSizeBytesGauge].(*prometheus.GaugeVec); ok { g.WithLabelValues(backupSchedule).Set(float64(size)) } + // ------------------------------------------------------------------- + // TODO: remove this code to remove the ark-namespaced metrics + if g, ok := m.metrics[legacyBackupTarballSizeBytesGauge].(*prometheus.GaugeVec); ok { + g.WithLabelValues(backupSchedule).Set(float64(size)) + } + // TODO: remove code above this comment + // ------------------------------------------------------------------- } // RegisterBackupAttempt records an backup attempt. @@ -216,6 +391,13 @@ func (m *ServerMetrics) RegisterBackupAttempt(backupSchedule string) { if c, ok := m.metrics[backupAttemptTotal].(*prometheus.CounterVec); ok { c.WithLabelValues(backupSchedule).Inc() } + // ------------------------------------------------------------------- + // TODO: remove this code to remove the ark-namespaced metrics + if c, ok := m.metrics[legacyBackupAttemptTotal].(*prometheus.CounterVec); ok { + c.WithLabelValues(backupSchedule).Inc() + } + // TODO: remove code above this comment + // ------------------------------------------------------------------- } // RegisterBackupSuccess records a successful completion of a backup. @@ -223,6 +405,13 @@ func (m *ServerMetrics) RegisterBackupSuccess(backupSchedule string) { if c, ok := m.metrics[backupSuccessTotal].(*prometheus.CounterVec); ok { c.WithLabelValues(backupSchedule).Inc() } + // ------------------------------------------------------------------- + // TODO: remove this code to remove the ark-namespaced metrics + if c, ok := m.metrics[legacyBackupSuccessTotal].(*prometheus.CounterVec); ok { + c.WithLabelValues(backupSchedule).Inc() + } + // TODO: remove code above this comment + // ------------------------------------------------------------------- } // RegisterBackupFailed records a failed backup. 
@@ -230,6 +419,13 @@ func (m *ServerMetrics) RegisterBackupFailed(backupSchedule string) { if c, ok := m.metrics[backupFailureTotal].(*prometheus.CounterVec); ok { c.WithLabelValues(backupSchedule).Inc() } + // ------------------------------------------------------------------- + // TODO: remove this code to remove the ark-namespaced metrics + if c, ok := m.metrics[legacyBackupFailureTotal].(*prometheus.CounterVec); ok { + c.WithLabelValues(backupSchedule).Inc() + } + // TODO: remove code above this comment + // ------------------------------------------------------------------- } // RegisterBackupDuration records the number of seconds a backup took. @@ -237,6 +433,13 @@ func (m *ServerMetrics) RegisterBackupDuration(backupSchedule string, seconds fl if c, ok := m.metrics[backupDurationSeconds].(*prometheus.HistogramVec); ok { c.WithLabelValues(backupSchedule).Observe(seconds) } + // ------------------------------------------------------------------- + // TODO: remove this code to remove the ark-namespaced metrics + if c, ok := m.metrics[legacyBackupDurationSeconds].(*prometheus.HistogramVec); ok { + c.WithLabelValues(backupSchedule).Observe(seconds) + } + // TODO: remove code above this comment + // ------------------------------------------------------------------- } // toSeconds translates a time.Duration value into a float64 @@ -250,6 +453,13 @@ func (m *ServerMetrics) RegisterRestoreAttempt(backupSchedule string) { if c, ok := m.metrics[restoreAttemptTotal].(*prometheus.CounterVec); ok { c.WithLabelValues(backupSchedule).Inc() } + // ------------------------------------------------------------------- + // TODO: remove this code to remove the ark-namespaced metrics + if c, ok := m.metrics[legacyRestoreAttemptTotal].(*prometheus.CounterVec); ok { + c.WithLabelValues(backupSchedule).Inc() + } + // TODO: remove code above this comment + // ------------------------------------------------------------------- } // RegisterRestoreSuccess records a successful (maybe partial) completion of a restore. @@ -257,6 +467,13 @@ func (m *ServerMetrics) RegisterRestoreSuccess(backupSchedule string) { if c, ok := m.metrics[restoreSuccessTotal].(*prometheus.CounterVec); ok { c.WithLabelValues(backupSchedule).Inc() } + // ------------------------------------------------------------------- + // TODO: remove this code to remove the ark-namespaced metrics + if c, ok := m.metrics[legacyRestoreSuccessTotal].(*prometheus.CounterVec); ok { + c.WithLabelValues(backupSchedule).Inc() + } + // TODO: remove code above this comment + // ------------------------------------------------------------------- } // RegisterRestoreFailed records a restore that failed. @@ -264,6 +481,13 @@ func (m *ServerMetrics) RegisterRestoreFailed(backupSchedule string) { if c, ok := m.metrics[restoreFailedTotal].(*prometheus.CounterVec); ok { c.WithLabelValues(backupSchedule).Inc() } + // ------------------------------------------------------------------- + // TODO: remove this code to remove the ark-namespaced metrics + if c, ok := m.metrics[legacyRestoreFailedTotal].(*prometheus.CounterVec); ok { + c.WithLabelValues(backupSchedule).Inc() + } + // TODO: remove code above this comment + // ------------------------------------------------------------------- } // RegisterRestoreValidationFailed records a restore that failed validation. 
@@ -271,6 +495,13 @@ func (m *ServerMetrics) RegisterRestoreValidationFailed(backupSchedule string) { if c, ok := m.metrics[restoreValidationFailedTotal].(*prometheus.CounterVec); ok { c.WithLabelValues(backupSchedule).Inc() } + // ------------------------------------------------------------------- + // TODO: remove this code to remove the ark-namespaced metrics + if c, ok := m.metrics[legacyRestoreValidationFailedTotal].(*prometheus.CounterVec); ok { + c.WithLabelValues(backupSchedule).Inc() + } + // TODO: remove code above this comment + // ------------------------------------------------------------------- } // RegisterVolumeSnapshotAttempts records an attempt to snapshot a volume. @@ -278,6 +509,13 @@ func (m *ServerMetrics) RegisterVolumeSnapshotAttempts(backupSchedule string, vo if c, ok := m.metrics[volumeSnapshotAttemptTotal].(*prometheus.CounterVec); ok { c.WithLabelValues(backupSchedule).Add(float64(volumeSnapshotsAttempted)) } + // ------------------------------------------------------------------- + // TODO: remove this code to remove the ark-namespaced metrics + if c, ok := m.metrics[legacyVolumeSnapshotAttemptTotal].(*prometheus.CounterVec); ok { + c.WithLabelValues(backupSchedule).Add(float64(volumeSnapshotsAttempted)) + } + // TODO: remove code above this comment + // ------------------------------------------------------------------- } // RegisterVolumeSnapshotSuccesses records a completed volume snapshot. @@ -285,6 +523,13 @@ func (m *ServerMetrics) RegisterVolumeSnapshotSuccesses(backupSchedule string, v if c, ok := m.metrics[volumeSnapshotSuccessTotal].(*prometheus.CounterVec); ok { c.WithLabelValues(backupSchedule).Add(float64(volumeSnapshotsCompleted)) } + // ------------------------------------------------------------------- + // TODO: remove this code to remove the ark-namespaced metrics + if c, ok := m.metrics[legacyVolumeSnapshotSuccessTotal].(*prometheus.CounterVec); ok { + c.WithLabelValues(backupSchedule).Add(float64(volumeSnapshotsCompleted)) + } + // TODO: remove code above this comment + // ------------------------------------------------------------------- } // RegisterVolumeSnapshotFailures records a failed volume snapshot. 
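Note on the metrics hunks above and the one that follows: during the rename each metric is updated twice, once under the new velero Prometheus namespace and once under the legacy ark namespace, so dashboards and alerts written against the old names keep receiving data until the TODO-marked blocks are removed. A minimal, self-contained sketch of that dual-namespace pattern using the Prometheus Go client; the literal namespace, metric, and label names below are illustrative assumptions, not the exact constants from this diff:

package main

import "github.com/prometheus/client_golang/prometheus"

var (
	// New-style metric, registered under the "velero" namespace.
	backupAttempts = prometheus.NewCounterVec(
		prometheus.CounterOpts{Namespace: "velero", Name: "backup_attempt_total", Help: "Total number of attempted backups"},
		[]string{"schedule"},
	)
	// Legacy twin under the "ark" namespace, kept only for the transition period.
	legacyBackupAttempts = prometheus.NewCounterVec(
		prometheus.CounterOpts{Namespace: "ark", Name: "backup_attempt_total", Help: "Total number of attempted backups"},
		[]string{"schedule"},
	)
)

// registerBackupAttempt increments both vectors so either metric name can be scraped.
func registerBackupAttempt(schedule string) {
	backupAttempts.WithLabelValues(schedule).Inc()
	legacyBackupAttempts.WithLabelValues(schedule).Inc()
}

func main() {
	prometheus.MustRegister(backupAttempts, legacyBackupAttempts)
	registerBackupAttempt("daily-backup")
}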
@@ -292,4 +537,11 @@ func (m *ServerMetrics) RegisterVolumeSnapshotFailures(backupSchedule string, vo if c, ok := m.metrics[volumeSnapshotFailureTotal].(*prometheus.CounterVec); ok { c.WithLabelValues(backupSchedule).Add(float64(volumeSnapshotsFailed)) } + // ------------------------------------------------------------------- + // TODO: remove this code to remove the ark-namespaced metrics + if c, ok := m.metrics[legacyVolumeSnapshotFailureTotal].(*prometheus.CounterVec); ok { + c.WithLabelValues(backupSchedule).Add(float64(volumeSnapshotsFailed)) + } + // TODO: remove code above this comment + // ------------------------------------------------------------------- } diff --git a/pkg/persistence/mocks/backup_store.go b/pkg/persistence/mocks/backup_store.go index 315bbe5190..ec30843b7d 100644 --- a/pkg/persistence/mocks/backup_store.go +++ b/pkg/persistence/mocks/backup_store.go @@ -4,8 +4,8 @@ package mocks import io "io" import mock "github.com/stretchr/testify/mock" -import v1 "github.com/heptio/ark/pkg/apis/ark/v1" -import volume "github.com/heptio/ark/pkg/volume" +import v1 "github.com/heptio/velero/pkg/apis/velero/v1" +import volume "github.com/heptio/velero/pkg/volume" // BackupStore is an autogenerated mock type for the BackupStore type type BackupStore struct { diff --git a/pkg/persistence/object_store.go b/pkg/persistence/object_store.go index aacd9618cf..4f375394ea 100644 --- a/pkg/persistence/object_store.go +++ b/pkg/persistence/object_store.go @@ -27,16 +27,18 @@ import ( "github.com/pkg/errors" "github.com/satori/uuid" "github.com/sirupsen/logrus" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" kerrors "k8s.io/apimachinery/pkg/util/errors" - arkv1api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/cloudprovider" - "github.com/heptio/ark/pkg/generated/clientset/versioned/scheme" - "github.com/heptio/ark/pkg/volume" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/cloudprovider" + "github.com/heptio/velero/pkg/generated/clientset/versioned/scheme" + "github.com/heptio/velero/pkg/volume" ) // BackupStore defines operations for creating, retrieving, and deleting -// Ark backup and restore data in/from a persistent backup store. +// Velero backup and restore data in/from a persistent backup store. type BackupStore interface { IsValid() error GetRevision() (string, error) @@ -44,7 +46,7 @@ type BackupStore interface { ListBackups() ([]string, error) PutBackup(name string, metadata, contents, log, volumeSnapshots io.Reader) error - GetBackupMetadata(name string) (*arkv1api.Backup, error) + GetBackupMetadata(name string) (*velerov1api.Backup, error) GetBackupVolumeSnapshots(name string) ([]*volume.Snapshot, error) GetBackupContents(name string) (io.ReadCloser, error) DeleteBackup(name string) error @@ -53,7 +55,7 @@ type BackupStore interface { PutRestoreResults(backup, restore string, results io.Reader) error DeleteRestore(name string) error - GetDownloadURL(target arkv1api.DownloadTarget) (string, error) + GetDownloadURL(target velerov1api.DownloadTarget) (string, error) } // DownloadURLTTL is how long a download URL is valid for. 
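The BackupStore interface above only swaps its arkv1api types for velerov1api ones; callers use it the same way. A small hedged sketch of requesting a signed download URL through the renamed types — the function and variable names are made up for illustration, while BackupStore, DownloadTarget, and DownloadTargetKindBackupLog appear elsewhere in this diff:

package example

import (
	velerov1api "github.com/heptio/velero/pkg/apis/velero/v1"
	"github.com/heptio/velero/pkg/persistence"
)

// signedBackupLogURL asks a backup store for a pre-signed URL to a backup's log file,
// using the velero.io/v1 DownloadTarget type introduced by the rename.
func signedBackupLogURL(store persistence.BackupStore, backupName string) (string, error) {
	target := velerov1api.DownloadTarget{
		Kind: velerov1api.DownloadTargetKindBackupLog,
		Name: backupName,
	}
	return store.GetDownloadURL(target)
}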
@@ -72,7 +74,7 @@ type ObjectStoreGetter interface { GetObjectStore(provider string) (cloudprovider.ObjectStore, error) } -func NewObjectBackupStore(location *arkv1api.BackupStorageLocation, objectStoreGetter ObjectStoreGetter, logger logrus.FieldLogger) (BackupStore, error) { +func NewObjectBackupStore(location *velerov1api.BackupStorageLocation, objectStoreGetter ObjectStoreGetter, logger logrus.FieldLogger) (BackupStore, error) { if location.Spec.ObjectStorage == nil { return nil, errors.New("backup storage location does not use object storage") } @@ -205,10 +207,51 @@ func (s *objectBackupStore) PutBackup(name string, metadata, contents, log, volu return nil } -func (s *objectBackupStore) GetBackupMetadata(name string) (*arkv1api.Backup, error) { - key := s.layout.getBackupMetadataKey(name) +func (s *objectBackupStore) GetBackupMetadata(name string) (*velerov1api.Backup, error) { + // We need to determine whether the backup metadata file is the legacy ark.heptio.com + // one (named ark-backup.json) or the current velero.io one (named velero-backup.json). + // Listing all objects in the backup directory and searching for them is easiest, because + // GetObject() calls don't immediately return an error if the object is not found due to + // a bug related to the plugin infrastructure, and even if they did, it's difficult to + // distinguish between a 404 and a different error. + // + // TODO once the plugin/error-related bugs are fixed, simplify this code by just calling + // GetObject() to check existence of the metadata files. + keys, err := s.objectStore.ListObjects(s.bucket, s.layout.getBackupDir(name)) + if err != nil { + return nil, errors.WithStack(err) + } - res, err := s.objectStore.GetObject(s.bucket, key) + var ( + metadataKey = s.layout.getBackupMetadataKey(name) + legacyMetadataKey = s.layout.getLegacyBackupMetadataKey(name) + legacyMetadata bool + ) + + var found bool + for _, key := range keys { + switch key { + case metadataKey: + found = true + case legacyMetadataKey: + found = true + legacyMetadata = true + } + + if found { + break + } + } + + if legacyMetadata { + s.logger.WithField("backup", name).Debug("Legacy metadata file found, converting") + return s.getAndConvertLegacyBackupMetadata(legacyMetadataKey) + } + + // TODO(1.0): remove everything in this method from here up, except the metadataKey + // declaration. 
+ + res, err := s.objectStore.GetObject(s.bucket, metadataKey) if err != nil { return nil, err } @@ -219,20 +262,62 @@ func (s *objectBackupStore) GetBackupMetadata(name string) (*arkv1api.Backup, er return nil, errors.WithStack(err) } - decoder := scheme.Codecs.UniversalDecoder(arkv1api.SchemeGroupVersion) + decoder := scheme.Codecs.UniversalDecoder(velerov1api.SchemeGroupVersion) obj, _, err := decoder.Decode(data, nil, nil) if err != nil { return nil, errors.WithStack(err) } - backupObj, ok := obj.(*arkv1api.Backup) + backupObj, ok := obj.(*velerov1api.Backup) if !ok { - return nil, errors.Errorf("unexpected type for %s/%s: %T", s.bucket, key, obj) + return nil, errors.Errorf("unexpected type for %s/%s: %T", s.bucket, metadataKey, obj) } return backupObj, nil } +// TODO(1.0): remove +func (s *objectBackupStore) getAndConvertLegacyBackupMetadata(key string) (*velerov1api.Backup, error) { + obj, err := s.objectStore.GetObject(s.bucket, key) + if err != nil { + return nil, err + } + + data, err := ioutil.ReadAll(obj) + if err != nil { + return nil, errors.WithStack(err) + } + + res := new(unstructured.Unstructured) + if err := json.Unmarshal(data, &res); err != nil { + return nil, errors.WithStack(err) + } + + res.SetAPIVersion(velerov1api.SchemeGroupVersion.String()) + res.SetLabels(convertMapKeys(res.GetLabels(), "ark.heptio.com", "velero.io")) + res.SetLabels(convertMapKeys(res.GetLabels(), "ark-schedule", velerov1api.ScheduleNameLabel)) + res.SetAnnotations(convertMapKeys(res.GetAnnotations(), "ark.heptio.com", "velero.io")) + + backup := new(velerov1api.Backup) + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(res.Object, backup); err != nil { + return nil, errors.WithStack(err) + } + + return backup, nil +} + +// TODO(1.0): remove +func convertMapKeys(m map[string]string, find, replace string) map[string]string { + for k, v := range m { + if updatedKey := strings.Replace(k, find, replace, -1); updatedKey != k { + m[updatedKey] = v + delete(m, k) + } + } + + return m +} + func keyExists(objectStore cloudprovider.ObjectStore, bucket, prefix, key string) (bool, error) { keys, err := objectStore.ListObjects(bucket, prefix) if err != nil { @@ -342,17 +427,17 @@ func (s *objectBackupStore) PutRestoreResults(backup string, restore string, res return s.objectStore.PutObject(s.bucket, s.layout.getRestoreResultsKey(restore), results) } -func (s *objectBackupStore) GetDownloadURL(target arkv1api.DownloadTarget) (string, error) { +func (s *objectBackupStore) GetDownloadURL(target velerov1api.DownloadTarget) (string, error) { switch target.Kind { - case arkv1api.DownloadTargetKindBackupContents: + case velerov1api.DownloadTargetKindBackupContents: return s.objectStore.CreateSignedURL(s.bucket, s.layout.getBackupContentsKey(target.Name), DownloadURLTTL) - case arkv1api.DownloadTargetKindBackupLog: + case velerov1api.DownloadTargetKindBackupLog: return s.objectStore.CreateSignedURL(s.bucket, s.layout.getBackupLogKey(target.Name), DownloadURLTTL) - case arkv1api.DownloadTargetKindBackupVolumeSnapshots: + case velerov1api.DownloadTargetKindBackupVolumeSnapshots: return s.objectStore.CreateSignedURL(s.bucket, s.layout.getBackupVolumeSnapshotsKey(target.Name), DownloadURLTTL) - case arkv1api.DownloadTargetKindRestoreLog: + case velerov1api.DownloadTargetKindRestoreLog: return s.objectStore.CreateSignedURL(s.bucket, s.layout.getRestoreLogKey(target.Name), DownloadURLTTL) - case arkv1api.DownloadTargetKindRestoreResults: + case velerov1api.DownloadTargetKindRestoreResults: return 
s.objectStore.CreateSignedURL(s.bucket, s.layout.getRestoreResultsKey(target.Name), DownloadURLTTL) default: return "", errors.Errorf("unsupported download target kind %q", target.Kind) diff --git a/pkg/persistence/object_store_layout.go b/pkg/persistence/object_store_layout.go index 6d95d7c29a..2661f2de20 100644 --- a/pkg/persistence/object_store_layout.go +++ b/pkg/persistence/object_store_layout.go @@ -22,7 +22,7 @@ import ( "strings" ) -// ObjectStoreLayout defines how Ark's persisted files map to +// ObjectStoreLayout defines how Velero's persisted files map to // keys in an object storage bucket. type ObjectStoreLayout struct { rootPrefix string @@ -72,6 +72,11 @@ func (l *ObjectStoreLayout) getRestoreDir(restore string) string { } func (l *ObjectStoreLayout) getBackupMetadataKey(backup string) string { + return path.Join(l.subdirs["backups"], backup, "velero-backup.json") +} + +// TODO(1.0): remove +func (l *ObjectStoreLayout) getLegacyBackupMetadataKey(backup string) string { return path.Join(l.subdirs["backups"], backup, "ark-backup.json") } diff --git a/pkg/persistence/object_store_test.go b/pkg/persistence/object_store_test.go index 6d4bd0d3cf..0f700ed569 100644 --- a/pkg/persistence/object_store_test.go +++ b/pkg/persistence/object_store_test.go @@ -34,12 +34,13 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/cloudprovider" - cloudprovidermocks "github.com/heptio/ark/pkg/cloudprovider/mocks" - "github.com/heptio/ark/pkg/util/encode" - arktest "github.com/heptio/ark/pkg/util/test" - "github.com/heptio/ark/pkg/volume" + arkv1api "github.com/heptio/velero/pkg/apis/ark/v1" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/cloudprovider" + cloudprovidermocks "github.com/heptio/velero/pkg/cloudprovider/mocks" + "github.com/heptio/velero/pkg/util/encode" + velerotest "github.com/heptio/velero/pkg/util/test" + "github.com/heptio/velero/pkg/volume" ) type objectBackupStoreTestHarness struct { @@ -58,7 +59,7 @@ func newObjectBackupStoreTestHarness(bucket, prefix string) *objectBackupStoreTe objectStore: objectStore, bucket: bucket, layout: NewObjectStoreLayout(prefix), - logger: arktest.NewLogger(), + logger: velerotest.NewLogger(), }, objectStore: objectStore, bucket: bucket, @@ -85,8 +86,8 @@ func TestIsValid(t *testing.T) { { name: "backup store with no prefix and only unsupported directories is invalid", storageData: map[string][]byte{ - "backup-1/ark-backup.json": {}, - "backup-2/ark-backup.json": {}, + "backup-1/velero-backup.json": {}, + "backup-2/velero-backup.json": {}, }, expectErr: true, }, @@ -94,18 +95,18 @@ func TestIsValid(t *testing.T) { name: "backup store with a prefix and only unsupported directories is invalid", prefix: "backups", storageData: map[string][]byte{ - "backups/backup-1/ark-backup.json": {}, - "backups/backup-2/ark-backup.json": {}, + "backups/backup-1/velero-backup.json": {}, + "backups/backup-2/velero-backup.json": {}, }, expectErr: true, }, { name: "backup store with no prefix and both supported and unsupported directories is invalid", storageData: map[string][]byte{ - "backups/backup-1/ark-backup.json": {}, - "backups/backup-2/ark-backup.json": {}, - "restores/restore-1/foo": {}, - "unsupported-dir/foo": {}, + "backups/backup-1/velero-backup.json": {}, + "backups/backup-2/velero-backup.json": {}, + "restores/restore-1/foo": {}, + "unsupported-dir/foo": {}, }, expectErr: true, }, @@ -113,19 
+114,19 @@ func TestIsValid(t *testing.T) { name: "backup store with a prefix and both supported and unsupported directories is invalid", prefix: "cluster-1", storageData: map[string][]byte{ - "cluster-1/backups/backup-1/ark-backup.json": {}, - "cluster-1/backups/backup-2/ark-backup.json": {}, - "cluster-1/restores/restore-1/foo": {}, - "cluster-1/unsupported-dir/foo": {}, + "cluster-1/backups/backup-1/velero-backup.json": {}, + "cluster-1/backups/backup-2/velero-backup.json": {}, + "cluster-1/restores/restore-1/foo": {}, + "cluster-1/unsupported-dir/foo": {}, }, expectErr: true, }, { name: "backup store with no prefix and only supported directories is valid", storageData: map[string][]byte{ - "backups/backup-1/ark-backup.json": {}, - "backups/backup-2/ark-backup.json": {}, - "restores/restore-1/foo": {}, + "backups/backup-1/velero-backup.json": {}, + "backups/backup-2/velero-backup.json": {}, + "restores/restore-1/foo": {}, }, expectErr: false, }, @@ -133,9 +134,9 @@ func TestIsValid(t *testing.T) { name: "backup store with a prefix and only supported directories is valid", prefix: "cluster-1", storageData: map[string][]byte{ - "cluster-1/backups/backup-1/ark-backup.json": {}, - "cluster-1/backups/backup-2/ark-backup.json": {}, - "cluster-1/restores/restore-1/foo": {}, + "cluster-1/backups/backup-1/velero-backup.json": {}, + "cluster-1/backups/backup-2/velero-backup.json": {}, + "cluster-1/restores/restore-1/foo": {}, }, expectErr: false, }, @@ -170,17 +171,17 @@ func TestListBackups(t *testing.T) { { name: "normal case", storageData: map[string][]byte{ - "backups/backup-1/ark-backup.json": encodeToBytes(&api.Backup{ObjectMeta: metav1.ObjectMeta{Name: "backup-1"}}), - "backups/backup-2/ark-backup.json": encodeToBytes(&api.Backup{ObjectMeta: metav1.ObjectMeta{Name: "backup-2"}}), + "backups/backup-1/velero-backup.json": encodeToBytes(&velerov1api.Backup{ObjectMeta: metav1.ObjectMeta{Name: "backup-1"}}), + "backups/backup-2/velero-backup.json": encodeToBytes(&velerov1api.Backup{ObjectMeta: metav1.ObjectMeta{Name: "backup-2"}}), }, expectedRes: []string{"backup-1", "backup-2"}, }, { name: "normal case with backup store prefix", - prefix: "ark-backups/", + prefix: "velero-backups/", storageData: map[string][]byte{ - "ark-backups/backups/backup-1/ark-backup.json": encodeToBytes(&api.Backup{ObjectMeta: metav1.ObjectMeta{Name: "backup-1"}}), - "ark-backups/backups/backup-2/ark-backup.json": encodeToBytes(&api.Backup{ObjectMeta: metav1.ObjectMeta{Name: "backup-2"}}), + "velero-backups/backups/backup-1/velero-backup.json": encodeToBytes(&velerov1api.Backup{ObjectMeta: metav1.ObjectMeta{Name: "backup-1"}}), + "velero-backups/backups/backup-2/velero-backup.json": encodeToBytes(&velerov1api.Backup{ObjectMeta: metav1.ObjectMeta{Name: "backup-2"}}), }, expectedRes: []string{"backup-1", "backup-2"}, }, @@ -196,7 +197,7 @@ func TestListBackups(t *testing.T) { res, err := harness.ListBackups() - arktest.AssertErrorMatches(t, tc.expectedErr, err) + velerotest.AssertErrorMatches(t, tc.expectedErr, err) sort.Strings(tc.expectedRes) sort.Strings(res) @@ -225,7 +226,7 @@ func TestPutBackup(t *testing.T) { snapshots: newStringReadSeeker("snapshots"), expectedErr: "", expectedKeys: []string{ - "backups/backup-1/ark-backup.json", + "backups/backup-1/velero-backup.json", "backups/backup-1/backup-1.tar.gz", "backups/backup-1/backup-1-logs.gz", "backups/backup-1/backup-1-volumesnapshots.json.gz", @@ -241,7 +242,7 @@ func TestPutBackup(t *testing.T) { snapshots: newStringReadSeeker("snapshots"), expectedErr: "", 
expectedKeys: []string{ - "prefix-1/backups/backup-1/ark-backup.json", + "prefix-1/backups/backup-1/velero-backup.json", "prefix-1/backups/backup-1/backup-1.tar.gz", "prefix-1/backups/backup-1/backup-1-logs.gz", "prefix-1/backups/backup-1/backup-1-volumesnapshots.json.gz", @@ -274,7 +275,7 @@ func TestPutBackup(t *testing.T) { snapshots: newStringReadSeeker("snapshots"), expectedErr: "", expectedKeys: []string{ - "backups/backup-1/ark-backup.json", + "backups/backup-1/velero-backup.json", "backups/backup-1/backup-1.tar.gz", "backups/backup-1/backup-1-volumesnapshots.json.gz", "metadata/revision", @@ -297,7 +298,7 @@ func TestPutBackup(t *testing.T) { err := harness.PutBackup("backup-1", tc.metadata, tc.contents, tc.log, tc.snapshots) - arktest.AssertErrorMatches(t, tc.expectedErr, err) + velerotest.AssertErrorMatches(t, tc.expectedErr, err) assert.Len(t, harness.objectStore.Data[harness.bucket], len(tc.expectedKeys)) for _, key := range tc.expectedKeys { assert.Contains(t, harness.objectStore.Data[harness.bucket], key) @@ -306,11 +307,159 @@ func TestPutBackup(t *testing.T) { } } +func TestGetBackupMetadata(t *testing.T) { + tests := []struct { + name string + backupName string + key string + obj metav1.Object + wantErr error + }{ + { + name: "legacy metadata file returns correctly", + backupName: "foo", + key: "backups/foo/ark-backup.json", + obj: &arkv1api.Backup{ + TypeMeta: metav1.TypeMeta{ + Kind: "Backup", + APIVersion: arkv1api.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: arkv1api.DefaultNamespace, + Name: "foo", + }, + }, + }, + { + name: "current metadata file returns correctly", + backupName: "foo", + key: "backups/foo/velero-backup.json", + obj: &velerov1api.Backup{ + TypeMeta: metav1.TypeMeta{ + Kind: "Backup", + APIVersion: velerov1api.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: velerov1api.DefaultNamespace, + Name: "foo", + }, + }, + }, + { + name: "no metadata file returns an error", + backupName: "foo", + wantErr: errors.New("key not found"), + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + harness := newObjectBackupStoreTestHarness("test-bucket", "") + + if tc.obj != nil { + jsonBytes, err := json.Marshal(tc.obj) + require.NoError(t, err) + + require.NoError(t, harness.objectStore.PutObject(harness.bucket, tc.key, bytes.NewReader(jsonBytes))) + } + + res, err := harness.GetBackupMetadata(tc.backupName) + if tc.wantErr != nil { + assert.Equal(t, tc.wantErr, err) + } else { + require.NoError(t, err) + + assert.Equal(t, tc.obj.GetNamespace(), res.Namespace) + assert.Equal(t, tc.obj.GetName(), res.Name) + } + }) + } +} + +func TestGetAndConvertLegacyBackupMetadata(t *testing.T) { + tests := []struct { + name string + key string + obj metav1.Object + want *velerov1api.Backup + wantErr error + }{ + { + name: "velerov1api group, labels and annotations all get converted", + key: "backups/foo/ark-backup.json", + obj: &arkv1api.Backup{ + TypeMeta: metav1.TypeMeta{ + Kind: "Backup", + APIVersion: arkv1api.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: arkv1api.DefaultNamespace, + Name: "foo", + Labels: map[string]string{ + "ark.heptio.com/foo": "bar", + "ark.heptio.com/tango": "foxtrot", + "prefix.ark.heptio.com/zaz": "zoo", + "non-matching": "no-change", + }, + Annotations: map[string]string{ + "ark.heptio.com/foo": "bar", + "ark.heptio.com/tango": "foxtrot", + "prefix.ark.heptio.com/zaz": "zoo", + "non-matching": "no-change", + }, + }, + }, + want: 
&velerov1api.Backup{ + TypeMeta: metav1.TypeMeta{ + Kind: "Backup", + APIVersion: velerov1api.SchemeGroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: arkv1api.DefaultNamespace, + Name: "foo", + Labels: map[string]string{ + "velero.io/foo": "bar", + "velero.io/tango": "foxtrot", + "prefix.velero.io/zaz": "zoo", + "non-matching": "no-change", + }, + Annotations: map[string]string{ + "velero.io/foo": "bar", + "velero.io/tango": "foxtrot", + "prefix.velero.io/zaz": "zoo", + "non-matching": "no-change", + }, + }, + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + harness := newObjectBackupStoreTestHarness("test-bucket", "") + + jsonBytes, err := json.Marshal(tc.obj) + require.NoError(t, err) + + require.NoError(t, harness.objectStore.PutObject(harness.bucket, tc.key, bytes.NewReader(jsonBytes))) + + res, err := harness.getAndConvertLegacyBackupMetadata(tc.key) + if tc.wantErr != nil { + assert.Equal(t, tc.wantErr, err) + } else { + require.NoError(t, err) + + assert.Equal(t, tc.want, res) + } + }) + } +} + func TestGetBackupVolumeSnapshots(t *testing.T) { harness := newObjectBackupStoreTestHarness("test-bucket", "") // volumesnapshots file not found should not error - harness.objectStore.PutObject(harness.bucket, "backups/test-backup/ark-backup.json", newStringReadSeeker("foo")) + harness.objectStore.PutObject(harness.bucket, "backups/test-backup/velero-backup.json", newStringReadSeeker("foo")) res, err := harness.GetBackupVolumeSnapshots("test-backup") assert.NoError(t, err) assert.Nil(t, res) @@ -375,7 +524,7 @@ func TestDeleteBackup(t *testing.T) { }, { name: "normal case with backup store prefix", - prefix: "ark-backups/", + prefix: "velero-backups/", }, { name: "some delete errors, do as much as we can", @@ -391,11 +540,11 @@ func TestDeleteBackup(t *testing.T) { objectStore: objectStore, bucket: "test-bucket", layout: NewObjectStoreLayout(test.prefix), - logger: arktest.NewLogger(), + logger: velerotest.NewLogger(), } defer objectStore.AssertExpectations(t) - objects := []string{test.prefix + "backups/bak/ark-backup.json", test.prefix + "backups/bak/bak.tar.gz", test.prefix + "backups/bak/bak.log.gz"} + objects := []string{test.prefix + "backups/bak/velero-backup.json", test.prefix + "backups/bak/bak.tar.gz", test.prefix + "backups/bak/bak.log.gz"} objectStore.On("ListObjects", backupStore.bucket, test.prefix+"backups/bak/").Return(objects, test.listObjectsError) for i, obj := range objects { @@ -410,7 +559,7 @@ func TestDeleteBackup(t *testing.T) { err := backupStore.DeleteBackup("bak") - arktest.AssertErrorMatches(t, test.expectedErr, err) + velerotest.AssertErrorMatches(t, test.expectedErr, err) }) } } @@ -418,57 +567,57 @@ func TestDeleteBackup(t *testing.T) { func TestGetDownloadURL(t *testing.T) { tests := []struct { name string - targetKind api.DownloadTargetKind + targetKind velerov1api.DownloadTargetKind targetName string prefix string expectedKey string }{ { name: "backup contents", - targetKind: api.DownloadTargetKindBackupContents, + targetKind: velerov1api.DownloadTargetKindBackupContents, targetName: "my-backup", expectedKey: "backups/my-backup/my-backup.tar.gz", }, { name: "backup log", - targetKind: api.DownloadTargetKindBackupLog, + targetKind: velerov1api.DownloadTargetKindBackupLog, targetName: "my-backup", expectedKey: "backups/my-backup/my-backup-logs.gz", }, { name: "scheduled backup contents", - targetKind: api.DownloadTargetKindBackupContents, + targetKind: velerov1api.DownloadTargetKindBackupContents, 
targetName: "my-backup-20170913154901", expectedKey: "backups/my-backup-20170913154901/my-backup-20170913154901.tar.gz", }, { name: "scheduled backup log", - targetKind: api.DownloadTargetKindBackupLog, + targetKind: velerov1api.DownloadTargetKindBackupLog, targetName: "my-backup-20170913154901", expectedKey: "backups/my-backup-20170913154901/my-backup-20170913154901-logs.gz", }, { name: "backup contents with backup store prefix", - targetKind: api.DownloadTargetKindBackupContents, + targetKind: velerov1api.DownloadTargetKindBackupContents, targetName: "my-backup", - prefix: "ark-backups/", - expectedKey: "ark-backups/backups/my-backup/my-backup.tar.gz", + prefix: "velero-backups/", + expectedKey: "velero-backups/backups/my-backup/my-backup.tar.gz", }, { name: "restore log", - targetKind: api.DownloadTargetKindRestoreLog, + targetKind: velerov1api.DownloadTargetKindRestoreLog, targetName: "b-20170913154901", expectedKey: "restores/b-20170913154901/restore-b-20170913154901-logs.gz", }, { name: "restore results", - targetKind: api.DownloadTargetKindRestoreResults, + targetKind: velerov1api.DownloadTargetKindRestoreResults, targetName: "b-20170913154901", expectedKey: "restores/b-20170913154901/restore-b-20170913154901-results.gz", }, { name: "restore results - backup has multiple dashes (e.g. restore of scheduled backup)", - targetKind: api.DownloadTargetKindRestoreResults, + targetKind: velerov1api.DownloadTargetKindRestoreResults, targetName: "b-cool-20170913154901-20170913154902", expectedKey: "restores/b-cool-20170913154901-20170913154902/restore-b-cool-20170913154901-20170913154902-results.gz", }, @@ -480,7 +629,7 @@ func TestGetDownloadURL(t *testing.T) { require.NoError(t, harness.objectStore.PutObject("test-bucket", test.expectedKey, newStringReadSeeker("foo"))) - url, err := harness.GetDownloadURL(api.DownloadTarget{Kind: test.targetKind, Name: test.targetName}) + url, err := harness.GetDownloadURL(velerov1api.DownloadTarget{Kind: test.targetKind, Name: test.targetName}) require.NoError(t, err) assert.Equal(t, "a-url", url) }) diff --git a/pkg/plugin/backup_item_action.go b/pkg/plugin/backup_item_action.go index 49c95a27a5..1b76842cb6 100644 --- a/pkg/plugin/backup_item_action.go +++ b/pkg/plugin/backup_item_action.go @@ -19,7 +19,7 @@ package plugin import ( "encoding/json" - "github.com/hashicorp/go-plugin" + plugin "github.com/hashicorp/go-plugin" "github.com/pkg/errors" "golang.org/x/net/context" "google.golang.org/grpc" @@ -27,9 +27,9 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - api "github.com/heptio/ark/pkg/apis/ark/v1" - arkbackup "github.com/heptio/ark/pkg/backup" - proto "github.com/heptio/ark/pkg/plugin/generated" + api "github.com/heptio/velero/pkg/apis/velero/v1" + velerobackup "github.com/heptio/velero/pkg/backup" + proto "github.com/heptio/velero/pkg/plugin/generated" ) // BackupItemActionPlugin is an implementation of go-plugin's Plugin @@ -70,13 +70,13 @@ func newBackupItemActionGRPCClient(base *clientBase, clientConn *grpc.ClientConn } } -func (c *BackupItemActionGRPCClient) AppliesTo() (arkbackup.ResourceSelector, error) { +func (c *BackupItemActionGRPCClient) AppliesTo() (velerobackup.ResourceSelector, error) { res, err := c.grpcClient.AppliesTo(context.Background(), &proto.AppliesToRequest{Plugin: c.plugin}) if err != nil { - return arkbackup.ResourceSelector{}, err + return velerobackup.ResourceSelector{}, err } - return arkbackup.ResourceSelector{ + return velerobackup.ResourceSelector{ IncludedNamespaces: 
res.IncludedNamespaces, ExcludedNamespaces: res.ExcludedNamespaces, IncludedResources: res.IncludedResources, @@ -85,7 +85,7 @@ func (c *BackupItemActionGRPCClient) AppliesTo() (arkbackup.ResourceSelector, er }, nil } -func (c *BackupItemActionGRPCClient) Execute(item runtime.Unstructured, backup *api.Backup) (runtime.Unstructured, []arkbackup.ResourceIdentifier, error) { +func (c *BackupItemActionGRPCClient) Execute(item runtime.Unstructured, backup *api.Backup) (runtime.Unstructured, []velerobackup.ResourceIdentifier, error) { itemJSON, err := json.Marshal(item.UnstructuredContent()) if err != nil { return nil, nil, err @@ -112,10 +112,10 @@ func (c *BackupItemActionGRPCClient) Execute(item runtime.Unstructured, backup * return nil, nil, err } - var additionalItems []arkbackup.ResourceIdentifier + var additionalItems []velerobackup.ResourceIdentifier for _, itm := range res.AdditionalItems { - newItem := arkbackup.ResourceIdentifier{ + newItem := velerobackup.ResourceIdentifier{ GroupResource: schema.GroupResource{ Group: itm.Group, Resource: itm.Resource, @@ -146,13 +146,13 @@ type BackupItemActionGRPCServer struct { mux *serverMux } -func (s *BackupItemActionGRPCServer) getImpl(name string) (arkbackup.ItemAction, error) { +func (s *BackupItemActionGRPCServer) getImpl(name string) (velerobackup.ItemAction, error) { impl, err := s.mux.getHandler(name) if err != nil { return nil, err } - itemAction, ok := impl.(arkbackup.ItemAction) + itemAction, ok := impl.(velerobackup.ItemAction) if !ok { return nil, errors.Errorf("%T is not a backup item action", impl) } @@ -224,7 +224,7 @@ func (s *BackupItemActionGRPCServer) Execute(ctx context.Context, req *proto.Exe return res, nil } -func backupResourceIdentifierToProto(id arkbackup.ResourceIdentifier) *proto.ResourceIdentifier { +func backupResourceIdentifierToProto(id velerobackup.ResourceIdentifier) *proto.ResourceIdentifier { return &proto.ResourceIdentifier{ Group: id.Group, Resource: id.Resource, diff --git a/pkg/plugin/backup_item_action_test.go b/pkg/plugin/backup_item_action_test.go index d02b831c23..66d7e16ff5 100644 --- a/pkg/plugin/backup_item_action_test.go +++ b/pkg/plugin/backup_item_action_test.go @@ -28,11 +28,11 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/backup" - "github.com/heptio/ark/pkg/backup/mocks" - proto "github.com/heptio/ark/pkg/plugin/generated" - arktest "github.com/heptio/ark/pkg/util/test" + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/backup" + "github.com/heptio/velero/pkg/backup/mocks" + proto "github.com/heptio/velero/pkg/plugin/generated" + velerotest "github.com/heptio/velero/pkg/util/test" ) func TestBackupItemActionGRPCServerExecute(t *testing.T) { @@ -72,7 +72,7 @@ func TestBackupItemActionGRPCServerExecute(t *testing.T) { invalidBackup := []byte("this is gibberish json") validBackup := []byte(` { - "apiVersion": "ark.heptio.com/v1", + "apiVersion": "velero.io/v1", "kind": "Backup", "metadata": { "namespace": "myns", @@ -154,7 +154,7 @@ func TestBackupItemActionGRPCServerExecute(t *testing.T) { } s := &BackupItemActionGRPCServer{mux: &serverMux{ - serverLog: arktest.NewLogger(), + serverLog: velerotest.NewLogger(), handlers: map[string]interface{}{ "xyz": itemAction, }, diff --git a/pkg/plugin/block_store.go b/pkg/plugin/block_store.go index 95ae1ddbee..3db8f19f23 100644 --- a/pkg/plugin/block_store.go +++ b/pkg/plugin/block_store.go @@ -26,8 +26,8 @@ import 
( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - "github.com/heptio/ark/pkg/cloudprovider" - proto "github.com/heptio/ark/pkg/plugin/generated" + "github.com/heptio/velero/pkg/cloudprovider" + proto "github.com/heptio/velero/pkg/plugin/generated" ) // BlockStorePlugin is an implementation of go-plugin's Plugin diff --git a/pkg/plugin/client_builder.go b/pkg/plugin/client_builder.go index 4f5ec9ae27..06ad79cef6 100644 --- a/pkg/plugin/client_builder.go +++ b/pkg/plugin/client_builder.go @@ -33,7 +33,7 @@ type clientBuilder struct { } // newClientBuilder returns a new clientBuilder with commandName to name. If the command matches the currently running -// process (i.e. ark), this also sets commandArgs to the internal Ark command to run plugins. +// process (i.e. velero), this also sets commandArgs to the internal Velero command to run plugins. func newClientBuilder(command string, logger logrus.FieldLogger, logLevel logrus.Level) *clientBuilder { b := &clientBuilder{ commandName: command, @@ -41,7 +41,7 @@ func newClientBuilder(command string, logger logrus.FieldLogger, logLevel logrus pluginLogger: newLogrusAdapter(logger, logLevel), } if command == os.Args[0] { - // For plugins compiled into the ark executable, we need to run "ark run-plugins" + // For plugins compiled into the velero executable, we need to run "velero run-plugins" b.commandArgs = []string{"run-plugins"} } return b @@ -67,7 +67,7 @@ func (b *clientBuilder) clientConfig() *hcplugin.ClientConfig { } } -// client creates a new go-plugin Client with support for all of Ark's plugin kinds (BackupItemAction, BlockStore, +// client creates a new go-plugin Client with support for all of Velero's plugin kinds (BackupItemAction, BlockStore, // ObjectStore, PluginLister, RestoreItemAction). 
func (b *clientBuilder) client() *hcplugin.Client { return hcplugin.NewClient(b.clientConfig()) diff --git a/pkg/plugin/client_builder_test.go b/pkg/plugin/client_builder_test.go index a782d6eb70..e79f77646a 100644 --- a/pkg/plugin/client_builder_test.go +++ b/pkg/plugin/client_builder_test.go @@ -24,14 +24,14 @@ import ( "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" - "github.com/heptio/ark/pkg/util/test" + "github.com/heptio/velero/pkg/util/test" ) func TestNewClientBuilder(t *testing.T) { logger := test.NewLogger() logLevel := logrus.InfoLevel - cb := newClientBuilder("ark", logger, logLevel) - assert.Equal(t, cb.commandName, "ark") + cb := newClientBuilder("velero", logger, logLevel) + assert.Equal(t, cb.commandName, "velero") assert.Empty(t, cb.commandArgs) assert.Equal(t, newLogrusAdapter(logger, logLevel), cb.pluginLogger) @@ -44,7 +44,7 @@ func TestNewClientBuilder(t *testing.T) { func TestClientConfig(t *testing.T) { logger := test.NewLogger() logLevel := logrus.InfoLevel - cb := newClientBuilder("ark", logger, logLevel) + cb := newClientBuilder("velero", logger, logLevel) expected := &hcplugin.ClientConfig{ HandshakeConfig: Handshake, diff --git a/pkg/plugin/client_dispenser_test.go b/pkg/plugin/client_dispenser_test.go index 68b7daa624..c83009dcb9 100644 --- a/pkg/plugin/client_dispenser_test.go +++ b/pkg/plugin/client_dispenser_test.go @@ -22,7 +22,7 @@ import ( "github.com/stretchr/testify/require" "google.golang.org/grpc" - "github.com/heptio/ark/pkg/util/test" + "github.com/heptio/velero/pkg/util/test" ) type fakeClient struct { diff --git a/pkg/plugin/interface.go b/pkg/plugin/interface.go index 3b66a4f51c..c140644223 100644 --- a/pkg/plugin/interface.go +++ b/pkg/plugin/interface.go @@ -18,7 +18,7 @@ package plugin import plugin "github.com/hashicorp/go-plugin" -// Interface represents an Ark plugin. +// Interface represents a Velero plugin. type Interface interface { plugin.Plugin diff --git a/pkg/plugin/logger.go b/pkg/plugin/logger.go index 26b26a6ffa..b673f09be7 100644 --- a/pkg/plugin/logger.go +++ b/pkg/plugin/logger.go @@ -19,11 +19,11 @@ package plugin import ( "github.com/sirupsen/logrus" - "github.com/heptio/ark/pkg/util/logging" + "github.com/heptio/velero/pkg/util/logging" ) // NewLogger returns a logger that is suitable for use within an -// Ark plugin. +// Velero plugin. func NewLogger() logrus.FieldLogger { logger := logrus.New() /* @@ -31,7 +31,7 @@ func NewLogger() logrus.FieldLogger { go-plugin uses stdout for a communications protocol between client and server. - stderr is used for log messages from server to client. The ark server makes sure they are logged to stdout. + stderr is used for log messages from server to client. The velero server makes sure they are logged to stdout. */ // we use the JSON formatter because go-plugin will parse incoming @@ -41,18 +41,18 @@ func NewLogger() logrus.FieldLogger { // this is the hclog-compatible message field logrus.FieldKeyMsg: "@message", }, - // Ark server already adds timestamps when emitting logs, so + // Velero server already adds timestamps when emitting logs, so // don't do it within the plugin. DisableTimestamp: true, } - // set a logger name for the location hook which will signal to the Ark + // set a logger name for the location hook which will signal to the Velero // server logger that the location has been set within a hook. 
logger.Hooks.Add((&logging.LogLocationHook{}).WithLoggerName("plugin")) // this hook adjusts the string representation of WarnLevel to "warn" // rather than "warning" to make it parseable by go-plugin within the - // Ark server code + // Velero server code logger.Hooks.Add(&logging.HcLogLevelHook{}) return logger diff --git a/pkg/plugin/logger_test.go b/pkg/plugin/logger_test.go index e4c0739048..06375fef47 100644 --- a/pkg/plugin/logger_test.go +++ b/pkg/plugin/logger_test.go @@ -21,7 +21,7 @@ import ( "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" - "github.com/heptio/ark/pkg/util/logging" + "github.com/heptio/velero/pkg/util/logging" ) func TestNewLogger(t *testing.T) { diff --git a/pkg/plugin/logrus_adapter.go b/pkg/plugin/logrus_adapter.go index a8b3bc2568..c6c699c4bf 100644 --- a/pkg/plugin/logrus_adapter.go +++ b/pkg/plugin/logrus_adapter.go @@ -43,10 +43,10 @@ func argsToFields(args ...interface{}) logrus.Fields { switch args[i] { case "time", "timestamp", "level": // remove `time` & `timestamp` because this info will be added - // by the Ark logger and we don't want to have duplicated + // by the Velero logger and we don't want to have duplicated // fields. // - // remove `level` because it'll be added by the Ark logger based + // remove `level` because it'll be added by the Velero logger based // on the call we make (and go-plugin is determining which level // to log at based on the hclog-compatible `@level` field which // we're adding via HcLogLevelHook). diff --git a/pkg/plugin/manager.go b/pkg/plugin/manager.go index 698c1c1b77..be8faf207d 100644 --- a/pkg/plugin/manager.go +++ b/pkg/plugin/manager.go @@ -21,9 +21,9 @@ import ( "github.com/sirupsen/logrus" - "github.com/heptio/ark/pkg/backup" - "github.com/heptio/ark/pkg/cloudprovider" - "github.com/heptio/ark/pkg/restore" + "github.com/heptio/velero/pkg/backup" + "github.com/heptio/velero/pkg/cloudprovider" + "github.com/heptio/velero/pkg/restore" ) // Manager manages the lifecycles of plugins. diff --git a/pkg/plugin/manager_test.go b/pkg/plugin/manager_test.go index 8b2c6b55a1..4f54cc283c 100644 --- a/pkg/plugin/manager_test.go +++ b/pkg/plugin/manager_test.go @@ -26,7 +26,7 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "github.com/heptio/ark/pkg/util/test" + "github.com/heptio/velero/pkg/util/test" ) type mockRegistry struct { diff --git a/pkg/plugin/mocks/manager.go b/pkg/plugin/mocks/manager.go index 0d0a261820..73dd85fbdb 100644 --- a/pkg/plugin/mocks/manager.go +++ b/pkg/plugin/mocks/manager.go @@ -16,11 +16,11 @@ limitations under the License. // Code generated by mockery v1.0.0. DO NOT EDIT. 
package mocks -import backup "github.com/heptio/ark/pkg/backup" -import cloudprovider "github.com/heptio/ark/pkg/cloudprovider" +import backup "github.com/heptio/velero/pkg/backup" +import cloudprovider "github.com/heptio/velero/pkg/cloudprovider" import mock "github.com/stretchr/testify/mock" -import restore "github.com/heptio/ark/pkg/restore" +import restore "github.com/heptio/velero/pkg/restore" // Manager is an autogenerated mock type for the Manager type type Manager struct { diff --git a/pkg/plugin/mocks/process_factory.go b/pkg/plugin/mocks/process_factory.go index 8ec4af37f9..1374fc5582 100644 --- a/pkg/plugin/mocks/process_factory.go +++ b/pkg/plugin/mocks/process_factory.go @@ -18,7 +18,7 @@ package mocks import logrus "github.com/sirupsen/logrus" import mock "github.com/stretchr/testify/mock" -import plugin "github.com/heptio/ark/pkg/plugin" +import plugin "github.com/heptio/velero/pkg/plugin" // ProcessFactory is an autogenerated mock type for the ProcessFactory type type ProcessFactory struct { diff --git a/pkg/plugin/object_store.go b/pkg/plugin/object_store.go index aac5f6a914..3d9214cb62 100644 --- a/pkg/plugin/object_store.go +++ b/pkg/plugin/object_store.go @@ -25,8 +25,8 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc" - "github.com/heptio/ark/pkg/cloudprovider" - proto "github.com/heptio/ark/pkg/plugin/generated" + "github.com/heptio/velero/pkg/cloudprovider" + proto "github.com/heptio/velero/pkg/plugin/generated" ) const byteChunkSize = 16384 diff --git a/pkg/plugin/plugin_base_test.go b/pkg/plugin/plugin_base_test.go index 6d7eab2c86..557240b556 100644 --- a/pkg/plugin/plugin_base_test.go +++ b/pkg/plugin/plugin_base_test.go @@ -20,7 +20,7 @@ import ( "github.com/stretchr/testify/assert" - "github.com/heptio/ark/pkg/util/test" + "github.com/heptio/velero/pkg/util/test" ) func TestClientLogger(t *testing.T) { diff --git a/pkg/plugin/plugin_kinds.go b/pkg/plugin/plugin_kinds.go index 3891fce836..70d17de7ce 100644 --- a/pkg/plugin/plugin_kinds.go +++ b/pkg/plugin/plugin_kinds.go @@ -20,7 +20,7 @@ import ( ) // PluginKind is a type alias for a string that describes -// the kind of an Ark-supported plugin. +// the kind of a Velero-supported plugin. type PluginKind string // String returns the string for k. @@ -45,8 +45,8 @@ const ( PluginKindPluginLister PluginKind = "PluginLister" ) -// allPluginKinds contains all the valid plugin kinds that Ark supports, excluding PluginLister because that is not a -// kind that a developer would ever need to implement (it's handled by Ark and the Ark plugin library code). +// allPluginKinds contains all the valid plugin kinds that Velero supports, excluding PluginLister because that is not a +// kind that a developer would ever need to implement (it's handled by Velero and the Velero plugin library code). var allPluginKinds = sets.NewString( PluginKindObjectStore.String(), PluginKindBlockStore.String(), diff --git a/pkg/plugin/plugin_lister.go b/pkg/plugin/plugin_lister.go index eea77c8550..3f43c174a9 100644 --- a/pkg/plugin/plugin_lister.go +++ b/pkg/plugin/plugin_lister.go @@ -21,7 +21,7 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc" - proto "github.com/heptio/ark/pkg/plugin/generated" + proto "github.com/heptio/velero/pkg/plugin/generated" ) // PluginIdenitifer uniquely identifies a plugin by command, kind, and name. 
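The plugin-package changes above are import-path and naming updates; the contract a backup item action implements is unchanged. As a rough sketch of a plugin-side implementation against the renamed packages — the AppliesTo and Execute signatures follow the interface used earlier in this diff, while the type name and selector values are assumptions for illustration:

package example

import (
	"k8s.io/apimachinery/pkg/runtime"

	api "github.com/heptio/velero/pkg/apis/velero/v1"
	"github.com/heptio/velero/pkg/backup"
)

// noopItemAction selects pods and returns each item unchanged, with no additional items.
type noopItemAction struct{}

func (a *noopItemAction) AppliesTo() (backup.ResourceSelector, error) {
	return backup.ResourceSelector{IncludedResources: []string{"pods"}}, nil
}

func (a *noopItemAction) Execute(item runtime.Unstructured, b *api.Backup) (runtime.Unstructured, []backup.ResourceIdentifier, error) {
	return item, nil, nil
}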
diff --git a/pkg/plugin/registry.go b/pkg/plugin/registry.go index 07d7879ea4..4c79053c2e 100644 --- a/pkg/plugin/registry.go +++ b/pkg/plugin/registry.go @@ -23,7 +23,7 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" - "github.com/heptio/ark/pkg/util/filesystem" + "github.com/heptio/velero/pkg/util/filesystem" ) // Registry manages information about available plugins. @@ -75,7 +75,7 @@ func (r *registry) DiscoverPlugins() error { return err } - // Start by adding ark's internal plugins + // Start by adding velero's internal plugins commands := []string{os.Args[0]} // Then add the discovered plugin executables commands = append(commands, plugins...) diff --git a/pkg/plugin/registry_test.go b/pkg/plugin/registry_test.go index e36ee03d57..a2b9a556a3 100644 --- a/pkg/plugin/registry_test.go +++ b/pkg/plugin/registry_test.go @@ -24,7 +24,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/heptio/ark/pkg/util/test" + "github.com/heptio/velero/pkg/util/test" ) func TestNewRegistry(t *testing.T) { diff --git a/pkg/plugin/restartable_backup_item_action.go b/pkg/plugin/restartable_backup_item_action.go index 4a8cca0a29..1c66af6891 100644 --- a/pkg/plugin/restartable_backup_item_action.go +++ b/pkg/plugin/restartable_backup_item_action.go @@ -19,8 +19,8 @@ import ( "github.com/pkg/errors" "k8s.io/apimachinery/pkg/runtime" - api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/backup" + api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/backup" ) // restartableBackupItemAction is a backup item action for a given implementation (such as "pod"). It is associated with diff --git a/pkg/plugin/restartable_backup_item_action_test.go b/pkg/plugin/restartable_backup_item_action_test.go index 299be2b80c..10dfecbd17 100644 --- a/pkg/plugin/restartable_backup_item_action_test.go +++ b/pkg/plugin/restartable_backup_item_action_test.go @@ -25,9 +25,9 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" - "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/backup" - "github.com/heptio/ark/pkg/backup/mocks" + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/backup" + "github.com/heptio/velero/pkg/backup/mocks" ) func TestRestartableGetBackupItemAction(t *testing.T) { @@ -115,7 +115,7 @@ func TestRestartableBackupItemActionDelegatedFunctions(t *testing.T) { additionalItems := []backup.ResourceIdentifier{ { - GroupResource: schema.GroupResource{Group: "ark.heptio.com", Resource: "backups"}, + GroupResource: schema.GroupResource{Group: "velero.io", Resource: "backups"}, }, } diff --git a/pkg/plugin/restartable_block_store.go b/pkg/plugin/restartable_block_store.go index 533bcfae47..9ef86e6a9b 100644 --- a/pkg/plugin/restartable_block_store.go +++ b/pkg/plugin/restartable_block_store.go @@ -19,7 +19,7 @@ import ( "github.com/pkg/errors" "k8s.io/apimachinery/pkg/runtime" - "github.com/heptio/ark/pkg/cloudprovider" + "github.com/heptio/velero/pkg/cloudprovider" ) // restartableBlockStore is an object store for a given implementation (such as "aws"). 
It is associated with diff --git a/pkg/plugin/restartable_block_store_test.go b/pkg/plugin/restartable_block_store_test.go index 8fc3ac629f..2c64eff22f 100644 --- a/pkg/plugin/restartable_block_store_test.go +++ b/pkg/plugin/restartable_block_store_test.go @@ -25,7 +25,7 @@ import ( "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "github.com/heptio/ark/pkg/cloudprovider/mocks" + "github.com/heptio/velero/pkg/cloudprovider/mocks" ) func TestRestartableGetBlockStore(t *testing.T) { diff --git a/pkg/plugin/restartable_object_store.go b/pkg/plugin/restartable_object_store.go index 94bd155e27..3684982dd0 100644 --- a/pkg/plugin/restartable_object_store.go +++ b/pkg/plugin/restartable_object_store.go @@ -21,7 +21,7 @@ import ( "github.com/pkg/errors" - "github.com/heptio/ark/pkg/cloudprovider" + "github.com/heptio/velero/pkg/cloudprovider" ) // restartableObjectStore is an object store for a given implementation (such as "aws"). It is associated with diff --git a/pkg/plugin/restartable_object_store_test.go b/pkg/plugin/restartable_object_store_test.go index 3c3b801601..f5146855e7 100644 --- a/pkg/plugin/restartable_object_store_test.go +++ b/pkg/plugin/restartable_object_store_test.go @@ -26,7 +26,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - cloudprovidermocks "github.com/heptio/ark/pkg/cloudprovider/mocks" + cloudprovidermocks "github.com/heptio/velero/pkg/cloudprovider/mocks" ) func TestRestartableGetObjectStore(t *testing.T) { diff --git a/pkg/plugin/restartable_restore_item_action.go b/pkg/plugin/restartable_restore_item_action.go index d70caaacdf..254cd4414a 100644 --- a/pkg/plugin/restartable_restore_item_action.go +++ b/pkg/plugin/restartable_restore_item_action.go @@ -19,8 +19,8 @@ import ( "github.com/pkg/errors" "k8s.io/apimachinery/pkg/runtime" - api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/restore" + api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/restore" ) // restartableRestoreItemAction is a restore item action for a given implementation (such as "pod"). 
It is associated with diff --git a/pkg/plugin/restartable_restore_item_action_test.go b/pkg/plugin/restartable_restore_item_action_test.go index a7960b707e..a5fe9b5b3e 100644 --- a/pkg/plugin/restartable_restore_item_action_test.go +++ b/pkg/plugin/restartable_restore_item_action_test.go @@ -24,9 +24,9 @@ import ( "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/restore" - "github.com/heptio/ark/pkg/restore/mocks" + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/restore" + "github.com/heptio/velero/pkg/restore/mocks" ) func TestRestartableGetRestoreItemAction(t *testing.T) { diff --git a/pkg/plugin/restore_item_action.go b/pkg/plugin/restore_item_action.go index 5c6f8bfeca..e6897faff7 100644 --- a/pkg/plugin/restore_item_action.go +++ b/pkg/plugin/restore_item_action.go @@ -19,16 +19,16 @@ package plugin import ( "encoding/json" - "github.com/hashicorp/go-plugin" + plugin "github.com/hashicorp/go-plugin" "github.com/pkg/errors" "golang.org/x/net/context" "google.golang.org/grpc" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - api "github.com/heptio/ark/pkg/apis/ark/v1" - proto "github.com/heptio/ark/pkg/plugin/generated" - "github.com/heptio/ark/pkg/restore" + api "github.com/heptio/velero/pkg/apis/velero/v1" + proto "github.com/heptio/velero/pkg/plugin/generated" + "github.com/heptio/velero/pkg/restore" ) // RestoreItemActionPlugin is an implementation of go-plugin's Plugin diff --git a/pkg/plugin/server_mux.go b/pkg/plugin/server_mux.go index 1aaeb72bb7..e1a1b1820d 100644 --- a/pkg/plugin/server_mux.go +++ b/pkg/plugin/server_mux.go @@ -21,7 +21,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" ) -// HandlerInitializer is a function that initializes and returns a new instance of one of Ark's plugin interfaces +// HandlerInitializer is a function that initializes and returns a new instance of one of Velero's plugin interfaces // (ObjectStore, BlockStore, BackupItemAction, RestoreItemAction). 
type HandlerInitializer func(logger logrus.FieldLogger) (interface{}, error) diff --git a/pkg/podexec/pod_command_executor.go b/pkg/podexec/pod_command_executor.go index eaff18c934..08afe824d0 100644 --- a/pkg/podexec/pod_command_executor.go +++ b/pkg/podexec/pod_command_executor.go @@ -28,8 +28,8 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/tools/remotecommand" - api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/util/collections" + api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/util/collections" ) const defaultTimeout = 30 * time.Second diff --git a/pkg/podexec/pod_command_executor_test.go b/pkg/podexec/pod_command_executor_test.go index 95f7921cb7..da8e00aa3d 100644 --- a/pkg/podexec/pod_command_executor_test.go +++ b/pkg/podexec/pod_command_executor_test.go @@ -33,8 +33,8 @@ import ( "k8s.io/client-go/rest" "k8s.io/client-go/tools/remotecommand" - "github.com/heptio/ark/pkg/apis/ark/v1" - arktest "github.com/heptio/ark/pkg/util/test" + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + velerotest "github.com/heptio/velero/pkg/util/test" ) func TestNewPodCommandExecutor(t *testing.T) { @@ -82,7 +82,7 @@ func TestExecutePodCommandMissingInputs(t *testing.T) { }, { name: "container not found", - item: arktest.UnstructuredOrDie(`{"kind":"Pod","spec":{"containers":[{"name":"foo"}]}}`).Object, + item: velerotest.UnstructuredOrDie(`{"kind":"Pod","spec":{"containers":[{"name":"foo"}]}}`).Object, podNamespace: "ns", podName: "pod", hookName: "hook", @@ -92,7 +92,7 @@ func TestExecutePodCommandMissingInputs(t *testing.T) { }, { name: "command missing", - item: arktest.UnstructuredOrDie(`{"kind":"Pod","spec":{"containers":[{"name":"foo"}]}}`).Object, + item: velerotest.UnstructuredOrDie(`{"kind":"Pod","spec":{"containers":[{"name":"foo"}]}}`).Object, podNamespace: "ns", podName: "pod", hookName: "hook", @@ -105,7 +105,7 @@ func TestExecutePodCommandMissingInputs(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { e := &defaultPodCommandExecutor{} - err := e.ExecutePodCommand(arktest.NewLogger(), test.item, test.podNamespace, test.podName, test.hookName, test.hook) + err := e.ExecutePodCommand(velerotest.NewLogger(), test.item, test.podNamespace, test.podName, test.hookName, test.hook) assert.Error(t, err) }) } @@ -161,7 +161,7 @@ func TestExecutePodCommand(t *testing.T) { Timeout: metav1.Duration{Duration: test.timeout}, } - pod, err := arktest.GetAsMap(` + pod, err := velerotest.GetAsMap(` { "metadata": { "namespace": "namespace", @@ -209,7 +209,7 @@ func TestExecutePodCommand(t *testing.T) { } streamExecutor.On("Stream", expectedStreamOptions).Return(test.hookError) - err = podCommandExecutor.ExecutePodCommand(arktest.NewLogger(), pod, "namespace", "name", "hookName", &hook) + err = podCommandExecutor.ExecutePodCommand(velerotest.NewLogger(), pod, "namespace", "name", "hookName", &hook) if test.expectedError != "" { assert.EqualError(t, err, test.expectedError) return diff --git a/pkg/restic/backupper.go b/pkg/restic/backupper.go index 4a6aeaaa0a..1952fa9a7f 100644 --- a/pkg/restic/backupper.go +++ b/pkg/restic/backupper.go @@ -27,14 +27,14 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/cache" - arkv1api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/util/boolptr" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/util/boolptr" ) // Backupper can execute restic backups of volumes in a pod. 
type Backupper interface { // BackupPodVolumes backs up all annotated volumes in a pod. - BackupPodVolumes(backup *arkv1api.Backup, pod *corev1api.Pod, log logrus.FieldLogger) (map[string]string, []error) + BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api.Pod, log logrus.FieldLogger) (map[string]string, []error) } type backupper struct { @@ -42,7 +42,7 @@ type backupper struct { repoManager *repositoryManager repoEnsurer *repositoryEnsurer - results map[string]chan *arkv1api.PodVolumeBackup + results map[string]chan *velerov1api.PodVolumeBackup resultsLock sync.Mutex } @@ -58,15 +58,15 @@ func newBackupper( repoManager: repoManager, repoEnsurer: repoEnsurer, - results: make(map[string]chan *arkv1api.PodVolumeBackup), + results: make(map[string]chan *velerov1api.PodVolumeBackup), } podVolumeBackupInformer.AddEventHandler( cache.ResourceEventHandlerFuncs{ UpdateFunc: func(_, obj interface{}) { - pvb := obj.(*arkv1api.PodVolumeBackup) + pvb := obj.(*velerov1api.PodVolumeBackup) - if pvb.Status.Phase == arkv1api.PodVolumeBackupPhaseCompleted || pvb.Status.Phase == arkv1api.PodVolumeBackupPhaseFailed { + if pvb.Status.Phase == velerov1api.PodVolumeBackupPhaseCompleted || pvb.Status.Phase == velerov1api.PodVolumeBackupPhaseFailed { b.resultsLock.Lock() defer b.resultsLock.Unlock() @@ -88,7 +88,7 @@ func resultsKey(ns, name string) string { return fmt.Sprintf("%s/%s", ns, name) } -func (b *backupper) BackupPodVolumes(backup *arkv1api.Backup, pod *corev1api.Pod, log logrus.FieldLogger) (map[string]string, []error) { +func (b *backupper) BackupPodVolumes(backup *velerov1api.Backup, pod *corev1api.Pod, log logrus.FieldLogger) (map[string]string, []error) { // get volumes to backup from pod's annotations volumesToBackup := GetVolumesToBackup(pod) if len(volumesToBackup) == 0 { @@ -105,7 +105,7 @@ func (b *backupper) BackupPodVolumes(backup *arkv1api.Backup, pod *corev1api.Pod b.repoManager.repoLocker.Lock(repo.Name) defer b.repoManager.repoLocker.Unlock(repo.Name) - resultsChan := make(chan *arkv1api.PodVolumeBackup) + resultsChan := make(chan *velerov1api.PodVolumeBackup) b.resultsLock.Lock() b.results[resultsKey(pod.Namespace, pod.Name)] = resultsChan @@ -137,7 +137,7 @@ func (b *backupper) BackupPodVolumes(backup *arkv1api.Backup, pod *corev1api.Pod volumeBackup := newPodVolumeBackup(backup, pod, volumeName, repo.Spec.ResticIdentifier) - if err := errorOnly(b.repoManager.arkClient.ArkV1().PodVolumeBackups(volumeBackup.Namespace).Create(volumeBackup)); err != nil { + if err := errorOnly(b.repoManager.veleroClient.VeleroV1().PodVolumeBackups(volumeBackup.Namespace).Create(volumeBackup)); err != nil { errs = append(errs, err) continue } @@ -153,9 +153,9 @@ ForEachVolume: break ForEachVolume case res := <-resultsChan: switch res.Status.Phase { - case arkv1api.PodVolumeBackupPhaseCompleted: + case velerov1api.PodVolumeBackupPhaseCompleted: volumeSnapshots[res.Spec.Volume] = res.Status.SnapshotID - case arkv1api.PodVolumeBackupPhaseFailed: + case velerov1api.PodVolumeBackupPhaseFailed: errs = append(errs, errors.Errorf("pod volume backup failed: %s", res.Status.Message)) delete(volumeSnapshots, res.Spec.Volume) } @@ -183,14 +183,14 @@ func isHostPathVolume(podVolumes map[string]corev1api.Volume, volumeName string) return volume.HostPath != nil } -func newPodVolumeBackup(backup *arkv1api.Backup, pod *corev1api.Pod, volumeName, repoIdentifier string) *arkv1api.PodVolumeBackup { - return &arkv1api.PodVolumeBackup{ +func newPodVolumeBackup(backup *velerov1api.Backup, pod *corev1api.Pod, 
volumeName, repoIdentifier string) *velerov1api.PodVolumeBackup { + return &velerov1api.PodVolumeBackup{ ObjectMeta: metav1.ObjectMeta{ Namespace: backup.Namespace, GenerateName: backup.Name + "-", OwnerReferences: []metav1.OwnerReference{ { - APIVersion: arkv1api.SchemeGroupVersion.String(), + APIVersion: velerov1api.SchemeGroupVersion.String(), Kind: "Backup", Name: backup.Name, UID: backup.UID, @@ -198,11 +198,11 @@ func newPodVolumeBackup(backup *arkv1api.Backup, pod *corev1api.Pod, volumeName, }, }, Labels: map[string]string{ - arkv1api.BackupNameLabel: backup.Name, - arkv1api.BackupUIDLabel: string(backup.UID), + velerov1api.BackupNameLabel: backup.Name, + velerov1api.BackupUIDLabel: string(backup.UID), }, }, - Spec: arkv1api.PodVolumeBackupSpec{ + Spec: velerov1api.PodVolumeBackupSpec{ Node: pod.Spec.NodeName, Pod: corev1api.ObjectReference{ Kind: "Pod", diff --git a/pkg/restic/command.go b/pkg/restic/command.go index ca50b57da8..266e1e4e8c 100644 --- a/pkg/restic/command.go +++ b/pkg/restic/command.go @@ -52,10 +52,10 @@ func (c *Command) StringSlice() []string { res = append(res, passwordFlag(c.PasswordFile)) } - // If ARK_SCRATCH_DIR is defined, put the restic cache within it. If not, + // If VELERO_SCRATCH_DIR is defined, put the restic cache within it. If not, // allow restic to choose the location. This makes running either in-cluster // or local (dev) work properly. - if scratch := os.Getenv("ARK_SCRATCH_DIR"); scratch != "" { + if scratch := os.Getenv("VELERO_SCRATCH_DIR"); scratch != "" { res = append(res, cacheDirFlag(filepath.Join(scratch, ".cache", "restic"))) } diff --git a/pkg/restic/command_factory.go b/pkg/restic/command_factory.go index 591d6f9d38..8e929e118b 100644 --- a/pkg/restic/command_factory.go +++ b/pkg/restic/command_factory.go @@ -34,7 +34,7 @@ func BackupCommand(repoIdentifier, passwordFile, path string, tags map[string]st PasswordFile: passwordFile, Dir: path, Args: []string{"."}, - ExtraFlags: append(backupTagFlags(tags), "--hostname=ark"), + ExtraFlags: append(backupTagFlags(tags), "--hostname=velero"), } } diff --git a/pkg/restic/command_factory_test.go b/pkg/restic/command_factory_test.go index 5e2714038e..7e8e342d1c 100644 --- a/pkg/restic/command_factory_test.go +++ b/pkg/restic/command_factory_test.go @@ -32,7 +32,7 @@ func TestBackupCommand(t *testing.T) { assert.Equal(t, "path", c.Dir) assert.Equal(t, []string{"."}, c.Args) - expected := []string{"--tag=foo=bar", "--tag=c=d", "--hostname=ark"} + expected := []string{"--tag=foo=bar", "--tag=c=d", "--hostname=velero"} sort.Strings(expected) sort.Strings(c.ExtraFlags) assert.Equal(t, expected, c.ExtraFlags) diff --git a/pkg/restic/command_test.go b/pkg/restic/command_test.go index bf2856c8d2..693f0adcbd 100644 --- a/pkg/restic/command_test.go +++ b/pkg/restic/command_test.go @@ -48,7 +48,7 @@ func TestStringSlice(t *testing.T) { ExtraFlags: []string{"--foo=bar"}, } - require.NoError(t, os.Unsetenv("ARK_SCRATCH_DIR")) + require.NoError(t, os.Unsetenv("VELERO_SCRATCH_DIR")) assert.Equal(t, []string{ "restic", "cmd", @@ -59,7 +59,7 @@ func TestStringSlice(t *testing.T) { "--foo=bar", }, c.StringSlice()) - os.Setenv("ARK_SCRATCH_DIR", "/foo") + os.Setenv("VELERO_SCRATCH_DIR", "/foo") assert.Equal(t, []string{ "restic", "cmd", @@ -71,7 +71,7 @@ func TestStringSlice(t *testing.T) { "--foo=bar", }, c.StringSlice()) - require.NoError(t, os.Unsetenv("ARK_SCRATCH_DIR")) + require.NoError(t, os.Unsetenv("VELERO_SCRATCH_DIR")) } func TestString(t *testing.T) { @@ -84,7 +84,7 @@ func TestString(t *testing.T) { 
ExtraFlags: []string{"--foo=bar"}, } - require.NoError(t, os.Unsetenv("ARK_SCRATCH_DIR")) + require.NoError(t, os.Unsetenv("VELERO_SCRATCH_DIR")) assert.Equal(t, "restic cmd --repo=repo-id --password-file=/path/to/password-file arg-1 arg-2 --foo=bar", c.String()) } @@ -98,7 +98,7 @@ func TestCmd(t *testing.T) { ExtraFlags: []string{"--foo=bar"}, } - require.NoError(t, os.Unsetenv("ARK_SCRATCH_DIR")) + require.NoError(t, os.Unsetenv("VELERO_SCRATCH_DIR")) execCmd := c.Cmd() assert.Equal(t, c.StringSlice(), execCmd.Args) diff --git a/pkg/restic/common.go b/pkg/restic/common.go index f7c1a342f4..e230ff2ddb 100644 --- a/pkg/restic/common.go +++ b/pkg/restic/common.go @@ -27,10 +27,10 @@ import ( "k8s.io/apimachinery/pkg/labels" corev1listers "k8s.io/client-go/listers/core/v1" - arkv1api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/cloudprovider/azure" - arkv1listers "github.com/heptio/ark/pkg/generated/listers/ark/v1" - "github.com/heptio/ark/pkg/util/filesystem" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/cloudprovider/azure" + velerov1listers "github.com/heptio/velero/pkg/generated/listers/velero/v1" + "github.com/heptio/velero/pkg/util/filesystem" ) const ( @@ -38,8 +38,12 @@ const ( InitContainer = "restic-wait" DefaultMaintenanceFrequency = 24 * time.Hour - podAnnotationPrefix = "snapshot.ark.heptio.com/" - volumesToBackupAnnotation = "backup.ark.heptio.com/backup-volumes" + podAnnotationPrefix = "snapshot.velero.io/" + volumesToBackupAnnotation = "backup.velero.io/backup-volumes" + + // TODO(1.0) remove both legacy annotations + podAnnotationLegacyPrefix = "snapshot.ark.heptio.com/" + volumesToBackupLegacyAnnotation = "backup.ark.heptio.com/backup-volumes" ) // PodHasSnapshotAnnotation returns true if the object has an annotation @@ -50,6 +54,11 @@ func PodHasSnapshotAnnotation(obj metav1.Object) bool { if strings.HasPrefix(key, podAnnotationPrefix) { return true } + + // TODO(1.0): remove if statement & contents + if strings.HasPrefix(key, podAnnotationLegacyPrefix) { + return true + } } return false @@ -60,13 +69,26 @@ func PodHasSnapshotAnnotation(obj metav1.Object) bool { func GetPodSnapshotAnnotations(obj metav1.Object) map[string]string { var res map[string]string + insertSafe := func(k, v string) { + if res == nil { + res = make(map[string]string) + } + res[k] = v + } + for k, v := range obj.GetAnnotations() { if strings.HasPrefix(k, podAnnotationPrefix) { - if res == nil { - res = make(map[string]string) - } + insertSafe(k[len(podAnnotationPrefix):], v) + } + + if strings.HasPrefix(k, podAnnotationLegacyPrefix) { + volume := k[len(podAnnotationLegacyPrefix):] - res[k[len(podAnnotationPrefix):]] = v + // if it has the legacy prefix, only use it if there's not + // already a value in res for the volume + if _, ok := res[volume]; !ok { + insertSafe(volume, v) + } } } @@ -95,7 +117,12 @@ func GetVolumesToBackup(obj metav1.Object) []string { return nil } - backupsValue := annotations[volumesToBackupAnnotation] + backupsValue, ok := annotations[volumesToBackupAnnotation] + // TODO(1.0) remove the following if statement & contents + if !ok { + backupsValue = annotations[volumesToBackupLegacyAnnotation] + } + if backupsValue == "" { return nil } @@ -104,7 +131,7 @@ func GetVolumesToBackup(obj metav1.Object) []string { } // SnapshotIdentifier uniquely identifies a restic snapshot -// taken by Ark. +// taken by Velero. 
type SnapshotIdentifier struct { // VolumeNamespace is the namespace of the pod/volume that // the restic snapshot is for. @@ -119,10 +146,10 @@ type SnapshotIdentifier struct { } // GetSnapshotsInBackup returns a list of all restic snapshot ids associated with -// a given Ark backup. -func GetSnapshotsInBackup(backup *arkv1api.Backup, podVolumeBackupLister arkv1listers.PodVolumeBackupLister) ([]SnapshotIdentifier, error) { +// a given Velero backup. +func GetSnapshotsInBackup(backup *velerov1api.Backup, podVolumeBackupLister velerov1listers.PodVolumeBackupLister) ([]SnapshotIdentifier, error) { selector := labels.Set(map[string]string{ - arkv1api.BackupNameLabel: backup.Name, + velerov1api.BackupNameLabel: backup.Name, }).AsSelector() podVolumeBackups, err := podVolumeBackupLister.List(selector) @@ -149,14 +176,14 @@ func GetSnapshotsInBackup(backup *arkv1api.Backup, podVolumeBackupLister arkv1li // encryption key for the given repo and returns its path. The // caller should generally call os.Remove() to remove the file // when done with it. -func TempCredentialsFile(secretLister corev1listers.SecretLister, arkNamespace, repoName string, fs filesystem.Interface) (string, error) { +func TempCredentialsFile(secretLister corev1listers.SecretLister, veleroNamespace, repoName string, fs filesystem.Interface) (string, error) { secretGetter := NewListerSecretGetter(secretLister) // For now, all restic repos share the same key so we don't need the repoName to fetch it. // When we move to full-backup encryption, we'll likely have a separate key per restic repo - // (all within the Ark server's namespace) so GetRepositoryKey will need to take the repo + // (all within the Velero server's namespace) so GetRepositoryKey will need to take the repo // name as an argument as well. - repoKey, err := GetRepositoryKey(secretGetter, arkNamespace) + repoKey, err := GetRepositoryKey(secretGetter, veleroNamespace) if err != nil { return "", err } @@ -183,18 +210,18 @@ func TempCredentialsFile(secretLister corev1listers.SecretLister, arkNamespace, } // NewPodVolumeBackupListOptions creates a ListOptions with a label selector configured to -// find PodVolumeBackups for the backup identified by name and uid. -func NewPodVolumeBackupListOptions(name, uid string) metav1.ListOptions { +// find PodVolumeBackups for the backup identified by name. +func NewPodVolumeBackupListOptions(name string) metav1.ListOptions { return metav1.ListOptions{ - LabelSelector: fmt.Sprintf("%s=%s,%s=%s", arkv1api.BackupNameLabel, name, arkv1api.BackupUIDLabel, uid), + LabelSelector: fmt.Sprintf("%s=%s", velerov1api.BackupNameLabel, name), } } // NewPodVolumeRestoreListOptions creates a ListOptions with a label selector configured to -// find PodVolumeRestores for the restore identified by name and uid. -func NewPodVolumeRestoreListOptions(name, uid string) metav1.ListOptions { +// find PodVolumeRestores for the restore identified by name. +func NewPodVolumeRestoreListOptions(name string) metav1.ListOptions { return metav1.ListOptions{ - LabelSelector: fmt.Sprintf("%s=%s,%s=%s", arkv1api.RestoreNameLabel, name, arkv1api.RestoreUIDLabel, uid), + LabelSelector: fmt.Sprintf("%s=%s", velerov1api.RestoreNameLabel, name), } } @@ -202,7 +229,7 @@ func NewPodVolumeRestoreListOptions(name, uid string) metav1.ListOptions { // should be used when running a restic command for an Azure backend. This list is // the current environment, plus the Azure-specific variables restic needs, namely // a storage account name and key. 
-func AzureCmdEnv(backupLocationLister arkv1listers.BackupStorageLocationLister, namespace, backupLocation string) ([]string, error) { +func AzureCmdEnv(backupLocationLister velerov1listers.BackupStorageLocationLister, namespace, backupLocation string) ([]string, error) { loc, err := backupLocationLister.BackupStorageLocations(namespace).Get(backupLocation) if err != nil { return nil, errors.Wrap(err, "error getting backup storage location") diff --git a/pkg/restic/common_test.go b/pkg/restic/common_test.go index 5b42ab1f50..86625a4709 100644 --- a/pkg/restic/common_test.go +++ b/pkg/restic/common_test.go @@ -27,10 +27,10 @@ import ( corev1listers "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" - arkv1api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/generated/clientset/versioned/fake" - informers "github.com/heptio/ark/pkg/generated/informers/externalversions" - arktest "github.com/heptio/ark/pkg/util/test" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/generated/clientset/versioned/fake" + informers "github.com/heptio/velero/pkg/generated/informers/externalversions" + velerotest "github.com/heptio/velero/pkg/util/test" ) func TestPodHasSnapshotAnnotation(t *testing.T) { @@ -69,6 +69,16 @@ func TestPodHasSnapshotAnnotation(t *testing.T) { annotations: map[string]string{"foo": "bar", podAnnotationPrefix + "foo": "bar"}, expected: true, }, + { + name: "has legacy snapshot annotation only, with suffix", + annotations: map[string]string{podAnnotationLegacyPrefix + "foo": "bar"}, + expected: true, + }, + { + name: "has legacy and current snapshot annotations, with suffixes", + annotations: map[string]string{podAnnotationPrefix + "curr": "baz", podAnnotationLegacyPrefix + "foo": "bar"}, + expected: true, + }, } for _, test := range tests { @@ -116,6 +126,20 @@ func TestGetPodSnapshotAnnotations(t *testing.T) { annotations: map[string]string{"x": "y", podAnnotationPrefix + "foo": "bar", podAnnotationPrefix + "abc": "123"}, expected: map[string]string{"foo": "bar", "abc": "123"}, }, + { + name: "has legacy snapshot annotation only", + annotations: map[string]string{podAnnotationLegacyPrefix + "foo": "bar"}, + expected: map[string]string{"foo": "bar"}, + }, + { + name: "when current and legacy snapshot annotations exist, current wins", + annotations: map[string]string{ + podAnnotationPrefix + "foo": "current", + podAnnotationLegacyPrefix + "foo": "legacy", + podAnnotationLegacyPrefix + "bar": "baz", + }, + expected: map[string]string{"foo": "current", "bar": "baz"}, + }, } for _, test := range tests { @@ -195,6 +219,16 @@ func TestGetVolumesToBackup(t *testing.T) { annotations: map[string]string{"foo": "bar", volumesToBackupAnnotation: "volume-1,volume-2,volume-3"}, expected: []string{"volume-1", "volume-2", "volume-3"}, }, + { + name: "legacy annotation", + annotations: map[string]string{"foo": "bar", volumesToBackupLegacyAnnotation: "volume-1"}, + expected: []string{"volume-1"}, + }, + { + name: "when legacy and current annotations are both specified, current wins", + annotations: map[string]string{volumesToBackupAnnotation: "current", volumesToBackupLegacyAnnotation: "legacy"}, + expected: []string{"current"}, + }, } for _, test := range tests { @@ -216,7 +250,7 @@ func TestGetVolumesToBackup(t *testing.T) { func TestGetSnapshotsInBackup(t *testing.T) { tests := []struct { name string - podVolumeBackups []arkv1api.PodVolumeBackup + podVolumeBackups []velerov1api.PodVolumeBackup expected []SnapshotIdentifier }{ 
{ @@ -226,61 +260,61 @@ func TestGetSnapshotsInBackup(t *testing.T) { }, { name: "no pod volume backups with matching label", - podVolumeBackups: []arkv1api.PodVolumeBackup{ + podVolumeBackups: []velerov1api.PodVolumeBackup{ { - ObjectMeta: metav1.ObjectMeta{Name: "foo", Labels: map[string]string{arkv1api.BackupNameLabel: "non-matching-backup-1"}}, - Spec: arkv1api.PodVolumeBackupSpec{ + ObjectMeta: metav1.ObjectMeta{Name: "foo", Labels: map[string]string{velerov1api.BackupNameLabel: "non-matching-backup-1"}}, + Spec: velerov1api.PodVolumeBackupSpec{ Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-1"}, }, - Status: arkv1api.PodVolumeBackupStatus{SnapshotID: "snap-1"}, + Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-1"}, }, { - ObjectMeta: metav1.ObjectMeta{Name: "bar", Labels: map[string]string{arkv1api.BackupNameLabel: "non-matching-backup-2"}}, - Spec: arkv1api.PodVolumeBackupSpec{ + ObjectMeta: metav1.ObjectMeta{Name: "bar", Labels: map[string]string{velerov1api.BackupNameLabel: "non-matching-backup-2"}}, + Spec: velerov1api.PodVolumeBackupSpec{ Pod: corev1api.ObjectReference{Name: "pod-2", Namespace: "ns-2"}, }, - Status: arkv1api.PodVolumeBackupStatus{SnapshotID: "snap-2"}, + Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-2"}, }, }, expected: nil, }, { name: "some pod volume backups with matching label", - podVolumeBackups: []arkv1api.PodVolumeBackup{ + podVolumeBackups: []velerov1api.PodVolumeBackup{ { - ObjectMeta: metav1.ObjectMeta{Name: "foo", Labels: map[string]string{arkv1api.BackupNameLabel: "non-matching-backup-1"}}, - Spec: arkv1api.PodVolumeBackupSpec{ + ObjectMeta: metav1.ObjectMeta{Name: "foo", Labels: map[string]string{velerov1api.BackupNameLabel: "non-matching-backup-1"}}, + Spec: velerov1api.PodVolumeBackupSpec{ Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-1"}, }, - Status: arkv1api.PodVolumeBackupStatus{SnapshotID: "snap-1"}, + Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-1"}, }, { - ObjectMeta: metav1.ObjectMeta{Name: "bar", Labels: map[string]string{arkv1api.BackupNameLabel: "non-matching-backup-2"}}, - Spec: arkv1api.PodVolumeBackupSpec{ + ObjectMeta: metav1.ObjectMeta{Name: "bar", Labels: map[string]string{velerov1api.BackupNameLabel: "non-matching-backup-2"}}, + Spec: velerov1api.PodVolumeBackupSpec{ Pod: corev1api.ObjectReference{Name: "pod-2", Namespace: "ns-2"}, }, - Status: arkv1api.PodVolumeBackupStatus{SnapshotID: "snap-2"}, + Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-2"}, }, { - ObjectMeta: metav1.ObjectMeta{Name: "completed-pvb", Labels: map[string]string{arkv1api.BackupNameLabel: "backup-1"}}, - Spec: arkv1api.PodVolumeBackupSpec{ + ObjectMeta: metav1.ObjectMeta{Name: "completed-pvb", Labels: map[string]string{velerov1api.BackupNameLabel: "backup-1"}}, + Spec: velerov1api.PodVolumeBackupSpec{ Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-1"}, }, - Status: arkv1api.PodVolumeBackupStatus{SnapshotID: "snap-3"}, + Status: velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-3"}, }, { - ObjectMeta: metav1.ObjectMeta{Name: "completed-pvb-2", Labels: map[string]string{arkv1api.BackupNameLabel: "backup-1"}}, - Spec: arkv1api.PodVolumeBackupSpec{ + ObjectMeta: metav1.ObjectMeta{Name: "completed-pvb-2", Labels: map[string]string{velerov1api.BackupNameLabel: "backup-1"}}, + Spec: velerov1api.PodVolumeBackupSpec{ Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-1"}, }, - Status: arkv1api.PodVolumeBackupStatus{SnapshotID: "snap-4"}, + Status: 
velerov1api.PodVolumeBackupStatus{SnapshotID: "snap-4"}, }, { - ObjectMeta: metav1.ObjectMeta{Name: "incomplete-or-failed-pvb", Labels: map[string]string{arkv1api.BackupNameLabel: "backup-1"}}, - Spec: arkv1api.PodVolumeBackupSpec{ + ObjectMeta: metav1.ObjectMeta{Name: "incomplete-or-failed-pvb", Labels: map[string]string{velerov1api.BackupNameLabel: "backup-1"}}, + Spec: velerov1api.PodVolumeBackupSpec{ Pod: corev1api.ObjectReference{Name: "pod-1", Namespace: "ns-2"}, }, - Status: arkv1api.PodVolumeBackupStatus{SnapshotID: ""}, + Status: velerov1api.PodVolumeBackupStatus{SnapshotID: ""}, }, }, expected: []SnapshotIdentifier{ @@ -301,17 +335,17 @@ func TestGetSnapshotsInBackup(t *testing.T) { var ( client = fake.NewSimpleClientset() sharedInformers = informers.NewSharedInformerFactory(client, 0) - pvbInformer = sharedInformers.Ark().V1().PodVolumeBackups() - arkBackup = &arkv1api.Backup{} + pvbInformer = sharedInformers.Velero().V1().PodVolumeBackups() + veleroBackup = &velerov1api.Backup{} ) - arkBackup.Name = "backup-1" + veleroBackup.Name = "backup-1" for _, pvb := range test.podVolumeBackups { require.NoError(t, pvbInformer.Informer().GetStore().Add(pvb.DeepCopy())) } - res, err := GetSnapshotsInBackup(arkBackup, pvbInformer.Lister()) + res, err := GetSnapshotsInBackup(veleroBackup, pvbInformer.Lister()) assert.NoError(t, err) // sort to ensure good compare of slices @@ -337,10 +371,10 @@ func TestTempCredentialsFile(t *testing.T) { var ( secretInformer = cache.NewSharedIndexInformer(nil, new(corev1api.Secret), 0, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) secretLister = corev1listers.NewSecretLister(secretInformer.GetIndexer()) - fs = arktest.NewFakeFileSystem() + fs = velerotest.NewFakeFileSystem() secret = &corev1api.Secret{ ObjectMeta: metav1.ObjectMeta{ - Namespace: "heptio-ark", + Namespace: "velero", Name: CredentialsSecretName, }, Data: map[string][]byte{ @@ -350,14 +384,14 @@ func TestTempCredentialsFile(t *testing.T) { ) // secret not in lister: expect an error - fileName, err := TempCredentialsFile(secretLister, "heptio-ark", "default", fs) + fileName, err := TempCredentialsFile(secretLister, "velero", "default", fs) assert.Error(t, err) // now add secret to lister require.NoError(t, secretInformer.GetStore().Add(secret)) // secret in lister: expect temp file to be created with password - fileName, err = TempCredentialsFile(secretLister, "heptio-ark", "default", fs) + fileName, err = TempCredentialsFile(secretLister, "velero", "default", fs) require.NoError(t, err) contents, err := fs.ReadFile(fileName) diff --git a/pkg/restic/config.go b/pkg/restic/config.go index 7d3e417324..4ee77eed77 100644 --- a/pkg/restic/config.go +++ b/pkg/restic/config.go @@ -21,9 +21,9 @@ import ( "path" "strings" - arkv1api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/cloudprovider/aws" - "github.com/heptio/ark/pkg/persistence" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/cloudprovider/aws" + "github.com/heptio/velero/pkg/persistence" ) type BackendType string @@ -40,7 +40,7 @@ var getAWSBucketRegion = aws.GetBucketRegion // getRepoPrefix returns the prefix of the value of the --repo flag for // restic commands, i.e. everything except the "/". 
-func getRepoPrefix(location *arkv1api.BackupStorageLocation) string { +func getRepoPrefix(location *velerov1api.BackupStorageLocation) string { var provider, bucket, prefix, bucketAndPrefix string if location.Spec.ObjectStorage != nil { @@ -80,7 +80,7 @@ func getRepoPrefix(location *arkv1api.BackupStorageLocation) string { // GetRepoIdentifier returns the string to be used as the value of the --repo flag in // restic commands for the given repository. -func GetRepoIdentifier(location *arkv1api.BackupStorageLocation, name string) string { +func GetRepoIdentifier(location *velerov1api.BackupStorageLocation, name string) string { prefix := getRepoPrefix(location) return fmt.Sprintf("%s/%s", strings.TrimSuffix(prefix, "/"), name) diff --git a/pkg/restic/config_test.go b/pkg/restic/config_test.go index 31ff68140c..c0e327662c 100644 --- a/pkg/restic/config_test.go +++ b/pkg/restic/config_test.go @@ -22,7 +22,7 @@ import ( "github.com/pkg/errors" "github.com/stretchr/testify/assert" - arkv1api "github.com/heptio/ark/pkg/apis/ark/v1" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" ) func TestGetRepoIdentifier(t *testing.T) { @@ -31,11 +31,11 @@ func TestGetRepoIdentifier(t *testing.T) { return "", errors.New("no region found") } - backupLocation := &arkv1api.BackupStorageLocation{ - Spec: arkv1api.BackupStorageLocationSpec{ + backupLocation := &velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ Provider: "aws", - StorageType: arkv1api.StorageType{ - ObjectStorage: &arkv1api.ObjectStorageLocation{ + StorageType: velerov1api.StorageType{ + ObjectStorage: &velerov1api.ObjectStorageLocation{ Bucket: "bucket", Prefix: "prefix", }, @@ -49,11 +49,11 @@ func TestGetRepoIdentifier(t *testing.T) { return "us-west-2", nil } - backupLocation = &arkv1api.BackupStorageLocation{ - Spec: arkv1api.BackupStorageLocationSpec{ + backupLocation = &velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ Provider: "aws", - StorageType: arkv1api.StorageType{ - ObjectStorage: &arkv1api.ObjectStorageLocation{ + StorageType: velerov1api.StorageType{ + ObjectStorage: &velerov1api.ObjectStorageLocation{ Bucket: "bucket", }, }, @@ -61,11 +61,11 @@ func TestGetRepoIdentifier(t *testing.T) { } assert.Equal(t, "s3:s3-us-west-2.amazonaws.com/bucket/restic/repo-1", GetRepoIdentifier(backupLocation, "repo-1")) - backupLocation = &arkv1api.BackupStorageLocation{ - Spec: arkv1api.BackupStorageLocationSpec{ + backupLocation = &velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ Provider: "aws", - StorageType: arkv1api.StorageType{ - ObjectStorage: &arkv1api.ObjectStorageLocation{ + StorageType: velerov1api.StorageType{ + ObjectStorage: &velerov1api.ObjectStorageLocation{ Bucket: "bucket", Prefix: "prefix", }, @@ -74,14 +74,14 @@ func TestGetRepoIdentifier(t *testing.T) { } assert.Equal(t, "s3:s3-us-west-2.amazonaws.com/bucket/prefix/restic/repo-1", GetRepoIdentifier(backupLocation, "repo-1")) - backupLocation = &arkv1api.BackupStorageLocation{ - Spec: arkv1api.BackupStorageLocationSpec{ + backupLocation = &velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ Provider: "aws", Config: map[string]string{ "s3Url": "alternate-url", }, - StorageType: arkv1api.StorageType{ - ObjectStorage: &arkv1api.ObjectStorageLocation{ + StorageType: velerov1api.StorageType{ + ObjectStorage: &velerov1api.ObjectStorageLocation{ Bucket: "bucket", Prefix: "prefix", }, @@ -90,11 +90,11 @@ func TestGetRepoIdentifier(t *testing.T) { } 
assert.Equal(t, "s3:alternate-url/bucket/prefix/restic/repo-1", GetRepoIdentifier(backupLocation, "repo-1")) - backupLocation = &arkv1api.BackupStorageLocation{ - Spec: arkv1api.BackupStorageLocationSpec{ + backupLocation = &velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ Provider: "azure", - StorageType: arkv1api.StorageType{ - ObjectStorage: &arkv1api.ObjectStorageLocation{ + StorageType: velerov1api.StorageType{ + ObjectStorage: &velerov1api.ObjectStorageLocation{ Bucket: "bucket", Prefix: "prefix", }, @@ -103,11 +103,11 @@ func TestGetRepoIdentifier(t *testing.T) { } assert.Equal(t, "azure:bucket:/prefix/restic/repo-1", GetRepoIdentifier(backupLocation, "repo-1")) - backupLocation = &arkv1api.BackupStorageLocation{ - Spec: arkv1api.BackupStorageLocationSpec{ + backupLocation = &velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ Provider: "gcp", - StorageType: arkv1api.StorageType{ - ObjectStorage: &arkv1api.ObjectStorageLocation{ + StorageType: velerov1api.StorageType{ + ObjectStorage: &velerov1api.ObjectStorageLocation{ Bucket: "bucket-2", Prefix: "prefix-2", }, diff --git a/pkg/restic/exec_commands.go b/pkg/restic/exec_commands.go index 751b429a38..ee55b62c15 100644 --- a/pkg/restic/exec_commands.go +++ b/pkg/restic/exec_commands.go @@ -21,7 +21,7 @@ import ( "github.com/pkg/errors" - "github.com/heptio/ark/pkg/util/exec" + "github.com/heptio/velero/pkg/util/exec" ) // GetSnapshotID runs a 'restic snapshots' command to get the ID of the snapshot diff --git a/pkg/restic/mocks/backupper.go b/pkg/restic/mocks/backupper.go index feb7167408..6065b3fb88 100644 --- a/pkg/restic/mocks/backupper.go +++ b/pkg/restic/mocks/backupper.go @@ -5,7 +5,7 @@ import corev1 "k8s.io/api/core/v1" import logrus "github.com/sirupsen/logrus" import mock "github.com/stretchr/testify/mock" -import v1 "github.com/heptio/ark/pkg/apis/ark/v1" +import v1 "github.com/heptio/velero/pkg/apis/velero/v1" // Backupper is an autogenerated mock type for the Backupper type type Backupper struct { diff --git a/pkg/restic/repository_ensurer.go b/pkg/restic/repository_ensurer.go index 3810c7ec22..d0d38a35a0 100644 --- a/pkg/restic/repository_ensurer.go +++ b/pkg/restic/repository_ensurer.go @@ -27,35 +27,35 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" - arkv1api "github.com/heptio/ark/pkg/apis/ark/v1" - arkv1client "github.com/heptio/ark/pkg/generated/clientset/versioned/typed/ark/v1" - arkv1informers "github.com/heptio/ark/pkg/generated/informers/externalversions/ark/v1" - arkv1listers "github.com/heptio/ark/pkg/generated/listers/ark/v1" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + velerov1client "github.com/heptio/velero/pkg/generated/clientset/versioned/typed/velero/v1" + velerov1informers "github.com/heptio/velero/pkg/generated/informers/externalversions/velero/v1" + velerov1listers "github.com/heptio/velero/pkg/generated/listers/velero/v1" ) -// repositoryEnsurer ensures that Ark restic repositories are created and ready. +// repositoryEnsurer ensures that Velero restic repositories are created and ready. 
type repositoryEnsurer struct { - repoLister arkv1listers.ResticRepositoryLister - repoClient arkv1client.ResticRepositoriesGetter + repoLister velerov1listers.ResticRepositoryLister + repoClient velerov1client.ResticRepositoriesGetter readyChansLock sync.Mutex - readyChans map[string]chan *arkv1api.ResticRepository + readyChans map[string]chan *velerov1api.ResticRepository } -func newRepositoryEnsurer(repoInformer arkv1informers.ResticRepositoryInformer, repoClient arkv1client.ResticRepositoriesGetter, log logrus.FieldLogger) *repositoryEnsurer { +func newRepositoryEnsurer(repoInformer velerov1informers.ResticRepositoryInformer, repoClient velerov1client.ResticRepositoriesGetter, log logrus.FieldLogger) *repositoryEnsurer { r := &repositoryEnsurer{ repoLister: repoInformer.Lister(), repoClient: repoClient, - readyChans: make(map[string]chan *arkv1api.ResticRepository), + readyChans: make(map[string]chan *velerov1api.ResticRepository), } repoInformer.Informer().AddEventHandler( cache.ResourceEventHandlerFuncs{ UpdateFunc: func(old, upd interface{}) { - oldObj := old.(*arkv1api.ResticRepository) - newObj := upd.(*arkv1api.ResticRepository) + oldObj := old.(*velerov1api.ResticRepository) + newObj := upd.(*velerov1api.ResticRepository) - if oldObj.Status.Phase != arkv1api.ResticRepositoryPhaseReady && newObj.Status.Phase == arkv1api.ResticRepositoryPhaseReady { + if oldObj.Status.Phase != velerov1api.ResticRepositoryPhaseReady && newObj.Status.Phase == velerov1api.ResticRepositoryPhaseReady { r.readyChansLock.Lock() defer r.readyChansLock.Unlock() @@ -78,12 +78,12 @@ func newRepositoryEnsurer(repoInformer arkv1informers.ResticRepositoryInformer, func repoLabels(volumeNamespace, backupLocation string) labels.Set { return map[string]string{ - arkv1api.ResticVolumeNamespaceLabel: volumeNamespace, - arkv1api.StorageLocationLabel: backupLocation, + velerov1api.ResticVolumeNamespaceLabel: volumeNamespace, + velerov1api.StorageLocationLabel: backupLocation, } } -func (r *repositoryEnsurer) EnsureRepo(ctx context.Context, namespace, volumeNamespace, backupLocation string) (*arkv1api.ResticRepository, error) { +func (r *repositoryEnsurer) EnsureRepo(ctx context.Context, namespace, volumeNamespace, backupLocation string) (*velerov1api.ResticRepository, error) { selector := labels.SelectorFromSet(repoLabels(volumeNamespace, backupLocation)) repos, err := r.repoLister.ResticRepositories(namespace).List(selector) @@ -94,7 +94,7 @@ func (r *repositoryEnsurer) EnsureRepo(ctx context.Context, namespace, volumeNam return nil, errors.Errorf("more than one ResticRepository found for workload namespace %q, backup storage location %q", volumeNamespace, backupLocation) } if len(repos) == 1 { - if repos[0].Status.Phase != arkv1api.ResticRepositoryPhaseReady { + if repos[0].Status.Phase != velerov1api.ResticRepositoryPhaseReady { return nil, errors.New("restic repository is not ready") } return repos[0], nil @@ -102,13 +102,13 @@ func (r *repositoryEnsurer) EnsureRepo(ctx context.Context, namespace, volumeNam // no repo found: create one and wait for it to be ready - repo := &arkv1api.ResticRepository{ + repo := &velerov1api.ResticRepository{ ObjectMeta: metav1.ObjectMeta{ Namespace: namespace, GenerateName: fmt.Sprintf("%s-%s-", volumeNamespace, backupLocation), Labels: repoLabels(volumeNamespace, backupLocation), }, - Spec: arkv1api.ResticRepositorySpec{ + Spec: velerov1api.ResticRepositorySpec{ VolumeNamespace: volumeNamespace, BackupStorageLocation: backupLocation, MaintenanceFrequency: metav1.Duration{Duration: 
DefaultMaintenanceFrequency}, @@ -130,10 +130,10 @@ func (r *repositoryEnsurer) EnsureRepo(ctx context.Context, namespace, volumeNam } } -func (r *repositoryEnsurer) getReadyChan(name string) chan *arkv1api.ResticRepository { +func (r *repositoryEnsurer) getReadyChan(name string) chan *velerov1api.ResticRepository { r.readyChansLock.Lock() defer r.readyChansLock.Unlock() - r.readyChans[name] = make(chan *arkv1api.ResticRepository) + r.readyChans[name] = make(chan *velerov1api.ResticRepository) return r.readyChans[name] } diff --git a/pkg/restic/repository_keys.go b/pkg/restic/repository_keys.go index 34d2d90144..2c37cddb6f 100644 --- a/pkg/restic/repository_keys.go +++ b/pkg/restic/repository_keys.go @@ -26,7 +26,7 @@ import ( ) const ( - CredentialsSecretName = "ark-restic-credentials" + CredentialsSecretName = "velero-restic-credentials" CredentialsKey = "repository-password" encryptionKey = "static-passw0rd" diff --git a/pkg/restic/repository_manager.go b/pkg/restic/repository_manager.go index 65e307d688..7342576044 100644 --- a/pkg/restic/repository_manager.go +++ b/pkg/restic/repository_manager.go @@ -28,25 +28,25 @@ import ( corev1listers "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" - arkv1api "github.com/heptio/ark/pkg/apis/ark/v1" - clientset "github.com/heptio/ark/pkg/generated/clientset/versioned" - arkv1client "github.com/heptio/ark/pkg/generated/clientset/versioned/typed/ark/v1" - arkv1informers "github.com/heptio/ark/pkg/generated/informers/externalversions/ark/v1" - arkv1listers "github.com/heptio/ark/pkg/generated/listers/ark/v1" - arkexec "github.com/heptio/ark/pkg/util/exec" - "github.com/heptio/ark/pkg/util/filesystem" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + clientset "github.com/heptio/velero/pkg/generated/clientset/versioned" + velerov1client "github.com/heptio/velero/pkg/generated/clientset/versioned/typed/velero/v1" + velerov1informers "github.com/heptio/velero/pkg/generated/informers/externalversions/velero/v1" + velerov1listers "github.com/heptio/velero/pkg/generated/listers/velero/v1" + veleroexec "github.com/heptio/velero/pkg/util/exec" + "github.com/heptio/velero/pkg/util/filesystem" ) // RepositoryManager executes commands against restic repositories. type RepositoryManager interface { // InitRepo initializes a repo with the specified name and identifier. - InitRepo(repo *arkv1api.ResticRepository) error + InitRepo(repo *velerov1api.ResticRepository) error // CheckRepo checks the specified repo for errors. - CheckRepo(repo *arkv1api.ResticRepository) error + CheckRepo(repo *velerov1api.ResticRepository) error // PruneRepo deletes unused data from a repo. - PruneRepo(repo *arkv1api.ResticRepository) error + PruneRepo(repo *velerov1api.ResticRepository) error // Forget removes a snapshot from the list of // available snapshots in a repo. @@ -60,24 +60,24 @@ type RepositoryManager interface { // BackupperFactory can construct restic backuppers. type BackupperFactory interface { // NewBackupper returns a restic backupper for use during a single - // Ark backup. - NewBackupper(context.Context, *arkv1api.Backup) (Backupper, error) + // Velero backup. + NewBackupper(context.Context, *velerov1api.Backup) (Backupper, error) } // RestorerFactory can construct restic restorers. type RestorerFactory interface { // NewRestorer returns a restic restorer for use during a single - // Ark restore. - NewRestorer(context.Context, *arkv1api.Restore) (Restorer, error) + // Velero restore. 
+ NewRestorer(context.Context, *velerov1api.Restore) (Restorer, error) } type repositoryManager struct { namespace string - arkClient clientset.Interface + veleroClient clientset.Interface secretsLister corev1listers.SecretLister - repoLister arkv1listers.ResticRepositoryLister + repoLister velerov1listers.ResticRepositoryLister repoInformerSynced cache.InformerSynced - backupLocationLister arkv1listers.BackupStorageLocationLister + backupLocationLister velerov1listers.BackupStorageLocationLister backupLocationInformerSynced cache.InformerSynced log logrus.FieldLogger repoLocker *repoLocker @@ -90,16 +90,16 @@ type repositoryManager struct { func NewRepositoryManager( ctx context.Context, namespace string, - arkClient clientset.Interface, + veleroClient clientset.Interface, secretsInformer cache.SharedIndexInformer, - repoInformer arkv1informers.ResticRepositoryInformer, - repoClient arkv1client.ResticRepositoriesGetter, - backupLocationInformer arkv1informers.BackupStorageLocationInformer, + repoInformer velerov1informers.ResticRepositoryInformer, + repoClient velerov1client.ResticRepositoriesGetter, + backupLocationInformer velerov1informers.BackupStorageLocationInformer, log logrus.FieldLogger, ) (RepositoryManager, error) { rm := &repositoryManager{ namespace: namespace, - arkClient: arkClient, + veleroClient: veleroClient, secretsLister: corev1listers.NewSecretLister(secretsInformer.GetIndexer()), repoLister: repoInformer.Lister(), repoInformerSynced: repoInformer.Informer().HasSynced, @@ -120,14 +120,14 @@ func NewRepositoryManager( return rm, nil } -func (rm *repositoryManager) NewBackupper(ctx context.Context, backup *arkv1api.Backup) (Backupper, error) { - informer := arkv1informers.NewFilteredPodVolumeBackupInformer( - rm.arkClient, +func (rm *repositoryManager) NewBackupper(ctx context.Context, backup *velerov1api.Backup) (Backupper, error) { + informer := velerov1informers.NewFilteredPodVolumeBackupInformer( + rm.veleroClient, backup.Namespace, 0, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, func(opts *metav1.ListOptions) { - opts.LabelSelector = fmt.Sprintf("%s=%s", arkv1api.BackupUIDLabel, backup.UID) + opts.LabelSelector = fmt.Sprintf("%s=%s", velerov1api.BackupUIDLabel, backup.UID) }, ) @@ -141,14 +141,14 @@ func (rm *repositoryManager) NewBackupper(ctx context.Context, backup *arkv1api. return b, nil } -func (rm *repositoryManager) NewRestorer(ctx context.Context, restore *arkv1api.Restore) (Restorer, error) { - informer := arkv1informers.NewFilteredPodVolumeRestoreInformer( - rm.arkClient, +func (rm *repositoryManager) NewRestorer(ctx context.Context, restore *velerov1api.Restore) (Restorer, error) { + informer := velerov1informers.NewFilteredPodVolumeRestoreInformer( + rm.veleroClient, restore.Namespace, 0, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, func(opts *metav1.ListOptions) { - opts.LabelSelector = fmt.Sprintf("%s=%s", arkv1api.RestoreUIDLabel, restore.UID) + opts.LabelSelector = fmt.Sprintf("%s=%s", velerov1api.RestoreUIDLabel, restore.UID) }, ) @@ -162,7 +162,7 @@ func (rm *repositoryManager) NewRestorer(ctx context.Context, restore *arkv1api. 
return r, nil } -func (rm *repositoryManager) InitRepo(repo *arkv1api.ResticRepository) error { +func (rm *repositoryManager) InitRepo(repo *velerov1api.ResticRepository) error { // restic init requires an exclusive lock rm.repoLocker.LockExclusive(repo.Name) defer rm.repoLocker.UnlockExclusive(repo.Name) @@ -170,7 +170,7 @@ func (rm *repositoryManager) InitRepo(repo *arkv1api.ResticRepository) error { return rm.exec(InitCommand(repo.Spec.ResticIdentifier), repo.Spec.BackupStorageLocation) } -func (rm *repositoryManager) CheckRepo(repo *arkv1api.ResticRepository) error { +func (rm *repositoryManager) CheckRepo(repo *velerov1api.ResticRepository) error { // restic check requires an exclusive lock rm.repoLocker.LockExclusive(repo.Name) defer rm.repoLocker.UnlockExclusive(repo.Name) @@ -178,7 +178,7 @@ func (rm *repositoryManager) CheckRepo(repo *arkv1api.ResticRepository) error { return rm.exec(CheckCommand(repo.Spec.ResticIdentifier), repo.Spec.BackupStorageLocation) } -func (rm *repositoryManager) PruneRepo(repo *arkv1api.ResticRepository) error { +func (rm *repositoryManager) PruneRepo(repo *velerov1api.ResticRepository) error { // restic prune requires an exclusive lock rm.repoLocker.LockExclusive(repo.Name) defer rm.repoLocker.UnlockExclusive(repo.Name) @@ -229,7 +229,7 @@ func (rm *repositoryManager) exec(cmd *Command, backupLocation string) error { cmd.Env = env } - stdout, stderr, err := arkexec.RunCommand(cmd.Cmd()) + stdout, stderr, err := veleroexec.RunCommand(cmd.Cmd()) rm.log.WithFields(logrus.Fields{ "repository": cmd.RepoName(), "command": cmd.String(), diff --git a/pkg/restic/restorer.go b/pkg/restic/restorer.go index 9cf29a8ca8..ea285a738a 100644 --- a/pkg/restic/restorer.go +++ b/pkg/restic/restorer.go @@ -26,14 +26,14 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/cache" - arkv1api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/util/boolptr" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/util/boolptr" ) // Restorer can execute restic restores of volumes in a pod. type Restorer interface { // RestorePodVolumes restores all annotated volumes in a pod. 
- RestorePodVolumes(restore *arkv1api.Restore, pod *corev1api.Pod, sourceNamespace, backupLocation string, log logrus.FieldLogger) []error + RestorePodVolumes(restore *velerov1api.Restore, pod *corev1api.Pod, sourceNamespace, backupLocation string, log logrus.FieldLogger) []error } type restorer struct { @@ -42,7 +42,7 @@ type restorer struct { repoEnsurer *repositoryEnsurer resultsLock sync.Mutex - results map[string]chan *arkv1api.PodVolumeRestore + results map[string]chan *velerov1api.PodVolumeRestore } func newRestorer( @@ -57,15 +57,15 @@ func newRestorer( repoManager: rm, repoEnsurer: repoEnsurer, - results: make(map[string]chan *arkv1api.PodVolumeRestore), + results: make(map[string]chan *velerov1api.PodVolumeRestore), } podVolumeRestoreInformer.AddEventHandler( cache.ResourceEventHandlerFuncs{ UpdateFunc: func(_, obj interface{}) { - pvr := obj.(*arkv1api.PodVolumeRestore) + pvr := obj.(*velerov1api.PodVolumeRestore) - if pvr.Status.Phase == arkv1api.PodVolumeRestorePhaseCompleted || pvr.Status.Phase == arkv1api.PodVolumeRestorePhaseFailed { + if pvr.Status.Phase == velerov1api.PodVolumeRestorePhaseCompleted || pvr.Status.Phase == velerov1api.PodVolumeRestorePhaseFailed { r.resultsLock.Lock() defer r.resultsLock.Unlock() @@ -83,7 +83,7 @@ func newRestorer( return r } -func (r *restorer) RestorePodVolumes(restore *arkv1api.Restore, pod *corev1api.Pod, sourceNamespace, backupLocation string, log logrus.FieldLogger) []error { +func (r *restorer) RestorePodVolumes(restore *velerov1api.Restore, pod *corev1api.Pod, sourceNamespace, backupLocation string, log logrus.FieldLogger) []error { // get volumes to restore from pod's annotations volumesToRestore := GetPodSnapshotAnnotations(pod) if len(volumesToRestore) == 0 { @@ -100,7 +100,7 @@ func (r *restorer) RestorePodVolumes(restore *arkv1api.Restore, pod *corev1api.P r.repoManager.repoLocker.Lock(repo.Name) defer r.repoManager.repoLocker.Unlock(repo.Name) - resultsChan := make(chan *arkv1api.PodVolumeRestore) + resultsChan := make(chan *velerov1api.PodVolumeRestore) r.resultsLock.Lock() r.results[resultsKey(pod.Namespace, pod.Name)] = resultsChan @@ -114,7 +114,7 @@ func (r *restorer) RestorePodVolumes(restore *arkv1api.Restore, pod *corev1api.P for volume, snapshot := range volumesToRestore { volumeRestore := newPodVolumeRestore(restore, pod, volume, snapshot, backupLocation, repo.Spec.ResticIdentifier) - if err := errorOnly(r.repoManager.arkClient.ArkV1().PodVolumeRestores(volumeRestore.Namespace).Create(volumeRestore)); err != nil { + if err := errorOnly(r.repoManager.veleroClient.VeleroV1().PodVolumeRestores(volumeRestore.Namespace).Create(volumeRestore)); err != nil { errs = append(errs, errors.WithStack(err)) continue } @@ -128,7 +128,7 @@ ForEachVolume: errs = append(errs, errors.New("timed out waiting for all PodVolumeRestores to complete")) break ForEachVolume case res := <-resultsChan: - if res.Status.Phase == arkv1api.PodVolumeRestorePhaseFailed { + if res.Status.Phase == velerov1api.PodVolumeRestorePhaseFailed { errs = append(errs, errors.Errorf("pod volume restore failed: %s", res.Status.Message)) } } @@ -141,14 +141,14 @@ ForEachVolume: return errs } -func newPodVolumeRestore(restore *arkv1api.Restore, pod *corev1api.Pod, volume, snapshot, backupLocation, repoIdentifier string) *arkv1api.PodVolumeRestore { - return &arkv1api.PodVolumeRestore{ +func newPodVolumeRestore(restore *velerov1api.Restore, pod *corev1api.Pod, volume, snapshot, backupLocation, repoIdentifier string) *velerov1api.PodVolumeRestore { + return 
&velerov1api.PodVolumeRestore{ ObjectMeta: metav1.ObjectMeta{ Namespace: restore.Namespace, GenerateName: restore.Name + "-", OwnerReferences: []metav1.OwnerReference{ { - APIVersion: arkv1api.SchemeGroupVersion.String(), + APIVersion: velerov1api.SchemeGroupVersion.String(), Kind: "Restore", Name: restore.Name, UID: restore.UID, @@ -156,12 +156,12 @@ func newPodVolumeRestore(restore *arkv1api.Restore, pod *corev1api.Pod, volume, }, }, Labels: map[string]string{ - arkv1api.RestoreNameLabel: restore.Name, - arkv1api.RestoreUIDLabel: string(restore.UID), - arkv1api.PodUIDLabel: string(pod.UID), + velerov1api.RestoreNameLabel: restore.Name, + velerov1api.RestoreUIDLabel: string(restore.UID), + velerov1api.PodUIDLabel: string(pod.UID), }, }, - Spec: arkv1api.PodVolumeRestoreSpec{ + Spec: velerov1api.PodVolumeRestoreSpec{ Pod: corev1api.ObjectReference{ Kind: "Pod", Namespace: pod.Namespace, diff --git a/pkg/restore/item_action.go b/pkg/restore/item_action.go index b5f6233f3d..eaf0526433 100644 --- a/pkg/restore/item_action.go +++ b/pkg/restore/item_action.go @@ -19,7 +19,7 @@ package restore import ( "k8s.io/apimachinery/pkg/runtime" - api "github.com/heptio/ark/pkg/apis/ark/v1" + api "github.com/heptio/velero/pkg/apis/velero/v1" ) // ItemAction is an actor that performs an operation on an individual item being restored. diff --git a/pkg/restore/job_action.go b/pkg/restore/job_action.go index a5229c4b09..5e8668fd1f 100644 --- a/pkg/restore/job_action.go +++ b/pkg/restore/job_action.go @@ -20,8 +20,8 @@ import ( "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/runtime" - api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/util/collections" + api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/util/collections" ) type jobAction struct { diff --git a/pkg/restore/job_action_test.go b/pkg/restore/job_action_test.go index 238b067b29..d0096b892f 100644 --- a/pkg/restore/job_action_test.go +++ b/pkg/restore/job_action_test.go @@ -22,7 +22,7 @@ import ( "github.com/stretchr/testify/assert" "k8s.io/apimachinery/pkg/runtime" - arktest "github.com/heptio/ark/pkg/util/test" + velerotest "github.com/heptio/velero/pkg/util/test" ) func TestJobActionExecute(t *testing.T) { @@ -122,7 +122,7 @@ func TestJobActionExecute(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - action := NewJobAction(arktest.NewLogger()) + action := NewJobAction(velerotest.NewLogger()) res, _, err := action.Execute(test.obj, nil) diff --git a/pkg/restore/merge_service_account.go b/pkg/restore/merge_service_account.go index dcbe030763..53b05ab75a 100644 --- a/pkg/restore/merge_service_account.go +++ b/pkg/restore/merge_service_account.go @@ -25,7 +25,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - "github.com/heptio/ark/pkg/util/collections" + "github.com/heptio/velero/pkg/util/collections" ) // mergeServiceAccount takes a backed up serviceaccount and merges attributes into the current in-cluster service account. 
diff --git a/pkg/restore/merge_service_account_test.go b/pkg/restore/merge_service_account_test.go index 655ec205c1..0e5a044527 100644 --- a/pkg/restore/merge_service_account_test.go +++ b/pkg/restore/merge_service_account_test.go @@ -25,7 +25,7 @@ import ( corev1api "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - arktest "github.com/heptio/ark/pkg/util/test" + velerotest "github.com/heptio/velero/pkg/util/test" ) var mergedServiceAccountsBenchmarkResult *unstructured.Unstructured @@ -38,7 +38,7 @@ func BenchmarkMergeServiceAccountBasic(b *testing.B) { }{ { name: "only default tokens present", - fromCluster: arktest.UnstructuredOrDie( + fromCluster: velerotest.UnstructuredOrDie( `{ "apiVersion": "v1", "kind": "ServiceAccount", @@ -51,7 +51,7 @@ func BenchmarkMergeServiceAccountBasic(b *testing.B) { ] }`, ), - fromBackup: arktest.UnstructuredOrDie( + fromBackup: velerotest.UnstructuredOrDie( `{ "kind": "ServiceAccount", "apiVersion": "v1", @@ -67,7 +67,7 @@ func BenchmarkMergeServiceAccountBasic(b *testing.B) { }, { name: "service accounts with multiple secrets", - fromCluster: arktest.UnstructuredOrDie( + fromCluster: velerotest.UnstructuredOrDie( `{ "apiVersion": "v1", "kind": "ServiceAccount", @@ -83,7 +83,7 @@ func BenchmarkMergeServiceAccountBasic(b *testing.B) { }`, ), - fromBackup: arktest.UnstructuredOrDie( + fromBackup: velerotest.UnstructuredOrDie( `{ "kind": "ServiceAccount", "apiVersion": "v1", @@ -101,7 +101,7 @@ func BenchmarkMergeServiceAccountBasic(b *testing.B) { }, { name: "service accounts with labels and annotations", - fromCluster: arktest.UnstructuredOrDie( + fromCluster: velerotest.UnstructuredOrDie( `{ "apiVersion": "v1", "kind": "ServiceAccount", @@ -126,7 +126,7 @@ func BenchmarkMergeServiceAccountBasic(b *testing.B) { }`, ), - fromBackup: arktest.UnstructuredOrDie( + fromBackup: velerotest.UnstructuredOrDie( `{ "kind": "ServiceAccount", "apiVersion": "v1", @@ -325,7 +325,7 @@ func TestGeneratePatch(t *testing.T) { }{ { name: "objects are equal, no patch needed", - fromCluster: arktest.UnstructuredOrDie( + fromCluster: velerotest.UnstructuredOrDie( `{ "apiVersion": "v1", "kind": "ServiceAccount", @@ -338,7 +338,7 @@ func TestGeneratePatch(t *testing.T) { ] }`, ), - desired: arktest.UnstructuredOrDie( + desired: velerotest.UnstructuredOrDie( `{ "apiVersion": "v1", "kind": "ServiceAccount", @@ -356,7 +356,7 @@ func TestGeneratePatch(t *testing.T) { }, { name: "patch is required when labels are present", - fromCluster: arktest.UnstructuredOrDie( + fromCluster: velerotest.UnstructuredOrDie( `{ "apiVersion": "v1", "kind": "ServiceAccount", @@ -369,7 +369,7 @@ func TestGeneratePatch(t *testing.T) { ] }`, ), - desired: arktest.UnstructuredOrDie( + desired: velerotest.UnstructuredOrDie( `{ "apiVersion": "v1", "kind": "ServiceAccount", @@ -400,7 +400,7 @@ func TestGeneratePatch(t *testing.T) { }, { name: "patch is required when annotations are present", - fromCluster: arktest.UnstructuredOrDie( + fromCluster: velerotest.UnstructuredOrDie( `{ "apiVersion": "v1", "kind": "ServiceAccount", @@ -413,7 +413,7 @@ func TestGeneratePatch(t *testing.T) { ] }`, ), - desired: arktest.UnstructuredOrDie( + desired: velerotest.UnstructuredOrDie( `{ "apiVersion": "v1", "kind": "ServiceAccount", @@ -444,7 +444,7 @@ func TestGeneratePatch(t *testing.T) { }, { name: "patch is required many secrets are present", - fromCluster: arktest.UnstructuredOrDie( + fromCluster: velerotest.UnstructuredOrDie( `{ "apiVersion": "v1", "kind": "ServiceAccount", @@ -457,7 +457,7 @@ 
func TestGeneratePatch(t *testing.T) { ] }`, ), - desired: arktest.UnstructuredOrDie( + desired: velerotest.UnstructuredOrDie( `{ "apiVersion": "v1", "kind": "ServiceAccount", @@ -505,7 +505,7 @@ func TestMergeServiceAccountBasic(t *testing.T) { }{ { name: "only default token", - fromCluster: arktest.UnstructuredOrDie( + fromCluster: velerotest.UnstructuredOrDie( `{ "apiVersion": "v1", "kind": "ServiceAccount", @@ -520,7 +520,7 @@ func TestMergeServiceAccountBasic(t *testing.T) { ), // fromBackup doesn't have the default token because it is expected to already have been removed // by the service account action - fromBackup: arktest.UnstructuredOrDie( + fromBackup: velerotest.UnstructuredOrDie( `{ "kind": "ServiceAccount", "apiVersion": "v1", @@ -531,7 +531,7 @@ func TestMergeServiceAccountBasic(t *testing.T) { "secrets": [] }`, ), - expectedRes: arktest.UnstructuredOrDie( + expectedRes: velerotest.UnstructuredOrDie( `{ "apiVersion": "v1", "kind": "ServiceAccount", @@ -547,7 +547,7 @@ func TestMergeServiceAccountBasic(t *testing.T) { }, { name: "service accounts with multiple secrets", - fromCluster: arktest.UnstructuredOrDie( + fromCluster: velerotest.UnstructuredOrDie( `{ "apiVersion": "v1", "kind": "ServiceAccount", @@ -564,7 +564,7 @@ func TestMergeServiceAccountBasic(t *testing.T) { ), // fromBackup doesn't have the default token because it is expected to already have been removed // by the service account action - fromBackup: arktest.UnstructuredOrDie( + fromBackup: velerotest.UnstructuredOrDie( `{ "kind": "ServiceAccount", "apiVersion": "v1", @@ -578,7 +578,7 @@ func TestMergeServiceAccountBasic(t *testing.T) { ] }`, ), - expectedRes: arktest.UnstructuredOrDie( + expectedRes: velerotest.UnstructuredOrDie( `{ "apiVersion": "v1", "kind": "ServiceAccount", @@ -598,7 +598,7 @@ func TestMergeServiceAccountBasic(t *testing.T) { }, { name: "service accounts with labels and annotations", - fromCluster: arktest.UnstructuredOrDie( + fromCluster: velerotest.UnstructuredOrDie( `{ "apiVersion": "v1", "kind": "ServiceAccount", @@ -624,7 +624,7 @@ func TestMergeServiceAccountBasic(t *testing.T) { ), // fromBackup doesn't have the default token because it is expected to already have been removed // by the service account action - fromBackup: arktest.UnstructuredOrDie( + fromBackup: velerotest.UnstructuredOrDie( `{ "kind": "ServiceAccount", "apiVersion": "v1", @@ -650,7 +650,7 @@ func TestMergeServiceAccountBasic(t *testing.T) { "secrets": [] }`, ), - expectedRes: arktest.UnstructuredOrDie( + expectedRes: velerotest.UnstructuredOrDie( `{ "kind": "ServiceAccount", "apiVersion": "v1", diff --git a/pkg/restore/mocks/item_action.go b/pkg/restore/mocks/item_action.go index 39d8a51264..6edd047a94 100644 --- a/pkg/restore/mocks/item_action.go +++ b/pkg/restore/mocks/item_action.go @@ -17,9 +17,9 @@ limitations under the License. 
package mocks import mock "github.com/stretchr/testify/mock" -import restore "github.com/heptio/ark/pkg/restore" +import restore "github.com/heptio/velero/pkg/restore" import runtime "k8s.io/apimachinery/pkg/runtime" -import v1 "github.com/heptio/ark/pkg/apis/ark/v1" +import v1 "github.com/heptio/velero/pkg/apis/velero/v1" // ItemAction is an autogenerated mock type for the ItemAction type type ItemAction struct { diff --git a/pkg/restore/pod_action.go b/pkg/restore/pod_action.go index bcdf299de0..c7748c4129 100644 --- a/pkg/restore/pod_action.go +++ b/pkg/restore/pod_action.go @@ -22,8 +22,8 @@ import ( "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/runtime" - api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/util/collections" + api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/util/collections" ) type podAction struct { diff --git a/pkg/restore/pod_action_test.go b/pkg/restore/pod_action_test.go index 9c1077df62..1a5409000b 100644 --- a/pkg/restore/pod_action_test.go +++ b/pkg/restore/pod_action_test.go @@ -22,7 +22,7 @@ import ( "github.com/stretchr/testify/assert" "k8s.io/apimachinery/pkg/runtime" - arktest "github.com/heptio/ark/pkg/util/test" + velerotest "github.com/heptio/velero/pkg/util/test" ) func TestPodActionExecute(t *testing.T) { @@ -167,7 +167,7 @@ func TestPodActionExecute(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - action := NewPodAction(arktest.NewLogger()) + action := NewPodAction(velerotest.NewLogger()) res, warning, err := action.Execute(test.obj, nil) diff --git a/pkg/restore/restic_restore_action.go b/pkg/restore/restic_restore_action.go index 45e4bfe81c..e82e1613b1 100644 --- a/pkg/restore/restic_restore_action.go +++ b/pkg/restore/restic_restore_action.go @@ -25,10 +25,10 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/buildinfo" - "github.com/heptio/ark/pkg/restic" - "github.com/heptio/ark/pkg/util/kube" + api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/buildinfo" + "github.com/heptio/velero/pkg/restic" + "github.com/heptio/velero/pkg/util/kube" ) type resticRestoreAction struct { @@ -50,7 +50,7 @@ func initContainerImage() string { } // TODO allow full image URL to be overriden via CLI flag. 
- return fmt.Sprintf("gcr.io/heptio-images/ark-restic-restore-helper:%s", tag) + return fmt.Sprintf("gcr.io/heptio-images/velero-restic-restore-helper:%s", tag) } func (a *resticRestoreAction) AppliesTo() (ResourceSelector, error) { diff --git a/pkg/restore/restore.go b/pkg/restore/restore.go index e660142cb8..3f0ce759e9 100644 --- a/pkg/restore/restore.go +++ b/pkg/restore/restore.go @@ -32,7 +32,7 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -45,19 +45,19 @@ import ( "k8s.io/apimachinery/pkg/watch" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" - api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/client" - "github.com/heptio/ark/pkg/cloudprovider" - "github.com/heptio/ark/pkg/discovery" - listers "github.com/heptio/ark/pkg/generated/listers/ark/v1" - "github.com/heptio/ark/pkg/kuberesource" - "github.com/heptio/ark/pkg/restic" - "github.com/heptio/ark/pkg/util/boolptr" - "github.com/heptio/ark/pkg/util/collections" - "github.com/heptio/ark/pkg/util/filesystem" - "github.com/heptio/ark/pkg/util/kube" - arksync "github.com/heptio/ark/pkg/util/sync" - "github.com/heptio/ark/pkg/volume" + api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/client" + "github.com/heptio/velero/pkg/cloudprovider" + "github.com/heptio/velero/pkg/discovery" + listers "github.com/heptio/velero/pkg/generated/listers/velero/v1" + "github.com/heptio/velero/pkg/kuberesource" + "github.com/heptio/velero/pkg/restic" + "github.com/heptio/velero/pkg/util/boolptr" + "github.com/heptio/velero/pkg/util/collections" + "github.com/heptio/velero/pkg/util/filesystem" + "github.com/heptio/velero/pkg/util/kube" + velerosync "github.com/heptio/velero/pkg/util/sync" + "github.com/heptio/velero/pkg/volume" ) type BlockStoreGetter interface { @@ -339,7 +339,7 @@ type context struct { actions []resolvedAction blockStoreGetter BlockStoreGetter resticRestorer restic.Restorer - globalWaitGroup arksync.ErrorGroup + globalWaitGroup velerosync.ErrorGroup resourceWaitGroup sync.WaitGroup resourceWatches []watch.Interface pvsToProvision sets.String @@ -373,17 +373,17 @@ func (ctx *context) restoreFromDir(dir string) (api.RestoreResult, api.RestoreRe resourcesDir := filepath.Join(dir, api.ResourcesDir) rde, err := ctx.fileSystem.DirExists(resourcesDir) if err != nil { - addArkError(&errs, err) + addVeleroError(&errs, err) return warnings, errs } if !rde { - addArkError(&errs, errors.New("backup does not contain top level resources directory")) + addVeleroError(&errs, errors.New("backup does not contain top level resources directory")) return warnings, errs } resourceDirs, err := ctx.fileSystem.ReadDir(resourcesDir) if err != nil { - addArkError(&errs, err) + addVeleroError(&errs, err) return warnings, errs } @@ -423,7 +423,7 @@ func (ctx *context) restoreFromDir(dir string) (api.RestoreResult, api.RestoreRe clusterSubDir := filepath.Join(resourcePath, api.ClusterScopedDir) clusterSubDirExists, err := ctx.fileSystem.DirExists(clusterSubDir) if err != nil { - addArkError(&errs, err) + addVeleroError(&errs, err) return warnings, errs } if clusterSubDirExists { @@ -436,7 +436,7 @@ func (ctx *context) restoreFromDir(dir string) (api.RestoreResult, api.RestoreRe nsSubDir := filepath.Join(resourcePath, api.NamespaceScopedDir) nsSubDirExists, err := ctx.fileSystem.DirExists(nsSubDir) if err != nil 
{ - addArkError(&errs, err) + addVeleroError(&errs, err) return warnings, errs } if !nsSubDirExists { @@ -445,7 +445,7 @@ func (ctx *context) restoreFromDir(dir string) (api.RestoreResult, api.RestoreRe nsDirs, err := ctx.fileSystem.ReadDir(nsSubDir) if err != nil { - addArkError(&errs, err) + addVeleroError(&errs, err) return warnings, errs } @@ -475,7 +475,7 @@ func (ctx *context) restoreFromDir(dir string) (api.RestoreResult, api.RestoreRe logger := ctx.log.WithField("namespace", nsName) ns := getNamespace(logger, filepath.Join(dir, api.ResourcesDir, "namespaces", api.ClusterScopedDir, nsName+".json"), mappedNsName) if _, err := kube.EnsureNamespaceExists(ns, ctx.namespaceClient); err != nil { - addArkError(&errs, err) + addVeleroError(&errs, err) continue } @@ -501,10 +501,10 @@ func (ctx *context) restoreFromDir(dir string) (api.RestoreResult, api.RestoreRe ctx.log.Debug("Done waiting on global wait group") for _, err := range waitErrs { - // TODO not ideal to be adding these to Ark-level errors + // TODO not ideal to be adding these to Velero-level errors // rather than a specific namespace, but don't have a way // to track the namespace right now. - errs.Ark = append(errs.Ark, err.Error()) + errs.Velero = append(errs.Velero, err.Error()) } return warnings, errs @@ -550,7 +550,7 @@ func getNamespace(logger logrus.FieldLogger, path, remappedName string) *v1.Name // by appending the corresponding lists to one another. func merge(a, b *api.RestoreResult) { a.Cluster = append(a.Cluster, b.Cluster...) - a.Ark = append(a.Ark, b.Ark...) + a.Velero = append(a.Velero, b.Velero...) for k, v := range b.Namespaces { if a.Namespaces == nil { a.Namespaces = make(map[string][]string) @@ -559,9 +559,9 @@ func merge(a, b *api.RestoreResult) { } } -// addArkError appends an error to the provided RestoreResult's Ark list. -func addArkError(r *api.RestoreResult, err error) { - r.Ark = append(r.Ark, err.Error()) +// addVeleroError appends an error to the provided RestoreResult's Velero list. 
+func addVeleroError(r *api.RestoreResult, err error) { + r.Velero = append(r.Velero, err.Error()) } // addToResult appends an error to the provided RestoreResult, either within @@ -659,7 +659,7 @@ func (ctx *context) restoreResource(resource, namespace, resourcePath string) (a var err error resourceClient, err = ctx.dynamicFactory.ClientForGroupVersionResource(obj.GroupVersionKind().GroupVersion(), resource, namespace) if err != nil { - addArkError(&errs, fmt.Errorf("error getting resource client for namespace %q, resource %q: %v", namespace, &groupResource, err)) + addVeleroError(&errs, fmt.Errorf("error getting resource client for namespace %q, resource %q: %v", namespace, &groupResource, err)) return warnings, errs } } @@ -721,7 +721,7 @@ func (ctx *context) restoreResource(resource, namespace, resourcePath string) (a if _, err := waitForReady(resourceWatch.ResultChan(), name, isPVReady, time.Minute, ctx.log); err != nil { ctx.log.Warnf("Timeout reached waiting for persistent volume %s to become ready", name) - addArkError(&warnings, fmt.Errorf("timeout reached waiting for persistent volume %s to become ready", name)) + addVeleroError(&warnings, fmt.Errorf("timeout reached waiting for persistent volume %s to become ready", name)) } }() } diff --git a/pkg/restore/restore_test.go b/pkg/restore/restore_test.go index 5d35dc2a43..c8f8eb2dae 100644 --- a/pkg/restore/restore_test.go +++ b/pkg/restore/restore_test.go @@ -26,7 +26,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -38,16 +38,16 @@ import ( "k8s.io/client-go/kubernetes/scheme" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" - api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/cloudprovider" - cloudprovidermocks "github.com/heptio/ark/pkg/cloudprovider/mocks" - "github.com/heptio/ark/pkg/generated/clientset/versioned/fake" - informers "github.com/heptio/ark/pkg/generated/informers/externalversions" - "github.com/heptio/ark/pkg/kuberesource" - "github.com/heptio/ark/pkg/util/collections" - "github.com/heptio/ark/pkg/util/logging" - arktest "github.com/heptio/ark/pkg/util/test" - "github.com/heptio/ark/pkg/volume" + api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/cloudprovider" + cloudprovidermocks "github.com/heptio/velero/pkg/cloudprovider/mocks" + "github.com/heptio/velero/pkg/generated/clientset/versioned/fake" + informers "github.com/heptio/velero/pkg/generated/informers/externalversions" + "github.com/heptio/velero/pkg/kuberesource" + "github.com/heptio/velero/pkg/util/collections" + "github.com/heptio/velero/pkg/util/logging" + velerotest "github.com/heptio/velero/pkg/util/test" + "github.com/heptio/velero/pkg/volume" ) func TestPrioritizeResources(t *testing.T) { @@ -89,7 +89,7 @@ func TestPrioritizeResources(t *testing.T) { }, } - logger := arktest.NewLogger() + logger := velerotest.NewLogger() for _, test := range tests { t.Run(test.name, func(t *testing.T) { @@ -103,7 +103,7 @@ func TestPrioritizeResources(t *testing.T) { helperResourceList = append(helperResourceList, resourceList) } - helper := arktest.NewFakeDiscoveryHelper(true, nil) + helper := velerotest.NewFakeDiscoveryHelper(true, nil) helper.ResourceList = helperResourceList includesExcludes := 
collections.NewIncludesExcludes().Includes(test.includes...).Excludes(test.excludes...) @@ -127,7 +127,7 @@ func TestPrioritizeResources(t *testing.T) { func TestRestoreNamespaceFiltering(t *testing.T) { tests := []struct { name string - fileSystem *arktest.FakeFileSystem + fileSystem *velerotest.FakeFileSystem baseDir string restore *api.Restore expectedReadDirs []string @@ -135,7 +135,7 @@ func TestRestoreNamespaceFiltering(t *testing.T) { }{ { name: "namespacesToRestore having * restores all namespaces", - fileSystem: arktest.NewFakeFileSystem().WithDirectories("bak/resources/nodes/cluster", "bak/resources/secrets/namespaces/a", "bak/resources/secrets/namespaces/b", "bak/resources/secrets/namespaces/c"), + fileSystem: velerotest.NewFakeFileSystem().WithDirectories("bak/resources/nodes/cluster", "bak/resources/secrets/namespaces/a", "bak/resources/secrets/namespaces/b", "bak/resources/secrets/namespaces/c"), baseDir: "bak", restore: &api.Restore{Spec: api.RestoreSpec{IncludedNamespaces: []string{"*"}}}, expectedReadDirs: []string{"bak/resources", "bak/resources/nodes/cluster", "bak/resources/secrets/namespaces", "bak/resources/secrets/namespaces/a", "bak/resources/secrets/namespaces/b", "bak/resources/secrets/namespaces/c"}, @@ -146,7 +146,7 @@ func TestRestoreNamespaceFiltering(t *testing.T) { }, { name: "namespacesToRestore properly filters", - fileSystem: arktest.NewFakeFileSystem().WithDirectories("bak/resources/nodes/cluster", "bak/resources/secrets/namespaces/a", "bak/resources/secrets/namespaces/b", "bak/resources/secrets/namespaces/c"), + fileSystem: velerotest.NewFakeFileSystem().WithDirectories("bak/resources/nodes/cluster", "bak/resources/secrets/namespaces/a", "bak/resources/secrets/namespaces/b", "bak/resources/secrets/namespaces/c"), baseDir: "bak", restore: &api.Restore{Spec: api.RestoreSpec{IncludedNamespaces: []string{"b", "c"}}}, expectedReadDirs: []string{"bak/resources", "bak/resources/nodes/cluster", "bak/resources/secrets/namespaces", "bak/resources/secrets/namespaces/b", "bak/resources/secrets/namespaces/c"}, @@ -157,7 +157,7 @@ func TestRestoreNamespaceFiltering(t *testing.T) { }, { name: "namespacesToRestore properly filters with exclusion filter", - fileSystem: arktest.NewFakeFileSystem().WithDirectories("bak/resources/nodes/cluster", "bak/resources/secrets/namespaces/a", "bak/resources/secrets/namespaces/b", "bak/resources/secrets/namespaces/c"), + fileSystem: velerotest.NewFakeFileSystem().WithDirectories("bak/resources/nodes/cluster", "bak/resources/secrets/namespaces/a", "bak/resources/secrets/namespaces/b", "bak/resources/secrets/namespaces/c"), baseDir: "bak", restore: &api.Restore{Spec: api.RestoreSpec{IncludedNamespaces: []string{"*"}, ExcludedNamespaces: []string{"a"}}}, expectedReadDirs: []string{"bak/resources", "bak/resources/nodes/cluster", "bak/resources/secrets/namespaces", "bak/resources/secrets/namespaces/b", "bak/resources/secrets/namespaces/c"}, @@ -168,7 +168,7 @@ func TestRestoreNamespaceFiltering(t *testing.T) { }, { name: "namespacesToRestore properly filters with inclusion & exclusion filters", - fileSystem: arktest.NewFakeFileSystem().WithDirectories("bak/resources/nodes/cluster", "bak/resources/secrets/namespaces/a", "bak/resources/secrets/namespaces/b", "bak/resources/secrets/namespaces/c"), + fileSystem: velerotest.NewFakeFileSystem().WithDirectories("bak/resources/nodes/cluster", "bak/resources/secrets/namespaces/a", "bak/resources/secrets/namespaces/b", "bak/resources/secrets/namespaces/c"), baseDir: "bak", restore: &api.Restore{ 
Spec: api.RestoreSpec{ @@ -186,7 +186,7 @@ func TestRestoreNamespaceFiltering(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - log := arktest.NewLogger() + log := velerotest.NewLogger() ctx := &context{ restore: test.restore, @@ -198,10 +198,10 @@ func TestRestoreNamespaceFiltering(t *testing.T) { warnings, errors := ctx.restoreFromDir(test.baseDir) - assert.Empty(t, warnings.Ark) + assert.Empty(t, warnings.Velero) assert.Empty(t, warnings.Cluster) assert.Empty(t, warnings.Namespaces) - assert.Empty(t, errors.Ark) + assert.Empty(t, errors.Velero) assert.Empty(t, errors.Cluster) assert.Empty(t, errors.Namespaces) assert.Equal(t, test.expectedReadDirs, test.fileSystem.ReadDirCalls) @@ -212,7 +212,7 @@ func TestRestoreNamespaceFiltering(t *testing.T) { func TestRestorePriority(t *testing.T) { tests := []struct { name string - fileSystem *arktest.FakeFileSystem + fileSystem *velerotest.FakeFileSystem restore *api.Restore baseDir string prioritizedResources []schema.GroupResource @@ -221,7 +221,7 @@ func TestRestorePriority(t *testing.T) { }{ { name: "cluster test", - fileSystem: arktest.NewFakeFileSystem().WithDirectory("bak/resources/a/cluster").WithDirectory("bak/resources/c/cluster"), + fileSystem: velerotest.NewFakeFileSystem().WithDirectory("bak/resources/a/cluster").WithDirectory("bak/resources/c/cluster"), baseDir: "bak", restore: &api.Restore{Spec: api.RestoreSpec{IncludedNamespaces: []string{"*"}}}, prioritizedResources: []schema.GroupResource{ @@ -233,7 +233,7 @@ func TestRestorePriority(t *testing.T) { }, { name: "resource priorities are applied", - fileSystem: arktest.NewFakeFileSystem().WithDirectory("bak/resources/a/cluster").WithDirectory("bak/resources/c/cluster"), + fileSystem: velerotest.NewFakeFileSystem().WithDirectory("bak/resources/a/cluster").WithDirectory("bak/resources/c/cluster"), restore: &api.Restore{Spec: api.RestoreSpec{IncludedNamespaces: []string{"*"}}}, baseDir: "bak", prioritizedResources: []schema.GroupResource{ @@ -245,7 +245,7 @@ func TestRestorePriority(t *testing.T) { }, { name: "basic namespace", - fileSystem: arktest.NewFakeFileSystem().WithDirectory("bak/resources/a/namespaces/ns-1").WithDirectory("bak/resources/c/namespaces/ns-1"), + fileSystem: velerotest.NewFakeFileSystem().WithDirectory("bak/resources/a/namespaces/ns-1").WithDirectory("bak/resources/c/namespaces/ns-1"), restore: &api.Restore{Spec: api.RestoreSpec{IncludedNamespaces: []string{"*"}}}, baseDir: "bak", prioritizedResources: []schema.GroupResource{ @@ -257,7 +257,7 @@ func TestRestorePriority(t *testing.T) { }, { name: "error in a single resource doesn't terminate restore immediately, but is returned", - fileSystem: arktest.NewFakeFileSystem(). + fileSystem: velerotest.NewFakeFileSystem(). WithFile("bak/resources/a/namespaces/ns-1/invalid-json.json", []byte("invalid json")). 
WithDirectory("bak/resources/c/namespaces/ns-1"), restore: &api.Restore{Spec: api.RestoreSpec{IncludedNamespaces: []string{"*"}}}, @@ -278,7 +278,7 @@ func TestRestorePriority(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - log := arktest.NewLogger() + log := velerotest.NewLogger() ctx := &context{ restore: test.restore, @@ -290,7 +290,7 @@ func TestRestorePriority(t *testing.T) { warnings, errors := ctx.restoreFromDir(test.baseDir) - assert.Empty(t, warnings.Ark) + assert.Empty(t, warnings.Velero) assert.Empty(t, warnings.Cluster) assert.Empty(t, warnings.Namespaces) assert.Equal(t, test.expectedErrors, errors) @@ -306,20 +306,20 @@ func TestNamespaceRemapping(t *testing.T) { restore = &api.Restore{Spec: api.RestoreSpec{IncludedNamespaces: []string{"*"}, NamespaceMapping: map[string]string{"ns-1": "ns-2"}}} prioritizedResources = []schema.GroupResource{{Resource: "namespaces"}, {Resource: "configmaps"}} labelSelector = labels.NewSelector() - fileSystem = arktest.NewFakeFileSystem(). + fileSystem = velerotest.NewFakeFileSystem(). WithFile("bak/resources/configmaps/namespaces/ns-1/cm-1.json", newTestConfigMap().WithNamespace("ns-1").ToJSON()). WithFile("bak/resources/namespaces/cluster/ns-1.json", newTestNamespace("ns-1").ToJSON()) expectedNS = "ns-2" expectedObjs = toUnstructured(newTestConfigMap().WithNamespace("ns-2").ConfigMap) ) - resourceClient := &arktest.FakeDynamicClient{} + resourceClient := &velerotest.FakeDynamicClient{} for i := range expectedObjs { addRestoreLabels(&expectedObjs[i], "", "") resourceClient.On("Create", &expectedObjs[i]).Return(&expectedObjs[i], nil) } - dynamicFactory := &arktest.FakeDynamicFactory{} + dynamicFactory := &velerotest.FakeDynamicFactory{} resource := metav1.APIResource{Name: "configmaps", Namespaced: true} gv := schema.GroupVersion{Group: "", Version: "v1"} dynamicFactory.On("ClientForGroupVersionResource", gv, resource, expectedNS).Return(resourceClient, nil) @@ -334,15 +334,15 @@ func TestNamespaceRemapping(t *testing.T) { prioritizedResources: prioritizedResources, restore: restore, backup: &api.Backup{}, - log: arktest.NewLogger(), + log: velerotest.NewLogger(), } warnings, errors := ctx.restoreFromDir(baseDir) - assert.Empty(t, warnings.Ark) + assert.Empty(t, warnings.Velero) assert.Empty(t, warnings.Cluster) assert.Empty(t, warnings.Namespaces) - assert.Empty(t, errors.Ark) + assert.Empty(t, errors.Velero) assert.Empty(t, errors.Cluster) assert.Empty(t, errors.Namespaces) @@ -371,7 +371,7 @@ func TestRestoreResourceForNamespace(t *testing.T) { resourcePath string labelSelector labels.Selector includeClusterResources *bool - fileSystem *arktest.FakeFileSystem + fileSystem *velerotest.FakeFileSystem actions []resolvedAction expectedErrors api.RestoreResult expectedObjs []unstructured.Unstructured @@ -381,7 +381,7 @@ func TestRestoreResourceForNamespace(t *testing.T) { namespace: "ns-1", resourcePath: "configmaps", labelSelector: labels.NewSelector(), - fileSystem: arktest.NewFakeFileSystem(). + fileSystem: velerotest.NewFakeFileSystem(). WithFile("configmaps/cm-1.json", newNamedTestConfigMap("cm-1").ToJSON()). 
WithFile("configmaps/cm-2.json", newNamedTestConfigMap("cm-2").ToJSON()), expectedObjs: toUnstructured( @@ -393,7 +393,7 @@ func TestRestoreResourceForNamespace(t *testing.T) { name: "no such directory causes error", namespace: "ns-1", resourcePath: "configmaps", - fileSystem: arktest.NewFakeFileSystem(), + fileSystem: velerotest.NewFakeFileSystem(), expectedErrors: api.RestoreResult{ Namespaces: map[string][]string{ "ns-1": {"error reading \"configmaps\" resource directory: open configmaps: file does not exist"}, @@ -404,14 +404,14 @@ func TestRestoreResourceForNamespace(t *testing.T) { name: "empty directory is no-op", namespace: "ns-1", resourcePath: "configmaps", - fileSystem: arktest.NewFakeFileSystem().WithDirectory("configmaps"), + fileSystem: velerotest.NewFakeFileSystem().WithDirectory("configmaps"), }, { name: "unmarshall failure does not cause immediate return", namespace: "ns-1", resourcePath: "configmaps", labelSelector: labels.NewSelector(), - fileSystem: arktest.NewFakeFileSystem(). + fileSystem: velerotest.NewFakeFileSystem(). WithFile("configmaps/cm-1-invalid.json", []byte("this is not valid json")). WithFile("configmaps/cm-2.json", newNamedTestConfigMap("cm-2").ToJSON()), expectedErrors: api.RestoreResult{ @@ -426,7 +426,7 @@ func TestRestoreResourceForNamespace(t *testing.T) { namespace: "ns-1", resourcePath: "configmaps", labelSelector: labels.SelectorFromSet(labels.Set(map[string]string{"foo": "bar"})), - fileSystem: arktest.NewFakeFileSystem().WithFile("configmaps/cm-1.json", newTestConfigMap().WithLabels(map[string]string{"foo": "bar"}).ToJSON()), + fileSystem: velerotest.NewFakeFileSystem().WithFile("configmaps/cm-1.json", newTestConfigMap().WithLabels(map[string]string{"foo": "bar"}).ToJSON()), expectedObjs: toUnstructured(newTestConfigMap().WithLabels(map[string]string{"foo": "bar"}).ConfigMap), }, { @@ -434,14 +434,14 @@ func TestRestoreResourceForNamespace(t *testing.T) { namespace: "ns-1", resourcePath: "configmaps", labelSelector: labels.SelectorFromSet(labels.Set(map[string]string{"foo": "not-bar"})), - fileSystem: arktest.NewFakeFileSystem().WithFile("configmaps/cm-1.json", newTestConfigMap().WithLabels(map[string]string{"foo": "bar"}).ToJSON()), + fileSystem: velerotest.NewFakeFileSystem().WithFile("configmaps/cm-1.json", newTestConfigMap().WithLabels(map[string]string{"foo": "bar"}).ToJSON()), }, { name: "namespace is remapped", namespace: "ns-2", resourcePath: "configmaps", labelSelector: labels.NewSelector(), - fileSystem: arktest.NewFakeFileSystem().WithFile("configmaps/cm-1.json", newTestConfigMap().WithNamespace("ns-1").ToJSON()), + fileSystem: velerotest.NewFakeFileSystem().WithFile("configmaps/cm-1.json", newTestConfigMap().WithNamespace("ns-1").ToJSON()), expectedObjs: toUnstructured(newTestConfigMap().WithNamespace("ns-2").ConfigMap), }, { @@ -449,7 +449,7 @@ func TestRestoreResourceForNamespace(t *testing.T) { namespace: "ns-1", resourcePath: "configmaps", labelSelector: labels.NewSelector(), - fileSystem: arktest.NewFakeFileSystem().WithFile("configmaps/cm-1.json", newTestConfigMap().ToJSON()), + fileSystem: velerotest.NewFakeFileSystem().WithFile("configmaps/cm-1.json", newTestConfigMap().ToJSON()), actions: []resolvedAction{ { ItemAction: newFakeAction("configmaps"), @@ -465,7 +465,7 @@ func TestRestoreResourceForNamespace(t *testing.T) { namespace: "ns-1", resourcePath: "configmaps", labelSelector: labels.NewSelector(), - fileSystem: arktest.NewFakeFileSystem().WithFile("configmaps/cm-1.json", newTestConfigMap().ToJSON()), + fileSystem: 
velerotest.NewFakeFileSystem().WithFile("configmaps/cm-1.json", newTestConfigMap().ToJSON()), actions: []resolvedAction{ { ItemAction: newFakeAction("foo-resource"), @@ -482,7 +482,7 @@ func TestRestoreResourceForNamespace(t *testing.T) { resourcePath: "persistentvolumes", labelSelector: labels.NewSelector(), includeClusterResources: falsePtr, - fileSystem: arktest.NewFakeFileSystem().WithFile("persistentvolumes/pv-1.json", newTestPV().ToJSON()), + fileSystem: velerotest.NewFakeFileSystem().WithFile("persistentvolumes/pv-1.json", newTestPV().ToJSON()), }, { name: "namespaced resources are not skipped when IncludeClusterResources=false", @@ -490,7 +490,7 @@ func TestRestoreResourceForNamespace(t *testing.T) { resourcePath: "configmaps", labelSelector: labels.NewSelector(), includeClusterResources: falsePtr, - fileSystem: arktest.NewFakeFileSystem().WithFile("configmaps/cm-1.json", newTestConfigMap().ToJSON()), + fileSystem: velerotest.NewFakeFileSystem().WithFile("configmaps/cm-1.json", newTestConfigMap().ToJSON()), expectedObjs: toUnstructured(newTestConfigMap().ConfigMap), }, { @@ -499,7 +499,7 @@ func TestRestoreResourceForNamespace(t *testing.T) { resourcePath: "persistentvolumes", labelSelector: labels.NewSelector(), includeClusterResources: truePtr, - fileSystem: arktest.NewFakeFileSystem().WithFile("persistentvolumes/pv-1.json", newTestPV().ToJSON()), + fileSystem: velerotest.NewFakeFileSystem().WithFile("persistentvolumes/pv-1.json", newTestPV().ToJSON()), expectedObjs: toUnstructured(newTestPV().PersistentVolume), }, { @@ -508,7 +508,7 @@ func TestRestoreResourceForNamespace(t *testing.T) { resourcePath: "configmaps", labelSelector: labels.NewSelector(), includeClusterResources: truePtr, - fileSystem: arktest.NewFakeFileSystem().WithFile("configmaps/cm-1.json", newTestConfigMap().ToJSON()), + fileSystem: velerotest.NewFakeFileSystem().WithFile("configmaps/cm-1.json", newTestConfigMap().ToJSON()), expectedObjs: toUnstructured(newTestConfigMap().ConfigMap), }, { @@ -517,7 +517,7 @@ func TestRestoreResourceForNamespace(t *testing.T) { resourcePath: "persistentvolumes", labelSelector: labels.NewSelector(), includeClusterResources: nil, - fileSystem: arktest.NewFakeFileSystem().WithFile("persistentvolumes/pv-1.json", newTestPV().ToJSON()), + fileSystem: velerotest.NewFakeFileSystem().WithFile("persistentvolumes/pv-1.json", newTestPV().ToJSON()), expectedObjs: toUnstructured(newTestPV().PersistentVolume), }, { @@ -526,7 +526,7 @@ func TestRestoreResourceForNamespace(t *testing.T) { resourcePath: "configmaps", labelSelector: labels.NewSelector(), includeClusterResources: nil, - fileSystem: arktest.NewFakeFileSystem().WithFile("configmaps/cm-1.json", newTestConfigMap().ToJSON()), + fileSystem: velerotest.NewFakeFileSystem().WithFile("configmaps/cm-1.json", newTestConfigMap().ToJSON()), expectedObjs: toUnstructured(newTestConfigMap().ConfigMap), }, { @@ -535,7 +535,7 @@ func TestRestoreResourceForNamespace(t *testing.T) { resourcePath: "serviceaccounts", labelSelector: labels.NewSelector(), includeClusterResources: nil, - fileSystem: arktest.NewFakeFileSystem().WithFile("serviceaccounts/sa-1.json", newTestServiceAccount().ToJSON()), + fileSystem: velerotest.NewFakeFileSystem().WithFile("serviceaccounts/sa-1.json", newTestServiceAccount().ToJSON()), expectedObjs: toUnstructured(newTestServiceAccount().ServiceAccount), }, { @@ -544,7 +544,7 @@ func TestRestoreResourceForNamespace(t *testing.T) { resourcePath: "pods", labelSelector: labels.NewSelector(), includeClusterResources: nil, - 
fileSystem: arktest.NewFakeFileSystem(). + fileSystem: velerotest.NewFakeFileSystem(). WithFile( "pods/pod.json", NewTestUnstructured(). @@ -569,7 +569,7 @@ func TestRestoreResourceForNamespace(t *testing.T) { resourcePath: "pods", labelSelector: labels.NewSelector(), includeClusterResources: nil, - fileSystem: arktest.NewFakeFileSystem(). + fileSystem: velerotest.NewFakeFileSystem(). WithFile( "pods/pod.json", NewTestUnstructured(). @@ -586,18 +586,18 @@ func TestRestoreResourceForNamespace(t *testing.T) { var ( client = fake.NewSimpleClientset() sharedInformers = informers.NewSharedInformerFactory(client, 0) - snapshotLocationLister = sharedInformers.Ark().V1().VolumeSnapshotLocations().Lister() + snapshotLocationLister = sharedInformers.Velero().V1().VolumeSnapshotLocations().Lister() ) for _, test := range tests { t.Run(test.name, func(t *testing.T) { - resourceClient := &arktest.FakeDynamicClient{} + resourceClient := &velerotest.FakeDynamicClient{} for i := range test.expectedObjs { addRestoreLabels(&test.expectedObjs[i], "my-restore", "my-backup") resourceClient.On("Create", &test.expectedObjs[i]).Return(&test.expectedObjs[i], nil) } - dynamicFactory := &arktest.FakeDynamicFactory{} + dynamicFactory := &velerotest.FakeDynamicFactory{} gv := schema.GroupVersion{Group: "", Version: "v1"} configMapResource := metav1.APIResource{Name: "configmaps", Namespaced: true} @@ -633,7 +633,7 @@ func TestRestoreResourceForNamespace(t *testing.T) { }, }, backup: &api.Backup{}, - log: arktest.NewLogger(), + log: velerotest.NewLogger(), pvRestorer: &pvRestorer{ logger: logging.DefaultLogger(logrus.DebugLevel), blockStoreGetter: &fakeBlockStoreGetter{ @@ -647,7 +647,7 @@ func TestRestoreResourceForNamespace(t *testing.T) { warnings, errors := ctx.restoreResource(test.resourcePath, test.namespace, test.resourcePath) - assert.Empty(t, warnings.Ark) + assert.Empty(t, warnings.Velero) assert.Empty(t, warnings.Cluster) assert.Empty(t, warnings.Namespaces) assert.Equal(t, test.expectedErrors, errors) @@ -682,7 +682,7 @@ func TestRestoringExistingServiceAccount(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - resourceClient := &arktest.FakeDynamicClient{} + resourceClient := &velerotest.FakeDynamicClient{} defer resourceClient.AssertExpectations(t) name := fromCluster.GetName() @@ -703,7 +703,7 @@ func TestRestoringExistingServiceAccount(t *testing.T) { resourceClient.On("Patch", name, test.expectedPatch).Return(test.fromBackup, nil) } - dynamicFactory := &arktest.FakeDynamicFactory{} + dynamicFactory := &velerotest.FakeDynamicFactory{} gv := schema.GroupVersion{Group: "", Version: "v1"} resource := metav1.APIResource{Name: "serviceaccounts", Namespaced: true} @@ -713,7 +713,7 @@ func TestRestoringExistingServiceAccount(t *testing.T) { ctx := &context{ dynamicFactory: dynamicFactory, actions: []resolvedAction{}, - fileSystem: arktest.NewFakeFileSystem(). + fileSystem: velerotest.NewFakeFileSystem(). 
WithFile("foo/resources/serviceaccounts/namespaces/ns-1/sa-1.json", fromBackupJSON), selector: labels.NewSelector(), restore: &api.Restore{ @@ -727,11 +727,11 @@ func TestRestoringExistingServiceAccount(t *testing.T) { }, }, backup: &api.Backup{}, - log: arktest.NewLogger(), + log: velerotest.NewLogger(), } warnings, errors := ctx.restoreResource("serviceaccounts", "ns-1", "foo/resources/serviceaccounts/namespaces/ns-1/") - assert.Empty(t, warnings.Ark) + assert.Empty(t, warnings.Velero) assert.Empty(t, warnings.Cluster) assert.Empty(t, warnings.Namespaces) assert.Equal(t, api.RestoreResult{}, errors) @@ -917,16 +917,16 @@ status: } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - dynamicFactory := &arktest.FakeDynamicFactory{} + dynamicFactory := &velerotest.FakeDynamicFactory{} gv := schema.GroupVersion{Group: "", Version: "v1"} - pvClient := &arktest.FakeDynamicClient{} + pvClient := &velerotest.FakeDynamicClient{} defer pvClient.AssertExpectations(t) pvResource := metav1.APIResource{Name: "persistentvolumes", Namespaced: false} dynamicFactory.On("ClientForGroupVersionResource", gv, pvResource, "").Return(pvClient, nil) - pvcClient := &arktest.FakeDynamicClient{} + pvcClient := &velerotest.FakeDynamicClient{} defer pvcClient.AssertExpectations(t) pvcResource := metav1.APIResource{Name: "persistentvolumeclaims", Namespaced: true} @@ -962,7 +962,7 @@ status: ctx := &context{ dynamicFactory: dynamicFactory, actions: []resolvedAction{}, - fileSystem: arktest.NewFakeFileSystem(). + fileSystem: velerotest.NewFakeFileSystem(). WithFile("foo/resources/persistentvolumes/cluster/pv.json", pvBytes). WithFile("foo/resources/persistentvolumeclaims/default/pvc.json", pvcBytes), selector: labels.NewSelector(), @@ -977,7 +977,7 @@ status: }, }, backup: backup, - log: arktest.NewLogger(), + log: velerotest.NewLogger(), pvsToProvision: sets.NewString(), pvRestorer: pvRestorer, } @@ -1043,7 +1043,7 @@ status: // Restore PV warnings, errors := ctx.restoreResource("persistentvolumes", "", "foo/resources/persistentvolumes/cluster/") - assert.Empty(t, warnings.Ark) + assert.Empty(t, warnings.Velero) assert.Empty(t, warnings.Namespaces) assert.Equal(t, api.RestoreResult{}, errors) @@ -1078,7 +1078,7 @@ status: // Restore PVC warnings, errors = ctx.restoreResource("persistentvolumeclaims", "default", "foo/resources/persistentvolumeclaims/default/") - assert.Empty(t, warnings.Ark) + assert.Empty(t, warnings.Velero) assert.Empty(t, warnings.Cluster) assert.Empty(t, warnings.Namespaces) assert.Equal(t, api.RestoreResult{}, errors) @@ -1277,7 +1277,7 @@ func TestIsCompleted(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - u := arktest.UnstructuredOrDie(test.content) + u := velerotest.UnstructuredOrDie(test.content) backup, err := isCompleted(u, test.groupResource) if assert.Equal(t, test.expectedErr, err != nil) { @@ -1316,39 +1316,39 @@ func TestExecutePVAction_NoSnapshotRestores(t *testing.T) { { name: "no name should error", obj: NewTestUnstructured().WithMetadata().Unstructured, - restore: arktest.NewDefaultTestRestore().Restore, + restore: velerotest.NewDefaultTestRestore().Restore, expectedErr: true, }, { name: "no spec should error", obj: NewTestUnstructured().WithName("pv-1").Unstructured, - restore: arktest.NewDefaultTestRestore().Restore, + restore: velerotest.NewDefaultTestRestore().Restore, expectedErr: true, }, { name: "ensure spec.claimRef, spec.storageClassName are deleted", obj: NewTestUnstructured().WithName("pv-1").WithAnnotations("a", 
"b").WithSpec("claimRef", "storageClassName", "someOtherField").Unstructured, - restore: arktest.NewDefaultTestRestore().WithRestorePVs(false).Restore, - backup: arktest.NewTestBackup().WithName("backup1").WithPhase(api.BackupPhaseInProgress).Backup, + restore: velerotest.NewDefaultTestRestore().WithRestorePVs(false).Restore, + backup: velerotest.NewTestBackup().WithName("backup1").WithPhase(api.BackupPhaseInProgress).Backup, expectedRes: NewTestUnstructured().WithAnnotations("a", "b").WithName("pv-1").WithSpec("someOtherField").Unstructured, }, { name: "if backup.spec.snapshotVolumes is false, ignore restore.spec.restorePVs and return early", obj: NewTestUnstructured().WithName("pv-1").WithAnnotations("a", "b").WithSpec("claimRef", "storageClassName", "someOtherField").Unstructured, - restore: arktest.NewDefaultTestRestore().WithRestorePVs(true).Restore, - backup: arktest.NewTestBackup().WithName("backup1").WithPhase(api.BackupPhaseInProgress).WithSnapshotVolumes(false).Backup, + restore: velerotest.NewDefaultTestRestore().WithRestorePVs(true).Restore, + backup: velerotest.NewTestBackup().WithName("backup1").WithPhase(api.BackupPhaseInProgress).WithSnapshotVolumes(false).Backup, expectedRes: NewTestUnstructured().WithName("pv-1").WithAnnotations("a", "b").WithSpec("someOtherField").Unstructured, }, { name: "restore.spec.restorePVs=false, return early", obj: NewTestUnstructured().WithName("pv-1").WithSpec().Unstructured, - restore: arktest.NewDefaultTestRestore().WithRestorePVs(false).Restore, - backup: arktest.NewTestBackup().WithName("backup1").WithPhase(api.BackupPhaseInProgress).Backup, + restore: velerotest.NewDefaultTestRestore().WithRestorePVs(false).Restore, + backup: velerotest.NewTestBackup().WithName("backup1").WithPhase(api.BackupPhaseInProgress).Backup, volumeSnapshots: []*volume.Snapshot{ newSnapshot("pv-1", "loc-1", "gp", "az-1", "snap-1", 1000), }, locations: []*api.VolumeSnapshotLocation{ - arktest.NewTestVolumeSnapshotLocation().WithName("loc-1").VolumeSnapshotLocation, + velerotest.NewTestVolumeSnapshotLocation().WithName("loc-1").VolumeSnapshotLocation, }, expectedErr: false, expectedRes: NewTestUnstructured().WithName("pv-1").WithSpec().Unstructured, @@ -1356,29 +1356,29 @@ func TestExecutePVAction_NoSnapshotRestores(t *testing.T) { { name: "backup.status.volumeBackups non-nil and no entry for PV: return early", obj: NewTestUnstructured().WithName("pv-1").WithSpec().Unstructured, - restore: arktest.NewDefaultTestRestore().WithRestorePVs(true).Restore, - backup: arktest.NewTestBackup().WithName("backup-1").WithSnapshot("non-matching-pv", "snap").Backup, + restore: velerotest.NewDefaultTestRestore().WithRestorePVs(true).Restore, + backup: velerotest.NewTestBackup().WithName("backup-1").WithSnapshot("non-matching-pv", "snap").Backup, expectedRes: NewTestUnstructured().WithName("pv-1").WithSpec().Unstructured, }, { name: "backup.status.volumeBackups has entry for PV, >1 VSLs configured: return error", obj: NewTestUnstructured().WithName("pv-1").WithSpec().Unstructured, - restore: arktest.NewDefaultTestRestore().WithRestorePVs(true).Restore, - backup: arktest.NewTestBackup().WithName("backup-1").WithSnapshot("pv-1", "snap").Backup, + restore: velerotest.NewDefaultTestRestore().WithRestorePVs(true).Restore, + backup: velerotest.NewTestBackup().WithName("backup-1").WithSnapshot("pv-1", "snap").Backup, locations: []*api.VolumeSnapshotLocation{ - arktest.NewTestVolumeSnapshotLocation().WithName("loc-1").VolumeSnapshotLocation, - 
arktest.NewTestVolumeSnapshotLocation().WithName("loc-2").VolumeSnapshotLocation, + velerotest.NewTestVolumeSnapshotLocation().WithName("loc-1").VolumeSnapshotLocation, + velerotest.NewTestVolumeSnapshotLocation().WithName("loc-2").VolumeSnapshotLocation, }, expectedErr: true, }, { name: "volumeSnapshots is empty: return early", obj: NewTestUnstructured().WithName("pv-1").WithSpec().Unstructured, - restore: arktest.NewDefaultTestRestore().WithRestorePVs(true).Restore, - backup: arktest.NewTestBackup().WithName("backup-1").Backup, + restore: velerotest.NewDefaultTestRestore().WithRestorePVs(true).Restore, + backup: velerotest.NewTestBackup().WithName("backup-1").Backup, locations: []*api.VolumeSnapshotLocation{ - arktest.NewTestVolumeSnapshotLocation().WithName("loc-1").VolumeSnapshotLocation, - arktest.NewTestVolumeSnapshotLocation().WithName("loc-2").VolumeSnapshotLocation, + velerotest.NewTestVolumeSnapshotLocation().WithName("loc-1").VolumeSnapshotLocation, + velerotest.NewTestVolumeSnapshotLocation().WithName("loc-2").VolumeSnapshotLocation, }, volumeSnapshots: []*volume.Snapshot{}, expectedRes: NewTestUnstructured().WithName("pv-1").WithSpec().Unstructured, @@ -1386,11 +1386,11 @@ func TestExecutePVAction_NoSnapshotRestores(t *testing.T) { { name: "volumeSnapshots doesn't have a snapshot for PV: return early", obj: NewTestUnstructured().WithName("pv-1").WithSpec().Unstructured, - restore: arktest.NewDefaultTestRestore().WithRestorePVs(true).Restore, - backup: arktest.NewTestBackup().WithName("backup-1").Backup, + restore: velerotest.NewDefaultTestRestore().WithRestorePVs(true).Restore, + backup: velerotest.NewTestBackup().WithName("backup-1").Backup, locations: []*api.VolumeSnapshotLocation{ - arktest.NewTestVolumeSnapshotLocation().WithName("loc-1").VolumeSnapshotLocation, - arktest.NewTestVolumeSnapshotLocation().WithName("loc-2").VolumeSnapshotLocation, + velerotest.NewTestVolumeSnapshotLocation().WithName("loc-1").VolumeSnapshotLocation, + velerotest.NewTestVolumeSnapshotLocation().WithName("loc-2").VolumeSnapshotLocation, }, volumeSnapshots: []*volume.Snapshot{ newSnapshot("non-matching-pv-1", "loc-1", "type-1", "az-1", "snap-1", 1), @@ -1404,11 +1404,11 @@ func TestExecutePVAction_NoSnapshotRestores(t *testing.T) { t.Run(tc.name, func(t *testing.T) { var ( client = fake.NewSimpleClientset() - snapshotLocationInformer = informers.NewSharedInformerFactory(client, 0).Ark().V1().VolumeSnapshotLocations() + snapshotLocationInformer = informers.NewSharedInformerFactory(client, 0).Velero().V1().VolumeSnapshotLocations() ) r := &pvRestorer{ - logger: arktest.NewLogger(), + logger: velerotest.NewLogger(), restorePVs: tc.restore.Spec.RestorePVs, snapshotLocationLister: snapshotLocationInformer.Lister(), } @@ -1467,13 +1467,13 @@ func TestExecutePVAction_SnapshotRestores(t *testing.T) { { name: "pre-v0.10 backup with .status.volumeBackups with entry for PV and single VSL executes restore", obj: NewTestUnstructured().WithName("pv-1").WithSpec().Unstructured, - restore: arktest.NewDefaultTestRestore().WithRestorePVs(true).Restore, - backup: arktest.NewTestBackup().WithName("backup-1"). + restore: velerotest.NewDefaultTestRestore().WithRestorePVs(true).Restore, + backup: velerotest.NewTestBackup().WithName("backup-1"). WithVolumeBackupInfo("pv-1", "snap-1", "type-1", "az-1", int64Ptr(1)). WithVolumeBackupInfo("pv-2", "snap-2", "type-2", "az-2", int64Ptr(2)). 
Backup, locations: []*api.VolumeSnapshotLocation{ - arktest.NewTestVolumeSnapshotLocation().WithName("loc-1").WithProvider("provider-1").VolumeSnapshotLocation, + velerotest.NewTestVolumeSnapshotLocation().WithName("loc-1").WithProvider("provider-1").VolumeSnapshotLocation, }, expectedProvider: "provider-1", expectedSnapshotID: "snap-1", @@ -1484,11 +1484,11 @@ func TestExecutePVAction_SnapshotRestores(t *testing.T) { { name: "v0.10+ backup with a matching volume.Snapshot for PV executes restore", obj: NewTestUnstructured().WithName("pv-1").WithSpec().Unstructured, - restore: arktest.NewDefaultTestRestore().WithRestorePVs(true).Restore, - backup: arktest.NewTestBackup().WithName("backup-1").Backup, + restore: velerotest.NewDefaultTestRestore().WithRestorePVs(true).Restore, + backup: velerotest.NewTestBackup().WithName("backup-1").Backup, locations: []*api.VolumeSnapshotLocation{ - arktest.NewTestVolumeSnapshotLocation().WithName("loc-1").WithProvider("provider-1").VolumeSnapshotLocation, - arktest.NewTestVolumeSnapshotLocation().WithName("loc-2").WithProvider("provider-2").VolumeSnapshotLocation, + velerotest.NewTestVolumeSnapshotLocation().WithName("loc-1").WithProvider("provider-1").VolumeSnapshotLocation, + velerotest.NewTestVolumeSnapshotLocation().WithName("loc-2").WithProvider("provider-2").VolumeSnapshotLocation, }, volumeSnapshots: []*volume.Snapshot{ newSnapshot("pv-1", "loc-1", "type-1", "az-1", "snap-1", 1), @@ -1509,7 +1509,7 @@ func TestExecutePVAction_SnapshotRestores(t *testing.T) { blockStoreGetter = providerToBlockStoreMap(map[string]cloudprovider.BlockStore{ tc.expectedProvider: blockStore, }) - locationsInformer = informers.NewSharedInformerFactory(fake.NewSimpleClientset(), 0).Ark().V1().VolumeSnapshotLocations() + locationsInformer = informers.NewSharedInformerFactory(fake.NewSimpleClientset(), 0).Velero().V1().VolumeSnapshotLocations() ) for _, loc := range tc.locations { @@ -1517,7 +1517,7 @@ func TestExecutePVAction_SnapshotRestores(t *testing.T) { } r := &pvRestorer{ - logger: arktest.NewLogger(), + logger: velerotest.NewLogger(), backup: tc.backup, volumeSnapshots: tc.volumeSnapshots, snapshotLocationLister: locationsInformer.Lister(), @@ -1855,14 +1855,14 @@ type fakeAction struct { } type fakeBlockStoreGetter struct { - fakeBlockStore *arktest.FakeBlockStore + fakeBlockStore *velerotest.FakeBlockStore volumeMap map[api.VolumeBackupInfo]string volumeID string } func (r *fakeBlockStoreGetter) GetBlockStore(provider string) (cloudprovider.BlockStore, error) { if r.fakeBlockStore == nil { - r.fakeBlockStore = &arktest.FakeBlockStore{ + r.fakeBlockStore = &velerotest.FakeBlockStore{ RestorableVolumes: r.volumeMap, VolumeID: r.volumeID, } diff --git a/pkg/restore/service_account_action.go b/pkg/restore/service_account_action.go index 8734ae4320..59df4ed3e3 100644 --- a/pkg/restore/service_account_action.go +++ b/pkg/restore/service_account_action.go @@ -25,8 +25,8 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/util/kube" + api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/util/kube" ) type serviceAccountAction struct { diff --git a/pkg/restore/service_account_action_test.go b/pkg/restore/service_account_action_test.go index 8700f40f04..6b31deff4b 100644 --- a/pkg/restore/service_account_action_test.go +++ b/pkg/restore/service_account_action_test.go @@ -27,7 +27,7 @@ import ( 
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - "github.com/heptio/ark/pkg/util/test" + "github.com/heptio/velero/pkg/util/test" ) func TestServiceAccountActionAppliesTo(t *testing.T) { diff --git a/pkg/restore/service_action.go b/pkg/restore/service_action.go index 7c9b624cb2..e425ba1c38 100644 --- a/pkg/restore/service_action.go +++ b/pkg/restore/service_action.go @@ -25,8 +25,8 @@ import ( "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" - api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/util/collections" + api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/util/collections" ) const annotationLastAppliedConfig = "kubectl.kubernetes.io/last-applied-configuration" diff --git a/pkg/restore/service_action_test.go b/pkg/restore/service_action_test.go index cd4f0b2d10..815591ccb7 100644 --- a/pkg/restore/service_action_test.go +++ b/pkg/restore/service_action_test.go @@ -24,7 +24,7 @@ import ( corev1api "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" - arktest "github.com/heptio/ark/pkg/util/test" + velerotest "github.com/heptio/velero/pkg/util/test" ) func svcJSON(ports ...corev1api.ServicePort) string { @@ -181,7 +181,7 @@ func TestServiceActionExecute(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - action := NewServiceAction(arktest.NewLogger()) + action := NewServiceAction(velerotest.NewLogger()) res, _, err := action.Execute(test.obj, nil) diff --git a/pkg/serverstatusrequest/builder.go b/pkg/serverstatusrequest/builder.go index 31c14918e4..9834836308 100644 --- a/pkg/serverstatusrequest/builder.go +++ b/pkg/serverstatusrequest/builder.go @@ -21,25 +21,25 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - arkv1api "github.com/heptio/ark/pkg/apis/ark/v1" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" ) type Builder struct { - serverStatusRequest arkv1api.ServerStatusRequest + serverStatusRequest velerov1api.ServerStatusRequest } func NewBuilder() *Builder { return &Builder{ - serverStatusRequest: arkv1api.ServerStatusRequest{ + serverStatusRequest: velerov1api.ServerStatusRequest{ TypeMeta: metav1.TypeMeta{ - APIVersion: arkv1api.SchemeGroupVersion.String(), + APIVersion: velerov1api.SchemeGroupVersion.String(), Kind: "ServerStatusRequest", }, }, } } -func (b *Builder) Build() *arkv1api.ServerStatusRequest { +func (b *Builder) Build() *velerov1api.ServerStatusRequest { return &b.serverStatusRequest } @@ -58,7 +58,7 @@ func (b *Builder) GenerateName(name string) *Builder { return b } -func (b *Builder) Phase(phase arkv1api.ServerStatusRequestPhase) *Builder { +func (b *Builder) Phase(phase velerov1api.ServerStatusRequestPhase) *Builder { b.serverStatusRequest.Status.Phase = phase return b } diff --git a/pkg/serverstatusrequest/process.go b/pkg/serverstatusrequest/process.go index 91c3df46a8..3e681b7293 100644 --- a/pkg/serverstatusrequest/process.go +++ b/pkg/serverstatusrequest/process.go @@ -26,25 +26,25 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/clock" - arkv1api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/buildinfo" - arkv1client "github.com/heptio/ark/pkg/generated/clientset/versioned/typed/ark/v1" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/buildinfo" + velerov1client "github.com/heptio/velero/pkg/generated/clientset/versioned/typed/velero/v1" ) const ttl = time.Minute // Process fills out new 
ServerStatusRequest objects and deletes processed ones // that have expired. -func Process(req *arkv1api.ServerStatusRequest, client arkv1client.ServerStatusRequestsGetter, clock clock.Clock, log logrus.FieldLogger) error { +func Process(req *velerov1api.ServerStatusRequest, client velerov1client.ServerStatusRequestsGetter, clock clock.Clock, log logrus.FieldLogger) error { switch req.Status.Phase { - case "", arkv1api.ServerStatusRequestPhaseNew: + case "", velerov1api.ServerStatusRequestPhaseNew: log.Info("Processing new ServerStatusRequest") - return errors.WithStack(patch(client, req, func(req *arkv1api.ServerStatusRequest) { + return errors.WithStack(patch(client, req, func(req *velerov1api.ServerStatusRequest) { req.Status.ServerVersion = buildinfo.Version req.Status.ProcessedTimestamp.Time = clock.Now() - req.Status.Phase = arkv1api.ServerStatusRequestPhaseProcessed + req.Status.Phase = velerov1api.ServerStatusRequestPhaseProcessed })) - case arkv1api.ServerStatusRequestPhaseProcessed: + case velerov1api.ServerStatusRequestPhaseProcessed: log.Debug("Checking whether ServerStatusRequest has expired") expiration := req.Status.ProcessedTimestamp.Add(ttl) if expiration.After(clock.Now()) { @@ -63,7 +63,7 @@ func Process(req *arkv1api.ServerStatusRequest, client arkv1client.ServerStatusR } } -func patch(client arkv1client.ServerStatusRequestsGetter, req *arkv1api.ServerStatusRequest, updateFunc func(*arkv1api.ServerStatusRequest)) error { +func patch(client velerov1client.ServerStatusRequestsGetter, req *velerov1api.ServerStatusRequest, updateFunc func(*velerov1api.ServerStatusRequest)) error { originalJSON, err := json.Marshal(req) if err != nil { return errors.WithStack(err) diff --git a/pkg/serverstatusrequest/process_test.go b/pkg/serverstatusrequest/process_test.go index 6f625b6bdf..f5b4d19883 100644 --- a/pkg/serverstatusrequest/process_test.go +++ b/pkg/serverstatusrequest/process_test.go @@ -27,13 +27,13 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/clock" - arkv1api "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/buildinfo" - "github.com/heptio/ark/pkg/generated/clientset/versioned/fake" + velerov1api "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/buildinfo" + "github.com/heptio/velero/pkg/generated/clientset/versioned/fake" ) func statusRequestBuilder() *Builder { - return NewBuilder().Namespace(arkv1api.DefaultNamespace).Name("sr-1") + return NewBuilder().Namespace(velerov1api.DefaultNamespace).Name("sr-1") } func TestProcess(t *testing.T) { @@ -47,8 +47,8 @@ func TestProcess(t *testing.T) { tests := []struct { name string - req *arkv1api.ServerStatusRequest - expected *arkv1api.ServerStatusRequest + req *velerov1api.ServerStatusRequest + expected *velerov1api.ServerStatusRequest expectedErrMsg string }{ { @@ -56,25 +56,25 @@ func TestProcess(t *testing.T) { req: statusRequestBuilder().Build(), expected: statusRequestBuilder(). ServerVersion(buildinfo.Version). - Phase(arkv1api.ServerStatusRequestPhaseProcessed). + Phase(velerov1api.ServerStatusRequestPhaseProcessed). ProcessedTimestamp(now). Build(), }, { name: "server status request with phase=New gets processed", req: statusRequestBuilder(). - Phase(arkv1api.ServerStatusRequestPhaseNew). + Phase(velerov1api.ServerStatusRequestPhaseNew). Build(), expected: statusRequestBuilder(). ServerVersion(buildinfo.Version). - Phase(arkv1api.ServerStatusRequestPhaseProcessed). + Phase(velerov1api.ServerStatusRequestPhaseProcessed). 
ProcessedTimestamp(now). Build(), }, { name: "server status request with phase=Processed gets deleted if expired", req: statusRequestBuilder(). - Phase(arkv1api.ServerStatusRequestPhaseProcessed). + Phase(velerov1api.ServerStatusRequestPhaseProcessed). ProcessedTimestamp(now.Add(-61 * time.Second)). Build(), expected: nil, @@ -82,21 +82,21 @@ func TestProcess(t *testing.T) { { name: "server status request with phase=Processed does not get deleted if not expired", req: statusRequestBuilder(). - Phase(arkv1api.ServerStatusRequestPhaseProcessed). + Phase(velerov1api.ServerStatusRequestPhaseProcessed). ProcessedTimestamp(now.Add(-59 * time.Second)). Build(), expected: statusRequestBuilder(). - Phase(arkv1api.ServerStatusRequestPhaseProcessed). + Phase(velerov1api.ServerStatusRequestPhaseProcessed). ProcessedTimestamp(now.Add(-59 * time.Second)). Build(), }, { name: "server status request with invalid phase returns an error", req: statusRequestBuilder(). - Phase(arkv1api.ServerStatusRequestPhase("an-invalid-phase")). + Phase(velerov1api.ServerStatusRequestPhase("an-invalid-phase")). Build(), expected: statusRequestBuilder(). - Phase(arkv1api.ServerStatusRequestPhase("an-invalid-phase")). + Phase(velerov1api.ServerStatusRequestPhase("an-invalid-phase")). Build(), expectedErrMsg: "unexpected ServerStatusRequest phase \"an-invalid-phase\"", }, @@ -106,14 +106,14 @@ func TestProcess(t *testing.T) { t.Run(tc.name, func(t *testing.T) { client := fake.NewSimpleClientset(tc.req) - err := Process(tc.req, client.ArkV1(), clock.NewFakeClock(now), logrus.StandardLogger()) + err := Process(tc.req, client.VeleroV1(), clock.NewFakeClock(now), logrus.StandardLogger()) if tc.expectedErrMsg == "" { assert.Nil(t, err) } else { assert.EqualError(t, err, tc.expectedErrMsg) } - res, err := client.ArkV1().ServerStatusRequests(tc.req.Namespace).Get(tc.req.Name, metav1.GetOptions{}) + res, err := client.VeleroV1().ServerStatusRequests(tc.req.Namespace).Get(tc.req.Name, metav1.GetOptions{}) if tc.expected == nil { assert.Nil(t, res) assert.True(t, apierrors.IsNotFound(err)) diff --git a/pkg/util/encode/encode.go b/pkg/util/encode/encode.go index 2842127ab9..5ef2691340 100644 --- a/pkg/util/encode/encode.go +++ b/pkg/util/encode/encode.go @@ -24,8 +24,8 @@ import ( "github.com/pkg/errors" "k8s.io/apimachinery/pkg/runtime" - "github.com/heptio/ark/pkg/apis/ark/v1" - "github.com/heptio/ark/pkg/generated/clientset/versioned/scheme" + v1 "github.com/heptio/velero/pkg/apis/velero/v1" + "github.com/heptio/velero/pkg/generated/clientset/versioned/scheme" ) // Encode converts the provided object to the specified format diff --git a/pkg/util/logging/log_level_flag.go b/pkg/util/logging/log_level_flag.go index 88a0410a8a..aa8b05562f 100644 --- a/pkg/util/logging/log_level_flag.go +++ b/pkg/util/logging/log_level_flag.go @@ -21,7 +21,7 @@ import ( "github.com/sirupsen/logrus" - "github.com/heptio/ark/pkg/cmd/util/flag" + "github.com/heptio/velero/pkg/cmd/util/flag" ) var sortedLogLevels = sortLogLevels() diff --git a/pkg/util/logging/log_location_hook.go b/pkg/util/logging/log_location_hook.go index 2d4fc5a12a..7843dec45f 100644 --- a/pkg/util/logging/log_location_hook.go +++ b/pkg/util/logging/log_location_hook.go @@ -28,16 +28,16 @@ const ( logSourceField = "logSource" logSourceSetMarkerField = "@logSourceSetBy" logrusPackage = "github.com/sirupsen/logrus" - arkPackage = "github.com/heptio/ark/" - arkPackageLen = len(arkPackage) + veleroPackage = "github.com/heptio/velero/" + veleroPackageLen = len(veleroPackage) ) // 
LogLocationHook is a logrus hook that attaches location information
 // to log entries, i.e. the file and line number of the logrus log call.
-// This hook is designed for use in both the Ark server and Ark plugin
+// This hook is designed for use in both the Velero server and Velero plugin
 // implementations. When triggered within a plugin, a marker field will
 // be set on the log entry indicating that the location came from a plugin.
-// The Ark server instance will not overwrite location information if
+// The Velero server instance will not overwrite location information if
 // it sees this marker.
 type LogLocationHook struct {
 	loggerName string
@@ -60,7 +60,7 @@ func (h *LogLocationHook) Fire(entry *logrus.Entry) error {
 	// skip 2 frames:
 	//   runtime.Callers
-	//   github.com/heptio/ark/pkg/util/logging/(*LogLocationHook).Fire
+	//   github.com/heptio/velero/pkg/util/logging/(*LogLocationHook).Fire
 	n := runtime.Callers(2, pcs)

 	// re-slice pcs based on the number of entries written
@@ -87,15 +87,15 @@ func (h *LogLocationHook) Fire(entry *logrus.Entry) error {
 	}

 	// record the log statement location if we're within a plugin OR if
-	// we're in Ark server and not logging something that has the marker
+	// we're in Velero server and not logging something that has the marker
 	// set (which would indicate the log statement is coming from a plugin).
 	if h.loggerName != "" || getLogSourceSetMarker(entry) == "" {
-		file := removeArkPackagePrefix(frame.File)
+		file := removeVeleroPackagePrefix(frame.File)
 		entry.Data[logSourceField] = fmt.Sprintf("%s:%d", file, frame.Line)
 	}

-	// if we're in the Ark server, remove the marker field since we don't
+	// if we're in the Velero server, remove the marker field since we don't
 	// want to record it in the actual log.
 	if h.loggerName == "" {
 		delete(entry.Data, logSourceSetMarkerField)
@@ -120,10 +120,10 @@ func getLogSourceSetMarker(entry *logrus.Entry) string {
 	return fmt.Sprintf("%s", nameVal)
 }

-func removeArkPackagePrefix(file string) string {
-	if index := strings.Index(file, arkPackage); index != -1 {
-		// strip off .../github.com/heptio/ark/ so we just have pkg/...
-		return file[index+arkPackageLen:]
+func removeVeleroPackagePrefix(file string) string {
+	if index := strings.Index(file, veleroPackage); index != -1 {
+		// strip off .../github.com/heptio/velero/ so we just have pkg/...
+		return file[index+veleroPackageLen:]
 	}

 	return file
diff --git a/pkg/util/logging/log_location_hook_test.go b/pkg/util/logging/log_location_hook_test.go
index b132f73ac8..acae0ef869 100644
--- a/pkg/util/logging/log_location_hook_test.go
+++ b/pkg/util/logging/log_location_hook_test.go
@@ -22,7 +22,7 @@ import (
 	"github.com/stretchr/testify/assert"
 )

-func TestRemoveArkPackagePrefix(t *testing.T) {
-	assert.Equal(t, "pkg/foo.go", removeArkPackagePrefix("github.com/heptio/ark/pkg/foo.go"))
-	assert.Equal(t, "github.com/heptio/ark-plugin-example/foo.go", removeArkPackagePrefix("github.com/heptio/ark-plugin-example/foo.go"))
+func TestRemoveVeleroPackagePrefix(t *testing.T) {
+	assert.Equal(t, "pkg/foo.go", removeVeleroPackagePrefix("github.com/heptio/velero/pkg/foo.go"))
+	assert.Equal(t, "github.com/heptio/velero-plugin-example/foo.go", removeVeleroPackagePrefix("github.com/heptio/velero-plugin-example/foo.go"))
 }
diff --git a/pkg/util/test/fake_block_store.go b/pkg/util/test/fake_block_store.go
index b512bf3b41..1c15a1d57a 100644
--- a/pkg/util/test/fake_block_store.go
+++ b/pkg/util/test/fake_block_store.go
@@ -22,7 +22,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"

-	api "github.com/heptio/ark/pkg/apis/ark/v1"
+	api "github.com/heptio/velero/pkg/apis/velero/v1"
 )

 type FakeBlockStore struct {
diff --git a/pkg/util/test/fake_dynamic.go b/pkg/util/test/fake_dynamic.go
index 1a95be5d1f..a15ea1b307 100644
--- a/pkg/util/test/fake_dynamic.go
+++ b/pkg/util/test/fake_dynamic.go
@@ -24,7 +24,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/watch"

-	"github.com/heptio/ark/pkg/client"
+	"github.com/heptio/velero/pkg/client"
 )

 type FakeDynamicFactory struct {
diff --git a/pkg/util/test/fake_file_system.go b/pkg/util/test/fake_file_system.go
index 176415fc69..b7e0f243fd 100644
--- a/pkg/util/test/fake_file_system.go
+++ b/pkg/util/test/fake_file_system.go
@@ -21,7 +21,7 @@ import (
 	"github.com/spf13/afero"

-	"github.com/heptio/ark/pkg/util/filesystem"
+	"github.com/heptio/velero/pkg/util/filesystem"
 )

 type FakeFileSystem struct {
diff --git a/pkg/util/test/mock_pod_command_executor.go b/pkg/util/test/mock_pod_command_executor.go
index 97de9c8329..1034a2315a 100644
--- a/pkg/util/test/mock_pod_command_executor.go
+++ b/pkg/util/test/mock_pod_command_executor.go
@@ -19,7 +19,7 @@ import (
 	"github.com/sirupsen/logrus"
 	"github.com/stretchr/testify/mock"

-	"github.com/heptio/ark/pkg/apis/ark/v1"
+	v1 "github.com/heptio/velero/pkg/apis/velero/v1"
 )

 type MockPodCommandExecutor struct {
diff --git a/pkg/util/test/test_backup.go b/pkg/util/test/test_backup.go
index bc3970a313..540ef3d4ac 100644
--- a/pkg/util/test/test_backup.go
+++ b/pkg/util/test/test_backup.go
@@ -21,7 +21,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

-	"github.com/heptio/ark/pkg/apis/ark/v1"
+	v1 "github.com/heptio/velero/pkg/apis/velero/v1"
 )

 type TestBackup struct {
diff --git a/pkg/util/test/test_backup_storage_location.go b/pkg/util/test/test_backup_storage_location.go
index d18d7c4286..8fc7ef605c 100644
--- a/pkg/util/test/test_backup_storage_location.go
+++ b/pkg/util/test/test_backup_storage_location.go
@@ -19,7 +19,7 @@ package test
 import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

-	"github.com/heptio/ark/pkg/apis/ark/v1"
+	v1 "github.com/heptio/velero/pkg/apis/velero/v1"
 )

 type TestBackupStorageLocation struct {
diff --git a/pkg/util/test/test_restore.go b/pkg/util/test/test_restore.go
index 0aed7a9fd7..5adaf80903 100644
--- a/pkg/util/test/test_restore.go
+++ b/pkg/util/test/test_restore.go
@@ -19,7 +19,7 @@ package test
 import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

-	api "github.com/heptio/ark/pkg/apis/ark/v1"
+	api "github.com/heptio/velero/pkg/apis/velero/v1"
 )

 type TestRestore struct {
diff --git a/pkg/util/test/test_schedule.go b/pkg/util/test/test_schedule.go
index 3735cef99e..6e2cb56140 100644
--- a/pkg/util/test/test_schedule.go
+++ b/pkg/util/test/test_schedule.go
@@ -21,7 +21,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

-	api "github.com/heptio/ark/pkg/apis/ark/v1"
+	api "github.com/heptio/velero/pkg/apis/velero/v1"
 )

 type TestSchedule struct {
diff --git a/pkg/util/test/test_volume_snapshot_location.go b/pkg/util/test/test_volume_snapshot_location.go
index 831b952418..57145e0680 100644
--- a/pkg/util/test/test_volume_snapshot_location.go
+++ b/pkg/util/test/test_volume_snapshot_location.go
@@ -19,7 +19,7 @@ package test
 import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

-	"github.com/heptio/ark/pkg/apis/ark/v1"
+	v1 "github.com/heptio/velero/pkg/apis/velero/v1"
 )

 type TestVolumeSnapshotLocation struct {
diff --git a/pkg/volume/snapshot.go b/pkg/volume/snapshot.go
index 1aed9f9e5c..70c6594edf 100644
--- a/pkg/volume/snapshot.go
+++ b/pkg/volume/snapshot.go
@@ -17,7 +17,7 @@ limitations under the License.
 package volume

 // Snapshot stores information about a persistent volume snapshot taken as
-// part of an Ark backup.
+// part of a Velero backup.
 type Snapshot struct {
 	Spec SnapshotSpec `json:"spec"`

@@ -25,11 +25,11 @@ type Snapshot struct {
 }

 type SnapshotSpec struct {
-	// BackupName is the name of the Ark backup this snapshot
+	// BackupName is the name of the Velero backup this snapshot
 	// is associated with.
 	BackupName string `json:"backupName"`

-	// BackupUID is the UID of the Ark backup this snapshot
+	// BackupUID is the UID of the Velero backup this snapshot
 	// is associated with.
 	BackupUID string `json:"backupUID"`

@@ -64,7 +64,7 @@ type SnapshotStatus struct {
 	Phase SnapshotPhase `json:"phase,omitempty"`
 }

-// SnapshotPhase is the lifecyle phase of an Ark volume snapshot.
+// SnapshotPhase is the lifecyle phase of a Velero volume snapshot.
 type SnapshotPhase string

 const (
diff --git a/third_party/kubernetes/pkg/kubectl/cmd/completion.go b/third_party/kubernetes/pkg/kubectl/cmd/completion.go
index 731e6542cd..62a4f22727 100644
--- a/third_party/kubernetes/pkg/kubectl/cmd/completion.go
+++ b/third_party/kubernetes/pkg/kubectl/cmd/completion.go
@@ -26,14 +26,14 @@ import (
 	"github.com/spf13/cobra"
 )

-func GenZshCompletion(out io.Writer, ark *cobra.Command) {
+func GenZshCompletion(out io.Writer, velero *cobra.Command) {

-	zshHead := "#compdef ark\n"
+	zshHead := "#compdef velero\n"

 	out.Write([]byte(zshHead))

 	zshInitialization := `
-__ark_bash_source() {
+__velero_bash_source() {
 	alias shopt=':'
 	alias _expand=_bash_expand
 	alias _complete=_bash_comp
@@ -41,7 +41,7 @@ __ark_bash_source() {
 	setopt kshglob noshglob braceexpand
 	source "$@"
 }
-__ark_type() {
+__velero_type() {
 	# -t is not supported by zsh
 	if [ "$1" == "-t" ]; then
 		shift
@@ -49,14 +49,14 @@ __ark_type() {
 		# "compopt +-o nospace" is used in the code to toggle trailing
 		# spaces. We don't support that, but leave trailing spaces on
 		# all the time
-		if [ "$1" = "__ark_compopt" ]; then
+		if [ "$1" = "__velero_compopt" ]; then
 			echo builtin
 			return 0
 		fi
 	fi
 	type "$@"
 }
-__ark_compgen() {
+__velero_compgen() {
 	local completions w
 	completions=( $(compgen "$@") ) || return $?
 	# filter by given word as prefix
@@ -73,10 +73,10 @@ __ark_compgen() {
 		fi
 	done
 }
-__ark_compopt() {
+__velero_compopt() {
 	true # don't do anything. Not supported by bashcompinit in zsh
 }
-__ark_ltrim_colon_completions()
+__velero_ltrim_colon_completions()
 {
 	if [[ "$1" == *:* && "$COMP_WORDBREAKS" == *:* ]]; then
 		# Remove colon-word prefix from COMPREPLY items
@@ -87,15 +87,15 @@ __ark_ltrim_colon_completions()
 		done
 	fi
 }
-__ark_get_comp_words_by_ref() {
+__velero_get_comp_words_by_ref() {
 	cur="${COMP_WORDS[COMP_CWORD]}"
 	prev="${COMP_WORDS[${COMP_CWORD}-1]}"
 	words=("${COMP_WORDS[@]}")
 	cword=("${COMP_CWORD[@]}")
 }
-__ark_filedir() {
+__velero_filedir() {
 	local RET OLD_IFS w qw
-	__ark_debug "_filedir $@ cur=$cur"
+	__velero_debug "_filedir $@ cur=$cur"
 	if [[ "$1" = \~* ]]; then
 		# somehow does not work. Maybe, zsh does not call this at all
 		eval echo "$1"
@@ -110,13 +110,13 @@ __ark_filedir() {
 		RET=( $(compgen -f) )
 	fi
 	IFS="$OLD_IFS"
-	IFS="," __ark_debug "RET=${RET[@]} len=${#RET[@]}"
+	IFS="," __velero_debug "RET=${RET[@]} len=${#RET[@]}"
 	for w in ${RET[@]}; do
 		if [[ ! "${w}" = "${cur}"* ]]; then
 			continue
 		fi
 		if eval "[[ \"\${w}\" = *.$1 || -d \"\${w}\" ]]"; then
-			qw="$(__ark_quote "${w}")"
+			qw="$(__velero_quote "${w}")"
 			if [ -d "${w}" ]; then
 				COMPREPLY+=("${qw}/")
 			else
@@ -125,7 +125,7 @@ __ark_filedir() {
 		fi
 	done
 }
-__ark_quote() {
+__velero_quote() {
 	if [[ $1 == \'* || $1 == \"* ]]; then
 		# Leave out first character
 		printf %q "${1:1}"
@@ -141,33 +141,33 @@ if sed --help 2>&1 | grep -q GNU; then
 	LWORD='\<'
 	RWORD='\>'
 fi
-__ark_convert_bash_to_zsh() {
+__velero_convert_bash_to_zsh() {
 	sed \
 	-e 's/declare -F/whence -w/' \
 	-e 's/_get_comp_words_by_ref "\$@"/_get_comp_words_by_ref "\$*"/' \
 	-e 's/local \([a-zA-Z0-9_]*\)=/local \1; \1=/' \
 	-e 's/flags+=("\(--.*\)=")/flags+=("\1"); two_word_flags+=("\1")/' \
 	-e 's/must_have_one_flag+=("\(--.*\)=")/must_have_one_flag+=("\1")/' \
-	-e "s/${LWORD}_filedir${RWORD}/__ark_filedir/g" \
-	-e "s/${LWORD}_get_comp_words_by_ref${RWORD}/__ark_get_comp_words_by_ref/g" \
-	-e "s/${LWORD}__ltrim_colon_completions${RWORD}/__ark_ltrim_colon_completions/g" \
-	-e "s/${LWORD}compgen${RWORD}/__ark_compgen/g" \
-	-e "s/${LWORD}compopt${RWORD}/__ark_compopt/g" \
+	-e "s/${LWORD}_filedir${RWORD}/__velero_filedir/g" \
+	-e "s/${LWORD}_get_comp_words_by_ref${RWORD}/__velero_get_comp_words_by_ref/g" \
+	-e "s/${LWORD}__ltrim_colon_completions${RWORD}/__velero_ltrim_colon_completions/g" \
+	-e "s/${LWORD}compgen${RWORD}/__velero_compgen/g" \
+	-e "s/${LWORD}compopt${RWORD}/__velero_compopt/g" \
 	-e "s/${LWORD}declare${RWORD}/builtin declare/g" \
-	-e "s/\\\$(type${RWORD}/\$(__ark_type/g" \
+	-e "s/\\\$(type${RWORD}/\$(__velero_type/g" \
 	<<'BASH_COMPLETION_EOF'
 `
 	out.Write([]byte(zshInitialization))

 	buf := new(bytes.Buffer)
-	ark.GenBashCompletion(buf)
+	velero.GenBashCompletion(buf)
 	out.Write(buf.Bytes())

 	zshTail := `
 BASH_COMPLETION_EOF
 }
-__ark_bash_source <(__ark_convert_bash_to_zsh)
-_complete ark 2>/dev/null
+__velero_bash_source <(__velero_convert_bash_to_zsh)
+_complete velero 2>/dev/null
 `
 	out.Write([]byte(zshTail))
 }
diff --git a/velero.png b/velero.png
new file mode 100644
index 0000000000..b616651963
Binary files /dev/null and b/velero.png differ