diff --git a/changelogs/unreleased/5388-blackpiglet b/changelogs/unreleased/5388-blackpiglet
new file mode 100644
index 0000000000..2bd9f73752
--- /dev/null
+++ b/changelogs/unreleased/5388-blackpiglet
@@ -0,0 +1 @@
+Add corner-case checks for CSI snapshots in the backup controller.
\ No newline at end of file
diff --git a/changelogs/unreleased/5471-kcboyle b/changelogs/unreleased/5471-kcboyle
new file mode 100644
index 0000000000..341cef245f
--- /dev/null
+++ b/changelogs/unreleased/5471-kcboyle
@@ -0,0 +1,6 @@
+Update the k8s.io dependencies to 0.24.0.
+This also required an update to github.com/bombsimon/logrusr/v3.
+Removed the `WithClusterName` method
+as it is a "legacy field that was
+always cleared by the system and never used" as per upstream k8s
+https://github.com/kubernetes/apimachinery/blob/release-1.24/pkg/apis/meta/v1/types.go#L257-L259
\ No newline at end of file
diff --git a/changelogs/unreleased/5478-lyndon b/changelogs/unreleased/5478-lyndon
new file mode 100644
index 0000000000..d068694491
--- /dev/null
+++ b/changelogs/unreleased/5478-lyndon
@@ -0,0 +1 @@
+Fix issue 5477: create a common way to support S3-compatible object storage that works for both Restic and Kopia; keep the resticRepoPrefix parameter for compatibility
\ No newline at end of file
diff --git a/changelogs/unreleased/5483-blackpiglet b/changelogs/unreleased/5483-blackpiglet
new file mode 100644
index 0000000000..59d6accf3b
--- /dev/null
+++ b/changelogs/unreleased/5483-blackpiglet
@@ -0,0 +1 @@
+Remove the redundant code block left behind by #5388.
\ No newline at end of file
diff --git a/changelogs/unreleased/5484-lyndon b/changelogs/unreleased/5484-lyndon
new file mode 100644
index 0000000000..6bd2dced1f
--- /dev/null
+++ b/changelogs/unreleased/5484-lyndon
@@ -0,0 +1 @@
+Refactor the Pod Volume Backup/Restore doc to match the new behavior
diff --git a/changelogs/unreleased/5499-lyndon b/changelogs/unreleased/5499-lyndon
new file mode 100644
index 0000000000..613f6aaeea
--- /dev/null
+++ b/changelogs/unreleased/5499-lyndon
@@ -0,0 +1 @@
+After the Pod Volume Backup/Restore refactor, remove all inappropriate occurrences of the word "restic" from the documents
diff --git a/changelogs/unreleased/5512-lyndon b/changelogs/unreleased/5512-lyndon
new file mode 100644
index 0000000000..2712304b38
--- /dev/null
+++ b/changelogs/unreleased/5512-lyndon
@@ -0,0 +1 @@
+Fix issue 5505: all pod volume backups/restores except the first one fail under the kopia path if "AZURE_CLOUD_NAME" is specified
diff --git a/changelogs/unreleased/5521-blackpiglet b/changelogs/unreleased/5521-blackpiglet
new file mode 100644
index 0000000000..93c0b59412
--- /dev/null
+++ b/changelogs/unreleased/5521-blackpiglet
@@ -0,0 +1 @@
+Add a credential store to the backup deletion controller to support VSL credentials.
\ No newline at end of file
diff --git a/changelogs/unreleased/5526-qiuming-best b/changelogs/unreleased/5526-qiuming-best
new file mode 100644
index 0000000000..772f6d4d90
--- /dev/null
+++ b/changelogs/unreleased/5526-qiuming-best
@@ -0,0 +1 @@
+Fix restic backup failure with a self-signed certificate on the backend storage
diff --git a/changelogs/unreleased/5534-qiuming-best b/changelogs/unreleased/5534-qiuming-best
new file mode 100644
index 0000000000..09faeac0c2
--- /dev/null
+++ b/changelogs/unreleased/5534-qiuming-best
@@ -0,0 +1 @@
+Fix restic backup progress error
diff --git a/changelogs/unreleased/5535-ywk253100 b/changelogs/unreleased/5535-ywk253100
new file mode 100644
index 0000000000..186672ea0c
--- /dev/null
+++ b/changelogs/unreleased/5535-ywk253100
@@ -0,0 +1 @@
+Enhance the restore priorities list to support specifying low-priority resources that need to be restored last
\ No newline at end of file
diff --git a/design/Implemented/backup-resources-order.md b/design/Implemented/backup-resources-order.md
index d888ad2371..92d6a6bebb 100644
--- a/design/Implemented/backup-resources-order.md
+++ b/design/Implemented/backup-resources-order.md
@@ -2,7 +2,7 @@
 This document proposes a solution that allows user to specify a backup order for resources of specific resource type.
 
 ## Background
-During backup process, user may need to back up resources of specific type in some specific order to ensure the resources were backup properly because these resources are related and ordering might be required to preserve the consistency for the apps to recover itself �from the backup image
+During the backup process, users may need to back up resources of a specific type in a specific order to ensure the resources are backed up properly, because these resources are related and ordering may be required to preserve the consistency the apps need to recover themselves from the backup image
 (Ex: primary-secondary database pods in a cluster).
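For context on the ordering described in the Background above: in the implemented design this surfaces as an `orderedResources` map on the Backup spec, keyed by resource type with an ordered list of object names. A minimal Go sketch follows (the field names match the implemented API; the namespace, pod names, and "namespace/name" value format are illustrative assumptions):

```go
package main

import (
	"fmt"

	velerov1 "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Back up the primary pod before the secondary one; pods not listed here
	// are backed up afterwards in the default order.
	backup := velerov1.Backup{
		ObjectMeta: metav1.ObjectMeta{Name: "ordered-backup", Namespace: "velero"},
		Spec: velerov1.BackupSpec{
			IncludedNamespaces: []string{"db"}, // hypothetical namespace for illustration
			OrderedResources: map[string]string{
				"pods": "db/mysql-primary,db/mysql-secondary", // assumed "namespace/name" list format
			},
		},
	}
	fmt.Println(backup.Spec.OrderedResources["pods"])
}
```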
## Goals diff --git a/go.mod b/go.mod index 145babd7d2..c69d4c2edd 100644 --- a/go.mod +++ b/go.mod @@ -11,13 +11,13 @@ require ( github.com/Azure/go-autorest/autorest/azure/auth v0.5.8 github.com/Azure/go-autorest/autorest/to v0.3.0 github.com/aws/aws-sdk-go v1.43.31 - github.com/bombsimon/logrusr v1.1.0 - github.com/evanphx/json-patch v4.11.0+incompatible + github.com/bombsimon/logrusr/v3 v3.0.0 + github.com/evanphx/json-patch v5.6.0+incompatible github.com/fatih/color v1.13.0 github.com/gobwas/glob v0.2.3 github.com/gofrs/uuid v3.2.0+incompatible github.com/golang/protobuf v1.5.2 - github.com/google/go-cmp v0.5.7 + github.com/google/go-cmp v0.5.8 github.com/google/uuid v1.3.0 github.com/hashicorp/go-hclog v0.14.1 github.com/hashicorp/go-plugin v1.4.3 @@ -25,31 +25,32 @@ require ( github.com/kopia/kopia v0.10.7 github.com/kubernetes-csi/external-snapshotter/client/v4 v4.2.0 github.com/onsi/ginkgo v1.16.5 - github.com/onsi/gomega v1.16.0 + github.com/onsi/gomega v1.18.1 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.12.1 + github.com/prometheus/client_golang v1.12.2 github.com/robfig/cron v1.1.0 github.com/sirupsen/logrus v1.8.1 github.com/spf13/afero v1.6.0 - github.com/spf13/cobra v1.2.1 + github.com/spf13/cobra v1.4.0 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.7.1 github.com/vmware-tanzu/crash-diagnostics v0.3.7 - golang.org/x/mod v0.5.1 - golang.org/x/net v0.0.0-20220325170049-de3da57026de - golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a + golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 + golang.org/x/net v0.0.0-20220615171555-694bf12d69de + golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb golang.org/x/sync v0.0.0-20210220032951-036812b2e83c google.golang.org/api v0.74.0 google.golang.org/grpc v1.45.0 google.golang.org/protobuf v1.28.0 - k8s.io/api v0.22.2 - k8s.io/apiextensions-apiserver v0.22.2 - k8s.io/apimachinery v0.22.2 - k8s.io/cli-runtime v0.22.2 - k8s.io/client-go v0.22.2 - k8s.io/klog/v2 v2.9.0 + k8s.io/api v0.24.1 + k8s.io/apiextensions-apiserver v0.24.1 + k8s.io/apimachinery v0.24.1 + k8s.io/cli-runtime v0.24.0 + k8s.io/client-go v0.24.1 + k8s.io/klog/v2 v2.60.1 k8s.io/kube-aggregator v0.19.12 - sigs.k8s.io/controller-runtime v0.10.2 + k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 + sigs.k8s.io/controller-runtime v0.12.1 sigs.k8s.io/yaml v1.3.0 ) @@ -73,27 +74,33 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect github.com/dustin/go-humanize v1.0.0 // indirect - github.com/fsnotify/fsnotify v1.5.1 // indirect - github.com/go-logr/logr v0.4.0 // indirect - github.com/go-logr/zapr v0.4.0 // indirect + github.com/emicklei/go-restful/v3 v3.8.0 // indirect + github.com/fsnotify/fsnotify v1.5.4 // indirect + github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/zapr v1.2.0 // indirect + github.com/go-openapi/jsonpointer v0.19.5 // indirect + github.com/go-openapi/jsonreference v0.20.0 // indirect + github.com/go-openapi/swag v0.21.1 // indirect github.com/gofrs/flock v0.8.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.4.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/google/gnostic v0.6.9 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/googleapis/gax-go/v2 v2.2.0 // indirect - github.com/googleapis/gnostic v0.5.5 // indirect github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/hashicorp/yamux 
v0.0.0-20180604194846-3520598351bb // indirect - github.com/imdario/mergo v0.3.12 // indirect + github.com/imdario/mergo v0.3.13 // indirect github.com/inconshreveable/mousetrap v1.0.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.15.1 // indirect github.com/klauspost/cpuid/v2 v2.0.12 // indirect github.com/klauspost/pgzip v1.2.5 // indirect github.com/kr/pretty v0.3.0 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect + github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.12 // indirect github.com/mattn/go-ieproxy v0.0.1 // indirect github.com/mattn/go-isatty v0.0.14 // indirect @@ -106,13 +113,14 @@ require ( github.com/moby/spdystream v0.2.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/natefinch/atomic v1.0.1 // indirect github.com/nxadm/tail v1.4.8 // indirect github.com/oklog/run v1.0.0 // indirect github.com/pierrec/lz4 v2.6.1+incompatible // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.2.0 // indirect - github.com/prometheus/common v0.32.1 // indirect + github.com/prometheus/common v0.34.0 // indirect github.com/prometheus/procfs v0.7.3 // indirect github.com/rs/xid v1.3.0 // indirect github.com/stretchr/objx v0.2.0 // indirect @@ -125,10 +133,10 @@ require ( go.uber.org/zap v1.21.0 // indirect golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd // indirect golang.org/x/exp v0.0.0-20210916165020-5cb4fee858ee // indirect - golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886 // indirect - golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect + golang.org/x/sys v0.0.0-20220614162138-6c1b26c55098 // indirect + golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect golang.org/x/text v0.3.7 // indirect - golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect + golang.org/x/time v0.0.0-20220609170525-579cf78fd858 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect google.golang.org/appengine v1.6.7 // indirect @@ -138,10 +146,10 @@ require ( gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/component-base v0.22.2 // indirect - k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e // indirect - k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.1.2 // indirect + k8s.io/component-base v0.24.1 // indirect + k8s.io/kube-openapi v0.0.0-20220614142933-1062c7ade5f8 // indirect + sigs.k8s.io/json v0.0.0-20220525155127-227cbc7cc124 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect ) replace github.com/gogo/protobuf => github.com/gogo/protobuf v1.3.2 diff --git a/go.sum b/go.sum index 2f90eb7cce..7f200bfc43 100644 --- a/go.sum +++ b/go.sum @@ -71,7 +71,6 @@ github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v0.3.0/go.mod h1:tPaiy8S5bQ github.com/Azure/azure-storage-blob-go v0.14.0 h1:1BCg74AmVdYwO3dlKwtFU1V0wU2PZdREkXvAmZJRUlM= github.com/Azure/azure-storage-blob-go v0.14.0/go.mod h1:SMqIBi+SuiQH32bvyjngEewEeXoPfKMgWlBDaYf6fck= github.com/Azure/go-ansiterm 
v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= @@ -128,10 +127,13 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go v1.43.31 h1:yJZIr8nMV1hXjAvvOLUFqZRJcHV7udPQBfhJqawDzI0= github.com/aws/aws-sdk-go v1.43.31/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= @@ -146,9 +148,10 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/bombsimon/logrusr v1.1.0 h1:Y03FI4Z/Shyrc9jF26vuaUbnPxC5NMJnTtJA/3Lihq8= -github.com/bombsimon/logrusr v1.1.0/go.mod h1:Jq0nHtvxabKE5EMwAAdgTaz7dfWE8C4i11NOltxGQpc= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/bombsimon/logrusr/v3 v3.0.0 h1:tcAoLfuAhKP9npBxWzSdpsvKPQt1XV02nSf2lZA82TQ= +github.com/bombsimon/logrusr/v3 v3.0.0/go.mod h1:PksPPgSFEL2I52pla2glgCyyd2OqOHAnFF5E+g8Ixco= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= @@ -186,6 +189,7 @@ github.com/coreos/go-systemd/v22 v22.3.2/go.mod 
h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -209,6 +213,8 @@ github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7fo github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw= +github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -220,19 +226,24 @@ github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go. github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs= github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= +github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/frankban/quicktest v1.13.1 h1:xVm/f9seEhZFL9+n5kv5XLrGwy6elc4V9v/XFY2vmd8= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.1 
h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -243,29 +254,38 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/zapr v0.4.0 h1:uc1uML3hRYL9/ZZPdgHS/n8Nzo+eaYL/Efxkkamf7OM= -github.com/go-logr/zapr v0.4.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk= +github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= +github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= 
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.21.1 h1:wm0rhTb5z7qpJRHBdPOMuY4QjVUMbF6/kwoYeRAOrKU= +github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= @@ -281,6 +301,7 @@ github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzw github.com/golang-jwt/jwt/v4 v4.4.1 h1:pC5DB52sCeK48Wlb9oPcdhnjkz1TKt1D/P7WKJ0kUcQ= github.com/golang-jwt/jwt/v4 v4.4.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -319,6 +340,11 @@ github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/cel-go v0.10.1/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w= +github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA= +github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= +github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -331,8 +357,9 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8 
h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= @@ -354,6 +381,7 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= @@ -373,9 +401,9 @@ github.com/googleapis/gax-go/v2 v2.2.0 h1:s7jOdKSaksJVOxE0Y/S32otcfiP+UQ0cL8/GTK github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= -github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= @@ -420,8 +448,8 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1: github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= +github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -435,6 +463,7 @@ github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod 
h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= @@ -459,7 +488,6 @@ github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuOb github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kopia/kopia v0.10.7 h1:6s0ZIZW3Ge2ozzefddASy7CIUadp/5tF9yCDKQfAKKI= github.com/kopia/kopia v0.10.7/go.mod h1:0d9THPD+jwomPcXvPbCdmLyX6phQVP7AqcCcDEajfNA= @@ -488,6 +516,8 @@ github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= @@ -528,7 +558,7 @@ github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= -github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= +github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -539,6 +569,7 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= github.com/munnerz/goautoneg 
v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= @@ -561,13 +592,15 @@ github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9k github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.0.0 h1:CcuG/HvWNkkaqCUpJifQY8z7qEMBJya6aLPx6ftGyjQ= +github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= -github.com/onsi/gomega v1.16.0 h1:6gjqkI8iiRHMvdccRJM8rVKjCWk6ZIm6FTm3ddIe4/c= -github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= +github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= @@ -589,8 +622,9 @@ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDf github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34= +github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -601,8 +635,9 @@ github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.34.0 h1:RBmGO9d/FVjqHT0yUGQwBJhkwKV+wPCn7KGpvfab0uE= +github.com/prometheus/common v0.34.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -621,6 +656,7 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rs/xid v1.3.0 h1:6NjYksEUlhurdVehpc7S7dk6DAmcKv8V9gG0FsVN2U4= github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= @@ -645,8 +681,9 @@ github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= -github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw= github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= +github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q= +github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -680,6 +717,9 @@ github.com/vladimirvivien/gexe v0.1.1 h1:2A0SBaOSKH+cwLVdt6H+KkHZotZWRNLlWygANGw github.com/vladimirvivien/gexe v0.1.1/go.mod h1:LHQL00w/7gDUKIak24n801ABp8C+ni6eBht9vGVst8w= github.com/vmware-tanzu/crash-diagnostics v0.3.7 h1:6gbv/3o1FzyRLS7Dz/+yVg1Lk1oRBQLyI3d1YTtlTT8= github.com/vmware-tanzu/crash-diagnostics v0.3.7/go.mod h1:gO8670rd+qdjnJVol674snT/A46GQ27u085kKhZznlM= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod 
h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= @@ -688,6 +728,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/zeebo/assert v1.1.0 h1:hU1L1vLTHsnO8x8c9KAR5GmM5QscxHg5RNU5z5qbUWY= github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= github.com/zeebo/blake3 v0.2.3 h1:TFoLXsjeXqRNFxSbk35Dk4YtszE/MQQGK10BH4ptoTg= @@ -700,9 +741,12 @@ go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= +go.etcd.io/etcd/client/v3 v3.5.1/go.mod h1:OnjH4M8OnAotwaB2l9bVgZzRFKru7/ZMoS46OtKyd3Q= go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= @@ -735,8 +779,8 @@ go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= @@ -760,6 +804,7 @@ golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto 
v0.0.0-20220315160706-3147a52a75dd h1:XcWmESyNjXJMLahc3mqVQJcgSTDxFxhETVlfk9uGc38= golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -799,8 +844,8 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.5.1 h1:OJxoQ/rynoF0dcCdI7cLPktw/hR2cueqYfjm43oqK38= -golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o= +golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -855,11 +900,14 @@ golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220325170049-de3da57026de h1:pZB1TWnKi+o4bENlbzAgLrEbY4RMYmUIRobMcSmfeYc= golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220615171555-694bf12d69de h1:ogOG2+P6LjO2j55AkRScrkB2BFpd+Z8TY2wcM0Z3MGo= +golang.org/x/net v0.0.0-20220615171555-694bf12d69de/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -878,8 +926,9 @@ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a 
h1:qfl7ob3DIEs3Ml9oLuPwY2N04gymzAW04WsUQHIClgM= golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb h1:8tDJ3aechhddbdPAxpycgXHJRMLpk/Ab+aa4OgdN5/g= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -925,7 +974,6 @@ golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -942,7 +990,6 @@ golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -968,8 +1015,8 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210817190340-bfb29a6856f2/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -980,13 +1027,16 @@ golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod 
h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886 h1:eJv7u3ksNXoLbGSKuv2s/SIO4tJVxc/A+MTpzxDgz/Q= golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220614162138-6c1b26c55098 h1:PgOr27OhUx2IRqGJ2RxAWI4dJQ7bi9cSrB82uzFzfUA= +golang.org/x/sys v0.0.0-20220614162138-6c1b26c55098/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 h1:CBpWXWQpIRjzmkkA+M7q9Fqnwd2mZr3AFqexg8YTfoM= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1003,8 +1053,10 @@ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220609170525-579cf78fd858 h1:Dpdu/EMxGMFgq0CeYMh4fazTD2vtlZRYE7wyynxJb9U= +golang.org/x/time v0.0.0-20220609170525-579cf78fd858/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1067,6 +1119,7 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= 
+golang.org/x/tools v0.1.10-0.20220218145154-897bd77cd717/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1156,6 +1209,7 @@ google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201102152239-715cce707fb0/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -1186,6 +1240,7 @@ google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220111164026-67b88f271998/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220114231437-d2e6a121cae0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= @@ -1280,6 +1335,7 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= @@ -1294,58 +1350,76 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.19.0/go.mod h1:I1K45XlvTrDjmj5LoM5LuP/KYrhWbjUKT/SoPG0qTjw= k8s.io/api v0.19.12/go.mod h1:EK+KvSq2urA6+CjVdZyAHEphXoLq2K2eW6lxOzTKSaY= -k8s.io/api v0.22.2 h1:M8ZzAD0V6725Fjg53fKeTJxGsJvRbk4TEm/fexHMtfw= k8s.io/api v0.22.2/go.mod h1:y3ydYpLJAaDI+BbSe2xmGcqxiWHmWjkEeIbiwHvnPR8= -k8s.io/apiextensions-apiserver 
v0.22.2 h1:zK7qI8Ery7j2CaN23UCFaC1hj7dMiI87n01+nKuewd4= -k8s.io/apiextensions-apiserver v0.22.2/go.mod h1:2E0Ve/isxNl7tWLSUDgi6+cmwHi5fQRdwGVCxbC+KFA= +k8s.io/api v0.24.0/go.mod h1:5Jl90IUrJHUJYEMANRURMiVvJ0g7Ax7r3R1bqO8zx8I= +k8s.io/api v0.24.1 h1:BjCMRDcyEYz03joa3K1+rbshwh1Ay6oB53+iUx2H8UY= +k8s.io/api v0.24.1/go.mod h1:JhoOvNiLXKTPQ60zh2g0ewpA+bnEYf5q44Flhquh4vQ= +k8s.io/apiextensions-apiserver v0.24.1 h1:5yBh9+ueTq/kfnHQZa0MAo6uNcPrtxPMpNQgorBaKS0= +k8s.io/apiextensions-apiserver v0.24.1/go.mod h1:A6MHfaLDGfjOc/We2nM7uewD5Oa/FnEbZ6cD7g2ca4Q= k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= k8s.io/apimachinery v0.19.12/go.mod h1:9eb44nUQSsz9QZiilFRuMj3ZbTmoWolU8S2gnXoRMjo= -k8s.io/apimachinery v0.22.2 h1:ejz6y/zNma8clPVfNDLnPbleBo6MpoFy/HBiBqCouVk= k8s.io/apimachinery v0.22.2/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= +k8s.io/apimachinery v0.24.0/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= +k8s.io/apimachinery v0.24.1 h1:ShD4aDxTQKN5zNf8K1RQ2u98ELLdIW7jEnlO9uAMX/I= +k8s.io/apimachinery v0.24.1/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= k8s.io/apiserver v0.19.12/go.mod h1:ldZAZTNIKfMMv/UUEhk6UyTXC0/34iRdNFHo+MJOPc4= -k8s.io/apiserver v0.22.2/go.mod h1:vrpMmbyjWrgdyOvZTSpsusQq5iigKNWv9o9KlDAbBHI= -k8s.io/cli-runtime v0.22.2 h1:fsd9rFk9FSaVq4SUq1fM27c8CFGsYZUJ/3BkgmjYWuY= +k8s.io/apiserver v0.24.1/go.mod h1:dQWNMx15S8NqJMp0gpYfssyvhYnkilc1LpExd/dkLh0= k8s.io/cli-runtime v0.22.2/go.mod h1:tkm2YeORFpbgQHEK/igqttvPTRIHFRz5kATlw53zlMI= +k8s.io/cli-runtime v0.24.0 h1:ot3Qf49T852uEyNApABO1UHHpFIckKK/NqpheZYN2gM= +k8s.io/cli-runtime v0.24.0/go.mod h1:9XxoZDsEkRFUThnwqNviqzljtT/LdHtNWvcNFrAXl0A= k8s.io/client-go v0.19.0/go.mod h1:H9E/VT95blcFQnlyShFgnFT9ZnJOAceiUHM3MlRC+mU= k8s.io/client-go v0.19.12/go.mod h1:BAGKQraZ6fDmXhT46pGXWZQQqN7P4E0BJux0+9O6Gt0= -k8s.io/client-go v0.22.2 h1:DaSQgs02aCC1QcwUdkKZWOeaVsQjYvWv8ZazcZ6JcHc= k8s.io/client-go v0.22.2/go.mod h1:sAlhrkVDf50ZHx6z4K0S40wISNTarf1r800F+RlCF6U= +k8s.io/client-go v0.24.0/go.mod h1:VFPQET+cAFpYxh6Bq6f4xyMY80G6jKKktU6G0m00VDw= +k8s.io/client-go v0.24.1 h1:w1hNdI9PFrzu3OlovVeTnf4oHDt+FJLd9Ndluvnb42E= +k8s.io/client-go v0.24.1/go.mod h1:f1kIDqcEYmwXS/vTbbhopMUbhKp2JhOeVTfxgaCIlF8= k8s.io/code-generator v0.19.0/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= k8s.io/code-generator v0.19.12/go.mod h1:ADrDvaUQWGn4a8lX0ONtzb7uFmDRQOMSYIMk1qWIAx8= -k8s.io/code-generator v0.22.2/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o= +k8s.io/code-generator v0.24.1/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI15w= k8s.io/component-base v0.19.12/go.mod h1:tpwExE0sY3A7CwtlxGL7SnQOdQfUlnFybT6GmAD+z/s= -k8s.io/component-base v0.22.2 h1:vNIvE0AIrLhjX8drH0BgCNJcR4QZxMXcJzBsDplDx9M= -k8s.io/component-base v0.22.2/go.mod h1:5Br2QhI9OTe79p+TzPe9JKNQYvEKbq9rTJDWllunGug= +k8s.io/component-base v0.24.1 h1:APv6W/YmfOWZfo+XJ1mZwep/f7g7Tpwvdbo9CQLDuts= +k8s.io/component-base v0.24.1/go.mod h1:DW5vQGYVCog8WYpNob3PMmmsY8A3L9QZNg4j/dV3s38= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod 
h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM= k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc= +k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-aggregator v0.19.12 h1:OwyNUe/7/gxzEnaLd3sC9Yrpx0fZAERzvFslX5Qq5g8= k8s.io/kube-aggregator v0.19.12/go.mod h1:K76wPd03pSHEmS1FgJOcpryac5C3va4cbCvSu+4EmE0= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= -k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e h1:KLHHjkdQFomZy8+06csTWZ0m1343QqxZhR2LJ1OxCYM= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= +k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= +k8s.io/kube-openapi v0.0.0-20220614142933-1062c7ade5f8 h1:IyQ1DifCBk589JD4Cm2CT2poIdO3lfPzz3WwVh1Ugf8= +k8s.io/kube-openapi v0.0.0-20220614142933-1062c7ade5f8/go.mod h1:guXtiQW/y/AWAfPSOaI/1eY0TGBAmL5OygiIyUOKDRc= k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b h1:wxEMGetGMur3J1xuGLQY7GEQYg9bZxKn3tKo5k/eYcs= -k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= +k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/controller-runtime v0.10.2 h1:jW8qiY+yMnnPx6O9hu63tgcwaKzd1yLYui+mpvClOOc= -sigs.k8s.io/controller-runtime v0.10.2/go.mod h1:CQp8eyUQZ/Q7PJvnIrB6/hgfTC1kBkGylwsLgOQi1WY= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw= +sigs.k8s.io/controller-runtime v0.12.1 h1:4BJY01xe9zKQti8oRjj/NeHKRXthf1YkYJAgLONFFoI= +sigs.k8s.io/controller-runtime v0.12.1/go.mod h1:BKhxlA4l7FPK4AQcsuL4X6vZeWnKDXez/vp1Y8dxTU0= +sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= +sigs.k8s.io/json v0.0.0-20220525155127-227cbc7cc124 h1:2sgAQQcY0dEW2SsQwTXhQV4vO6+rSslYx8K3XmM5hqQ= +sigs.k8s.io/json v0.0.0-20220525155127-227cbc7cc124/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= sigs.k8s.io/kustomize/api v0.8.11/go.mod h1:a77Ls36JdfCWojpUqR6m60pdGY1AYFix4AH83nJtY1g= +sigs.k8s.io/kustomize/api v0.11.4/go.mod h1:k+8RsqYbgpkIrJ4p9jcdPqe8DprLxFUUO0yNOq8C+xI= sigs.k8s.io/kustomize/kyaml v0.11.0/go.mod h1:GNMwjim4Ypgp/MueD3zXHLRJEjz7RvtPae0AwlvEMFM= +sigs.k8s.io/kustomize/kyaml v0.13.6/go.mod 
h1:yHP031rn1QX1lr/Xd934Ri/xdVNG8BE2ECa78Ht/kEg= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno= sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= +sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= +sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= diff --git a/pkg/builder/object_meta.go b/pkg/builder/object_meta.go index 92cfb29ef9..6c01019b47 100644 --- a/pkg/builder/object_meta.go +++ b/pkg/builder/object_meta.go @@ -117,14 +117,6 @@ func setMapEntries(m map[string]string, vals ...string) map[string]string { return m } -// WithClusterName is a functional option that applies the specified -// cluster name to an object. -func WithClusterName(val string) func(obj metav1.Object) { - return func(obj metav1.Object) { - obj.SetClusterName(val) - } -} - // WithFinalizers is a functional option that applies the specified // finalizers to an object. func WithFinalizers(vals ...string) func(obj metav1.Object) { diff --git a/pkg/builder/volume_snapshot_builder.go b/pkg/builder/volume_snapshot_builder.go new file mode 100644 index 0000000000..19815c0f05 --- /dev/null +++ b/pkg/builder/volume_snapshot_builder.go @@ -0,0 +1,69 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// VolumeSnapshotBuilder builds VolumeSnapshot objects. +type VolumeSnapshotBuilder struct { + object *snapshotv1api.VolumeSnapshot +} + +// ForVolumeSnapshot is the constructor for VolumeSnapshotBuilder. +func ForVolumeSnapshot(ns, name string) *VolumeSnapshotBuilder { + return &VolumeSnapshotBuilder{ + object: &snapshotv1api.VolumeSnapshot{ + TypeMeta: metav1.TypeMeta{ + APIVersion: snapshotv1api.SchemeGroupVersion.String(), + Kind: "VolumeSnapshot", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + }, + }, + } +} + +// ObjectMeta applies functional options to the VolumeSnapshot's ObjectMeta. +func (v *VolumeSnapshotBuilder) ObjectMeta(opts ...ObjectMetaOpt) *VolumeSnapshotBuilder { + for _, opt := range opts { + opt(v.object) + } + + return v +} + +// Result return the built VolumeSnapshot. 
+func (v *VolumeSnapshotBuilder) Result() *snapshotv1api.VolumeSnapshot { + return v.object +} + +// Status init the built VolumeSnapshot's status. +func (v *VolumeSnapshotBuilder) Status() *VolumeSnapshotBuilder { + v.object.Status = &snapshotv1api.VolumeSnapshotStatus{} + return v +} + +// BoundVolumeSnapshotContentName set built VolumeSnapshot's status BoundVolumeSnapshotContentName field. +func (v *VolumeSnapshotBuilder) BoundVolumeSnapshotContentName(vscName string) *VolumeSnapshotBuilder { + v.object.Status.BoundVolumeSnapshotContentName = &vscName + return v +} diff --git a/pkg/builder/volume_snapshot_content_builder.go b/pkg/builder/volume_snapshot_content_builder.go new file mode 100644 index 0000000000..936eb74c53 --- /dev/null +++ b/pkg/builder/volume_snapshot_content_builder.go @@ -0,0 +1,70 @@ +/* +Copyright the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package builder + +import ( + snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// VolumeSnapshotContentBuilder builds VolumeSnapshotContent object. +type VolumeSnapshotContentBuilder struct { + object *snapshotv1api.VolumeSnapshotContent +} + +// ForVolumeSnapshotContent is the constructor of VolumeSnapshotContentBuilder. +func ForVolumeSnapshotContent(name string) *VolumeSnapshotContentBuilder { + return &VolumeSnapshotContentBuilder{ + object: &snapshotv1api.VolumeSnapshotContent{ + TypeMeta: metav1.TypeMeta{ + APIVersion: snapshotv1api.SchemeGroupVersion.String(), + Kind: "VolumeSnapshotContent", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + }, + } +} + +// Result returns the built VolumeSnapshotContent. +func (v *VolumeSnapshotContentBuilder) Result() *snapshotv1api.VolumeSnapshotContent { + return v.object +} + +// Status initiates VolumeSnapshotContent's status. +func (v *VolumeSnapshotContentBuilder) Status() *VolumeSnapshotContentBuilder { + v.object.Status = &snapshotv1api.VolumeSnapshotContentStatus{} + return v +} + +// DeletionPolicy sets built VolumeSnapshotContent's spec.DeletionPolicy value. 
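A minimal sketch of how these two builders chain, mirroring the usage in the backup controller tests later in this patch (illustrative only, not additional patch content):

vs := builder.ForVolumeSnapshot("velero", "vs1").
	ObjectMeta(builder.WithLabels("testing-vs", "vs1")).
	Status().
	BoundVolumeSnapshotContentName("vsc1").
	Result()
vsc := builder.ForVolumeSnapshotContent("vsc1").
	DeletionPolicy(snapshotv1api.VolumeSnapshotContentDelete).
	Status().
	Result()
// vs reports "vsc1" as its bound content; vsc is built with the Delete deletion policy.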
+func (v *VolumeSnapshotContentBuilder) DeletionPolicy(policy snapshotv1api.DeletionPolicy) *VolumeSnapshotContentBuilder { + v.object.Spec.DeletionPolicy = policy + return v +} + +func (v *VolumeSnapshotContentBuilder) VolumeSnapshotRef(namespace, name string) *VolumeSnapshotContentBuilder { + v.object.Spec.VolumeSnapshotRef = v1.ObjectReference{ + APIVersion: "snapshot.storage.k8s.io/v1", + Kind: "VolumeSnapshot", + Namespace: namespace, + Name: name, + } + return v +} diff --git a/pkg/cmd/server/server.go b/pkg/cmd/server/server.go index acb6eb8d2f..33f43c3c20 100644 --- a/pkg/cmd/server/server.go +++ b/pkg/cmd/server/server.go @@ -27,9 +27,7 @@ import ( "strings" "time" - "github.com/vmware-tanzu/velero/pkg/uploader" - - "github.com/bombsimon/logrusr" + logrusr "github.com/bombsimon/logrusr/v3" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/sirupsen/logrus" @@ -47,6 +45,8 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" + "github.com/vmware-tanzu/velero/pkg/uploader" + snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" snapshotv1client "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned" snapshotv1informers "github.com/kubernetes-csi/external-snapshotter/client/v4/informers/externalversions" @@ -121,7 +121,7 @@ type serverConfig struct { pluginDir, metricsAddress, defaultBackupLocation string backupSyncPeriod, podVolumeOperationTimeout, resourceTerminatingTimeout time.Duration defaultBackupTTL, storeValidationFrequency, defaultCSISnapshotTimeout time.Duration - restoreResourcePriorities []string + restoreResourcePriorities restore.Priorities defaultVolumeSnapshotLocations map[string]string restoreOnly bool disabledControllers []string @@ -216,7 +216,7 @@ func NewCommand(f client.Factory) *cobra.Command { command.Flags().DurationVar(&config.podVolumeOperationTimeout, "fs-backup-timeout", config.podVolumeOperationTimeout, "How long pod volume file system backups/restores should be allowed to run before timing out.") command.Flags().BoolVar(&config.restoreOnly, "restore-only", config.restoreOnly, "Run in a mode where only restores are allowed; backups, schedules, and garbage-collection are all disabled. DEPRECATED: this flag will be removed in v2.0. Use read-only backup storage locations instead.") command.Flags().StringSliceVar(&config.disabledControllers, "disable-controllers", config.disabledControllers, fmt.Sprintf("List of controllers to disable on startup. Valid values are %s", strings.Join(controller.DisableableControllers, ","))) - command.Flags().StringSliceVar(&config.restoreResourcePriorities, "restore-resource-priorities", config.restoreResourcePriorities, "Desired order of resource restores; any resource not in the list will be restored alphabetically after the prioritized resources.") + command.Flags().Var(&config.restoreResourcePriorities, "restore-resource-priorities", "Desired order of resource restores, the priority list contains two parts which are split by \"-\" element. The resources before \"-\" element are restored first as high priorities, the resources after \"-\" element are restored last as low priorities, and any resource not in the list will be restored alphabetically between the high and low priorities.") command.Flags().StringVar(&config.defaultBackupLocation, "default-backup-storage-location", config.defaultBackupLocation, "Name of the default backup storage location. DEPRECATED: this flag will be removed in v2.0. 
Use \"velero backup-location set --default\" instead.") command.Flags().DurationVar(&config.storeValidationFrequency, "store-validation-frequency", config.storeValidationFrequency, "How often to verify if the storage is valid. Optional. Set this to `0s` to disable sync. Default 1 minute.") command.Flags().Var(&volumeSnapshotLocations, "default-volume-snapshot-locations", "List of unique volume providers and default volume snapshot location (provider1:location-01,provider2:location-02,...)") @@ -326,7 +326,7 @@ func newServer(f client.Factory, config serverConfig, logger *logrus.Logger) (*s corev1api.AddToScheme(scheme) snapshotv1api.AddToScheme(scheme) - ctrl.SetLogger(logrusr.NewLogger(logger)) + ctrl.SetLogger(logrusr.New(logger)) mgr, err := ctrl.NewManager(clientConfig, ctrl.Options{ Scheme: scheme, @@ -488,6 +488,7 @@ func (s *server) veleroResourcesExist() error { return nil } +// High priorities: // - Custom Resource Definitions come before Custom Resource so that they can be // restored with their corresponding CRD. // - Namespaces go second because all namespaced resources depend on them. @@ -510,28 +511,36 @@ func (s *server) veleroResourcesExist() error { // - CAPI Clusters come before ClusterResourceSets because failing to do so means the CAPI controller-manager will panic. // Both Clusters and ClusterResourceSets need to come before ClusterResourceSetBinding in order to properly restore workload clusters. // See https://github.com/kubernetes-sigs/cluster-api/issues/4105 -var defaultRestorePriorities = []string{ - "customresourcedefinitions", - "namespaces", - "storageclasses", - "volumesnapshotclass.snapshot.storage.k8s.io", - "volumesnapshotcontents.snapshot.storage.k8s.io", - "volumesnapshots.snapshot.storage.k8s.io", - "persistentvolumes", - "persistentvolumeclaims", - "secrets", - "configmaps", - "serviceaccounts", - "limitranges", - "pods", - // we fully qualify replicasets.apps because prior to Kubernetes 1.16, replicasets also - // existed in the extensions API group, but we back up replicasets from "apps" so we want - // to ensure that we prioritize restoring from "apps" too, since this is how they're stored - // in the backup. - "replicasets.apps", - "clusterclasses.cluster.x-k8s.io", - "clusters.cluster.x-k8s.io", - "clusterresourcesets.addons.cluster.x-k8s.io", +// +// Low priorities: +// - Tanzu ClusterBootstrap go last as it can reference any other kind of resources +var defaultRestorePriorities = restore.Priorities{ + HighPriorities: []string{ + "customresourcedefinitions", + "namespaces", + "storageclasses", + "volumesnapshotclass.snapshot.storage.k8s.io", + "volumesnapshotcontents.snapshot.storage.k8s.io", + "volumesnapshots.snapshot.storage.k8s.io", + "persistentvolumes", + "persistentvolumeclaims", + "secrets", + "configmaps", + "serviceaccounts", + "limitranges", + "pods", + // we fully qualify replicasets.apps because prior to Kubernetes 1.16, replicasets also + // existed in the extensions API group, but we back up replicasets from "apps" so we want + // to ensure that we prioritize restoring from "apps" too, since this is how they're stored + // in the backup. 
+ "replicasets.apps", + "clusterclasses.cluster.x-k8s.io", + "clusters.cluster.x-k8s.io", + "clusterresourcesets.addons.cluster.x-k8s.io", + }, + LowPriorities: []string{ + "clusterbootstraps.run.tanzu.vmware.com", + }, } func (s *server) checkNodeAgent() { @@ -805,6 +814,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string s.discoveryHelper, newPluginManager, backupStoreGetter, + s.credentialFileStore, ).SetupWithManager(s.mgr); err != nil { s.logger.Fatal(err, "unable to create controller", "controller", controller.BackupDeletion) } diff --git a/pkg/controller/backup_controller.go b/pkg/controller/backup_controller.go index 5b3235e8ac..2765d076ae 100644 --- a/pkg/controller/backup_controller.go +++ b/pkg/controller/backup_controller.go @@ -95,7 +95,7 @@ type backupController struct { backupStoreGetter persistence.ObjectBackupStoreGetter formatFlag logging.Format volumeSnapshotLister snapshotv1listers.VolumeSnapshotLister - volumeSnapshotClient *snapshotterClientSet.Clientset + volumeSnapshotClient snapshotterClientSet.Interface credentialFileStore credentials.FileStore } @@ -119,7 +119,7 @@ func NewBackupController( backupStoreGetter persistence.ObjectBackupStoreGetter, formatFlag logging.Format, volumeSnapshotLister snapshotv1listers.VolumeSnapshotLister, - volumeSnapshotClient *snapshotterClientSet.Clientset, + volumeSnapshotClient snapshotterClientSet.Interface, credentialStore credentials.FileStore, ) Interface { c := &backupController{ @@ -663,21 +663,11 @@ func (c *backupController) runBackup(backup *pkgbackup.Request) error { selector := label.NewSelectorForBackup(backup.Name) vscList := &snapshotv1api.VolumeSnapshotContentList{} - if c.volumeSnapshotLister != nil { - tmpVSs, err := c.volumeSnapshotLister.List(selector) - if err != nil { - backupLog.Error(err) - } - - for _, vs := range tmpVSs { - volumeSnapshots = append(volumeSnapshots, *vs) - } - } - - err = c.checkVolumeSnapshotReadyToUse(context.Background(), volumeSnapshots, backup.Spec.CSISnapshotTimeout.Duration) + volumeSnapshots, err = c.waitVolumeSnapshotReadyToUse(context.Background(), backup.Spec.CSISnapshotTimeout.Duration, backup.Name) if err != nil { backupLog.Errorf("fail to wait VolumeSnapshot change to Ready: %s", err.Error()) } + backup.CSISnapshots = volumeSnapshots err = c.kbClient.List(context.Background(), vscList, &kbclient.ListOptions{LabelSelector: selector}) @@ -911,18 +901,35 @@ func encodeToJSONGzip(data interface{}, desc string) (*bytes.Buffer, []error) { return buf, nil } -// Waiting for VolumeSnapshot ReadyTosue to true is time consuming. Try to make the process parallel by +// waitVolumeSnapshotReadyToUse is used to wait VolumeSnapshot turned to ReadyToUse. +// Waiting for VolumeSnapshot ReadyToUse to true is time consuming. Try to make the process parallel by // using goroutine here instead of waiting in CSI plugin, because it's not easy to make BackupItemAction // parallel by now. 
After BackupItemAction parallel is implemented, this logic should be moved to CSI plugin // as https://github.com/vmware-tanzu/velero-plugin-for-csi/pull/100 -func (c *backupController) checkVolumeSnapshotReadyToUse(ctx context.Context, volumesnapshots []snapshotv1api.VolumeSnapshot, - csiSnapshotTimeout time.Duration) error { +func (c *backupController) waitVolumeSnapshotReadyToUse(ctx context.Context, + csiSnapshotTimeout time.Duration, backupName string) ([]snapshotv1api.VolumeSnapshot, error) { eg, _ := errgroup.WithContext(ctx) timeout := csiSnapshotTimeout interval := 5 * time.Second + volumeSnapshots := make([]snapshotv1api.VolumeSnapshot, 0) + + if c.volumeSnapshotLister != nil { + tmpVSs, err := c.volumeSnapshotLister.List(label.NewSelectorForBackup(backupName)) + if err != nil { + c.logger.Error(err) + return volumeSnapshots, err + } + + for _, vs := range tmpVSs { + volumeSnapshots = append(volumeSnapshots, *vs) + } + } + + vsChannel := make(chan snapshotv1api.VolumeSnapshot, len(volumeSnapshots)) + defer close(vsChannel) - for _, vs := range volumesnapshots { - volumeSnapshot := vs + for index := range volumeSnapshots { + volumeSnapshot := volumeSnapshots[index] eg.Go(func() error { err := wait.PollImmediate(interval, timeout, func() (bool, error) { tmpVS, err := c.volumeSnapshotClient.SnapshotV1().VolumeSnapshots(volumeSnapshot.Namespace).Get(ctx, volumeSnapshot.Name, metav1.GetOptions{}) @@ -934,6 +941,9 @@ func (c *backupController) checkVolumeSnapshotReadyToUse(ctx context.Context, vo return false, nil } + c.logger.Debugf("VolumeSnapshot %s/%s turned into ReadyToUse.", volumeSnapshot.Namespace, volumeSnapshot.Name) + // Put the ReadyToUse VolumeSnapshot element in the result channel. + vsChannel <- *tmpVS return true, nil }) if err == wait.ErrWaitTimeout { @@ -942,7 +952,16 @@ func (c *backupController) checkVolumeSnapshotReadyToUse(ctx context.Context, vo return err }) } - return eg.Wait() + + err := eg.Wait() + + result := make([]snapshotv1api.VolumeSnapshot, 0) + length := len(vsChannel) + for index := 0; index < length; index++ { + result = append(result, <-vsChannel) + } + + return result, err } // deleteVolumeSnapshot delete VolumeSnapshot created during backup. @@ -965,17 +984,20 @@ func (c *backupController) deleteVolumeSnapshot(volumeSnapshots []snapshotv1api. defer wg.Done() var vsc snapshotv1api.VolumeSnapshotContent modifyVSCFlag := false - if vs.Status.BoundVolumeSnapshotContentName != nil && + if vs.Status != nil && + vs.Status.BoundVolumeSnapshotContentName != nil && len(*vs.Status.BoundVolumeSnapshotContentName) > 0 { var found bool if vsc, found = vscMap[*vs.Status.BoundVolumeSnapshotContentName]; !found { - logger.Errorf("Not find %s from the vscMap", vs.Status.BoundVolumeSnapshotContentName) + logger.Errorf("Not find %s from the vscMap", *vs.Status.BoundVolumeSnapshotContentName) return } if vsc.Spec.DeletionPolicy == snapshotv1api.VolumeSnapshotContentDelete { modifyVSCFlag = true } + } else { + logger.Errorf("VolumeSnapshot %s/%s is not ready. This is not expected.", vs.Namespace, vs.Name) } // Change VolumeSnapshotContent's DeletionPolicy to Retain before deleting VolumeSnapshot, @@ -1001,7 +1023,7 @@ func (c *backupController) deleteVolumeSnapshot(volumeSnapshots []snapshotv1api. 
} // Delete VolumeSnapshot from cluster - logger.Debugf("Deleting VolumeSnapshotContent %s", vsc.Name) + logger.Debugf("Deleting VolumeSnapshot %s/%s", vs.Namespace, vs.Name) err := c.volumeSnapshotClient.SnapshotV1().VolumeSnapshots(vs.Namespace).Delete(context.TODO(), vs.Name, metav1.DeleteOptions{}) if err != nil { logger.Errorf("fail to delete VolumeSnapshot %s/%s: %s", vs.Namespace, vs.Name, err.Error()) diff --git a/pkg/controller/backup_controller_test.go b/pkg/controller/backup_controller_test.go index 4617975681..696397b729 100644 --- a/pkg/controller/backup_controller_test.go +++ b/pkg/controller/backup_controller_test.go @@ -27,6 +27,9 @@ import ( "testing" "time" + snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" + snapshotfake "github.com/kubernetes-csi/external-snapshotter/client/v4/clientset/versioned/fake" + snapshotinformers "github.com/kubernetes-csi/external-snapshotter/client/v4/informers/externalversions" "github.com/pkg/errors" "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" @@ -1393,3 +1396,90 @@ func Test_getLastSuccessBySchedule(t *testing.T) { }) } } + +func TestDeleteVolumeSnapshot(t *testing.T) { + tests := []struct { + name string + vsArray []snapshotv1api.VolumeSnapshot + vscArray []snapshotv1api.VolumeSnapshotContent + expectedVSArray []snapshotv1api.VolumeSnapshot + expectedVSCArray []snapshotv1api.VolumeSnapshotContent + }{ + { + name: "VS is ReadyToUse, and VS has corresponding VSC. VS should be deleted.", + vsArray: []snapshotv1api.VolumeSnapshot{ + *builder.ForVolumeSnapshot("velero", "vs1").ObjectMeta(builder.WithLabels("testing-vs", "vs1")).Status().BoundVolumeSnapshotContentName("vsc1").Result(), + }, + vscArray: []snapshotv1api.VolumeSnapshotContent{ + *builder.ForVolumeSnapshotContent("vsc1").DeletionPolicy(snapshotv1api.VolumeSnapshotContentDelete).Status().Result(), + }, + expectedVSArray: []snapshotv1api.VolumeSnapshot{}, + expectedVSCArray: []snapshotv1api.VolumeSnapshotContent{ + *builder.ForVolumeSnapshotContent("vsc1").DeletionPolicy(snapshotv1api.VolumeSnapshotContentRetain).VolumeSnapshotRef("ns-", "name-").Status().Result(), + }, + }, + { + name: "Corresponding VSC not found for VS. VS is not deleted.", + vsArray: []snapshotv1api.VolumeSnapshot{ + *builder.ForVolumeSnapshot("velero", "vs1").ObjectMeta(builder.WithLabels("testing-vs", "vs1")).Status().BoundVolumeSnapshotContentName("vsc1").Result(), + }, + vscArray: []snapshotv1api.VolumeSnapshotContent{}, + expectedVSArray: []snapshotv1api.VolumeSnapshot{ + *builder.ForVolumeSnapshot("velero", "vs1").Status().BoundVolumeSnapshotContentName("vsc1").Result(), + }, + expectedVSCArray: []snapshotv1api.VolumeSnapshotContent{}, + }, + { + name: "VS status is nil. 
VSC should not be modified.", + vsArray: []snapshotv1api.VolumeSnapshot{ + *builder.ForVolumeSnapshot("velero", "vs1").ObjectMeta(builder.WithLabels("testing-vs", "vs1")).Result(), + }, + vscArray: []snapshotv1api.VolumeSnapshotContent{ + *builder.ForVolumeSnapshotContent("vsc1").DeletionPolicy(snapshotv1api.VolumeSnapshotContentDelete).Status().Result(), + }, + expectedVSArray: []snapshotv1api.VolumeSnapshot{}, + expectedVSCArray: []snapshotv1api.VolumeSnapshotContent{ + *builder.ForVolumeSnapshotContent("vsc1").DeletionPolicy(snapshotv1api.VolumeSnapshotContentDelete).Status().Result(), + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + fakeClient := velerotest.NewFakeControllerRuntimeClientBuilder(t).WithLists( + &snapshotv1api.VolumeSnapshotContentList{Items: tc.vscArray}, + ).Build() + + vsClient := snapshotfake.NewSimpleClientset(&tc.vsArray[0]) + sharedInformers := snapshotinformers.NewSharedInformerFactory(vsClient, 0) + + for _, vs := range tc.vsArray { + sharedInformers.Snapshot().V1().VolumeSnapshots().Informer().GetStore().Add(vs) + } + + logger := logging.DefaultLogger(logrus.DebugLevel, logging.FormatText) + c := &backupController{ + kbClient: fakeClient, + volumeSnapshotClient: vsClient, + volumeSnapshotLister: sharedInformers.Snapshot().V1().VolumeSnapshots().Lister(), + } + + c.deleteVolumeSnapshot(tc.vsArray, tc.vscArray, logger) + + vsList, err := c.volumeSnapshotClient.SnapshotV1().VolumeSnapshots("velero").List(context.TODO(), metav1.ListOptions{}) + require.NoError(t, err) + assert.Equal(t, len(tc.expectedVSArray), len(vsList.Items)) + for index := range tc.expectedVSArray { + assert.Equal(t, tc.expectedVSArray[index].Status, vsList.Items[index].Status) + assert.Equal(t, tc.expectedVSArray[index].Spec, vsList.Items[index].Spec) + } + + vscList := &snapshotv1api.VolumeSnapshotContentList{} + require.NoError(t, c.kbClient.List(context.Background(), vscList)) + assert.Equal(t, len(tc.expectedVSCArray), len(vscList.Items)) + for index := range tc.expectedVSCArray { + assert.Equal(t, tc.expectedVSCArray[index].Spec, vscList.Items[index].Spec) + } + }) + } +} diff --git a/pkg/controller/backup_deletion_controller.go b/pkg/controller/backup_deletion_controller.go index 76c39c8c84..1c75dad6e7 100644 --- a/pkg/controller/backup_deletion_controller.go +++ b/pkg/controller/backup_deletion_controller.go @@ -28,10 +28,11 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/clock" kubeerrs "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/utils/clock" ctrl "sigs.k8s.io/controller-runtime" + "github.com/vmware-tanzu/velero/internal/credentials" "github.com/vmware-tanzu/velero/internal/delete" velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1" "github.com/vmware-tanzu/velero/pkg/discovery" @@ -43,6 +44,7 @@ import ( "github.com/vmware-tanzu/velero/pkg/repository" "github.com/vmware-tanzu/velero/pkg/util/filesystem" "github.com/vmware-tanzu/velero/pkg/util/kube" + "github.com/vmware-tanzu/velero/pkg/volume" "sigs.k8s.io/controller-runtime/pkg/client" @@ -64,6 +66,7 @@ type backupDeletionReconciler struct { discoveryHelper discovery.Helper newPluginManager func(logrus.FieldLogger) clientmgmt.Manager backupStoreGetter persistence.ObjectBackupStoreGetter + credentialStore credentials.FileStore } // NewBackupDeletionReconciler creates a new backup deletion reconciler. 
@@ -76,6 +79,7 @@ func NewBackupDeletionReconciler( helper discovery.Helper, newPluginManager func(logrus.FieldLogger) clientmgmt.Manager, backupStoreGetter persistence.ObjectBackupStoreGetter, + credentialStore credentials.FileStore, ) *backupDeletionReconciler { return &backupDeletionReconciler{ Client: client, @@ -87,6 +91,7 @@ func NewBackupDeletionReconciler( discoveryHelper: helper, newPluginManager: newPluginManager, backupStoreGetter: backupStoreGetter, + credentialStore: credentialStore, } } @@ -289,7 +294,7 @@ func (r *backupDeletionReconciler) Reconcile(ctx context.Context, req ctrl.Reque volumeSnapshotter, ok := volumeSnapshotters[snapshot.Spec.Location] if !ok { - if volumeSnapshotter, err = volumeSnapshottersForVSL(ctx, backup.Namespace, snapshot.Spec.Location, r.Client, pluginManager); err != nil { + if volumeSnapshotter, err = r.volumeSnapshottersForVSL(ctx, backup.Namespace, snapshot.Spec.Location, pluginManager); err != nil { errs = append(errs, err.Error()) continue } @@ -387,19 +392,25 @@ func (r *backupDeletionReconciler) Reconcile(ctx context.Context, req ctrl.Reque return ctrl.Result{}, nil } -func volumeSnapshottersForVSL( +func (r *backupDeletionReconciler) volumeSnapshottersForVSL( ctx context.Context, namespace, vslName string, - client client.Client, pluginManager clientmgmt.Manager, ) (vsv1.VolumeSnapshotter, error) { vsl := &velerov1api.VolumeSnapshotLocation{} - if err := client.Get(ctx, types.NamespacedName{ + if err := r.Client.Get(ctx, types.NamespacedName{ Namespace: namespace, Name: vslName, }, vsl); err != nil { return nil, errors.Wrapf(err, "error getting volume snapshot location %s", vslName) } + + // add credential to config + err := volume.UpdateVolumeSnapshotLocationWithCredentialConfig(vsl, r.credentialStore, r.logger) + if err != nil { + return nil, errors.WithStack(err) + } + volumeSnapshotter, err := pluginManager.GetVolumeSnapshotter(vsl.Spec.Provider) if err != nil { return nil, errors.Wrapf(err, "error getting volume snapshotter for provider %s", vsl.Spec.Provider) diff --git a/pkg/controller/backup_deletion_controller_test.go b/pkg/controller/backup_deletion_controller_test.go index 62be34b0d2..c193f7819f 100644 --- a/pkg/controller/backup_deletion_controller_test.go +++ b/pkg/controller/backup_deletion_controller_test.go @@ -96,6 +96,7 @@ func setupBackupDeletionControllerTest(t *testing.T, req *velerov1api.DeleteBack nil, // discovery helper func(logrus.FieldLogger) clientmgmt.Manager { return pluginManager }, NewFakeSingleObjectBackupStoreGetter(backupStore), + velerotest.NewFakeCredentialsFileStore("", nil), ), req: ctrl.Request{NamespacedName: types.NamespacedName{Namespace: req.Namespace, Name: req.Name}}, } diff --git a/pkg/repository/config/azure.go b/pkg/repository/config/azure.go index c90c4f0ac7..7716c73c7e 100644 --- a/pkg/repository/config/azure.go +++ b/pkg/repository/config/azure.go @@ -18,6 +18,7 @@ package config import ( "context" + "fmt" "os" "strings" @@ -211,13 +212,15 @@ func getRequiredValues(getValue func(string) string, keys ...string) error { } // GetAzureStorageDomain gets the Azure storage domain required by a Azure blob connection, -// if the provided config doean't have the value, get it from system's environment variables -func GetAzureStorageDomain(config map[string]string) string { - if domain, exist := config[storageDomainConfigKey]; exist { - return domain - } else { - return os.Getenv(cloudNameEnvVar) +// if the provided credential file doesn't have the value, get it from system's environment variables 
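To make the behaviour described above concrete: the cloud name from the credential file, or from the AZURE_CLOUD_NAME environment variable, is translated into a blob storage domain by getStorageDomainFromCloudName (defined just below). A hedged sketch, with the expected values taken from the new azure_test.go cases:

domain, err := getStorageDomainFromCloudName("AzureChinaCloud")
// domain == "blob.core.chinacloudapi.cn", err == nil
domain, err = getStorageDomainFromCloudName("")
// an empty cloud name resolves to the public default, "blob.core.windows.net"
domain, err = getStorageDomainFromCloudName("fake-cloud")
// an unknown cloud name returns an error from parseAzureEnvironment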
+func GetAzureStorageDomain(config map[string]string) (string, error) { + credentialsFile := selectCredentialsFile(config) + + if err := loadCredentialsIntoEnv(credentialsFile); err != nil { + return "", err } + + return getStorageDomainFromCloudName(os.Getenv(cloudNameEnvVar)) } func GetAzureCredentials(config map[string]string) (string, string, error) { @@ -228,3 +231,12 @@ func GetAzureCredentials(config map[string]string) (string, string, error) { return config[storageAccountConfigKey], storageAccountKey, nil } + +func getStorageDomainFromCloudName(cloudName string) (string, error) { + env, err := parseAzureEnvironment(cloudName) + if err != nil { + return "", errors.Wrapf(err, "unable to parse azure env from cloud name %s", cloudName) + } + + return fmt.Sprintf("blob.%s", env.StorageEndpointSuffix), nil +} diff --git a/pkg/repository/config/azure_test.go b/pkg/repository/config/azure_test.go index d20ac2e28b..f32f87ce01 100644 --- a/pkg/repository/config/azure_test.go +++ b/pkg/repository/config/azure_test.go @@ -20,6 +20,7 @@ import ( "os" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -86,3 +87,48 @@ func TestSelectCredentialsFile(t *testing.T) { }) } } + +func TestGetStorageDomainFromCloudName(t *testing.T) { + testCases := []struct { + name string + cloudName string + expected string + expectedErr string + }{ + { + name: "get azure env fail", + cloudName: "fake-cloud", + expectedErr: "unable to parse azure env from cloud name fake-cloud: autorest/azure: There is no cloud environment matching the name \"FAKE-CLOUD\"", + }, + { + name: "cloud name is empty", + cloudName: "", + expected: "blob.core.windows.net", + }, + { + name: "azure public cloud", + cloudName: "AzurePublicCloud", + expected: "blob.core.windows.net", + }, + { + + name: "azure China cloud", + cloudName: "AzureChinaCloud", + expected: "blob.core.chinacloudapi.cn", + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + domain, err := getStorageDomainFromCloudName(tc.cloudName) + + require.Equal(t, tc.expected, domain) + + if tc.expectedErr == "" { + assert.NoError(t, err) + } else { + assert.EqualError(t, err, tc.expectedErr) + assert.Empty(t, domain) + } + }) + } +} diff --git a/pkg/repository/config/config.go b/pkg/repository/config/config.go index d7ed99b69e..c1ef8b906c 100644 --- a/pkg/repository/config/config.go +++ b/pkg/repository/config/config.go @@ -56,7 +56,7 @@ func getRepoPrefix(location *velerov1api.BackupStorageLocation) (string, error) prefix = layout.GetResticDir() } - backendType := GetBackendType(location.Spec.Provider) + backendType := GetBackendType(location.Spec.Provider, location.Spec.Config) if repoPrefix := location.Spec.Config["resticRepoPrefix"]; repoPrefix != "" { return repoPrefix, nil @@ -87,15 +87,25 @@ func getRepoPrefix(location *velerov1api.BackupStorageLocation) (string, error) return fmt.Sprintf("gs:%s:/%s", bucket, prefix), nil } - return "", errors.New("restic repository prefix (resticRepoPrefix) not specified in backup storage location's config") + return "", errors.Errorf("invalid backend type %s, provider %s", backendType, location.Spec.Provider) } -func GetBackendType(provider string) BackendType { +// GetBackendType returns a backend type that is known by Velero. +// If the provider doesn't indicate a known backend type, but the endpoint is +// specified, Velero regards it as a S3 compatible object store and return AWSBackend as the type. 
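The comment above, together with the GetBackendType change that follows, means a backup storage location whose provider Velero does not recognize is still usable as long as its config carries an s3Url. A hedged sketch (the URL is a made-up placeholder; the provider name mirrors the new config_test.go case):

cfg := map[string]string{"s3Url": "https://minio.example.internal:9000"}
bt := GetBackendType("custom-repo-identifier", cfg)
// bt == AWSBackend, so getRepoPrefix builds an identifier of the form
// "s3:<s3Url>/<bucket>/<prefix>/restic/<repo-name>"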
+func GetBackendType(provider string, config map[string]string) BackendType { if !strings.Contains(provider, "/") { provider = "velero.io/" + provider } - return BackendType(provider) + bt := BackendType(provider) + if IsBackendTypeValid(bt) { + return bt + } else if config != nil && config["s3Url"] != "" { + return AWSBackend + } else { + return bt + } } func IsBackendTypeValid(backendType BackendType) bool { diff --git a/pkg/repository/config/config_test.go b/pkg/repository/config/config_test.go index 2fa26a1936..4f18d6faea 100644 --- a/pkg/repository/config/config_test.go +++ b/pkg/repository/config/config_test.go @@ -48,7 +48,7 @@ func TestGetRepoIdentifier(t *testing.T) { }, }, repoName: "repo-1", - expectedErr: "restic repository prefix (resticRepoPrefix) not specified in backup storage location's config", + expectedErr: "invalid backend type velero.io/unsupported-provider, provider unsupported-provider", }, { name: "resticRepoPrefix in BSL config is used if set", @@ -69,6 +69,25 @@ func TestGetRepoIdentifier(t *testing.T) { repoName: "repo-1", expected: "custom:prefix:/restic/repo-1", }, + { + name: "s3Url in BSL config is used", + bsl: &velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "custom-repo-identifier", + Config: map[string]string{ + "s3Url": "s3Url", + }, + StorageType: velerov1api.StorageType{ + ObjectStorage: &velerov1api.ObjectStorageLocation{ + Bucket: "bucket", + Prefix: "prefix", + }, + }, + }, + }, + repoName: "repo-1", + expected: "s3:s3Url/bucket/prefix/restic/repo-1", + }, { name: "s3.amazonaws.com URL format is used if region cannot be determined for AWS BSL", bsl: &velerov1api.BackupStorageLocation{ diff --git a/pkg/repository/provider/unified_repo.go b/pkg/repository/provider/unified_repo.go index 30d1c97aaa..2ba1c7c0d1 100644 --- a/pkg/repository/provider/unified_repo.go +++ b/pkg/repository/provider/unified_repo.go @@ -344,7 +344,7 @@ func getRepoPassword(secretStore credentials.SecretStore) (string, error) { } func getStorageType(backupLocation *velerov1api.BackupStorageLocation) string { - backendType := repoconfig.GetBackendType(backupLocation.Spec.Provider) + backendType := repoconfig.GetBackendType(backupLocation.Spec.Provider, backupLocation.Spec.Config) switch backendType { case repoconfig.AWSBackend: @@ -368,7 +368,7 @@ func getStorageCredentials(backupLocation *velerov1api.BackupStorageLocation, cr return map[string]string{}, errors.New("invalid credentials interface") } - backendType := repoconfig.GetBackendType(backupLocation.Spec.Provider) + backendType := repoconfig.GetBackendType(backupLocation.Spec.Provider, backupLocation.Spec.Config) if !repoconfig.IsBackendTypeValid(backendType) { return map[string]string{}, errors.New("invalid storage provider") } @@ -414,7 +414,7 @@ func getStorageCredentials(backupLocation *velerov1api.BackupStorageLocation, cr func getStorageVariables(backupLocation *velerov1api.BackupStorageLocation, repoBackend string, repoName string) (map[string]string, error) { result := make(map[string]string) - backendType := repoconfig.GetBackendType(backupLocation.Spec.Provider) + backendType := repoconfig.GetBackendType(backupLocation.Spec.Provider, backupLocation.Spec.Config) if !repoconfig.IsBackendTypeValid(backendType) { return map[string]string{}, errors.New("invalid storage provider") } @@ -466,7 +466,12 @@ func getStorageVariables(backupLocation *velerov1api.BackupStorageLocation, repo result[udmrepo.StoreOptionS3DisableTlsVerify] = config["insecureSkipTLSVerify"] 
result[udmrepo.StoreOptionS3DisableTls] = strconv.FormatBool(disableTls) } else if backendType == repoconfig.AzureBackend { - result[udmrepo.StoreOptionAzureDomain] = getAzureStorageDomain(config) + domain, err := getAzureStorageDomain(config) + if err != nil { + return map[string]string{}, errors.Wrapf(err, "error to get azure storage domain") + } + + result[udmrepo.StoreOptionAzureDomain] = domain } result[udmrepo.StoreOptionOssBucket] = bucket diff --git a/pkg/repository/provider/unified_repo_test.go b/pkg/repository/provider/unified_repo_test.go index c8121058c6..e33059c2b7 100644 --- a/pkg/repository/provider/unified_repo_test.go +++ b/pkg/repository/provider/unified_repo_test.go @@ -235,7 +235,7 @@ func TestGetStorageVariables(t *testing.T) { repoName string repoBackend string getS3BucketRegion func(string) (string, error) - getAzureStorageDomain func(map[string]string) string + getAzureStorageDomain func(map[string]string) (string, error) expected map[string]string expectedErr string }{ @@ -366,17 +366,42 @@ func TestGetStorageVariables(t *testing.T) { "skipTLSVerify": "false", }, }, + { + name: "azure, getAzureStorageDomain fail", + backupLocation: velerov1api.BackupStorageLocation{ + Spec: velerov1api.BackupStorageLocationSpec{ + Provider: "velero.io/azure", + Config: map[string]string{ + "bucket": "fake-bucket-config", + "prefix": "fake-prefix-config", + "region": "fake-region", + "fspath": "", + }, + StorageType: velerov1api.StorageType{ + ObjectStorage: &velerov1api.ObjectStorageLocation{ + Bucket: "fake-bucket-object-store", + Prefix: "fake-prefix-object-store", + }, + }, + }, + }, + getAzureStorageDomain: func(config map[string]string) (string, error) { + return "", errors.New("fake error") + }, + repoBackend: "fake-repo-type", + expected: map[string]string{}, + expectedErr: "error to get azure storage domain: fake error", + }, { name: "azure, ObjectStorage section exists in BSL", backupLocation: velerov1api.BackupStorageLocation{ Spec: velerov1api.BackupStorageLocationSpec{ Provider: "velero.io/azure", Config: map[string]string{ - "bucket": "fake-bucket-config", - "prefix": "fake-prefix-config", - "region": "fake-region", - "fspath": "", - "storageDomain": "fake-domain", + "bucket": "fake-bucket-config", + "prefix": "fake-prefix-config", + "region": "fake-region", + "fspath": "", }, StorageType: velerov1api.StorageType{ ObjectStorage: &velerov1api.ObjectStorageLocation{ @@ -386,8 +411,8 @@ func TestGetStorageVariables(t *testing.T) { }, }, }, - getAzureStorageDomain: func(config map[string]string) string { - return config["storageDomain"] + getAzureStorageDomain: func(config map[string]string) (string, error) { + return "fake-domain", nil }, repoBackend: "fake-repo-type", expected: map[string]string{ @@ -404,18 +429,17 @@ func TestGetStorageVariables(t *testing.T) { Spec: velerov1api.BackupStorageLocationSpec{ Provider: "velero.io/azure", Config: map[string]string{ - "bucket": "fake-bucket", - "prefix": "fake-prefix", - "region": "fake-region", - "fspath": "", - "storageDomain": "fake-domain", + "bucket": "fake-bucket", + "prefix": "fake-prefix", + "region": "fake-region", + "fspath": "", }, }, }, repoName: "//fake-name//", repoBackend: "fake-repo-type", - getAzureStorageDomain: func(config map[string]string) string { - return config["storageDomain"] + getAzureStorageDomain: func(config map[string]string) (string, error) { + return "fake-domain", nil }, expected: map[string]string{ "bucket": "fake-bucket", diff --git a/pkg/restic/common.go b/pkg/restic/common.go index 
f1ecb9a718..26eb2ef27a 100644 --- a/pkg/restic/common.go +++ b/pkg/restic/common.go @@ -92,7 +92,7 @@ func CmdEnv(backupLocation *velerov1api.BackupStorageLocation, credentialFileSto config[repoconfig.CredentialsFileKey] = credsFile } - backendType := repoconfig.GetBackendType(backupLocation.Spec.Provider) + backendType := repoconfig.GetBackendType(backupLocation.Spec.Provider, backupLocation.Spec.Config) switch backendType { case repoconfig.AWSBackend: diff --git a/pkg/restic/exec_commands.go b/pkg/restic/exec_commands.go index 22c1a96659..4ff9e1a325 100644 --- a/pkg/restic/exec_commands.go +++ b/pkg/restic/exec_commands.go @@ -105,8 +105,8 @@ func RunBackup(backupCmd *Command, log logrus.FieldLogger, updater uploader.Prog // caller with the progress if stat.BytesDone != 0 { updater.UpdateProgress(&uploader.UploaderProgress{ - TotalBytes: stat.TotalBytesProcessed, - BytesDone: stat.TotalBytesProcessed, + TotalBytes: stat.TotalBytes, + BytesDone: stat.BytesDone, }) } } diff --git a/pkg/restore/priority.go b/pkg/restore/priority.go new file mode 100644 index 0000000000..c544532c74 --- /dev/null +++ b/pkg/restore/priority.go @@ -0,0 +1,92 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restore + +import ( + "fmt" + "strings" +) + +const ( + prioritySeparator = "-" +) + +// Priorities defines the desired order of resource operations: +// Resources in the HighPriorities list will be handled first +// Resources in the LowPriorities list will be handled last +// Other resources will be handled alphabetically after the high prioritized resources and before the low prioritized resources +type Priorities struct { + HighPriorities []string + LowPriorities []string +} + +// String returns a string representation of Priority. +func (p *Priorities) String() string { + priorities := p.HighPriorities + if len(p.LowPriorities) > 0 { + priorities = append(priorities, prioritySeparator) + priorities = append(priorities, p.LowPriorities...) 
+ } + return strings.Join(priorities, ",") +} + +// Set parses the provided string to the priority object +func (p *Priorities) Set(s string) error { + if len(s) == 0 { + return nil + } + strs := strings.Split(s, ",") + separatorIndex := -1 + for i, str := range strs { + if str == prioritySeparator { + if separatorIndex > -1 { + return fmt.Errorf("multiple priority separator %q found", prioritySeparator) + } + separatorIndex = i + } + } + // has no separator + if separatorIndex == -1 { + p.HighPriorities = strs + return nil + } + // start with separator + if separatorIndex == 0 { + // contain only separator + if len(strs) == 1 { + return nil + } + p.LowPriorities = strs[1:] + return nil + } + // end with separator + if separatorIndex == len(strs)-1 { + p.HighPriorities = strs[:len(strs)-1] + return nil + } + + // separator in the middle + p.HighPriorities = strs[:separatorIndex] + p.LowPriorities = strs[separatorIndex+1:] + + return nil +} + +// Type specifies the flag type +func (p *Priorities) Type() string { + return "stringArray" +} diff --git a/pkg/restore/priority_test.go b/pkg/restore/priority_test.go new file mode 100644 index 0000000000..4336c6bfac --- /dev/null +++ b/pkg/restore/priority_test.go @@ -0,0 +1,110 @@ +/* +Copyright The Velero Contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package restore + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestStringOfPriorities(t *testing.T) { + priority := Priorities{ + HighPriorities: []string{"high"}, + } + assert.Equal(t, "high", priority.String()) + + priority = Priorities{ + HighPriorities: []string{"high"}, + LowPriorities: []string{"low"}, + } + assert.Equal(t, "high,-,low", priority.String()) +} + +func TestSetOfPriority(t *testing.T) { + cases := []struct { + name string + input string + priorities Priorities + hasErr bool + }{ + { + name: "empty input", + input: "", + priorities: Priorities{}, + hasErr: false, + }, + { + name: "only high priorities", + input: "p0", + priorities: Priorities{ + HighPriorities: []string{"p0"}, + }, + hasErr: false, + }, + { + name: "only low priorities", + input: "-,p9", + priorities: Priorities{ + LowPriorities: []string{"p9"}, + }, + hasErr: false, + }, + { + name: "only separator", + input: "-", + priorities: Priorities{}, + hasErr: false, + }, + { + name: "multiple separators", + input: "-,-", + priorities: Priorities{}, + hasErr: true, + }, + { + name: "contain both high and low priorities", + input: "p0,p1,p2,-,p9", + priorities: Priorities{ + HighPriorities: []string{"p0", "p1", "p2"}, + LowPriorities: []string{"p9"}, + }, + hasErr: false, + }, + { + name: "end with separator", + input: "p0,-", + priorities: Priorities{ + HighPriorities: []string{"p0"}, + }, + hasErr: false, + }, + } + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + p := Priorities{} + err := p.Set(c.input) + if c.hasErr { + require.NotNil(t, err) + } else { + require.Nil(t, err) + } + assert.Equal(t, c.priorities, p) + }) + } +} diff --git a/pkg/restore/restore.go b/pkg/restore/restore.go index ef6578a03a..9f1130fc06 100644 --- a/pkg/restore/restore.go +++ b/pkg/restore/restore.go @@ -110,7 +110,7 @@ type kubernetesRestorer struct { podVolumeRestorerFactory podvolume.RestorerFactory podVolumeTimeout time.Duration resourceTerminatingTimeout time.Duration - resourcePriorities []string + resourcePriorities Priorities fileSystem filesystem.Interface pvRenamer func(string) (string, error) logger logrus.FieldLogger @@ -124,7 +124,7 @@ func NewKubernetesRestorer( restoreClient velerov1client.RestoresGetter, discoveryHelper discovery.Helper, dynamicFactory client.DynamicFactory, - resourcePriorities []string, + resourcePriorities Priorities, namespaceClient corev1.NamespaceInterface, podVolumeRestorerFactory podvolume.RestorerFactory, podVolumeTimeout time.Duration, @@ -358,7 +358,7 @@ type restoreContext struct { renamedPVs map[string]string pvRenamer func(string) (string, error) discoveryHelper discovery.Helper - resourcePriorities []string + resourcePriorities Priorities hooksWaitGroup sync.WaitGroup hooksErrs chan error resourceRestoreHooks []hook.ResourceRestoreHook @@ -374,19 +374,31 @@ type resourceClientKey struct { // getOrderedResources returns an ordered list of resource identifiers to restore, // based on the provided resource priorities and backup contents. The returned list -// begins with all of the prioritized resources (in order), and appends to that -// an alphabetized list of all resources in the backup. 
-func getOrderedResources(resourcePriorities []string, backupResources map[string]*archive.ResourceItems) []string { - // alphabetize resources in the backup - orderedBackupResources := make([]string, 0, len(backupResources)) +// begins with all of the high prioritized resources (in order), ends with all of +// the low prioritized resources(in order), and an alphabetized list of resources +// in the backup(pick out the prioritized resources) is put in the middle. +func getOrderedResources(resourcePriorities Priorities, backupResources map[string]*archive.ResourceItems) []string { + priorities := map[string]struct{}{} + for _, priority := range resourcePriorities.HighPriorities { + priorities[priority] = struct{}{} + } + for _, priority := range resourcePriorities.LowPriorities { + priorities[priority] = struct{}{} + } + + // pick the prioritized resources out + var orderedBackupResources []string for resource := range backupResources { + if _, exist := priorities[resource]; exist { + continue + } orderedBackupResources = append(orderedBackupResources, resource) } + // alphabetize resources in the backup sort.Strings(orderedBackupResources) - // Main list: everything in resource priorities, followed by what's in the - // backup (alphabetized). - return append(resourcePriorities, orderedBackupResources...) + list := append(resourcePriorities.HighPriorities, orderedBackupResources...) + return append(list, resourcePriorities.LowPriorities...) } type progressUpdate struct { @@ -479,7 +491,7 @@ func (ctx *restoreContext) execute() (Result, Result) { backupResources, make([]restoreableResource, 0), sets.NewString(), - []string{"customresourcedefinitions"}, + Priorities{HighPriorities: []string{"customresourcedefinitions"}}, false, ) warnings.Merge(&w) @@ -1796,7 +1808,7 @@ func (ctx *restoreContext) getOrderedResourceCollection( backupResources map[string]*archive.ResourceItems, restoreResourceCollection []restoreableResource, processedResources sets.String, - resourcePriorities []string, + resourcePriorities Priorities, includeAllResources bool, ) ([]restoreableResource, sets.String, Result, Result) { var warnings, errs Result @@ -1818,7 +1830,7 @@ func (ctx *restoreContext) getOrderedResourceCollection( if includeAllResources { resourceList = getOrderedResources(resourcePriorities, backupResources) } else { - resourceList = resourcePriorities + resourceList = resourcePriorities.HighPriorities } for _, resource := range resourceList { // try to resolve the resource via discovery to a complete group/version/resource diff --git a/pkg/restore/restore_test.go b/pkg/restore/restore_test.go index c7fedf7c6b..c8bd37ad50 100644 --- a/pkg/restore/restore_test.go +++ b/pkg/restore/restore_test.go @@ -681,7 +681,7 @@ func TestRestoreResourcePriorities(t *testing.T) { backup *velerov1api.Backup apiResources []*test.APIResource tarball io.Reader - resourcePriorities []string + resourcePriorities Priorities }{ { name: "resources are restored according to the specified resource priorities", @@ -715,7 +715,10 @@ func TestRestoreResourcePriorities(t *testing.T) { test.Deployments(), test.ServiceAccounts(), }, - resourcePriorities: []string{"persistentvolumes", "serviceaccounts", "pods", "deployments.apps"}, + resourcePriorities: Priorities{ + HighPriorities: []string{"persistentvolumes", "persistentvolumeclaims", "serviceaccounts"}, + LowPriorities: []string{"deployments.apps"}, + }, }, } @@ -747,7 +750,7 @@ func TestRestoreResourcePriorities(t *testing.T) { ) assertEmptyResults(t, warnings, errs) - 
assertResourceCreationOrder(t, tc.resourcePriorities, recorder.resources) + assertResourceCreationOrder(t, []string{"persistentvolumes", "persistentvolumeclaims", "serviceaccounts", "pods", "deployments.apps"}, recorder.resources) } } @@ -873,7 +876,6 @@ func TestRestoreItems(t *testing.T) { ObjectMeta( builder.WithLabels("key-1", "val-1"), builder.WithAnnotations("key-1", "val-1"), - builder.WithClusterName("cluster-1"), builder.WithFinalizers("finalizer-1"), ). Result(), @@ -2625,7 +2627,7 @@ func TestRestorePersistentVolumes(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { h := newHarness(t) - h.restorer.resourcePriorities = []string{"persistentvolumes", "persistentvolumeclaims"} + h.restorer.resourcePriorities = Priorities{HighPriorities: []string{"persistentvolumes", "persistentvolumeclaims"}} h.restorer.pvRenamer = func(oldName string) (string, error) { renamed := "renamed-" + oldName return renamed, nil @@ -2943,19 +2945,19 @@ func TestIsCompleted(t *testing.T) { func Test_getOrderedResources(t *testing.T) { tests := []struct { name string - resourcePriorities []string + resourcePriorities Priorities backupResources map[string]*archive.ResourceItems want []string }{ { name: "when only priorities are specified, they're returned in order", - resourcePriorities: []string{"prio-3", "prio-2", "prio-1"}, + resourcePriorities: Priorities{HighPriorities: []string{"prio-3", "prio-2", "prio-1"}}, backupResources: nil, want: []string{"prio-3", "prio-2", "prio-1"}, }, { name: "when only backup resources are specified, they're returned in alphabetical order", - resourcePriorities: nil, + resourcePriorities: Priorities{}, backupResources: map[string]*archive.ResourceItems{ "backup-resource-3": nil, "backup-resource-2": nil, @@ -2965,14 +2967,26 @@ func Test_getOrderedResources(t *testing.T) { }, { name: "when priorities and backup resources are specified, they're returned in the correct order", - resourcePriorities: []string{"prio-3", "prio-2", "prio-1"}, + resourcePriorities: Priorities{HighPriorities: []string{"prio-3", "prio-2", "prio-1"}}, + backupResources: map[string]*archive.ResourceItems{ + "prio-3": nil, + "backup-resource-3": nil, + "backup-resource-2": nil, + "backup-resource-1": nil, + }, + want: []string{"prio-3", "prio-2", "prio-1", "backup-resource-1", "backup-resource-2", "backup-resource-3"}, + }, + { + name: "when priorities and backup resources are specified, they're returned in the correct order", + resourcePriorities: Priorities{HighPriorities: []string{"prio-3", "prio-2", "prio-1"}, LowPriorities: []string{"prio-0"}}, backupResources: map[string]*archive.ResourceItems{ "prio-3": nil, + "prio-0": nil, "backup-resource-3": nil, "backup-resource-2": nil, "backup-resource-1": nil, }, - want: []string{"prio-3", "prio-2", "prio-1", "backup-resource-1", "backup-resource-2", "backup-resource-3", "prio-3"}, + want: []string{"prio-3", "prio-2", "prio-1", "backup-resource-1", "backup-resource-2", "backup-resource-3", "prio-0"}, }, } diff --git a/pkg/test/fake_controller_runtime_client.go b/pkg/test/fake_controller_runtime_client.go index d1c1b6106f..0be391bd9f 100644 --- a/pkg/test/fake_controller_runtime_client.go +++ b/pkg/test/fake_controller_runtime_client.go @@ -19,6 +19,7 @@ package test import ( "testing" + snapshotv1api "github.com/kubernetes-csi/external-snapshotter/client/v4/apis/volumesnapshot/v1" "github.com/stretchr/testify/require" corev1api "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" @@ -34,6 +35,8 @@ func 
NewFakeControllerRuntimeClientBuilder(t *testing.T) *k8sfake.ClientBuilder require.NoError(t, err) err = corev1api.AddToScheme(scheme) require.NoError(t, err) + err = snapshotv1api.AddToScheme(scheme) + require.NoError(t, err) return k8sfake.NewClientBuilder().WithScheme(scheme) } @@ -43,5 +46,7 @@ func NewFakeControllerRuntimeClient(t *testing.T, initObjs ...runtime.Object) cl require.NoError(t, err) err = corev1api.AddToScheme(scheme) require.NoError(t, err) + err = snapshotv1api.AddToScheme(scheme) + require.NoError(t, err) return k8sfake.NewFakeClientWithScheme(scheme, initObjs...) } diff --git a/pkg/uploader/provider/restic.go b/pkg/uploader/provider/restic.go index 2715b9e9d2..00fe420571 100644 --- a/pkg/uploader/provider/restic.go +++ b/pkg/uploader/provider/restic.go @@ -147,7 +147,9 @@ func (rp *resticProvider) RunBackup( snapshotIdCmd := restic.GetSnapshotCommand(rp.repoIdentifier, rp.credentialsFile, tags) snapshotIdCmd.Env = rp.cmdEnv snapshotIdCmd.CACertFile = rp.caCertFile - + if len(rp.extraFlags) != 0 { + snapshotIdCmd.ExtraFlags = append(snapshotIdCmd.ExtraFlags, rp.extraFlags...) + } snapshotID, err := restic.GetSnapshotID(snapshotIdCmd) if err != nil { return "", false, errors.WithStack(fmt.Errorf("error getting snapshot id with error: %v", err)) diff --git a/site/content/community/_index.md b/site/content/community/_index.md index b243ce1d91..7e21132908 100644 --- a/site/content/community/_index.md +++ b/site/content/community/_index.md @@ -14,9 +14,10 @@ You can follow the work we do, see our milestones, and our backlog on our [GitHu * Follow us on Twitter at [@projectvelero](https://twitter.com/projectvelero) * Join our Kubernetes Slack channel and talk to over 800 other community members: [#velero](https://kubernetes.slack.com/messages/velero) * Join our [Google Group](https://groups.google.com/forum/#!forum/projectvelero) to get updates on the project and invites to community meetings. -* Join the Velero community meetings - [Zoom link](https://VMware.zoom.us/j/94501971662?pwd=aUxVbWVEWHZSbDh4ZGdGU1cxYUFoZz09): - * 1st and 3rd Tuesday at 12PM ET / 9AM PT ([Convert to your time zone](https://dateful.com/convert/est-edt-eastern-time?t=12pm)) - * 2nd and 4th Wednesday at 8am China Standard Time / Tuesday 7pm EST (8pm EDT) / Tuesday 4pm PST (5pm PDT) ([Convert to your time zone](https://dateful.com/convert/beijing-china?t=8am)) - * Read and comment on the [meeting notes](https://hackmd.io/Jq6F5zqZR7S80CeDWUklkA?view) - * See previous community meetings on our [YouTube Channel](https://www.youtube.com/playlist?list=PL7bmigfV0EqQRysvqvqOtRNk4L5S7uqwM) - * Have a question to discuss in the community meeting? 
Please add it to our [Q&A Discussion board](https://github.com/vmware-tanzu/velero/discussions/categories/community-support-q-a) +* Join the Velero community meetings - [Zoom link](https://VMware.zoom.us/j/94501971662?pwd=aUxVbWVEWHZSbDh4ZGdGU1cxYUFoZz09) +Bi-weekly community meeting alternating every week between Beijing Friendly timezone and EST/Europe Friendly Timezone + * Beijing/US friendly - we start at 8am Beijing Time(bound to CST) / 8pm EDT(7pm EST) / 5pm PDT(4pm PST) / 2am CEST(1am CET) - [Convert to your time zone](https://dateful.com/convert/beijing-china?t=8am) + * US/Europe friendly - we start at 10am ET(bound to ET) / 7am PT / 3pm CET / 10pm(11pm) CST - [Convert to your time zone](https://dateful.com/convert/est-edt-eastern-time?t=10) +* Read and comment on the [meeting notes](https://hackmd.io/Jq6F5zqZR7S80CeDWUklkA?view) +* See previous community meetings on our [YouTube Channel](https://www.youtube.com/playlist?list=PL7bmigfV0EqQRysvqvqOtRNk4L5S7uqwM) +* Have a question to discuss in the community meeting? Please add it to our [Q&A Discussion board](https://github.com/vmware-tanzu/velero/discussions/categories/community-support-q-a) diff --git a/site/content/docs/main/api-types/backup.md b/site/content/docs/main/api-types/backup.md index 23805b21d7..1c1d0b9aad 100644 --- a/site/content/docs/main/api-types/backup.md +++ b/site/content/docs/main/api-types/backup.md @@ -84,8 +84,8 @@ spec: # a default value of 30 days will be used. The default can be configured on the velero server # by passing the flag --default-backup-ttl. ttl: 24h0m0s - # Whether restic should be used to take a backup of all pod volumes by default. - defaultVolumesToRestic: true + # whether pod volume file system backup should be used for all volumes by default. + defaultVolumesToFsBackup: true # Actions to perform at different times during a backup. The only hook supported is # executing a command in a container in a pod using the pod exec API. Optional. hooks: diff --git a/site/content/docs/main/api-types/schedule.md b/site/content/docs/main/api-types/schedule.md index 31130f43ae..eb3aa271ba 100644 --- a/site/content/docs/main/api-types/schedule.md +++ b/site/content/docs/main/api-types/schedule.md @@ -82,8 +82,8 @@ spec: # a default value of 30 days will be used. The default can be configured on the velero server # by passing the flag --default-backup-ttl. ttl: 24h0m0s - # Whether restic should be used to take a backup of all pod volumes by default. - defaultVolumesToRestic: true + # whether pod volume file system backup should be used for all volumes by default. + defaultVolumesToFsBackup: true # The labels you want on backup objects, created from this schedule (instead of copying the labels you have on schedule object itself). # When this field is set, the labels from the Schedule resource are not copied to the Backup resource. metadata: diff --git a/site/content/docs/main/basic-install.md b/site/content/docs/main/basic-install.md index 080b27b2f0..bb8bd7f383 100644 --- a/site/content/docs/main/basic-install.md +++ b/site/content/docs/main/basic-install.md @@ -17,7 +17,7 @@ Velero supports storage providers for both cloud-provider environments and on-pr ### Velero on Windows -Velero does not officially support Windows. In testing, the Velero team was able to backup stateless Windows applications only. The restic integration and backups of stateful applications or PersistentVolumes were not supported. +Velero does not officially support Windows. 
In testing, the Velero team was able to backup stateless Windows applications only. The File System Backup and backups of stateful applications or PersistentVolumes were not supported. If you want to perform your own testing of Velero on Windows, you must deploy Velero as a Windows container. Velero does not provide official Windows images, but its possible for you to build your own Velero Windows container image to use. Note that you must build this image on a Windows node. diff --git a/site/content/docs/main/build-from-source.md b/site/content/docs/main/build-from-source.md index df9f738cf2..083cdc202c 100644 --- a/site/content/docs/main/build-from-source.md +++ b/site/content/docs/main/build-from-source.md @@ -96,7 +96,7 @@ Optionally, set the `$VERSION` environment variable to change the image tag or ` ```bash make container ``` -_Note: To build build container images for both `velero` and `velero-restic-restore-helper`, run: `make all-containers`_ +_Note: To build build container images for both `velero` and `velero-restore-helper`, run: `make all-containers`_ ### Publishing container images to a registry diff --git a/site/content/docs/main/code-standards.md b/site/content/docs/main/code-standards.md index a6dfa12c64..c12317981a 100644 --- a/site/content/docs/main/code-standards.md +++ b/site/content/docs/main/code-standards.md @@ -70,13 +70,13 @@ Example: We use a package to generate mocks for our interfaces. -Example: if you want to change this mock: https://github.com/vmware-tanzu/velero/blob/main/pkg/restic/mocks/restorer.go +Example: if you want to change this mock: https://github.com/vmware-tanzu/velero/blob/main/pkg/podvolume/mocks/restorer.go Run: ```bash go get github.com/vektra/mockery/.../ -cd pkg/restic +cd pkg/podvolume mockery -name=Restorer ``` diff --git a/site/content/docs/main/contributions/ibm-config.md b/site/content/docs/main/contributions/ibm-config.md index b551bc1a73..332d0a5701 100644 --- a/site/content/docs/main/contributions/ibm-config.md +++ b/site/content/docs/main/contributions/ibm-config.md @@ -71,9 +71,9 @@ velero install \ Velero does not have a volume snapshot plugin for IBM Cloud, so creating volume snapshots is disabled. -Additionally, you can specify `--use-restic` to enable [restic support][16], and `--wait` to wait for the deployment to be ready. +Additionally, you can specify `--use-node-agent` to enable [File System Backup][16], and `--wait` to wait for the deployment to be ready. -(Optional) Specify [CPU and memory resource requests and limits][15] for the Velero/restic pods. +(Optional) Specify [CPU and memory resource requests and limits][15] for the Velero/node-agent pods. 
Once the installation is complete, remove the default `VolumeSnapshotLocation` that was created by `velero install`, since it's specific to AWS and won't work for IBM Cloud: @@ -98,4 +98,4 @@ Uncomment `storageClassName: ` and replace with your `S [5]: https://cloud.ibm.com/docs/containers/container_index.html#container_index [14]: http://docs.aws.amazon.com/IAM/latest/UserGuide/introduction.html [15]: ../customize-installation.md#customize-resource-requests-and-limits -[16]: ../restic.md +[16]: ../file-system-backup.md diff --git a/site/content/docs/main/contributions/minio.md b/site/content/docs/main/contributions/minio.md index 3c683473b6..b04354a1f6 100644 --- a/site/content/docs/main/contributions/minio.md +++ b/site/content/docs/main/contributions/minio.md @@ -16,7 +16,7 @@ If you encounter issues with installing or configuring, see [Debugging Installat ## Prerequisites -* Access to a Kubernetes cluster, version 1.7 or later. **Note:** restic support requires Kubernetes version 1.10 or later, or an earlier version with the mount propagation feature enabled. Restic support is not required for this example, but may be of interest later. See [Restic Integration][17]. +* Access to a Kubernetes cluster, version 1.7 or later. **Note:** File System Backup support requires Kubernetes version 1.10 or later, or an earlier version with the mount propagation feature enabled. File System Backup support is not required for this example, but may be of interest later. See [File System Backup][17]. * A DNS server on the cluster * `kubectl` installed * Sufficient disk space to store backups in Minio. You will need sufficient disk space available to handle any @@ -83,7 +83,7 @@ These instructions start the Velero server and a Minio instance that is accessib This example assumes that it is running within a local cluster without a volume provider capable of snapshots, so no `VolumeSnapshotLocation` is created (`--use-volume-snapshots=false`). You may need to update AWS plugin version to one that is [compatible](https://github.com/vmware-tanzu/velero-plugin-for-aws#compatibility) with the version of Velero you are installing. - Additionally, you can specify `--use-restic` to enable restic support, and `--wait` to wait for the deployment to be ready. + Additionally, you can specify `--use-node-agent` to enable File System Backup support, and `--wait` to wait for the deployment to be ready. This example also assumes you have named your Minio bucket "velero". @@ -289,7 +289,7 @@ In this case: [1]: #expose-minio-with-service-of-type-nodeport [3]: ../customize-installation.md -[17]: ../restic.md +[17]: ../file-system-backup.md [18]: ../debugging-restores.md [26]: https://github.com/vmware-tanzu/velero/releases [30]: https://godoc.org/github.com/robfig/cron diff --git a/site/content/docs/main/contributions/tencent-config.md b/site/content/docs/main/contributions/tencent-config.md index 11b0762c0a..592808c2dd 100644 --- a/site/content/docs/main/contributions/tencent-config.md +++ b/site/content/docs/main/contributions/tencent-config.md @@ -39,13 +39,13 @@ aws_secret_access_key= You need to install the Velero CLI first, see [Install the CLI](https://velero.io/docs/v1.5/basic-install/#install-the-cli) for how to install. -Follow the Velero installation command below to create velero and restic workloads and other necessary resource objects. +Follow the Velero installation command below to create velero and node-agent workloads and other necessary resource objects. 
```bash velero install --provider aws --plugins velero/velero-plugin-for-aws:v1.1.0 --bucket \ --secret-file ./credentials-velero \ ---use-restic \ ---default-volumes-to-restic \ +--use-node-agent \ +--default-volumes-to-fs-backup \ --backup-location-config \ region=ap-guangzhou,s3ForcePathStyle="true",s3Url=https://cos.ap-guangzhou.myqcloud.com ``` @@ -60,9 +60,9 @@ Description of the parameters: - `--secret-file`: Access tencent cloud COS access credential file for the "credentials-velero" credential file created above. -- `--use-restic`: Back up and restore persistent volume data using the open source free backup tool [restic](https://github.com/restic/restic). However, 'hostPath' volumes are not supported, see the [restic limit](https://velero.io/docs/v1.5/restic/#limitations) for details), an integration that complements Velero's backup capabilities and is recommended to be turned on. +- `--use-node-agent`: Enable Velero node-agent daemonset. At present, Velero File System Backup requires this daemonset, so if you are using File System Backup, it needs to be turned on. For the usage and limitation of File System Backup, See [File System Backup](../file-system-backup.md). -- `--default-volumes-to-restic`: Enable the use of Restic to back up all Pod volumes, provided that the `--use-restic`parameter needs to be turned on. +- `--default-volumes-to-fs-backup`: Enable the use of File System Backup to back up all Pod volumes, provided that the `--use-node-agent`parameter needs to be turned on. - `--backup-location-config`: Back up the bucket access-related configuration: @@ -78,7 +78,7 @@ After executing the installation commands above, the installation process looks {{< figure src="/docs/main/contributions/img-for-tencent/9015313121ed7987558c88081b052574.png" width="100%">}} -After the installation command is complete, wait for the velero and restic workloads to be ready to see if the configured storage location is available. +After the installation command is complete, wait for the velero and node-agent workloads to be ready to see if the configured storage location is available. Executing the 'velero backup-location get' command to view the storage location status and display "Available" indicates that access to Tencent Cloud COS is OK, as shown in the following image: diff --git a/site/content/docs/main/customize-installation.md b/site/content/docs/main/customize-installation.md index b90300f826..786d74dd68 100644 --- a/site/content/docs/main/customize-installation.md +++ b/site/content/docs/main/customize-installation.md @@ -17,17 +17,17 @@ By default, `velero install` expects a credentials file for your `velero` IAM ac If you are using an alternate identity mechanism, such as kube2iam/kiam on AWS, Workload Identity on GKE, etc., that does not require a credentials file, you can specify the `--no-secret` flag instead of `--secret-file`. -## Enable restic integration +## Enable file system backup -By default, `velero install` does not install Velero's [restic integration][3]. To enable it, specify the `--use-restic` flag. +By default, `velero install` does not install Velero's [File System Backup][3]. To enable it, specify the `--use-node-agent` flag. -If you've already run `velero install` without the `--use-restic` flag, you can run the same command again, including the `--use-restic` flag, to add the restic integration to your existing install. 
+If you've already run `velero install` without the `--use-node-agent` flag, you can run the same command again, including the `--use-node-agent` flag, to add File System Backup to your existing install. -## Default Pod Volume backup to restic +## Default Pod Volume backup to file system backup -By default, `velero install` does not enable the use of restic to take backups of all pod volumes. You must apply an [annotation](restic.md/#using-opt-in-pod-volume-backup) to every pod which contains volumes for Velero to use restic for the backup. +By default, `velero install` does not enable the use of File System Backup (FSB) to take backups of all pod volumes. You must apply an [annotation](file-system-backup.md/#using-opt-in-pod-volume-backup) to every pod which contains volumes for Velero to use FSB for the backup. -If you are planning to only use restic for volume backups, you can run the `velero install` command with the `--default-volumes-to-restic` flag. This will default all pod volumes backups to use restic without having to apply annotations to pods. Note that when this flag is set during install, Velero will always try to use restic to perform the backup, even want an individual backup to use volume snapshots, by setting the `--snapshot-volumes` flag in the `backup create` command. Alternatively, you can set the `--default-volumes-to-restic` on an individual backup to to make sure Velero uses Restic for each volume being backed up. +If you are planning to only use FSB for volume backups, you can run the `velero install` command with the `--default-volumes-to-fs-backup` flag. This will default all pod volume backups to use FSB without having to apply annotations to pods. Note that when this flag is set during install, Velero will always try to use FSB to perform the backup, even if you want an individual backup to use volume snapshots by setting the `--snapshot-volumes` flag in the `backup create` command. Alternatively, you can set the `--default-volumes-to-fs-backup` flag on an individual backup to make sure Velero uses FSB for each volume being backed up. ## Enable features @@ -43,15 +43,15 @@ velero install --features=EnableCSI Another example is enabling the support of multiple API group versions, as documented at [- -features=EnableAPIGroupVersions](enable-api-group-versions-feature.md). -Feature flags, passed to `velero install` will be passed to the Velero deployment and also to the `restic` daemon set, if `--use-restic` flag is used. +Feature flags passed to `velero install` will be passed to the Velero deployment and also to the `node-agent` daemon set, if the `--use-node-agent` flag is used. Similarly, features may be disabled by removing the corresponding feature flags from the `--features` flag. -Enabling and disabling feature flags will require modifying the Velero deployment and also the restic daemonset. This may be done from the CLI by uninstalling and re-installing Velero, or by editing the `deploy/velero` and `daemonset/restic` resources in-cluster. +Enabling and disabling feature flags will require modifying the Velero deployment and also the node-agent daemonset. This may be done from the CLI by uninstalling and re-installing Velero, or by editing the `deploy/velero` and `daemonset/node-agent` resources in-cluster. ```bash $ kubectl -n velero edit deploy/velero -$ kubectl -n velero edit daemonset/restic +$ kubectl -n velero edit daemonset/node-agent ``` ### Enable client side features @@ -87,10 +87,10 @@ the config file setting.
## Customize resource requests and limits -At installation, Velero sets default resource requests and limits for the Velero pod and the restic pod, if you using the [restic integration](/docs/main/restic/). +At installation, Velero sets default resource requests and limits for the Velero pod and the node-agent pod, if you using the [File System Backup][3]. {{< table caption="Velero Customize resource requests and limits defaults" >}} -|Setting|Velero pod defaults|restic pod defaults| +|Setting|Velero pod defaults|node-agent pod defaults| |--- |--- |--- | |CPU request|500m|500m| |Memory requests|128Mi|512Mi| @@ -98,9 +98,9 @@ At installation, Velero sets default resource requests and limits for the Velero |Memory limit|512Mi|1024Mi| {{< /table >}} -Depending on the cluster resources, especially if you are using Restic, you may need to increase these defaults. Through testing, the Velero maintainers have found these defaults work well when backing up and restoring 1000 or less resources and total size of files is 100GB or below. If the resources you are planning to backup or restore exceed this, you will need to increase the CPU or memory resources available to Velero. In general, the Velero maintainer's testing found that backup operations needed more CPU & memory resources but were less time-consuming than restore operations, when comparing backing up and restoring the same amount of data. The exact CPU and memory limits you will need depend on the scale of the files and directories of your resources and your hardware. It's recommended that you perform your own testing to find the best resource limits for your clusters and resources. +Depending on the cluster resources, you may need to increase these defaults. Through testing, the Velero maintainers have found these defaults work well when backing up and restoring 1000 or less resources and total size of files is 100GB or below. If the resources you are planning to backup or restore exceed this, you will need to increase the CPU or memory resources available to Velero. In general, the Velero maintainer's testing found that backup operations needed more CPU & memory resources but were less time-consuming than restore operations, when comparing backing up and restoring the same amount of data. The exact CPU and memory limits you will need depend on the scale of the files and directories of your resources and your hardware. It's recommended that you perform your own testing to find the best resource limits for your clusters and resources. -Due to a [known Restic issue](https://github.com/restic/restic/issues/2446), the Restic pod will consume large amounts of memory, especially if you are backing up millions of tiny files and directories. If you are planning to use Restic to backup 100GB of data or more, you will need to increase the resource limits to make sure backups complete successfully. +You may need to increase the resource limits if you are using File System Backup, see the details in [File System Backup][3]. 
### Install with custom resource requests and limits @@ -112,17 +112,17 @@ velero install \ --velero-pod-mem-request \ --velero-pod-cpu-limit \ --velero-pod-mem-limit \ - [--use-restic] \ - [--default-volumes-to-restic] \ - [--restic-pod-cpu-request ] \ - [--restic-pod-mem-request ] \ - [--restic-pod-cpu-limit ] \ - [--restic-pod-mem-limit ] + [--use-node-agent] \ + [--default-volumes-to-fs-backup] \ + [--node-agent-pod-cpu-request ] \ + [--node-agent-pod-mem-request ] \ + [--node-agent-pod-cpu-limit ] \ + [--node-agent-pod-mem-limit ] ``` ### Update resource requests and limits after install -After installation you can adjust the resource requests and limits in the Velero Deployment spec or restic DeamonSet spec, if you are using the restic integration. +After installation you can adjust the resource requests and limits in the Velero Deployment spec or node-agent DaemonSet spec, if you are using File System Backup. **Velero pod** @@ -133,16 +133,16 @@ kubectl patch deployment velero -n velero --patch \ '{"spec":{"template":{"spec":{"containers":[{"name": "velero", "resources": {"limits":{"cpu": "1", "memory": "512Mi"}, "requests": {"cpu": "1", "memory": "128Mi"}}}]}}}}' ``` -**restic pod** +**node-agent pod** -Update the `spec.template.spec.containers.resources.limits` and `spec.template.spec.containers.resources.requests` values in the restic DeamonSet spec. +Update the `spec.template.spec.containers.resources.limits` and `spec.template.spec.containers.resources.requests` values in the node-agent DaemonSet spec. ```bash -kubectl patch daemonset restic -n velero --patch \ -'{"spec":{"template":{"spec":{"containers":[{"name": "restic", "resources": {"limits":{"cpu": "1", "memory": "1024Mi"}, "requests": {"cpu": "1", "memory": "512Mi"}}}]}}}}' +kubectl patch daemonset node-agent -n velero --patch \ +'{"spec":{"template":{"spec":{"containers":[{"name": "node-agent", "resources": {"limits":{"cpu": "1", "memory": "1024Mi"}, "requests": {"cpu": "1", "memory": "512Mi"}}}]}}}}' ``` -Additionally, you may want to update the the default Velero restic pod operation timeout (default 240 minutes) to allow larger backups more time to complete. You can adjust this timeout by adding the `- --restic-timeout` argument to the Velero Deployment spec. +Additionally, you may want to update the default File System Backup operation timeout (default 240 minutes) to allow larger backups more time to complete. You can adjust this timeout by adding the `- --fs-backup-timeout` argument to the Velero Deployment spec. **NOTE:** Changes made to this timeout value will revert back to the default value if you re-run the Velero install command. @@ -152,7 +152,7 @@ Additionally, you may want to update the the default Velero restic pod operation kubectl edit deploy velero -n velero ``` -1. Add `- --restic-timeout` to `spec.template.spec.containers`. +1. Add `- --fs-backup-timeout` to `spec.template.spec.containers`.
```yaml spec: @@ -160,7 +160,7 @@ Additionally, you may want to update the the default Velero restic pod operation spec: containers: - args: - - --restic-timeout=240m + - --fs-backup-timeout=240m ``` ## Configure more than one storage location for backups or volume snapshots @@ -380,7 +380,7 @@ If you get an error like `complete:13: command not found: compdef`, then add the [1]: https://github.com/vmware-tanzu/velero/releases/latest [2]: namespace.md -[3]: restic.md +[3]: file-system-backup.md [4]: on-premises.md [6]: velero-install.md#usage [7]: https://github.com/vmware-tanzu/velero/issues/2077 diff --git a/site/content/docs/main/file-system-backup.md b/site/content/docs/main/file-system-backup.md new file mode 100644 index 0000000000..5b1043c63b --- /dev/null +++ b/site/content/docs/main/file-system-backup.md @@ -0,0 +1,597 @@ +--- +title: "File System Backup" +layout: docs +--- + +Velero supports backing up and restoring Kubernetes volumes attached to pods from the file system of the volumes, called +File System Backup (FSB shortly) or Pod Volume Backup. The data movement is fulfilled by using modules from free open-source +backup tools [restic][1] and [kopia][2]. This support is considered beta quality. Please see the list of [limitations](#limitations) +to understand if it fits your use case. + +Velero allows you to take snapshots of persistent volumes as part of your backups if you’re using one of +the supported cloud providers’ block storage offerings (Amazon EBS Volumes, Azure Managed Disks, Google Persistent Disks). +It also provides a plugin model that enables anyone to implement additional object and block storage backends, outside the +main Velero repository. + +Velero's File System Backup is an addition to the aforementioned snapshot approaches. Its pros and cons are listed below: +Pros: +- It is capable of backing up and restoring almost any type of Kubernetes volume. Therefore, if you need a volume snapshot +plugin for your storage platform, or if you're using EFS, AzureFile, NFS, emptyDir, local, or any other volume type that doesn't +have a native snapshot concept, FSB might be for you. +- It is not tied to a specific storage platform, so you could save the backup data to a different storage platform from +the one backing Kubernetes volumes, for example, a durable storage. + +Cons: +- It backs up data from the live file system, so the backup data is less consistent than the snapshot approaches. +- It access the file system from the mounted hostpath directory, so the pods need to run as root user and even under +privileged mode in some environments. + +**NOTE:** hostPath volumes are not supported, but the [local volume type][5] is supported. + +## Setup File System Backup + +### Prerequisites + +- Understand how Velero performs [file system backup](#how-backup-and-restore-work). +- [Download][4] the latest Velero release. +- Kubernetes v1.16.0 or later are required. Velero's File System Backup requires the Kubernetes [MountPropagation feature][6]. + +### Install Velero Node Agent + +Velero Node Agent is a Kubernetes daemonset that hosts FSB modules, i.e., restic, kopia uploader & repository. +To install Node Agent, use the `--use-node-agent` flag in the `velero install` command. See the [install overview][3] for more +details on other flags for the install command. 
+ +``` +velero install --use-node-agent +``` + +When using FSB on storage that doesn't have Velero support for snapshots, the `--use-volume-snapshots=false` flag prevents an +unused `VolumeSnapshotLocation` from being created on installation. + +At present, Velero FSB supports object storage as the backup storage only. Velero gets the parameters from the +[BackupStorageLocation `config`](api-types/backupstoragelocation.md) to compose the URL to the backup storage. Velero's known object +storage providers are included here [supported providers](supported-providers.md), for which Velero pre-defines the endpoints; if you +want to use a different backup storage, make sure it is S3 compatible and you provide the correct bucket name and endpoint in +BackupStorageLocation. Alternatively, for Restic, you could set the `resticRepoPrefix` value in BackupStorageLocation. For example, +on AWS, `resticRepoPrefix` is something like `s3:s3-us-west-2.amazonaws.com/bucket` (note that `resticRepoPrefix` doesn't work for Kopia). +Velero handles the creation of the backup repo prefix in the backup storage, so make sure it is specified in BackupStorageLocation correctly. + +Velero creates one backup repo per namespace. For example, if backing up 2 namespaces, namespace1 and namespace2, using kopia +repository on AWS S3, the full backup repo path for namespace1 would be `https://s3-us-west-2.amazonaws.com/bucket/kopia/ns1` and +for namespace2 would be `https://s3-us-west-2.amazonaws.com/bucket/kopia/ns2`. + +There may be additional installation steps depending on the cloud provider plugin you are using. You should refer to the +[plugin specific documentation](supported-providers.md) for the most up-to-date information. + +### Configure Node Agent DaemonSet spec + +After installation, some PaaS/CaaS platforms based on Kubernetes also require modifications to the node-agent DaemonSet spec. +The steps in this section are only needed if you are installing on RancherOS, OpenShift, VMware Tanzu Kubernetes Grid +Integrated Edition (formerly VMware Enterprise PKS), or Microsoft Azure. + + +**RancherOS** + + +Update the host path for volumes in the node-agent DaemonSet in the Velero namespace from `/var/lib/kubelet/pods` to +`/opt/rke/var/lib/kubelet/pods`. + +```yaml +hostPath: + path: /var/lib/kubelet/pods +``` + +to + +```yaml +hostPath: + path: /opt/rke/var/lib/kubelet/pods +``` + + +**OpenShift** + + +To mount the correct hostpath to pod volumes, run the node-agent pod in `privileged` mode. + +1. Add the `velero` ServiceAccount to the `privileged` SCC: + + ``` + $ oc adm policy add-scc-to-user privileged -z velero -n velero + ``` + +2. Modify the DaemonSet yaml to request a privileged mode: + + ```diff + @@ -67,3 +67,5 @@ spec: + value: /credentials/cloud + - name: VELERO_SCRATCH_DIR + value: /scratch + + securityContext: + + privileged: true + ``` + + or + + ```shell + oc patch ds/node-agent \ + --namespace velero \ + --type json \ + -p '[{"op":"add","path":"/spec/template/spec/containers/0/securityContext","value": { "privileged": true}}]' + ``` + + +If node-agent is not running in a privileged mode, it will not be able to access pod volumes within the mounted +hostpath directory because of the default enforced SELinux mode configured at the host system level.
You can +[create a custom SCC](https://docs.openshift.com/container-platform/3.11/admin_guide/manage_scc.html) to relax the +security in your cluster so that node-agent pods are allowed to use the hostPath volume plug-in without granting +them access to the `privileged` SCC. + +By default, a userland OpenShift namespace will not schedule pods on all nodes in the cluster. + +To schedule on all nodes, the namespace needs an annotation: + +``` +oc annotate namespace openshift.io/node-selector="" +``` + +This should be done before velero installation. + +Or the ds needs to be deleted and recreated: + +``` +oc get ds node-agent -o yaml -n > ds.yaml +oc annotate namespace openshift.io/node-selector="" +oc create -n -f ds.yaml +``` + +**VMware Tanzu Kubernetes Grid Integrated Edition (formerly VMware Enterprise PKS)** + +You need to enable the `Allow Privileged` option in your plan configuration so that Velero is able to mount the hostpath. + +The hostPath should be changed from `/var/lib/kubelet/pods` to `/var/vcap/data/kubelet/pods` + +```yaml +hostPath: + path: /var/vcap/data/kubelet/pods +``` + + +**Microsoft Azure** + +If you are using [Azure Files][8], you need to add `nouser_xattr` to your storage class's `mountOptions`. +See [this restic issue][9] for more details. + +You can use the following command to patch the storage class: + +```bash +kubectl patch storageclass/ \ + --type json \ + --patch '[{"op":"add","path":"/mountOptions/-","value":"nouser_xattr"}]' +``` + +## To back up + +Velero supports two approaches for discovering pod volumes that need to be backed up using FSB: + +- Opt-in approach: Where every pod containing a volume to be backed up using FSB must be annotated +with the volume's name. +- Opt-out approach: Where all pod volumes are backed up using FSB, with the ability to opt out any +volumes that should not be backed up. + +The following sections provide more details on the two approaches. + +### Using the opt-out approach + +In this approach, Velero will back up all pod volumes using FSB with the exception of: + +- Volumes mounting the default service account token, Kubernetes secrets, and config maps +- Hostpath volumes + +It is possible to exclude volumes from being backed up using the `backup.velero.io/backup-volumes-excludes` +annotation on the pod. + +Instructions to back up using this approach are as follows: + +1. Run the following command on each pod that contains volumes that should **not** be backed up using FSB + + ```bash + kubectl -n YOUR_POD_NAMESPACE annotate pod/YOUR_POD_NAME backup.velero.io/backup-volumes-excludes=YOUR_VOLUME_NAME_1,YOUR_VOLUME_NAME_2,... + ``` + where the volume names are the names of the volumes in the pod spec. + + For example, in the following pod: + + ```yaml + apiVersion: v1 + kind: Pod + metadata: + name: app1 + namespace: sample + spec: + containers: + - image: k8s.gcr.io/test-webserver + name: test-webserver + volumeMounts: + - name: pvc1-vm + mountPath: /volume-1 + - name: pvc2-vm + mountPath: /volume-2 + volumes: + - name: pvc1-vm + persistentVolumeClaim: + claimName: pvc1 + - name: pvc2-vm + persistentVolumeClaim: + claimName: pvc2 + ``` + to exclude FSB of volume `pvc1-vm`, you would run: + + ```bash + kubectl -n sample annotate pod/app1 backup.velero.io/backup-volumes-excludes=pvc1-vm + ``` + +2. Take a Velero backup: + + ```bash + velero backup create BACKUP_NAME --default-volumes-to-fs-backup OTHER_OPTIONS + ``` + + The above steps use the opt-out approach on a per backup basis.
+ + Alternatively, this behavior may be enabled on all velero backups running the `velero install` command with + the `--default-volumes-to-fs-backup` flag. Refer [install overview][10] for details. + +3. When the backup completes, view information about the backups: + + ```bash + velero backup describe YOUR_BACKUP_NAME + ``` + ```bash + kubectl -n velero get podvolumebackups -l velero.io/backup-name=YOUR_BACKUP_NAME -o yaml + ``` + +### Using opt-in pod volume backup + +Velero, by default, uses this approach to discover pod volumes that need to be backed up using FSB. Every pod +containing a volume to be backed up using FSB must be annotated with the volume's name using the +`backup.velero.io/backup-volumes` annotation. + +Instructions to back up using this approach are as follows: + +1. Run the following for each pod that contains a volume to back up: + + ```bash + kubectl -n YOUR_POD_NAMESPACE annotate pod/YOUR_POD_NAME backup.velero.io/backup-volumes=YOUR_VOLUME_NAME_1,YOUR_VOLUME_NAME_2,... + ``` + + where the volume names are the names of the volumes in the pod spec. + + For example, for the following pod: + + ```yaml + apiVersion: v1 + kind: Pod + metadata: + name: sample + namespace: foo + spec: + containers: + - image: k8s.gcr.io/test-webserver + name: test-webserver + volumeMounts: + - name: pvc-volume + mountPath: /volume-1 + - name: emptydir-volume + mountPath: /volume-2 + volumes: + - name: pvc-volume + persistentVolumeClaim: + claimName: test-volume-claim + - name: emptydir-volume + emptyDir: {} + ``` + + You'd run: + + ```bash + kubectl -n foo annotate pod/sample backup.velero.io/backup-volumes=pvc-volume,emptydir-volume + ``` + + This annotation can also be provided in a pod template spec if you use a controller to manage your pods. + +1. Take a Velero backup: + + ```bash + velero backup create NAME OPTIONS... + ``` + +1. When the backup completes, view information about the backups: + + ```bash + velero backup describe YOUR_BACKUP_NAME + ``` + ```bash + kubectl -n velero get podvolumebackups -l velero.io/backup-name=YOUR_BACKUP_NAME -o yaml + ``` + +## To restore + +Regardless of how volumes are discovered for backup using FSB, the process of restoring remains the same. + +1. Restore from your Velero backup: + + ```bash + velero restore create --from-backup BACKUP_NAME OPTIONS... + ``` + +1. When the restore completes, view information about your pod volume restores: + + ```bash + velero restore describe YOUR_RESTORE_NAME + ``` + ```bash + kubectl -n velero get podvolumerestores -l velero.io/restore-name=YOUR_RESTORE_NAME -o yaml + ``` + +## Limitations + +- `hostPath` volumes are not supported. [Local persistent volumes][5] are supported. +- At present, Velero uses a static, common encryption key for all backup repositories it creates. **This means +that anyone who has access to your backup storage can decrypt your backup data**. Make sure that you limit access +to the backup storage appropriately. +- An incremental backup chain will be maintained across pod reschedules for PVCs. However, for pod volumes that +are *not* PVCs, such as `emptyDir` volumes, when a pod is deleted/recreated (for example, by a ReplicaSet/Deployment), +the next backup of those volumes will be full rather than incremental, because the pod volume's lifecycle is assumed +to be defined by its pod. +- Even though the backup data could be incrementally preserved, for a single file data, FSB leverages on deduplication +to find the difference to be saved. 
This means that large files (such as ones storing a database) will take a long time +to scan for data deduplication, even if the actual difference is small. +- You may need to [customize the resource limits](/docs/main/customize-installation/#customize-resource-requests-and-limits) +to make sure backups complete successfully for massive small files or large backup size cases, for more details refer to +[Velero File System Backup Performance Guide](https://empty-to-be-created). +- Velero's File System Backup reads/writes data from volumes by accessing the node's filesystem, on which the pod is running. +For this reason, FSB can only backup volumes that are mounted by a pod and not directly from the PVC. For orphan PVC/PV pairs +(without running pods), some Velero users overcame this limitation running a staging pod (i.e. a busybox or alpine container +with an infinite sleep) to mount these PVC/PV pairs prior taking a Velero backup. + +## Customize Restore Helper Container + +Velero uses a helper init container when performing a FSB restore. By default, the image for this container is +`velero/velero-restore-helper:`, where `VERSION` matches the version/tag of the main Velero image. +You can customize the image that is used for this helper by creating a ConfigMap in the Velero namespace with the alternate image. + +In addition, you can customize the resource requirements for the init container, should you need. + +The ConfigMap must look like the following: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + # any name can be used; Velero uses the labels (below) + # to identify it rather than the name + name: fs-restore-action-config + # must be in the velero namespace + namespace: velero + # the below labels should be used verbatim in your + # ConfigMap. + labels: + # this value-less label identifies the ConfigMap as + # config for a plugin (i.e. the built-in restore + # item action plugin) + velero.io/plugin-config: "" + # this label identifies the name and kind of plugin + # that this ConfigMap is for. + velero.io/pod-volume-restore: RestoreItemAction +data: + # The value for "image" can either include a tag or not; + # if the tag is *not* included, the tag from the main Velero + # image will automatically be used. + image: myregistry.io/my-custom-helper-image[:OPTIONAL_TAG] + + # "cpuRequest" sets the request.cpu value on the restore init containers during restore. + # If not set, it will default to "100m". A value of "0" is treated as unbounded. + cpuRequest: 200m + + # "memRequest" sets the request.memory value on the restore init containers during restore. + # If not set, it will default to "128Mi". A value of "0" is treated as unbounded. + memRequest: 128Mi + + # "cpuLimit" sets the request.cpu value on the restore init containers during restore. + # If not set, it will default to "100m". A value of "0" is treated as unbounded. + cpuLimit: 200m + + # "memLimit" sets the request.memory value on the restore init containers during restore. + # If not set, it will default to "128Mi". A value of "0" is treated as unbounded. + memLimit: 128Mi + + # "secCtxRunAsUser" sets the securityContext.runAsUser value on the restore init containers during restore. + secCtxRunAsUser: 1001 + + # "secCtxRunAsGroup" sets the securityContext.runAsGroup value on the restore init containers during restore. + secCtxRunAsGroup: 999 + + # "secCtxAllowPrivilegeEscalation" sets the securityContext.allowPrivilegeEscalation value on the restore init containers during restore. 
+ secCtxAllowPrivilegeEscalation: false + + # "secCtx" sets the securityContext object value on the restore init containers during restore. + # This key override `secCtxRunAsUser`, `secCtxRunAsGroup`, `secCtxAllowPrivilegeEscalation` if `secCtx.runAsUser`, `secCtx.runAsGroup` or `secCtx.allowPrivilegeEscalation` are set. + secCtx: | + capabilities: + drop: + - ALL + add: [] + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 1001 + runAsGroup: 999 + +``` + +## Troubleshooting + +Run the following checks: + +Are your Velero server and daemonset pods running? + +```bash +kubectl get pods -n velero +``` + +Does your backup repository exist, and is it ready? + +```bash +velero repo get + +velero repo get REPO_NAME -o yaml +``` + +Are there any errors in your Velero backup/restore? + +```bash +velero backup describe BACKUP_NAME +velero backup logs BACKUP_NAME + +velero restore describe RESTORE_NAME +velero restore logs RESTORE_NAME +``` + +What is the status of your pod volume backups/restores? + +```bash +kubectl -n velero get podvolumebackups -l velero.io/backup-name=BACKUP_NAME -o yaml + +kubectl -n velero get podvolumerestores -l velero.io/restore-name=RESTORE_NAME -o yaml +``` + +Is there any useful information in the Velero server or daemon pod logs? + +```bash +kubectl -n velero logs deploy/velero +kubectl -n velero logs DAEMON_POD_NAME +``` + +**NOTE**: You can increase the verbosity of the pod logs by adding `--log-level=debug` as an argument +to the container command in the deployment/daemonset pod template spec. + +## How backup and restore work + +### How Velero integrates with Restic +Velero integrate Restic binary directly, so the operations are done by calling Restic commands: +- Run `restic init` command to initialize the [restic repository](https://restic.readthedocs.io/en/latest/100_references.html#terminology) +- Run `restic prune` command periodically to prune restic repository +- Run `restic backup` commands to backup pod volume data +- Run `restic restore` commands to restore pod volume data + +### How Velero integrates with Kopia +Velero integrate Kopia modules into Velero's code, primarily two modules: +- Kopia Uploader: Velero makes some wrap and isolation around it to create a generic file system uploader, +which is used to backup pod volume data +- Kopia Repository: Velero integrates it with Velero's Unified Repository Interface, it is used to preserve the backup data and manage +the backup storage + +For more details, refer to [kopia architecture](https://kopia.io/docs/advanced/architecture/) and +Velero's [Unified Repository design](https://github.com/vmware-tanzu/velero/pull/4926) + +### Custom resource and controllers +Velero has three custom resource definitions and associated controllers: + +- `BackupRepository` - represents/manages the lifecycle of Velero's backup repositories. Velero creates +a backup repository per namespace when the first FSB backup/restore for a namespace is requested. The backup +repository is backed by restic or kopia, the `BackupRepository` controller invokes restic or kopia internally, +refer to [restic integration](#how-velero-integrates-with-restic) and [kopia integration](#how-velero-integrates-with-kopia) +for details. + + You can see information about your Velero's backup repositories by running `velero repo get`. + +- `PodVolumeBackup` - represents a FSB backup of a volume in a pod. The main Velero backup process creates +one or more of these when it finds an annotated pod. 
Each node in the cluster runs a controller for this +resource (in a daemonset) that handles the `PodVolumeBackups` for pods on that node. `PodVolumeBackup` is backed by +restic or kopia, the controller invokes restic or kopia internally, refer to [restic integration](#how-velero-integrates-with-restic) +and [kopia integration](#how-velero-integrates-with-kopia) for details. + +- `PodVolumeRestore` - represents a FSB restore of a pod volume. The main Velero restore process creates one +or more of these when it encounters a pod that has associated FSB backups. Each node in the cluster runs a +controller for this resource (in the same daemonset as above) that handles the `PodVolumeRestores` for pods +on that node. `PodVolumeRestore` is backed by restic or kopia, the controller invokes restic or kopia internally, +refer to [restic integration](#how-velero-integrates-with-restic) and [kopia integration](#how-velero-integrates-with-kopia) for details. + +### Path selection +Velero's FSB supports two data movement paths, the restic path and the kopia path. Velero allows users to select +between the two paths: +- For backup, the path is specified at the installation time through the `uploader-type` flag, the valid value is +either `restic` or `kopia`, or default to `restic` if the value is not specified. The selection is not allowed to be +changed after the installation. +- For restore, the path is decided by the path used to back up the data, it is automatically selected. For example, +if you've created a backup with restic path, then you reinstall Velero with `uploader-type=kopia`, when you create +a restore from the backup, the restore still goes with restic path. + +### Backup + +1. Based on configuration, the main Velero backup process uses the opt-in or opt-out approach to check each pod +that it's backing up for the volumes to be backed up using FSB. +2. When found, Velero first ensures a backup repository exists for the pod's namespace, by: + - checking if a `BackupRepository` custom resource already exists + - if not, creating a new one, and waiting for the `BackupRepository` controller to init/connect it +3. Velero then creates a `PodVolumeBackup` custom resource per volume listed in the pod annotation +4. The main Velero process now waits for the `PodVolumeBackup` resources to complete or fail +5. Meanwhile, each `PodVolumeBackup` is handled by the controller on the appropriate node, which: + - has a hostPath volume mount of `/var/lib/kubelet/pods` to access the pod volume data + - finds the pod volume's subdirectory within the above volume + - based on the path selection, Velero inokes restic or kopia for backup + - updates the status of the custom resource to `Completed` or `Failed` +6. As each `PodVolumeBackup` finishes, the main Velero process adds it to the Velero backup in a file named +`-podvolumebackups.json.gz`. This file gets uploaded to object storage alongside the backup tarball. +It will be used for restores, as seen in the next section. + +### Restore + +1. The main Velero restore process checks each existing `PodVolumeBackup` custom resource in the cluster to backup from. +2. 
For each `PodVolumeBackup` found, Velero first ensures a backup repository exists for the pod's namespace, by: + - checking if a `BackupRepository` custom resource already exists + - if not, creating a new one, and waiting for the `BackupRepository` controller to connect it (note that + in this case, the actual repository should already exist in backup storage, so the Velero controller will simply + check it for integrity and make a location connection) +3. Velero adds an init container to the pod, whose job is to wait for all FSB restores for the pod to complete (more +on this shortly) +4. Velero creates the pod, with the added init container, by submitting it to the Kubernetes API. Then, the Kubernetes +scheduler schedules this pod to a worker node, and the pod must be in a running state. If the pod fails to start for +some reason (e.g. lack of cluster resources), the FSB restore will not be done. +5. Velero creates a `PodVolumeRestore` custom resource for each volume to be restored in the pod +6. The main Velero process now waits for each `PodVolumeRestore` resource to complete or fail +7. Meanwhile, each `PodVolumeRestore` is handled by the controller on the appropriate node, which: + - has a hostPath volume mount of `/var/lib/kubelet/pods` to access the pod volume data + - waits for the pod to be running the init container + - finds the pod volume's subdirectory within the above volume + - based on the path selection, Velero invokes restic or kopia for restore + - on success, writes a file into the pod volume, in a `.velero` subdirectory, whose name is the UID of the Velero + restore that this pod volume restore is for + - updates the status of the custom resource to `Completed` or `Failed` +8. The init container that was added to the pod is running a process that waits until it finds a file +within each restored volume, under `.velero`, whose name is the UID of the Velero restore being run +9. Once all such files are found, the init container's process terminates successfully and the pod moves +on to running other init containers/the main containers. + +Velero won't restore a resource if that resource is scaled to 0 and already exists in the cluster. If Velero restored the +requested pods in this scenario, the Kubernetes reconciliation loops that manage resources would delete the running pods +because it is scaled to 0. Velero will be able to restore once the resource is scaled up, and the pods are created and remain running. + +## 3rd party controllers + +### Monitor backup annotation + +Velero does not provide a mechanism to detect persistent volume claims that are missing the File System Backup annotation.
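For illustration only — this is a hypothetical sketch, not part of this change and not a Velero API. Assuming only the opt-in annotation key `backup.velero.io/backup-volumes` described above and standard client-go calls, pods that mount PVCs but carry no opt-in annotation could be listed like this:

```go
// fsbcheck is an illustrative, one-shot check: it lists pods and reports those
// that mount PersistentVolumeClaims but have no opt-in FSB annotation.
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

const backupVolumesAnnotation = "backup.velero.io/backup-volumes"

func main() {
	// Build a client from the local kubeconfig (~/.kube/config).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// List pods in all namespaces.
	pods, err := clientset.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}

	for _, pod := range pods.Items {
		if !mountsPVC(pod) {
			continue
		}
		if _, ok := pod.Annotations[backupVolumesAnnotation]; !ok {
			fmt.Printf("%s/%s mounts a PVC but has no %s annotation\n",
				pod.Namespace, pod.Name, backupVolumesAnnotation)
		}
	}
}

// mountsPVC reports whether any volume in the pod is backed by a PersistentVolumeClaim.
func mountsPVC(pod corev1.Pod) bool {
	for _, vol := range pod.Spec.Volumes {
		if vol.PersistentVolumeClaim != nil {
			return true
		}
	}
	return false
}
```

A production version would watch pods and PVCs continuously rather than listing them once, which is what the third-party controller mentioned next does.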
+ +To solve this, a controller was written by Thomann Bits&Beats: [velero-pvc-watcher][7] + +[1]: https://github.com/restic/restic +[2]: https://github.com/kopia/kopia +[3]: customize-installation.md#enable-file-system-backup +[4]: https://github.com/vmware-tanzu/velero/releases/ +[5]: https://kubernetes.io/docs/concepts/storage/volumes/#local +[6]: https://kubernetes.io/docs/concepts/storage/volumes/#mount-propagation +[7]: https://github.com/bitsbeats/velero-pvc-watcher +[8]: https://docs.microsoft.com/en-us/azure/aks/azure-files-dynamic-pv +[9]: https://github.com/restic/restic/issues/1800 +[10]: customize-installation.md#default-pod-volume-backup-to-file-system-backup diff --git a/site/content/docs/main/locations.md b/site/content/docs/main/locations.md index fe74391f9e..db347801b0 100644 --- a/site/content/docs/main/locations.md +++ b/site/content/docs/main/locations.md @@ -37,13 +37,13 @@ This configuration design enables a number of different use cases, including: - Cross-provider snapshots are not supported. If you have a cluster with more than one type of volume, like EBS and Portworx, but you only have a `VolumeSnapshotLocation` configured for EBS, then Velero will **only** snapshot the EBS volumes. -- Restic data is stored under a prefix/subdirectory of the main Velero bucket, and will go into the bucket corresponding to the `BackupStorageLocation` selected by the user at backup creation time. +- File System Backup data is stored under a prefix/subdirectory of the main Velero bucket, and will go into the bucket corresponding to the `BackupStorageLocation` selected by the user at backup creation time. -- Velero's backups are split into 2 pieces - the metadata stored in object storage, and snapshots/backups of the persistent volume data. Right now, Velero *itself* does not encrypt either of them, instead it relies on the native mechanisms in the object and snapshot systems. A special case is restic, which backs up the persistent volume data at the filesystem level and send it to Velero's object storage. +- Velero's backups are split into 2 pieces - the metadata stored in object storage, and snapshots/backups of the persistent volume data. Right now, Velero *itself* does not encrypt either of them, instead it relies on the native mechanisms in the object and snapshot systems. A special case is File System Backup, which backs up the persistent volume data at the filesystem level and sends it to Velero's object storage. -Velero's compression for object metadata is limited, using Golang's tar implementation. In most instances, Kubernetes objects are limited to 1.5MB in size, but many don't approach that, meaning that compression may not be necessary. Note that restic has not yet implemented compression, but does have de-deduplication capabilities. +- Velero's compression for object metadata is limited, using Golang's tar implementation. In most instances, Kubernetes objects are limited to 1.5MB in size, but many don't approach that, meaning that compression may not be necessary. Note that File System Backup has not yet implemented compression, but does have deduplication capabilities. -If you have [multiple](customize-installation.md/#configure-more-than-one-storage-location-for-backups-or-volume-snapshots) `VolumeSnapshotLocations` configured for a provider, you must always specify a valid `VolumeSnapshotLocation` when creating a backup, even if you are using [Restic](restic.md) for volume backups.
You can optionally decide to set the [`--default-volume-snapshot-locations`](customize-locations.md#set-default-backup-storage-location-or-volume-snapshot-locations) flag using the `velero server`, which lists the default `VolumeSnapshotLocation` Velero should use if a `VolumeSnapshotLocation` is not specified when creating a backup. If you only have one `VolumeSnapshotLocation` for a provider, Velero will automatically use that location as the default. +- If you have [multiple](customize-installation.md/#configure-more-than-one-storage-location-for-backups-or-volume-snapshots) `VolumeSnapshotLocations` configured for a provider, you must always specify a valid `VolumeSnapshotLocation` when creating a backup, even if you are using [File System Backup](file-system-backup.md) for volume backups. You can optionally decide to set the [`--default-volume-snapshot-locations`](customize-locations.md#set-default-backup-storage-location-or-volume-snapshot-locations) flag using the `velero server`, which lists the default `VolumeSnapshotLocation` Velero should use if a `VolumeSnapshotLocation` is not specified when creating a backup. If you only have one `VolumeSnapshotLocation` for a provider, Velero will automatically use that location as the default. ## Examples @@ -205,7 +205,7 @@ kubectl create secret generic -n velero credentials --from-file=bsl= \ --credential== ``` +### Create a volume snapshot location that uses unique credentials + +It is possible to create additional `VolumeSnapshotLocations` that use their own credentials. +This may be necessary if you already have default credentials which don't match the account used by the cloud volumes being backed up. + +If you create additional `VolumeSnapshotLocations` without specifying the credentials to use, Velero will use the credentials provided at install time and stored in the `cloud-credentials` secret. + +#### Prerequisites +- This feature requires support from the [volume snapshotter plugin][5] you wish to use. + All plugins maintained by the Velero team support this feature. + If you are using a plugin from another provider, please check their documentation to determine if this is supported. +- The [plugin for the volume snapshotter provider][5] you wish to use must be [installed][6]. +- You must create a file with the object storage credentials. Follow the instructions provided by your object storage provider plugin to create this file. + +Once you have installed the necessary plugin and created the credentials file, create a [Kubernetes Secret][7] in the Velero namespace that contains these credentials: + +```shell +kubectl create secret generic -n velero credentials --from-file=vsl= +``` + +This will create a secret named `credentials` with a single key (`vsl`) which contains the contents of your credentials file. +Next, create a `VolumeSnapshotLocation` that uses this Secret by passing the Secret name and key in the `--credential` flag. +When interacting with this `VolumeSnapshotLocation` in the future, Velero will fetch the data from the key within the Secret you provide. + +For example, a new `VolumeSnapshotLocation` with a Secret would be configured as follows: + +```bash +velero snapshot-location create \ + --provider \ + --config region= \ + --credential== +``` + +To use this new `VolumeSnapshotLocation` when performing a backup, use the flag `--volume-snapshot-locations [, \ + --credential== +``` + ## Additional Use Cases 1. 
If you're using Azure's AKS, you may want to store your volume snapshots outside of the "infrastructure" resource group that is automatically created when you create your AKS cluster. This is possible using a `VolumeSnapshotLocation`, by specifying a `resourceGroup` under the `config` section of the snapshot location. See the [Azure volume snapshot location documentation][3] for details. diff --git a/site/content/docs/main/manual-testing.md b/site/content/docs/main/manual-testing.md index d7d8968831..136b490bb2 100644 --- a/site/content/docs/main/manual-testing.md +++ b/site/content/docs/main/manual-testing.md @@ -31,7 +31,7 @@ The "Backup and Restore" test cases below describe general backup and restore fu #### Backup and Restore - Verify that a backup and restore using Volume Snapshots can be performed -- Verify that a backup and restore using Restic can be performed +- Verify that a backup and restore using File System Backup can be performed - Verify that a backup of a cluster workload can be restored in a new cluster - Verify that an installation using the latest version can be used to restore from backups created with the last 3 versions. - e.g. Install Velero 1.6 and use it to restore backups from Velero v1.3, v1.4, v1.5. @@ -61,9 +61,9 @@ The following are test cases that are not currently performed as part of a Veler - Verify that backups that exceed their TTL are deleted - Verify that existing backups in object storage are synced to Velero -### Restic repository test cases +### Backup repository test cases -- Verify that restic repository maintenance is performed as the specified interval +- Verify that backup repository maintenance is performed at the specified interval ### Backup Hooks @@ -76,7 +76,7 @@ The following are test cases that are not currently performed as part of a Veler - Verify that an InitContainer restore hook provided via pod annotation is performed during restore - Verify that an InitContainer restore hook provided via Restore spec is performed during restore -- Verify that an InitContainer restore hook provided via Restore spec is performed during restore that includes restoring restic volumes +- Verify that an InitContainer restore hook provided via Restore spec is performed during restore that includes restoring File System Backup volumes - Verify that an Exec restore hook provided via pod annotation is performed during restore - Verify that an Exec restore hook provided via Restore spec is performed during restore diff --git a/site/content/docs/main/migration-case.md b/site/content/docs/main/migration-case.md index afb3a3e2a2..daac425f45 100644 --- a/site/content/docs/main/migration-case.md +++ b/site/content/docs/main/migration-case.md @@ -11,10 +11,10 @@ This page outlines a cluster migration scenario and some common configurations y Before migrating you should consider the following, -* Velero does not natively support the migration of persistent volumes snapshots across cloud providers. If you would like to migrate volume data between cloud platforms, enable [restic](restic.md), which will backup volume contents at the filesystem level. +* Velero does not natively support the migration of persistent volume snapshots across cloud providers. If you would like to migrate volume data between cloud platforms, enable [File System Backup](file-system-backup.md), which will back up volume contents at the filesystem level. * Velero doesn't support restoring into a cluster with a lower Kubernetes version than where the backup was taken.
* Migrating workloads across clusters that are not running the same version of Kubernetes might be possible, but some factors need to be considered before migration, including the compatibility of API groups between clusters for each custom resource. If a Kubernetes version upgrade breaks the compatibility of core/native API groups, migrating with Velero will not be possible without first updating the impacted custom resources. For more information about API group versions, please see [EnableAPIGroupVersions](enable-api-group-versions-feature.md). -* The Velero plugin for AWS and Azure does not support migrating data between regions. If you need to do this, you must use [restic](restic.md). +* The Velero plugin for AWS and Azure does not support migrating data between regions. If you need to do this, you must use [File System Backup](file-system-backup.md). ## Migration Scenario diff --git a/site/content/docs/main/on-premises.md b/site/content/docs/main/on-premises.md index 30ddf98b31..88e5479737 100644 --- a/site/content/docs/main/on-premises.md +++ b/site/content/docs/main/on-premises.md @@ -18,7 +18,7 @@ If you need to back up persistent volume data, you must select a volume backup s For example, if you use [Portworx][4] for persistent storage, you can install their Velero plugin to get native Portworx snapshots as part of your Velero backups. -If there is no native snapshot plugin available for your storage platform, you can use Velero's [restic integration][1], which provides a platform-agnostic file-level backup solution for volume data. +If there is no native snapshot plugin available for your storage platform, you can use Velero's [File System Backup][1], which provides a platform-agnostic file-level backup solution for volume data. ### Air-gapped deployments @@ -54,17 +54,17 @@ docker tag velero/velero-plugin-for-aws:$PLUGIN_VERSION $PRIVATE_REG/velero-plug docker push $PRIVATE_REG/velero-plugin-for-aws:$PLUGIN_VERSION ``` -#### Preparing the restic helper image (optional) +#### Preparing the restore helper image (optional) -If you are using restic, you will also need to upload the restic helper image. +If you are using File System Backup, you will also need to upload the restore helper image. ```bash PRIVATE_REG= VELERO_VERSION= -docker pull velero/velero-restic-restore-helper:$VELERO_VERSION -docker tag velero/velero-restic-restore-helper:$VELERO_VERSION $PRIVATE_REG/velero-restic-restore-helper:$VELERO_VERSION -docker push $PRIVATE_REG/velero-restic-restore-helper:$VELERO_VERSION +docker pull velero/velero-restore-helper:$VELERO_VERSION +docker tag velero/velero-restore-helper:$VELERO_VERSION $PRIVATE_REG/velero-restore-helper:$VELERO_VERSION +docker push $PRIVATE_REG/velero-restore-helper:$VELERO_VERSION ``` #### Pulling specific architecture images (optional) @@ -88,7 +88,7 @@ velero install \ [0]: supported-providers.md -[1]: restic.md +[1]: file-system-backup.md [2]: https://min.io [3]: contributions/minio.md [4]: https://portworx.com diff --git a/site/content/docs/main/restic.md b/site/content/docs/main/restic.md deleted file mode 100644 index 492738af15..0000000000 --- a/site/content/docs/main/restic.md +++ /dev/null @@ -1,549 +0,0 @@ ---- -title: "Restic Integration" -layout: docs ---- - -Velero supports backing up and restoring Kubernetes volumes using a free open-source backup tool called [restic][1]. This support is considered beta quality. Please see the list of [limitations](#limitations) to understand if it fits your use case. 
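Pushing the restore helper image to the private registry is only half of the job; the node-agent also has to be told to use it. One possible way to do that is a plugin-config ConfigMap in the Velero namespace, sketched below with an illustrative registry, tag, and ConfigMap name. The `velero.io/pod-volume-restore` label follows the plugin-config rename in this changeset; older releases used `velero.io/restic` instead.

```bash
# Sketch only: point the restore helper at the image in the private registry.
# Registry, tag, and ConfigMap name are illustrative; Velero matches on the labels.
cat <<'EOF' | kubectl apply -f -
apiVersion: v1
kind: ConfigMap
metadata:
  name: restore-helper-image-config
  namespace: velero
  labels:
    velero.io/plugin-config: ""
    velero.io/pod-volume-restore: RestoreItemAction
data:
  image: registry.example.com/velero-restore-helper:v1.10.0
EOF
```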
- -Velero allows you to take snapshots of persistent volumes as part of your backups if you’re using one of -the supported cloud providers’ block storage offerings (Amazon EBS Volumes, Azure Managed Disks, Google Persistent Disks). -It also provides a plugin model that enables anyone to implement additional object and block storage backends, outside the -main Velero repository. - -Velero's Restic integration was added to give you an out-of-the-box solution for backing up and restoring almost any type of Kubernetes volume. This integration is an addition to Velero's capabilities, not a replacement for existing functionality. If you're running on AWS, and taking EBS snapshots as part of your regular Velero backups, there's no need to switch to using Restic. However, if you need a volume snapshot plugin for your storage platform, or if you're using EFS, AzureFile, NFS, emptyDir, -local, or any other volume type that doesn't have a native snapshot concept, Restic might be for you. - -Restic is not tied to a specific storage platform, which means that this integration also paves the way for future work to enable -cross-volume-type data migrations. - -**NOTE:** hostPath volumes are not supported, but the [local volume type][4] is supported. - -## Setup Restic - -### Prerequisites - -- Understand how Velero performs [backups with the Restic integration](#how-backup-and-restore-work-with-restic). -- [Download][3] the latest Velero release. -- Kubernetes v1.16.0 and later. Velero's Restic integration requires the Kubernetes [MountPropagation feature][6]. - -### Install Restic - -To install Restic, use the `--use-restic` flag in the `velero install` command. See the [install overview][2] for more details on other flags for the install command. - -``` -velero install --use-restic -``` - -When using Restic on a storage provider that doesn't have Velero support for snapshots, the `--use-volume-snapshots=false` flag prevents an unused `VolumeSnapshotLocation` from being created on installation. - -Velero handles the creation of the restic repo prefix for Amazon, Azure, and GCP plugins, if you are using a different [provider plugin](supported-providers.md), then you will need to make sure the `resticRepoPrefix` is set in the [BackupStorageLocation `config`](api-types/backupstoragelocation.md). The value for `resticRepoPrefix` should be the cloud storage URL where all namespace restic repos will be created. Velero creates one restic repo per namespace. For example, if backing up 2 namespaces, namespace1 and namespace2, using restic on AWS, the `resticRepoPrefix` would be something like `s3:s3-us-west-2.amazonaws.com/bucket/restic` and the full restic repo path for namespace1 would be `s3:s3-us-west-2.amazonaws.com/bucket/restic/ns1` and for namespace2 would be `s3:s3-us-west-2.amazonaws.com/bucket/restic/ns2`. - -There may be additional installation steps depending on the cloud provider plugin you are using. You should refer to the [plugin specific documentation](supported-providers.md) for the must up to date information. - -### Configure Restic DaemonSet spec - -After installation, some PaaS/CaaS platforms based on Kubernetes also require modifications the Restic DaemonSet spec. The steps in this section are only needed if you are installing on RancherOS, OpenShift, VMware Tanzu Kubernetes Grid Integrated Edition (formerly VMware Enterprise PKS), or Microsoft Azure. 
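Before the platform-specific adjustments below, one note on the `resticRepoPrefix` setting mentioned above: for provider plugins other than Amazon, Azure, and GCP, one possible way to set it is to patch the existing `BackupStorageLocation`. This is only a sketch; it assumes the location is named `default`, and the URL is the illustrative value used earlier in this section.

```bash
# Sketch: set resticRepoPrefix on an existing BackupStorageLocation.
# Location name and URL are illustrative; check your provider plugin's docs for the exact form.
kubectl -n velero patch backupstoragelocation default --type merge \
  -p '{"spec":{"config":{"resticRepoPrefix":"s3:s3-us-west-2.amazonaws.com/bucket/restic"}}}'
```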
- - -**RancherOS** - - -Update the host path for volumes in the Restic DaemonSet in the Velero namespace from `/var/lib/kubelet/pods` to `/opt/rke/var/lib/kubelet/pods`. - -```yaml -hostPath: - path: /var/lib/kubelet/pods -``` - -to - -```yaml -hostPath: - path: /opt/rke/var/lib/kubelet/pods -``` - - -**OpenShift** - - -To mount the correct hostpath to pods volumes, run the Restic pod in `privileged` mode. - -1. Add the `velero` ServiceAccount to the `privileged` SCC: - - ``` - $ oc adm policy add-scc-to-user privileged -z velero -n velero - ``` - -2. For OpenShift version >= `4.1`, modify the DaemonSet yaml to request a privileged mode: - - ```diff - @@ -67,3 +67,5 @@ spec: - value: /credentials/cloud - - name: VELERO_SCRATCH_DIR - value: /scratch - + securityContext: - + privileged: true - ``` - - or - - ```shell - oc patch ds/restic \ - --namespace velero \ - --type json \ - -p '[{"op":"add","path":"/spec/template/spec/containers/0/securityContext","value": { "privileged": true}}]' - ``` - -3. For OpenShift version < `4.1`, modify the DaemonSet yaml to request a privileged mode and mount the correct hostpath to pods volumes. - - ```diff - @@ -35,7 +35,7 @@ spec: - secretName: cloud-credentials - - name: host-pods - hostPath: - - path: /var/lib/kubelet/pods - + path: /var/lib/origin/openshift.local.volumes/pods - - name: scratch - emptyDir: {} - containers: - @@ -67,3 +67,5 @@ spec: - value: /credentials/cloud - - name: VELERO_SCRATCH_DIR - value: /scratch - + securityContext: - + privileged: true - ``` - - or - - ```shell - oc patch ds/restic \ - --namespace velero \ - --type json \ - -p '[{"op":"add","path":"/spec/template/spec/containers/0/securityContext","value": { "privileged": true}}]' - - oc patch ds/restic \ - --namespace velero \ - --type json \ - -p '[{"op":"replace","path":"/spec/template/spec/volumes/0/hostPath","value": { "path": "/var/lib/origin/openshift.local.volumes/pods"}}]' - ``` - - -If Restic is not running in a privileged mode, it will not be able to access pods volumes within the mounted hostpath directory because of the default enforced SELinux mode configured in the host system level. You can [create a custom SCC](https://docs.openshift.com/container-platform/3.11/admin_guide/manage_scc.html) to relax the security in your cluster so that Restic pods are allowed to use the hostPath volume plug-in without granting them access to the `privileged` SCC. - -By default a userland openshift namespace will not schedule pods on all nodes in the cluster. - -To schedule on all nodes the namespace needs an annotation: - -``` -oc annotate namespace openshift.io/node-selector="" -``` - -This should be done before velero installation. - -Or the ds needs to be deleted and recreated: - -``` -oc get ds restic -o yaml -n > ds.yaml -oc annotate namespace openshift.io/node-selector="" -oc create -n -f ds.yaml -``` - -**VMware Tanzu Kubernetes Grid Integrated Edition (formerly VMware Enterprise PKS)** - -You need to enable the `Allow Privileged` option in your plan configuration so that Restic is able to mount the hostpath. - -The hostPath should be changed from `/var/lib/kubelet/pods` to `/var/vcap/data/kubelet/pods` - -```yaml -hostPath: - path: /var/vcap/data/kubelet/pods -``` - - -**Microsoft Azure** - -If you are using [Azure Files][8], you need to add `nouser_xattr` to your storage class's `mountOptions`. See [this restic issue][9] for more details. 
- -You can use the following command to patch the storage class: - -```bash -kubectl patch storageclass/ \ - --type json \ - --patch '[{"op":"add","path":"/mountOptions/-","value":"nouser_xattr"}]' -``` - -## To back up - -Velero supports two approaches of discovering pod volumes that need to be backed up using Restic: - -- Opt-in approach: Where every pod containing a volume to be backed up using Restic must be annotated with the volume's name. -- Opt-out approach: Where all pod volumes are backed up using Restic, with the ability to opt-out any volumes that should not be backed up. - -The following sections provide more details on the two approaches. - -### Using the opt-out approach - -In this approach, Velero will back up all pod volumes using Restic with the exception of: - -- Volumes mounting the default service account token, Kubernetes secrets, and config maps -- Hostpath volumes - -It is possible to exclude volumes from being backed up using the `backup.velero.io/backup-volumes-excludes` annotation on the pod. - -Instructions to back up using this approach are as follows: - -1. Run the following command on each pod that contains volumes that should **not** be backed up using Restic - - ```bash - kubectl -n YOUR_POD_NAMESPACE annotate pod/YOUR_POD_NAME backup.velero.io/backup-volumes-excludes=YOUR_VOLUME_NAME_1,YOUR_VOLUME_NAME_2,... - ``` - where the volume names are the names of the volumes in the pod spec. - - For example, in the following pod: - - ```yaml - apiVersion: v1 - kind: Pod - metadata: - name: app1 - namespace: sample - spec: - containers: - - image: k8s.gcr.io/test-webserver - name: test-webserver - volumeMounts: - - name: pvc1-vm - mountPath: /volume-1 - - name: pvc2-vm - mountPath: /volume-2 - volumes: - - name: pvc1-vm - persistentVolumeClaim: - claimName: pvc1 - - name: pvc2-vm - claimName: pvc2 - ``` - to exclude Restic backup of volume `pvc1-vm`, you would run: - - ```bash - kubectl -n sample annotate pod/app1 backup.velero.io/backup-volumes-excludes=pvc1-vm - ``` - -2. Take a Velero backup: - - ```bash - velero backup create BACKUP_NAME --default-volumes-to-restic OTHER_OPTIONS - ``` - - The above steps uses the opt-out approach on a per backup basis. - - Alternatively, this behavior may be enabled on all velero backups running the `velero install` command with the `--default-volumes-to-restic` flag. Refer [install overview][11] for details. - -3. When the backup completes, view information about the backups: - - ```bash - velero backup describe YOUR_BACKUP_NAME - ``` - ```bash - kubectl -n velero get podvolumebackups -l velero.io/backup-name=YOUR_BACKUP_NAME -o yaml - ``` - -### Using opt-in pod volume backup - -Velero, by default, uses this approach to discover pod volumes that need to be backed up using Restic. Every pod containing a volume to be backed up using Restic must be annotated with the volume's name using the `backup.velero.io/backup-volumes` annotation. - -Instructions to back up using this approach are as follows: - -1. Run the following for each pod that contains a volume to back up: - - ```bash - kubectl -n YOUR_POD_NAMESPACE annotate pod/YOUR_POD_NAME backup.velero.io/backup-volumes=YOUR_VOLUME_NAME_1,YOUR_VOLUME_NAME_2,... - ``` - - where the volume names are the names of the volumes in the pod spec. 
- - For example, for the following pod: - - ```yaml - apiVersion: v1 - kind: Pod - metadata: - name: sample - namespace: foo - spec: - containers: - - image: k8s.gcr.io/test-webserver - name: test-webserver - volumeMounts: - - name: pvc-volume - mountPath: /volume-1 - - name: emptydir-volume - mountPath: /volume-2 - volumes: - - name: pvc-volume - persistentVolumeClaim: - claimName: test-volume-claim - - name: emptydir-volume - emptyDir: {} - ``` - - You'd run: - - ```bash - kubectl -n foo annotate pod/sample backup.velero.io/backup-volumes=pvc-volume,emptydir-volume - ``` - - This annotation can also be provided in a pod template spec if you use a controller to manage your pods. - -1. Take a Velero backup: - - ```bash - velero backup create NAME OPTIONS... - ``` - -1. When the backup completes, view information about the backups: - - ```bash - velero backup describe YOUR_BACKUP_NAME - ``` - ```bash - kubectl -n velero get podvolumebackups -l velero.io/backup-name=YOUR_BACKUP_NAME -o yaml - ``` - -## To restore - -Regardless of how volumes are discovered for backup using Restic, the process of restoring remains the same. - -1. Restore from your Velero backup: - - ```bash - velero restore create --from-backup BACKUP_NAME OPTIONS... - ``` - -1. When the restore completes, view information about your pod volume restores: - - ```bash - velero restore describe YOUR_RESTORE_NAME - ``` - ```bash - kubectl -n velero get podvolumerestores -l velero.io/restore-name=YOUR_RESTORE_NAME -o yaml - ``` - -## Limitations - -- `hostPath` volumes are not supported. [Local persistent volumes][4] are supported. -- Those of you familiar with [restic][1] may know that it encrypts all of its data. Velero uses a static, -common encryption key for all Restic repositories it creates. **This means that anyone who has access to your -bucket can decrypt your Restic backup data**. Make sure that you limit access to the Restic bucket -appropriately. -- An incremental backup chain will be maintained across pod reschedules for PVCs. However, for pod volumes that are *not* -PVCs, such as `emptyDir` volumes, when a pod is deleted/recreated (for example, by a ReplicaSet/Deployment), the next backup of those -volumes will be full rather than incremental, because the pod volume's lifecycle is assumed to be defined by its pod. -- Restic scans each file in a single thread. This means that large files (such as ones storing a database) will take a long time to scan for data deduplication, even if the actual -difference is small. -- If you plan to use Velero's Restic integration to backup 100GB of data or more, you may need to [customize the resource limits](/docs/main/customize-installation/#customize-resource-requests-and-limits) to make sure backups complete successfully. -- Velero's Restic integration backs up data from volumes by accessing the node's filesystem, on which the pod is running. For this reason, Velero's Restic integration can only backup volumes that are mounted by a pod and not directly from the PVC. For orphan PVC/PV pairs (without running pods), some Velero users overcame this limitation running a staging pod (i.e. a busybox or alpine container with an infinite sleep) to mount these PVC/PV pairs prior taking a Velero backup. - -## Customize Restore Helper Container - -Velero uses a helper init container when performing a Restic restore. By default, the image for this container is `velero/velero-restic-restore-helper:`, -where `VERSION` matches the version/tag of the main Velero image. 
You can customize the image that is used for this helper by creating a ConfigMap in the Velero namespace with -the alternate image. - -In addition, you can customize the resource requirements for the init container, should you need. - -The ConfigMap must look like the following: - -```yaml -apiVersion: v1 -kind: ConfigMap -metadata: - # any name can be used; Velero uses the labels (below) - # to identify it rather than the name - name: restic-restore-action-config - # must be in the velero namespace - namespace: velero - # the below labels should be used verbatim in your - # ConfigMap. - labels: - # this value-less label identifies the ConfigMap as - # config for a plugin (i.e. the built-in restic restore - # item action plugin) - velero.io/plugin-config: "" - # this label identifies the name and kind of plugin - # that this ConfigMap is for. - velero.io/restic: RestoreItemAction -data: - # The value for "image" can either include a tag or not; - # if the tag is *not* included, the tag from the main Velero - # image will automatically be used. - image: myregistry.io/my-custom-helper-image[:OPTIONAL_TAG] - - # "cpuRequest" sets the request.cpu value on the restic init containers during restore. - # If not set, it will default to "100m". A value of "0" is treated as unbounded. - cpuRequest: 200m - - # "memRequest" sets the request.memory value on the restic init containers during restore. - # If not set, it will default to "128Mi". A value of "0" is treated as unbounded. - memRequest: 128Mi - - # "cpuLimit" sets the request.cpu value on the restic init containers during restore. - # If not set, it will default to "100m". A value of "0" is treated as unbounded. - cpuLimit: 200m - - # "memLimit" sets the request.memory value on the restic init containers during restore. - # If not set, it will default to "128Mi". A value of "0" is treated as unbounded. - memLimit: 128Mi - - # "secCtxRunAsUser" sets the securityContext.runAsUser value on the restic init containers during restore. - secCtxRunAsUser: 1001 - - # "secCtxRunAsGroup" sets the securityContext.runAsGroup value on the restic init containers during restore. - secCtxRunAsGroup: 999 - - # "secCtxAllowPrivilegeEscalation" sets the securityContext.allowPrivilegeEscalation value on the restic init containers during restore. - secCtxAllowPrivilegeEscalation: false - - # "secCtx" sets the securityContext object value on the restic init containers during restore. - # This key override `secCtxRunAsUser`, `secCtxRunAsGroup`, `secCtxAllowPrivilegeEscalation` if `secCtx.runAsUser`, `secCtx.runAsGroup` or `secCtx.allowPrivilegeEscalation` are set. - secCtx: | - capabilities: - drop: - - ALL - add: [] - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - runAsUser: 1001 - runAsGroup: 999 - -``` - -## Troubleshooting - -Run the following checks: - -Are your Velero server and daemonset pods running? - -```bash -kubectl get pods -n velero -``` - -Does your Restic repository exist, and is it ready? - -```bash -velero restic repo get - -velero restic repo get REPO_NAME -o yaml -``` - -Are there any errors in your Velero backup/restore? - -```bash -velero backup describe BACKUP_NAME -velero backup logs BACKUP_NAME - -velero restore describe RESTORE_NAME -velero restore logs RESTORE_NAME -``` - -What is the status of your pod volume backups/restores? 
- -```bash -kubectl -n velero get podvolumebackups -l velero.io/backup-name=BACKUP_NAME -o yaml - -kubectl -n velero get podvolumerestores -l velero.io/restore-name=RESTORE_NAME -o yaml -``` - -Is there any useful information in the Velero server or daemon pod logs? - -```bash -kubectl -n velero logs deploy/velero -kubectl -n velero logs DAEMON_POD_NAME -``` - -**NOTE**: You can increase the verbosity of the pod logs by adding `--log-level=debug` as an argument -to the container command in the deployment/daemonset pod template spec. - -## How backup and restore work with Restic - -Velero has three custom resource definitions and associated controllers: - -- `ResticRepository` - represents/manages the lifecycle of Velero's [restic repositories][5]. Velero creates -a Restic repository per namespace when the first Restic backup for a namespace is requested. The controller -for this custom resource executes Restic repository lifecycle commands -- `restic init`, `restic check`, -and `restic prune`. - - You can see information about your Velero's Restic repositories by running `velero restic repo get`. - -- `PodVolumeBackup` - represents a Restic backup of a volume in a pod. The main Velero backup process creates -one or more of these when it finds an annotated pod. Each node in the cluster runs a controller for this -resource (in a daemonset) that handles the `PodVolumeBackups` for pods on that node. The controller executes -`restic backup` commands to backup pod volume data. - -- `PodVolumeRestore` - represents a Restic restore of a pod volume. The main Velero restore process creates one -or more of these when it encounters a pod that has associated Restic backups. Each node in the cluster runs a -controller for this resource (in the same daemonset as above) that handles the `PodVolumeRestores` for pods -on that node. The controller executes `restic restore` commands to restore pod volume data. - -### Backup - -1. Based on configuration, the main Velero backup process uses the opt-in or opt-out approach to check each pod that it's backing up for the volumes to be backed up using Restic. -1. When found, Velero first ensures a Restic repository exists for the pod's namespace, by: - - checking if a `ResticRepository` custom resource already exists - - if not, creating a new one, and waiting for the `ResticRepository` controller to init/check it -1. Velero then creates a `PodVolumeBackup` custom resource per volume listed in the pod annotation -1. The main Velero process now waits for the `PodVolumeBackup` resources to complete or fail -1. Meanwhile, each `PodVolumeBackup` is handled by the controller on the appropriate node, which: - - has a hostPath volume mount of `/var/lib/kubelet/pods` to access the pod volume data - - finds the pod volume's subdirectory within the above volume - - runs `restic backup` - - updates the status of the custom resource to `Completed` or `Failed` -1. As each `PodVolumeBackup` finishes, the main Velero process adds it to the Velero backup in a file named `-podvolumebackups.json.gz`. This file gets uploaded to object storage alongside the backup tarball. It will be used for restores, as seen in the next section. - -### Restore - -1. The main Velero restore process checks each existing `PodVolumeBackup` custom resource in the cluster to backup from. -1. 
For each `PodVolumeBackup` found, Velero first ensures a Restic repository exists for the pod's namespace, by: - - checking if a `ResticRepository` custom resource already exists - - if not, creating a new one, and waiting for the `ResticRepository` controller to init/check it (note that - in this case, the actual repository should already exist in object storage, so the Velero controller will simply - check it for integrity) -1. Velero adds an init container to the pod, whose job is to wait for all Restic restores for the pod to complete (more -on this shortly) -1. Velero creates the pod, with the added init container, by submitting it to the Kubernetes API. Then, the Kubernetes scheduler schedules this pod to a worker node, and the pod must be in a running state. If the pod fails to start for some reason (i.e. lack of cluster resources), the Restic restore will not be done. -1. Velero creates a `PodVolumeRestore` custom resource for each volume to be restored in the pod -1. The main Velero process now waits for each `PodVolumeRestore` resource to complete or fail -1. Meanwhile, each `PodVolumeRestore` is handled by the controller on the appropriate node, which: - - has a hostPath volume mount of `/var/lib/kubelet/pods` to access the pod volume data - - waits for the pod to be running the init container - - finds the pod volume's subdirectory within the above volume - - runs `restic restore` - - on success, writes a file into the pod volume, in a `.velero` subdirectory, whose name is the UID of the Velero restore - that this pod volume restore is for - - updates the status of the custom resource to `Completed` or `Failed` -1. The init container that was added to the pod is running a process that waits until it finds a file -within each restored volume, under `.velero`, whose name is the UID of the Velero restore being run -1. Once all such files are found, the init container's process terminates successfully and the pod moves -on to running other init containers/the main containers. - -Velero won't restore a resource if a that resource is scaled to 0 and already exists in the cluster. If Velero restored the requested pods in this scenario, the Kubernetes reconciliation loops that manage resources would delete the running pods because its scaled to be 0. Velero will be able to restore once the resources is scaled up, and the pods are created and remain running. - -## 3rd party controllers - -### Monitor backup annotation - -Velero does not provide a mechanism to detect persistent volume claims that are missing the Restic backup annotation. 
- -To solve this, a controller was written by Thomann Bits&Beats: [velero-pvc-watcher][7] - -[1]: https://github.com/restic/restic -[2]: customize-installation.md#enable-restic-integration -[3]: https://github.com/vmware-tanzu/velero/releases/ -[4]: https://kubernetes.io/docs/concepts/storage/volumes/#local -[5]: http://restic.readthedocs.io/en/latest/100_references.html#terminology -[6]: https://kubernetes.io/docs/concepts/storage/volumes/#mount-propagation -[7]: https://github.com/bitsbeats/velero-pvc-watcher -[8]: https://docs.microsoft.com/en-us/azure/aks/azure-files-dynamic-pv -[9]: https://github.com/restic/restic/issues/1800 -[11]: customize-installation.md#default-pod-volume-backup-to-restic diff --git a/site/content/docs/main/restore-hooks.md b/site/content/docs/main/restore-hooks.md index 0ec36c5d7a..0e2e6b5326 100644 --- a/site/content/docs/main/restore-hooks.md +++ b/site/content/docs/main/restore-hooks.md @@ -12,7 +12,7 @@ Velero supports Restore Hooks, custom actions that can be executed during or aft Use an `InitContainer` hook to add init containers into a pod before it's restored. You can use these init containers to run any setup needed for the pod to resume running from its backed-up state. The InitContainer added by the restore hook will be the first init container in the `podSpec` of the restored pod. -In the case where the pod had volumes backed up using restic, then, the restore hook InitContainer will be added after the `restic-wait` InitContainer. +In the case where the pod had volumes backed up using File System Backup, the restore hook InitContainer will be added after the `restore-wait` InitContainer. NOTE: This ordering can be altered by any mutating webhooks that may be installed in the cluster. diff --git a/site/content/docs/main/restore-reference.md b/site/content/docs/main/restore-reference.md index a91ef960db..31a3e79602 100644 --- a/site/content/docs/main/restore-reference.md +++ b/site/content/docs/main/restore-reference.md @@ -29,7 +29,7 @@ The following is an overview of Velero's restore process that starts after you r 1. The `RestoreController` notices the new Restore object and performs validation. -1. The `RestoreController` fetches basic information about the backup being restored, like the [BackupStorageLocation](locations.md) (BSL). It also fetches a tarball of the cluster resources in the backup, any volumes that will be restored using Restic, and any volume snapshots to be restored. +1. The `RestoreController` fetches basic information about the backup being restored, like the [BackupStorageLocation](locations.md) (BSL). It also fetches a tarball of the cluster resources in the backup, any volumes that will be restored using File System Backup, and any volume snapshots to be restored. 1. The `RestoreController` then extracts the tarball of backup cluster resources to the /tmp folder and performs some pre-processing on the resources, including: @@ -56,14 +56,14 @@ The following is an overview of Velero's restore process that starts after you r * The `RestoreController` adds a `velero.io/backup-name` label with the backup name and a `velero.io/restore-name` with the restore name to the resource. This can help you easily identify restored resources and which backup they were restored from. -1. The `RestoreController` creates the resource object on the target cluster.
If the resource is a PV then the `RestoreController` will restore the PV data from the [durable snapshot](#durable-snapshot-pv-restore), [Restic](#restic-pv-restore), or [CSI snapshot](#csi-pv-restore) depending on how the PV was backed up. +1. The `RestoreController` creates the resource object on the target cluster. If the resource is a PV then the `RestoreController` will restore the PV data from the [durable snapshot](#durable-snapshot-pv-restore), [File System Backup](#file-system-backup-pv-restore), or [CSI snapshot](#csi-pv-restore) depending on how the PV was backed up. If the resource already exists in the target cluster, which is determined by the Kubernetes API during resource creation, the `RestoreController` will skip the resource. The only [exception](#restore-existing-resource-policy) are Service Accounts, which Velero will attempt to merge differences between the backed up ServiceAccount into the ServiceAccount on the target cluster. You can [change the default existing resource restore policy](#restore-existing-resource-policy) to update resources instead of skipping them using the `--existing-resource-policy`. 1. Once the resource is created on the target cluster, Velero may take some additional steps or wait for additional processes to complete before moving onto the next resource to restore. * If the resource is a Pod, the `RestoreController` will execute any [Restore Hooks](restore-hooks.md) and wait for the hook to finish. - * If the resource is a PV restored by Restic, the `RestoreController` waits for Restic’s restore to complete. The `RestoreController` sets a timeout for any resources restored with Restic during a restore. The default timeout is 4 hours, but you can configure this be setting using `--restic-timeout` restore option. + * If the resource is a PV restored by File System Backup, the `RestoreController` waits for File System Backup’s restore to complete. The `RestoreController` sets a timeout for any resources restored with File System Backup during a restore. The default timeout is 4 hours, but you can configure this by setting the `--fs-backup-timeout` restore option. * If the resource is a Custom Resource Definition, the `RestoreController` waits for its availability in the cluster. The timeout is 1 minute. If any failures happen finishing these steps, the `RestoreController` will log an error in the restore result and will continue restoring. @@ -106,16 +106,16 @@ clusterresourcesets.addons.cluster.x-k8s.io Velero has three approaches when restoring a PV, depending on how the backup was taken. 1. When restoring a snapshot, Velero statically creates the PV and then binds it to a restored PVC. Velero's PV rename and remap process is used only in this case because this is the only case where Velero creates the PV resource directly. -1. When restoring with Restic, Velero uses Kubernetes’ [dynamic provision process](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/) to provision the PV after creating the PVC. In this case, the PV object is not actually created by Velero. +1. When restoring with File System Backup, Velero uses Kubernetes’ [dynamic provision process](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/) to provision the PV after creating the PVC. In this case, the PV object is not actually created by Velero. 1. When restoring with the [CSI plugin](csi.md), the PV is created from a CSI snapshot by the CSI driver. Velero doesn’t create the PV directly.
Instead Velero creates a PVC with its DataSource referring to the CSI VolumeSnapshot object. ### Snapshot PV Restore PV data backed up by durable snapshots is restored by VolumeSnapshot plugins. Velero calls the plugins’ interface to create a volume from a snapshot. The plugin returns the volume’s `volumeID`. This ID is created by storage vendors and will be updated in the PV object created by Velero, so that the PV object is connected to the volume restored from a snapshot. -### Restic PV Restore +### File System Backup PV Restore -For more information on Restic restores, see the [Restic integration](restic.md#restore) page. +For more information on File System Backup restores, see the [File System Backup](file-system-backup.md#restore) page. ### CSI PV Restore diff --git a/site/content/docs/main/self-signed-certificates.md b/site/content/docs/main/self-signed-certificates.md index 2aab5c1b97..9724606de4 100644 --- a/site/content/docs/main/self-signed-certificates.md +++ b/site/content/docs/main/self-signed-certificates.md @@ -52,7 +52,7 @@ You will need to change this setting on the server to make it work. **Note:** The `--insecure-skip-tls-verify` flag is insecure and susceptible to man-in-the-middle attacks and meant to help your testing and developing scenarios in an on-premise environment. Using this flag in production is not recommended. -Velero provides a way for you to skip TLS verification on the object store when using the [AWS provider plugin](https://github.com/vmware-tanzu/velero-plugin-for-aws) or [Restic](restic.md) by passing the `--insecure-skip-tls-verify` flag with the following Velero commands, +Velero provides a way for you to skip TLS verification on the object store when using the [AWS provider plugin](https://github.com/vmware-tanzu/velero-plugin-for-aws) or [File System Backup](file-system-backup.md) by passing the `--insecure-skip-tls-verify` flag with the following Velero commands, * velero backup describe * velero backup download @@ -60,6 +60,6 @@ Velero provides a way for you to skip TLS verification on the object store when * velero restore describe * velero restore log -If true, the object store's TLS certificate will not be checked for validity before Velero connects to the object store or Restic repo. You can permanently skip TLS verification for an object store by setting `Spec.Config.InsecureSkipTLSVerify` to true in the [BackupStorageLocation](api-types/backupstoragelocation.md) CRD. +If true, the object store's TLS certificate will not be checked for validity before Velero or the backup repository connects to the object storage. You can permanently skip TLS verification for an object store by setting `Spec.Config.InsecureSkipTLSVerify` to true in the [BackupStorageLocation](api-types/backupstoragelocation.md) CRD. -Note that Velero's Restic integration uses Restic commands to do data transfer between object store and Kubernetes cluster disks. This means that when you specify `--insecure-skip-tls-verify` in Velero operations that involve interacting with Restic, Velero will add the Restic global command parameter `--insecure-tls` to Restic commands. +Note that Velero's File System Backup uses Restic or Kopia to do data transfer between the object store and Kubernetes cluster disks. This means that when you specify `--insecure-skip-tls-verify` in Velero operations that involve File System Backup, Velero will convey this information to Restic or Kopia.
For example, for Restic, Velero will add the Restic global command parameter `--insecure-tls` to Restic commands. diff --git a/site/content/docs/main/supported-providers.md b/site/content/docs/main/supported-providers.md index fe8c03d3a1..0fc4a48aa0 100644 --- a/site/content/docs/main/supported-providers.md +++ b/site/content/docs/main/supported-providers.md @@ -54,7 +54,7 @@ _Some storage providers, like Quobyte, may need a different [signature algorithm ## Non-supported volume snapshots -In the case you want to take volume snapshots but didn't find a plugin for your provider, Velero has support for snapshotting using restic. Please see the [restic integration][30] documentation. +In the case you want to take volume snapshots but didn't find a plugin for your provider, Velero has support for snapshotting using File System Backup. Please see the [File System Backup][30] documentation. [0]: https://github.com/aws/aws-sdk-go/aws [1]: contributions/ibm-config.md @@ -65,6 +65,6 @@ In the case you want to take volume snapshots but didn't find a plugin for your [6]: https://github.com/vmware-tanzu/velero-plugin-for-aws/blob/main/backupstoragelocation.md [7]: contributions/tencent-config.md [25]: https://github.com/hpe-storage/velero-plugin -[30]: restic.md +[30]: file-system-backup.md [36]: https://github.com/vmware-tanzu/velero-plugin-for-gcp#setup [38]: https://www.cloudian.com/ diff --git a/site/content/docs/main/tilt.md b/site/content/docs/main/tilt.md index 3f2a8d90eb..19ec6003bc 100644 --- a/site/content/docs/main/tilt.md +++ b/site/content/docs/main/tilt.md @@ -7,7 +7,7 @@ layout: docs This document describes how to use [Tilt](https://tilt.dev) with any cluster for a simplified workflow that offers easy deployments and rapid iterative builds. -This setup allows for continuing deployment of the Velero server and, if specified, any provider plugin or the restic daemonset. +This setup allows for continuing deployment of the Velero server and, if specified, any provider plugin or the node-agent daemonset. It does this work by: 1. Deploying the necessary Kubernetes resources, such as the Velero CRDs and Velero deployment @@ -60,7 +60,7 @@ Here is an example: "allowed_contexts": [ "development" ], - "enable_restic": false, + "use_node_agent": false, "create_backup_locations": true, "setup-minio": true, "enable_debug": false, @@ -82,8 +82,8 @@ Tilt: an existing image and version might be specified in the Velero deployment **allowed_contexts** (Array, default=[]): A list of kubeconfig contexts Tilt is allowed to use. See the Tilt documentation on *[allow_k8s_contexts](https://docs.tilt.dev/api.html#api.allow_k8s_contexts) for more details. Note: Kind is automatically allowed. -**enable_restic** (Bool, default=false): Indicate whether to deploy the restic Daemonset. If set to `true`, Tilt will look for a `velero/tilt-resources/restic.yaml` file -containing the configuration of the Velero restic DaemonSet. +**use_node_agent** (Bool, default=false): Indicate whether to deploy the node-agent Daemonset. If set to `true`, Tilt will look for a `velero/tilt-resources/node-agent.yaml` file +containing the configuration of the Velero node-agent DaemonSet. **create_backup_locations** (Bool, default=false): Indicate whether to create one or more backup storage locations. If set to `true`, Tilt will look for a `velero/tilt-resources/velero_v1_backupstoragelocation.yaml` file containing at least one configuration for a Velero backup storage location. 
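Putting these flags together, a `tilt-settings.json` that enables the node-agent might look roughly like the fragment below. Only the keys discussed here are shown and the values are illustrative; merge them into your own file rather than replacing it wholesale.

```bash
# Fragment of velero/tilt-resources/tilt-settings.json (illustrative values only;
# keys not discussed in this document are omitted).
cat <<'EOF'
{
  "allowed_contexts": ["development"],
  "use_node_agent": true,
  "create_backup_locations": true,
  "setup-minio": true,
  "enable_debug": false
}
EOF
```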
@@ -97,7 +97,7 @@ containing at least one configuration for a Velero backup storage location. ### Create Kubernetes resource files to deploy All needed Kubernetes resource files are provided as ready to use samples in the `velero/tilt-resources/examples` directory. You only have to move them to the `velero/tilt-resources` level. -Because the Velero Kubernetes deployment as well as the restic DaemonSet contain the configuration +Because the Velero Kubernetes deployment as well as the node-agent DaemonSet contain the configuration for any plugin to be used, files for these resources are expected to be provided by the user so you may choose which provider plugin to load as a init container. Currently, the sample files provided are configured with all the plugins supported by Velero, feel free to remove any of them as needed. diff --git a/site/content/docs/main/troubleshooting.md b/site/content/docs/main/troubleshooting.md index dd1a331493..d7588f23a5 100644 --- a/site/content/docs/main/troubleshooting.md +++ b/site/content/docs/main/troubleshooting.md @@ -148,9 +148,9 @@ Follow the below troubleshooting steps to confirm that Velero is using the corre ] ``` - If [restic-integration][3] is enabled, then, confirm that the restic daemonset is also mounting the `cloud-credentials` secret. + If [File System Backup][3] is enabled, confirm that the node-agent daemonset is also mounting the `cloud-credentials` secret. ```bash - $ kubectl -n velero get ds restic -ojson |jq .spec.template.spec.containers[0].volumeMounts + $ kubectl -n velero get ds node-agent -ojson |jq .spec.template.spec.containers[0].volumeMounts [ { "mountPath": "/host_pods", @@ -217,7 +217,7 @@ Follow the below troubleshooting steps to confirm that Velero is using the corre [1]: debugging-restores.md [2]: debugging-install.md -[3]: restic.md +[3]: file-system-backup.md [4]: https://github.com/vmware-tanzu/velero/issues [5]: https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html [6]: https://github.com/vmware-tanzu/helm-charts/blob/main/charts/velero diff --git a/site/content/docs/main/velero-install.md b/site/content/docs/main/velero-install.md index 8cc6d4d2e7..7fdd0f9361 100644 --- a/site/content/docs/main/velero-install.md +++ b/site/content/docs/main/velero-install.md @@ -21,12 +21,12 @@ velero install \ --velero-pod-mem-request \ --velero-pod-cpu-limit \ --velero-pod-mem-limit \ - [--use-restic] \ - [--default-volumes-to-restic] \ - [--restic-pod-cpu-request ] \ - [--restic-pod-mem-request ] \ - [--restic-pod-cpu-limit ] \ - [--restic-pod-mem-limit ] + [--use-node-agent] \ + [--default-volumes-to-fs-backup] \ + [--node-agent-pod-cpu-request ] \ + [--node-agent-pod-mem-request ] \ + [--node-agent-pod-cpu-limit ] \ + [--node-agent-pod-mem-limit ] ``` The values for the resource requests and limits flags follow the same format as [Kubernetes resource requirements][3] @@ -39,7 +39,7 @@ This section provides examples that serve as a starting point for more customize ```bash velero install --provider gcp --plugins velero/velero-plugin-for-gcp:v1.0.0 --bucket mybucket --secret-file ./gcp-service-account.json -velero install --provider aws --plugins velero/velero-plugin-for-aws:v1.0.0 --bucket backups --provider aws --secret-file ./aws-iam-creds --backup-location-config region=us-east-2 --snapshot-location-config region=us-east-2 --use-restic +velero install --provider aws --plugins velero/velero-plugin-for-aws:v1.0.0 --bucket backups --secret-file ./aws-iam-creds
--backup-location-config region=us-east-2 --snapshot-location-config region=us-east-2 --use-node-agent velero install --provider azure --plugins velero/velero-plugin-for-microsoft-azure:v1.0.0 --bucket $BLOB_CONTAINER --secret-file ./credentials-velero --backup-location-config resourceGroup=$AZURE_BACKUP_RESOURCE_GROUP,storageAccount=$AZURE_STORAGE_ACCOUNT_ID[,subscriptionId=$AZURE_BACKUP_SUBSCRIPTION_ID] --snapshot-location-config apiTimeout=[,resourceGroup=$AZURE_BACKUP_RESOURCE_GROUP,subscriptionId=$AZURE_BACKUP_SUBSCRIPTION_ID] ``` diff --git a/site/content/docs/v1.7/upgrade-to-1.7.md b/site/content/docs/v1.7/upgrade-to-1.7.md index c46b881c1d..6cf7748c6c 100644 --- a/site/content/docs/v1.7/upgrade-to-1.7.md +++ b/site/content/docs/v1.7/upgrade-to-1.7.md @@ -53,7 +53,7 @@ Before upgrading, check the [Velero compatibility matrix](https://github.com/vmw # if you are using other plugin kubectl set image deployment/velero \ velero=velero/velero:v1.7.0 \ - velero-velero-plugin-for-aws=velero/velero-plugin-for-aws:v1.3.0 \ + velero-plugin-for-aws=velero/velero-plugin-for-aws:v1.3.0 \ --namespace velero # optional, if using the restic daemon set diff --git a/site/content/resources/_index.md b/site/content/resources/_index.md index 5d9221e00b..5f05a811a5 100644 --- a/site/content/resources/_index.md +++ b/site/content/resources/_index.md @@ -70,10 +70,4 @@ Here you will find external resources about Velero, such as videos, podcasts, an * [Cormac Hogan has written a series of blog posts on Velero](https://cormachogan.com/?s=velero) -* [Backup and Restore MariaDB Galera Deployments on Kubernetes - by Vikram Vaswani](https://docs.bitnami.com/tutorials/backup-restore-data-mariadb-galera-kubernetes/) - - -* Two great blog posts by community member Imran Pochi: - * [Backup and Restore of Kubernetes Applications using Heptio’s Velero with Restic and Rook-Ceph as the storage provider](https://blog.kubernauts.io/backup-and-restore-of-kubernetes-applications-using-heptios-velero-with-restic-and-rook-ceph-as-2e8df15b1487) - * [Backup and Restore PVCs using Velero with restic and OpenEBS from Baremetal cluster to AWS - ](https://blog.kubernauts.io/backup-and-restore-pvcs-using-velero-with-restic-and-openebs-from-baremetal-cluster-to-aws-d3ac54386109) \ No newline at end of file +* [Backup and Restore MariaDB Galera Deployments on Kubernetes - by Vikram Vaswani](https://docs.bitnami.com/tutorials/backup-restore-data-mariadb-galera-kubernetes/) \ No newline at end of file diff --git a/site/data/docs/main-toc.yml b/site/data/docs/main-toc.yml index 0402cc29cd..5442c62cc7 100644 --- a/site/data/docs/main-toc.yml +++ b/site/data/docs/main-toc.yml @@ -19,8 +19,8 @@ toc: url: /supported-providers - page: Evaluation install url: /contributions/minio - - page: Restic integration - url: /restic + - page: File system backup + url: /file-system-backup - page: Examples url: /examples - page: Uninstalling @@ -65,8 +65,8 @@ toc: url: /debugging-install - page: Troubleshoot a restore url: /debugging-restores - - page: Troubleshoot Restic - url: /restic#troubleshooting + - page: Troubleshoot file system backup + url: /file-system-backup#troubleshooting - title: Contribute subfolderitems: - page: Start Contributing diff --git a/site/layouts/index.redirects b/site/layouts/index.redirects index b336ccfb1d..bd30d5d67d 100644 --- a/site/layouts/index.redirects +++ b/site/layouts/index.redirects @@ -11,4 +11,4 @@ /docs/customize-installation /docs/{{ $latest }}/customize-installation /docs/faq /docs/{{ $latest }}/faq /docs/csi 
/docs/{{ $latest }}/csi -/docs/restic /docs/{{ $latest }}/restic +/docs/file-system-backup /docs/{{ $latest }}/file-system-backup diff --git a/test/e2e/testdata/storage-class/gcp.yaml b/test/e2e/testdata/storage-class/gcp.yaml index 30ee8fc1fa..397a4b19f3 100644 --- a/test/e2e/testdata/storage-class/gcp.yaml +++ b/test/e2e/testdata/storage-class/gcp.yaml @@ -9,5 +9,5 @@ parameters: type: pd-standard provisioner: kubernetes.io/gce-pd reclaimPolicy: Delete -volumeBindingMode: volumeBindingMode: WaitForFirstConsumer +volumeBindingMode: WaitForFirstConsumer diff --git a/test/e2e/util/velero/install.go b/test/e2e/util/velero/install.go index 7f50d780cc..14c6e622b0 100644 --- a/test/e2e/util/velero/install.go +++ b/test/e2e/util/velero/install.go @@ -352,8 +352,8 @@ func patchResources(ctx context.Context, resources *unstructured.UnstructuredLis Name: "restic-restore-action-config", Namespace: namespace, Labels: map[string]string{ - "velero.io/plugin-config": "", - "velero.io/restic": "RestoreItemAction", + "velero.io/plugin-config": "", + "velero.io/pod-volume-restore": "RestoreItemAction", }, }, Data: map[string]string{