diff --git a/chart/etcd-backup-restore/templates/etcd-backup-secret.yaml b/chart/etcd-backup-restore/templates/etcd-backup-secret.yaml index a446de78d..0ae067c8e 100644 --- a/chart/etcd-backup-restore/templates/etcd-backup-secret.yaml +++ b/chart/etcd-backup-restore/templates/etcd-backup-secret.yaml @@ -20,8 +20,8 @@ data: {{- if .Values.backup.abs.emulatorEnabled }} emulatorEnabled: {{ .Values.backup.abs.emulatorEnabled | b64enc}} {{- end }} - {{- if .Values.backup.abs.storageAPIEndpoint }} - storageAPIEndpoint: {{ .Values.backup.abs.storageAPIEndpoint | b64enc}} + {{- if .Values.backup.abs.domain }} + domain: {{ .Values.backup.abs.domain | b64enc}} {{- end }} {{- else if eq .Values.backup.storageProvider "GCS" }} serviceaccount.json : {{ .Values.backup.gcs.serviceAccountJson | b64enc }} diff --git a/chart/etcd-backup-restore/templates/etcd-statefulset.yaml b/chart/etcd-backup-restore/templates/etcd-statefulset.yaml index 368f9854c..6cc33f94e 100644 --- a/chart/etcd-backup-restore/templates/etcd-statefulset.yaml +++ b/chart/etcd-backup-restore/templates/etcd-statefulset.yaml @@ -199,25 +199,9 @@ spec: key: "emulatorEnabled" optional: true {{- end }} - {{- if .Values.backup.abs.storageAPIEndpoint }} - - name: "AZURE_STORAGE_API_ENDPOINT" - valueFrom: - secretKeyRef: - name: {{ .Release.Name }}-etcd-backup - key: "storageAPIEndpoint" - optional: true - {{- end }} {{- else if eq .Values.backup.storageProvider "GCS" }} - name: "GOOGLE_APPLICATION_CREDENTIALS" value: "/root/.gcp/serviceaccount.json" - {{- if .Values.backup.gcs.storageAPIEndpoint }} - - name: "GOOGLE_STORAGE_API_ENDPOINT" - valueFrom: - secretKeyRef: - name: {{ .Release.Name }}-etcd-backup - key: "storageAPIEndpoint" - optional: true - {{- end }} {{- if .Values.backup.gcs.emulatorEnabled }} - name: "GOOGLE_EMULATOR_ENABLED" valueFrom: diff --git a/chart/etcd-backup-restore/values.yaml b/chart/etcd-backup-restore/values.yaml index 5d3f04184..788b16e81 100644 --- a/chart/etcd-backup-restore/values.yaml +++ b/chart/etcd-backup-restore/values.yaml @@ -97,8 +97,8 @@ backup: # abs: # storageAccount: storage-account-with-object-storage-privileges # storageKey: storage-key-with-object-storage-privileges + # domain: non-default-domain-for-object-storage-service # emulatorEnabled: boolean-float-to-enable-e2e-tests-to-use-azure-emulator # optional - # storageAPIEndpoint: endpoint-override-for-storage-api # if emulatorEnabled is true # swift: # authURL: identity-server-url # domainName: domain-name diff --git a/docs/deployment/getting_started.md b/docs/deployment/getting_started.md index 18bf3e398..121cc0806 100644 --- a/docs/deployment/getting_started.md +++ b/docs/deployment/getting_started.md @@ -20,10 +20,11 @@ The procedure to provide credentials to access the cloud provider object store v * For `Google Cloud Storage`: 1. The service account json file should be provided in the `~/.gcp` as a `service-account-file.json` file. 2. The service account json file should be provided, and the file path should be made available as environment variable `GOOGLE_APPLICATION_CREDENTIALS`. - 3. If using a storage API [endpoint override](https://pkg.go.dev/cloud.google.com/go#hdr-Endpoint_Override), such as a [regional endpoint](https://cloud.google.com/storage/docs/regional-endpoints) or a local GCS emulator endpoint, then the endpoint must be made available via environment variable `GOOGLE_STORAGE_API_ENDPOINT`, in the format `http[s]://host[:port]/storage/v1/`. + 3. 
If using a storage API [endpoint override](https://pkg.go.dev/cloud.google.com/go#hdr-Endpoint_Override), such as a [regional endpoint](https://cloud.google.com/storage/docs/regional-endpoints) or a local GCS emulator endpoint, then the endpoint must be made available via a file named `storageAPIEndpoint` residing in the `~/.gcp` directory. * For `Azure Blob storage`: - 1. The secret file should be provided, and the file path should be made available as an environment variable: `AZURE_APPLICATION_CREDENTIALS`. + 1. The JSON secret file should be provided, and the file path should be made available as an environment variable: `AZURE_APPLICATION_CREDENTIALS`. + 2. The Azure Blob Storage domain can be overridden by providing it via an optional field `domain` in the above-mentioned JSON secret file. * For `Openstack Swift`: 1. The secret file should be provided, and the file path should be made available as an environment variable: `OPENSTACK_APPLICATION_CREDENTIALS`. diff --git a/example/storage-provider-secrets/00-azure-blob-storage-secret.yaml b/example/storage-provider-secrets/00-azure-blob-storage-secret.yaml index 80899b98e..227ebde51 100644 --- a/example/storage-provider-secrets/00-azure-blob-storage-secret.yaml +++ b/example/storage-provider-secrets/00-azure-blob-storage-secret.yaml @@ -8,4 +8,4 @@ data: storageAccount: YWRtaW4= # admin storageKey: YWRtaW4= # admin # emulatorEnabled: dHJ1ZQ== # true (optional) - # storageAPIEndpoint: aHR0cDovL2F6dXJpdGUtc2VydmljZToxMDAwMA== # http://azurite-service:10000 (optional) + # domain: YmxvYi5jb3JlLmNoaW5hY2xvdWRhcGkuY24= # blob.core.chinacloudapi.cn (optional) diff --git a/pkg/snapstore/abs_snapstore.go b/pkg/snapstore/abs_snapstore.go index 05e35ee9b..472d47b9e 100644 --- a/pkg/snapstore/abs_snapstore.go +++ b/pkg/snapstore/abs_snapstore.go @@ -22,6 +22,7 @@ import ( "github.com/Azure/azure-storage-blob-go/azblob" "github.com/sirupsen/logrus" + "k8s.io/utils/pointer" brtypes "github.com/gardener/etcd-backup-restore/pkg/types" ) @@ -29,8 +30,6 @@ import ( const ( absCredentialDirectory = "AZURE_APPLICATION_CREDENTIALS" absCredentialJSONFile = "AZURE_APPLICATION_CREDENTIALS_JSON" - // AzuriteEndpoint is the environment variable which indicates the endpoint at which the Azurite emulator is hosted - AzuriteEndpoint = "AZURE_STORAGE_API_ENDPOINT" ) // ABSSnapStore is an ABS backed snapstore. 
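Note on the Azure credential format introduced above: the optional `domain` key in the JSON secret (or the chart's `backup.abs.domain` value) takes the place of the former `storageAPIEndpoint`/`AZURE_STORAGE_API_ENDPOINT` override. A minimal sketch of how such a credentials file is interpreted — the account name, key, and domain values below are made up, and the local struct simply mirrors the `absCredentials` definition further down in this patch:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors the absCredentials struct introduced in pkg/snapstore/abs_snapstore.go.
type absCredentials struct {
	BucketName     string  `json:"bucketName"`
	StorageAccount string  `json:"storageAccount"`
	StorageKey     string  `json:"storageKey"`
	Domain         *string `json:"domain,omitempty"`
}

func main() {
	// Hypothetical content of the file referenced by AZURE_APPLICATION_CREDENTIALS_JSON.
	raw := []byte(`{
		"storageAccount": "exampleaccount",
		"storageKey": "ZXhhbXBsZS1rZXk=",
		"domain": "blob.core.chinacloudapi.cn"
	}`)

	var creds absCredentials
	if err := json.Unmarshal(raw, &creds); err != nil {
		panic(err)
	}

	// Fall back to the global domain when "domain" is omitted, as NewABSSnapStore now does.
	domain := "blob.core.windows.net" // brtypes.AzureBlobStorageGlobalDomain
	if creds.Domain != nil {
		domain = *creds.Domain
	}
	fmt.Printf("https://%s.%s\n", creds.StorageAccount, domain)
	// Prints: https://exampleaccount.blob.core.chinacloudapi.cn
}
```

Carrying the domain inside the secret keeps the shared-key credential and the service domain it belongs to in one place, rather than splitting them between a secret and a separate environment variable.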
@@ -44,29 +43,35 @@ type ABSSnapStore struct { } type absCredentials struct { - BucketName string `json:"bucketName"` - SecretKey string `json:"storageKey"` - StorageAccount string `json:"storageAccount"` + BucketName string `json:"bucketName"` + StorageAccount string `json:"storageAccount"` + StorageKey string `json:"storageKey"` + Domain *string `json:"domain,omitempty"` } // NewABSSnapStore creates a new ABSSnapStore using a shared configuration and a specified bucket func NewABSSnapStore(config *brtypes.SnapstoreConfig) (*ABSSnapStore, error) { - storageAccount, storageKey, err := getCredentials(getEnvPrefixString(config.IsSource)) + absCreds, err := getCredentials(getEnvPrefixString(config.IsSource)) if err != nil { - return nil, fmt.Errorf("failed to get credentials: %v", err) + return nil, fmt.Errorf("failed to get sharedKeyCredential: %v", err) } - credentials, err := azblob.NewSharedKeyCredential(storageAccount, storageKey) + sharedKeyCredential, err := azblob.NewSharedKeyCredential(absCreds.StorageAccount, absCreds.StorageKey) if err != nil { - return nil, fmt.Errorf("failed to create shared key credentials: %v", err) + return nil, fmt.Errorf("failed to create shared key sharedKeyCredential: %v", err) } - pipeline := azblob.NewPipeline(credentials, azblob.PipelineOptions{ + pipeline := azblob.NewPipeline(sharedKeyCredential, azblob.PipelineOptions{ Retry: azblob.RetryOptions{ TryTimeout: downloadTimeout, }}) - blobURL, err := ConstructBlobServiceURL(credentials) + domain := brtypes.AzureBlobStorageGlobalDomain + if absCreds.Domain != nil { + domain = *absCreds.Domain + } + + blobURL, err := ConstructBlobServiceURL(absCreds.StorageAccount, domain) if err != nil { return nil, err } @@ -78,41 +83,34 @@ func NewABSSnapStore(config *brtypes.SnapstoreConfig) (*ABSSnapStore, error) { } // ConstructBlobServiceURL constructs the Blob Service URL based on the activation status of the Azurite Emulator. -// It checks the environment variables for emulator configuration and constructs the URL accordingly. -// The function expects two environment variables: +// It checks the environment variable for emulator configuration and constructs the URL accordingly. +// The function expects the following environment variable: // - AZURE_EMULATOR_ENABLED: Indicates whether the Azurite Emulator is enabled (expects "true" or "false"). -// - AZURE_STORAGE_API_ENDPOINT: Specifies the Azurite Emulator endpoint when the emulator is enabled. 
-func ConstructBlobServiceURL(credentials *azblob.SharedKeyCredential) (*url.URL, error) { - defaultURL, err := url.Parse(fmt.Sprintf("https://%s.%s", credentials.AccountName(), brtypes.AzureBlobStorageHostName)) - if err != nil { - return nil, fmt.Errorf("failed to parse default service URL: %w", err) - } +func ConstructBlobServiceURL(storageAccount, domain string) (*url.URL, error) { + scheme := "https" + emulatorEnabled, ok := os.LookupEnv(EnvAzureEmulatorEnabled) - if !ok { - return defaultURL, nil - } - isEmulator, err := strconv.ParseBool(emulatorEnabled) - if err != nil { - return nil, fmt.Errorf("invalid value for %s: %s, error: %w", EnvAzureEmulatorEnabled, emulatorEnabled, err) - } - if !isEmulator { - return defaultURL, nil - } - endpoint, ok := os.LookupEnv(AzuriteEndpoint) - if !ok { - return nil, fmt.Errorf("%s environment variable not set while %s is true", AzuriteEndpoint, EnvAzureEmulatorEnabled) + if ok { + isEmulator, err := strconv.ParseBool(emulatorEnabled) + if err != nil { + return nil, fmt.Errorf("invalid value for %s: %s, error: %w", EnvAzureEmulatorEnabled, emulatorEnabled, err) + } + if isEmulator { + // TODO: going forward, use Azurite with HTTPS (TLS) communication + scheme = "http" + } } - // Application protocol (http or https) is determined by the user of the Azurite, not by this function. - return url.Parse(fmt.Sprintf("%s/%s", endpoint, credentials.AccountName())) + + return url.Parse(fmt.Sprintf("%s://%s.%s", scheme, storageAccount, domain)) } -func getCredentials(prefixString string) (string, string, error) { +func getCredentials(prefixString string) (*absCredentials, error) { if filename, isSet := os.LookupEnv(prefixString + absCredentialJSONFile); isSet { credentials, err := readABSCredentialsJSON(filename) if err != nil { - return "", "", fmt.Errorf("error getting credentials using %v file", filename) + return nil, fmt.Errorf("error getting credentials using %v file with error %w", filename, err) } - return credentials.StorageAccount, credentials.SecretKey, nil + return credentials, nil } // TODO: @renormalize Remove this extra handling in v0.31.0 @@ -120,14 +118,14 @@ func getCredentials(prefixString string) (string, string, error) { if dir, isSet := os.LookupEnv(prefixString + absCredentialDirectory); isSet { jsonCredentialFile, err := findFileWithExtensionInDir(dir, ".json") if err != nil { - return "", "", fmt.Errorf("error while finding a JSON credential file in %v directory with error: %w", dir, err) + return nil, fmt.Errorf("error while finding a JSON credential file in %v directory with error: %w", dir, err) } if jsonCredentialFile != "" { credentials, err := readABSCredentialsJSON(jsonCredentialFile) if err != nil { - return "", "", fmt.Errorf("error getting credentials using %v JSON file in a directory with error: %w", jsonCredentialFile, err) + return nil, fmt.Errorf("error getting credentials using %v JSON file in a directory with error: %w", jsonCredentialFile, err) } - return credentials.StorageAccount, credentials.SecretKey, nil + return credentials, nil } // Non JSON credential files might exist in the credential directory, do not return } @@ -135,12 +133,12 @@ func getCredentials(prefixString string) (string, string, error) { if dir, isSet := os.LookupEnv(prefixString + absCredentialDirectory); isSet { credentials, err := readABSCredentialFiles(dir) if err != nil { - return "", "", fmt.Errorf("error getting credentials from %v dir", dir) + return nil, fmt.Errorf("error getting credentials from %v directory with error %w", dir, err) } 
- return credentials.StorageAccount, credentials.SecretKey, nil + return credentials, nil } - return "", "", fmt.Errorf("unable to get credentials") + return nil, fmt.Errorf("unable to get credentials") } func readABSCredentialsJSON(filename string) (*absCredentials, error) { @@ -172,17 +170,23 @@ func readABSCredentialFiles(dirname string) (*absCredentials, error) { for _, file := range files { if file.Name() == "storageAccount" { - data, err := os.ReadFile(dirname + "/storageAccount") + data, err := os.ReadFile(path.Join(dirname, file.Name())) if err != nil { return nil, err } absConfig.StorageAccount = string(data) } else if file.Name() == "storageKey" { - data, err := os.ReadFile(dirname + "/storageKey") + data, err := os.ReadFile(path.Join(dirname, file.Name())) + if err != nil { + return nil, err + } + absConfig.StorageKey = string(data) + } else if file.Name() == "domain" { + data, err := os.ReadFile(path.Join(dirname, file.Name())) if err != nil { return nil, err } - absConfig.SecretKey = string(data) + absConfig.Domain = pointer.String(string(data)) } } @@ -430,7 +434,7 @@ func GetABSCredentialsLastModifiedTime() (time.Time, error) { } func isABSConfigEmpty(config *absCredentials) error { - if len(config.SecretKey) != 0 && len(config.StorageAccount) != 0 { + if len(config.StorageKey) != 0 && len(config.StorageAccount) != 0 { return nil } return fmt.Errorf("azure object storage credentials: storageKey or storageAccount is missing") diff --git a/pkg/snapstore/abs_snapstore_test.go b/pkg/snapstore/abs_snapstore_test.go index b057fc1a5..519a0efeb 100644 --- a/pkg/snapstore/abs_snapstore_test.go +++ b/pkg/snapstore/abs_snapstore_test.go @@ -31,7 +31,7 @@ func newFakeABSSnapstore() brtypes.SnapStore { newFakePolicyFactory(bucket, prefixV2, objectMap), } p := pipeline.NewPipeline(f, pipeline.Options{HTTPSender: newFakePolicyFactory(bucket, prefixV2, objectMap)}) - u, err := url.Parse(fmt.Sprintf("https://%s.%s", "dummyaccount", brtypes.AzureBlobStorageHostName)) + u, err := url.Parse(fmt.Sprintf("https://%s.%s", "dummyaccount", brtypes.AzureBlobStorageGlobalDomain)) Expect(err).ShouldNot(HaveOccurred()) serviceURL := azblob.NewServiceURL(*u, p) containerURL := serviceURL.NewContainerURL(bucket) diff --git a/pkg/snapstore/gcs_snapstore.go b/pkg/snapstore/gcs_snapstore.go index 1018dac3a..eaf26ffe2 100644 --- a/pkg/snapstore/gcs_snapstore.go +++ b/pkg/snapstore/gcs_snapstore.go @@ -30,6 +30,8 @@ const ( envStoreCredentials = "GOOGLE_APPLICATION_CREDENTIALS" envStorageAPIEndpoint = "GOOGLE_STORAGE_API_ENDPOINT" envSourceStoreCredentials = "SOURCE_GOOGLE_APPLICATION_CREDENTIALS" + + storageAPIEndpointFileName = "storageAPIEndpoint" ) // GCSSnapStore is snapstore with GCS object store as backend. 
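For the directory-based credential layout (an `AZURE_APPLICATION_CREDENTIALS` directory with one file per key), `readABSCredentialFiles` above now also picks up an optional `domain` file. A rough, self-contained sketch of that flow — the helper name, directory contents, and printed output are illustrative only:

```go
package main

import (
	"fmt"
	"os"
	"path"

	"k8s.io/utils/pointer"
)

// absCredentials mirrors the struct from pkg/snapstore/abs_snapstore.go.
type absCredentials struct {
	StorageAccount string
	StorageKey     string
	Domain         *string
}

// readCredentialDir is a hypothetical stand-in for readABSCredentialFiles:
// one file per key, with "domain" optional.
func readCredentialDir(dirname string) (*absCredentials, error) {
	files, err := os.ReadDir(dirname)
	if err != nil {
		return nil, err
	}
	creds := &absCredentials{}
	for _, file := range files {
		data, err := os.ReadFile(path.Join(dirname, file.Name()))
		if err != nil {
			return nil, err
		}
		switch file.Name() {
		case "storageAccount":
			creds.StorageAccount = string(data)
		case "storageKey":
			creds.StorageKey = string(data)
		case "domain": // optional override of blob.core.windows.net
			creds.Domain = pointer.String(string(data))
		}
	}
	return creds, nil
}

func main() {
	// Assumes AZURE_APPLICATION_CREDENTIALS points at a directory containing
	// storageAccount, storageKey and (optionally) domain files.
	creds, err := readCredentialDir(os.Getenv("AZURE_APPLICATION_CREDENTIALS"))
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("storage account:", creds.StorageAccount)
}
```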
@@ -62,8 +64,12 @@ func NewGCSSnapStore(config *brtypes.SnapstoreConfig) (*GCSSnapStore, error) { emulatorConfig.enabled = isEmulatorEnabled() var opts []option.ClientOption // no need to explicitly set store credentials here since the Google SDK picks it up from the standard environment variable - if _, ok := os.LookupEnv(envSourceStoreCredentials); !ok || emulatorConfig.enabled { // do not set endpoint override when copying backups between buckets, since the buckets may reside on different regions - endpoint := strings.TrimSpace(os.Getenv(envStorageAPIEndpoint)) + if gcsApplicationCredentialsPath, isSet := os.LookupEnv(getEnvPrefixString(config.IsSource) + envStoreCredentials); isSet { + storageAPIEndpointFilePath := path.Join(path.Dir(gcsApplicationCredentialsPath), storageAPIEndpointFileName) + endpoint, err := getGCSStorageAPIEndpoint(storageAPIEndpointFilePath) + if err != nil { + return nil, fmt.Errorf("error getting storage API endpoint from %v", storageAPIEndpointFilePath) + } if endpoint != "" { opts = append(opts, option.WithEndpoint(endpoint)) if emulatorConfig.enabled { @@ -71,6 +77,7 @@ func NewGCSSnapStore(config *brtypes.SnapstoreConfig) (*GCSSnapStore, error) { } } } + var chunkDirSuffix string if emulatorConfig.enabled { err := emulatorConfig.configureClient(opts) @@ -96,6 +103,22 @@ func NewGCSSnapStore(config *brtypes.SnapstoreConfig) (*GCSSnapStore, error) { return NewGCSSnapStoreFromClient(config.Container, config.Prefix, config.TempDir, config.MaxParallelChunkUploads, config.MinChunkSize, chunkDirSuffix, gcsClient), nil } +func getGCSStorageAPIEndpoint(path string) (string, error) { + if _, err := os.Stat(path); err == nil { + data, err := os.ReadFile(path) + if err != nil { + return "", err + } + if len(data) == 0 { + return "", fmt.Errorf("file %s is empty", path) + } + return strings.TrimSpace(string(data)), nil + } + + // support falling back to environment variable `GOOGLE_STORAGE_API_ENDPOINT` + return strings.TrimSpace(os.Getenv(envStorageAPIEndpoint)), nil +} + // NewGCSSnapStoreFromClient create new GCSSnapStore from shared configuration with specified bucket. 
func NewGCSSnapStoreFromClient(bucket, prefix, tempDir string, maxParallelChunkUploads uint, minChunkSize int64, chunkDirSuffix string, cli stiface.Client) *GCSSnapStore { return &GCSSnapStore{ @@ -306,11 +329,11 @@ func (s *GCSSnapStore) Delete(snap brtypes.Snapshot) error { // GetGCSCredentialsLastModifiedTime returns the latest modification timestamp of the GCS credential file func GetGCSCredentialsLastModifiedTime() (time.Time, error) { - if filename, isSet := os.LookupEnv(envStoreCredentials); isSet { - credentialFiles := []string{filename} + if credentialsFilePath, isSet := os.LookupEnv(envStoreCredentials); isSet { + credentialFiles := []string{credentialsFilePath} gcsTimeStamp, err := getLatestCredentialsModifiedTime(credentialFiles) if err != nil { - return time.Time{}, fmt.Errorf("failed to fetch file information of the GCS JSON credential file %v with error: %v", filename, err) + return time.Time{}, fmt.Errorf("failed to fetch file information of the GCS JSON credential dir %v with error: %v", path.Dir(credentialsFilePath), err) } return gcsTimeStamp, nil } diff --git a/pkg/snapstore/snapstore_test.go b/pkg/snapstore/snapstore_test.go index d12be8543..d913caeb2 100644 --- a/pkg/snapstore/snapstore_test.go +++ b/pkg/snapstore/snapstore_test.go @@ -510,6 +510,7 @@ var _ = Describe("Dynamic access credential rotation test for each provider", fu var _ = Describe("Blob Service URL construction for Azure", func() { var credentials *azblob.SharedKeyCredential + domain := "test.domain" BeforeEach(func() { var err error // test strings @@ -519,39 +520,31 @@ var _ = Describe("Blob Service URL construction for Azure", func() { }) Context(fmt.Sprintf("when the environment variable %q is not set", EnvAzureEmulatorEnabled), func() { It("should return the default blob service URL", func() { - blobServiceURL, err := ConstructBlobServiceURL(credentials) + blobServiceURL, err := ConstructBlobServiceURL(credentials.AccountName(), domain) Expect(err).ShouldNot(HaveOccurred()) - Expect(blobServiceURL.String()).Should(Equal(fmt.Sprintf("https://%s.%s", credentials.AccountName(), brtypes.AzureBlobStorageHostName))) + Expect(blobServiceURL.String()).Should(Equal(fmt.Sprintf("https://%s.%s", credentials.AccountName(), domain))) }) }) Context(fmt.Sprintf("when the environment variable %q is set", EnvAzureEmulatorEnabled), func() { Context("to values which are not \"true\"", func() { It("should error when the environment variable is not \"true\" or \"false\"", func() { GinkgoT().Setenv(EnvAzureEmulatorEnabled, "") - _, err := ConstructBlobServiceURL(credentials) + _, err := ConstructBlobServiceURL(credentials.AccountName(), domain) Expect(err).Should(HaveOccurred()) }) It("should return the default blob service URL when the environment variable is set to \"false\"", func() { GinkgoT().Setenv(EnvAzureEmulatorEnabled, "false") - blobServiceURL, err := ConstructBlobServiceURL(credentials) + blobServiceURL, err := ConstructBlobServiceURL(credentials.AccountName(), domain) Expect(err).ShouldNot(HaveOccurred()) - Expect(blobServiceURL.String()).Should(Equal(fmt.Sprintf("https://%s.%s", credentials.AccountName(), brtypes.AzureBlobStorageHostName))) + Expect(blobServiceURL.String()).Should(Equal(fmt.Sprintf("https://%s.%s", credentials.AccountName(), domain))) }) }) Context("to \"true\"", func() { - const endpoint string = "http://localhost:12345" - BeforeEach(func() { + It(fmt.Sprintf("should return the Azurite blob service URL with HTTP scheme"), func() { GinkgoT().Setenv(EnvAzureEmulatorEnabled, "true") - }) - 
It(fmt.Sprintf("should error when the %q environment variable is not set", AzuriteEndpoint), func() { - _, err := ConstructBlobServiceURL(credentials) - Expect(err).Should(HaveOccurred()) - }) - It(fmt.Sprintf("should return the Azurite blob service URL when the %q environment variable is set to %q", AzuriteEndpoint, endpoint), func() { - GinkgoT().Setenv(AzuriteEndpoint, endpoint) - blobServiceURL, err := ConstructBlobServiceURL(credentials) + blobServiceURL, err := ConstructBlobServiceURL(credentials.AccountName(), domain) Expect(err).ShouldNot(HaveOccurred()) - Expect(blobServiceURL.String()).Should(Equal(fmt.Sprintf("%s/%s", endpoint, credentials.AccountName()))) + Expect(blobServiceURL.String()).Should(Equal(fmt.Sprintf("http://%s.%s", credentials.AccountName(), domain))) }) }) }) diff --git a/pkg/types/snapstore.go b/pkg/types/snapstore.go index 0f7895bc8..93de02397 100644 --- a/pkg/types/snapstore.go +++ b/pkg/types/snapstore.go @@ -42,8 +42,8 @@ const ( // SnapshotKindChunk is constant for chunk snapshot kind. SnapshotKindChunk = "Chunk" - // AzureBlobStorageHostName is the host name for azure blob storage service. - AzureBlobStorageHostName = "blob.core.windows.net" + // AzureBlobStorageGlobalDomain is the default domain for azure blob storage service. + AzureBlobStorageGlobalDomain = "blob.core.windows.net" // FinalSuffix is the suffix appended to the names of final snapshots. FinalSuffix = ".final"
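On the GCS side, the endpoint override now travels alongside the credentials instead of being its own secret key: `NewGCSSnapStore` looks for a `storageAPIEndpoint` file next to the file referenced by `GOOGLE_APPLICATION_CREDENTIALS`, and falls back to the `GOOGLE_STORAGE_API_ENDPOINT` environment variable only when that file is absent. A condensed sketch of that lookup order, assuming the unprefixed environment variables (the real code also handles the `SOURCE_` prefix used when copying between buckets, and the helper name here is illustrative):

```go
package main

import (
	"fmt"
	"os"
	"path"
	"strings"
)

// resolveGCSEndpoint condenses the lookup order of NewGCSSnapStore and
// getGCSStorageAPIEndpoint; it is a sketch, not the exact production code path.
func resolveGCSEndpoint() (string, error) {
	credsPath, ok := os.LookupEnv("GOOGLE_APPLICATION_CREDENTIALS")
	if !ok {
		// Without a credentials file there is nothing to resolve relative to.
		return "", nil
	}
	endpointFile := path.Join(path.Dir(credsPath), "storageAPIEndpoint")
	if _, err := os.Stat(endpointFile); err == nil {
		data, err := os.ReadFile(endpointFile)
		if err != nil {
			return "", err
		}
		if len(data) == 0 {
			return "", fmt.Errorf("file %s is empty", endpointFile)
		}
		return strings.TrimSpace(string(data)), nil
	}
	// Fall back to the environment variable for backwards compatibility.
	return strings.TrimSpace(os.Getenv("GOOGLE_STORAGE_API_ENDPOINT")), nil
}

func main() {
	endpoint, err := resolveGCSEndpoint()
	if err != nil {
		fmt.Println(err)
		return
	}
	if endpoint == "" {
		fmt.Println("no override configured; the default GCS storage API endpoint applies")
		return
	}
	fmt.Println("endpoint override:", endpoint) // e.g. http://fake-gcs-emulator:8000/storage/v1/
}
```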