From 885f81a6abff827b3f7332884530f927f7241896 Mon Sep 17 00:00:00 2001 From: "Elena Xin (Centific Technologies Inc)" Date: Fri, 2 Feb 2024 09:44:17 +0800 Subject: [PATCH 1/5] resolve conflict --- internal/provider/services.go | 1 + ...ckup_policy_kubernetes_cluster_resource.go | 836 ++++++++++++++++++ ...policy_kubernetes_cluster_resource_test.go | 250 ++++++ .../services/dataprotection/registration.go | 13 + ...up_policy_kubernetes_cluster.html.markdown | 149 ++++ ...ion_backup_policy_postgresql.html.markdown | 2 +- 6 files changed, 1250 insertions(+), 1 deletion(-) create mode 100644 internal/services/dataprotection/azurerm_data_protection_backup_policy_kubernetes_cluster_resource.go create mode 100644 internal/services/dataprotection/azurerm_data_protection_backup_policy_kubernetes_cluster_resource_test.go create mode 100644 website/docs/r/data_protection_backup_policy_kubernetes_cluster.html.markdown diff --git a/internal/provider/services.go b/internal/provider/services.go index 2150d8c510cb..a06947d02fe8 100644 --- a/internal/provider/services.go +++ b/internal/provider/services.go @@ -160,6 +160,7 @@ func SupportedTypedServices() []sdk.TypedServiceRegistration { databoxedge.Registration{}, databricks.Registration{}, datafactory.Registration{}, + dataprotection.Registration{}, desktopvirtualization.Registration{}, digitaltwins.Registration{}, disks.Registration{}, diff --git a/internal/services/dataprotection/azurerm_data_protection_backup_policy_kubernetes_cluster_resource.go b/internal/services/dataprotection/azurerm_data_protection_backup_policy_kubernetes_cluster_resource.go new file mode 100644 index 000000000000..7872afa5fe7d --- /dev/null +++ b/internal/services/dataprotection/azurerm_data_protection_backup_policy_kubernetes_cluster_resource.go @@ -0,0 +1,836 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package dataprotection + +import ( + "context" + "encoding/json" + "fmt" + "regexp" + "strings" + "time" + + "github.com/hashicorp/go-azure-helpers/lang/pointer" + "github.com/hashicorp/go-azure-helpers/lang/response" + "github.com/hashicorp/go-azure-helpers/resourcemanager/commonschema" + "github.com/hashicorp/go-azure-sdk/resource-manager/dataprotection/2023-05-01/backuppolicies" + "github.com/hashicorp/terraform-provider-azurerm/helpers/validate" + "github.com/hashicorp/terraform-provider-azurerm/internal/sdk" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/validation" + "github.com/hashicorp/terraform-provider-azurerm/utils" +) + +type BackupPolicyKubernatesClusterModel struct { + Name string `tfschema:"name"` + ResourceGroupName string `tfschema:"resource_group_name"` + VaultName string `tfschema:"vault_name"` + BackupRepeatingTimeIntervals []string `tfschema:"backup_repeating_time_intervals"` + DefaultRetentionRule []DefaultRetentionRule `tfschema:"default_retention_rule"` + RetentionRule []RetentionRule `tfschema:"retention_rule"` + TimeZone string `tfschema:"time_zone"` +} + +type DefaultRetentionRule struct { + LifeCycle []LifeCycle `tfschema:"life_cycle"` +} + +type RetentionRule struct { + Name string `tfschema:"name"` + Criteria []Criteria `tfschema:"criteria"` + Priority int `tfschema:"priority"` + LifeCycle []LifeCycle `tfschema:"life_cycle"` +} + +type LifeCycle struct { + DataStoreType string `tfschema:"data_store_type"` + Duration string `tfschema:"duration"` + TargetCopySetting []TargetCopySetting `tfschema:"target_copy_setting"` +} + +type TargetCopySetting struct { + CopyAfter string `tfschema:"copy_after"` + DataStoreType string `tfschema:"data_store_type"` +} + +type Criteria struct { + AbsoluteCriteria string `tfschema:"absolute_criteria"` + DaysOfWeek []string `tfschema:"days_of_week"` + MonthsOfYear []string `tfschema:"months_of_year"` + ScheduledBackupTimes []string `tfschema:"scheduled_backup_times"` + WeeksOfMonth []string `tfschema:"weeks_of_month"` +} + +type DataProtectionBackupPolicyKubernatesClusterResource struct{} + +var _ sdk.Resource = DataProtectionBackupPolicyKubernatesClusterResource{} + +func (r DataProtectionBackupPolicyKubernatesClusterResource) ResourceType() string { + return "azurerm_data_protection_backup_policy_kubernetes_cluster" +} + +func (r DataProtectionBackupPolicyKubernatesClusterResource) ModelObject() interface{} { + return &BackupPolicyKubernatesClusterModel{} +} + +func (r DataProtectionBackupPolicyKubernatesClusterResource) IDValidationFunc() pluginsdk.SchemaValidateFunc { + return backuppolicies.ValidateBackupPolicyID +} + +func (r DataProtectionBackupPolicyKubernatesClusterResource) Arguments() map[string]*pluginsdk.Schema { + arguments := map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringMatch( + regexp.MustCompile("^[-a-zA-Z0-9]{3,150}$"), + "DataProtection BackupPolicy name must be 3 - 150 characters long, contain only letters, numbers and hyphens.", + ), + }, + + "resource_group_name": commonschema.ResourceGroupName(), + + "vault_name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + }, + + "backup_repeating_time_intervals": { + Type: pluginsdk.TypeList, + Required: true, + ForceNew: true, + MinItems: 1, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + }, + }, + + 
"default_retention_rule": { + Type: pluginsdk.TypeList, + Required: true, + ForceNew: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "life_cycle": { + Type: pluginsdk.TypeList, + Required: true, + ForceNew: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "data_store_type": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + // confirmed with the service team that current possible value only support `OperationalStore`. + // However, considering that `VaultStore` might be supported in the future, it would be exposed for user specification. + string(backuppolicies.DataStoreTypesOperationalStore), + }, false), + }, + + "duration": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.ISO8601Duration, + }, + + "target_copy_setting": { + Type: pluginsdk.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "copy_after": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringIsJSON, + }, + "data_store_type": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + // confirmed with the service team that currently only `OperationalStore` is supported. + // However, since `VaultStore` is in public preview and will be supported in the future, it is open to user specification. + string(backuppolicies.DataStoreTypesOperationalStore), + }, false), + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + + "retention_rule": { + Type: pluginsdk.TypeList, + Optional: true, + ForceNew: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "name": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + }, + + "criteria": { + Type: pluginsdk.TypeList, + Required: true, + ForceNew: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "absolute_criteria": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + string(backuppolicies.AbsoluteMarkerAllBackup), + string(backuppolicies.AbsoluteMarkerFirstOfDay), + string(backuppolicies.AbsoluteMarkerFirstOfMonth), + string(backuppolicies.AbsoluteMarkerFirstOfWeek), + string(backuppolicies.AbsoluteMarkerFirstOfYear), + }, false), + }, + + "days_of_week": { + Type: pluginsdk.TypeSet, + Optional: true, + ForceNew: true, + MinItems: 1, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + ValidateFunc: validation.IsDayOfTheWeek(false), + }, + }, + + "months_of_year": { + Type: pluginsdk.TypeSet, + Optional: true, + ForceNew: true, + MinItems: 1, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + ValidateFunc: validation.IsMonth(false), + }, + }, + + "scheduled_backup_times": { + Type: pluginsdk.TypeSet, + Optional: true, + ForceNew: true, + MinItems: 1, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + ValidateFunc: validation.IsRFC3339Time, + }, + }, + + "weeks_of_month": { + Type: pluginsdk.TypeSet, + Optional: true, + ForceNew: true, + MinItems: 1, + Elem: &pluginsdk.Schema{ + Type: pluginsdk.TypeString, + ValidateFunc: validation.StringInSlice([]string{ + string(backuppolicies.WeekNumberFirst), + string(backuppolicies.WeekNumberSecond), + string(backuppolicies.WeekNumberThird), + string(backuppolicies.WeekNumberFourth), + 
string(backuppolicies.WeekNumberLast), + }, false), + }, + }, + }, + }, + }, + + "life_cycle": { + Type: pluginsdk.TypeList, + Required: true, + ForceNew: true, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "data_store_type": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + // confirmed with the service team that currently only `OperationalStore` is supported. + // However, since `VaultStore` is in public preview and will be supported in the future, it is open to user specification. + string(backuppolicies.DataStoreTypesOperationalStore), + }, false), + }, + + "duration": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validate.ISO8601Duration, + }, + + "target_copy_setting": { + Type: pluginsdk.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &pluginsdk.Resource{ + Schema: map[string]*pluginsdk.Schema{ + "copy_after": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringIsJSON, + }, + "data_store_type": { + Type: pluginsdk.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{ + /// confirmed with the service team that currently only `OperationalStore` is supported. + // However, since `VaultStore` is in public preview and will be supported in the future, it is open to user specification. + string(backuppolicies.DataStoreTypesOperationalStore), + }, false), + }, + }, + }, + }, + }, + }, + }, + + "priority": { + Type: pluginsdk.TypeInt, + Required: true, + ForceNew: true, + }, + }, + }, + }, + + "time_zone": { + Type: pluginsdk.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringIsNotEmpty, + }, + } + return arguments +} + +func (r DataProtectionBackupPolicyKubernatesClusterResource) Attributes() map[string]*pluginsdk.Schema { + return map[string]*pluginsdk.Schema{} +} + +func (r DataProtectionBackupPolicyKubernatesClusterResource) Create() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 30 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + var model BackupPolicyKubernatesClusterModel + if err := metadata.Decode(&model); err != nil { + return fmt.Errorf("decoding: %+v", err) + } + + client := metadata.Client.DataProtection.BackupPolicyClient + subscriptionId := metadata.Client.Account.SubscriptionId + + id := backuppolicies.NewBackupPolicyID(subscriptionId, model.ResourceGroupName, model.VaultName, model.Name) + existing, err := client.Get(ctx, id) + if err != nil { + if !response.WasNotFound(existing.HttpResponse) { + return fmt.Errorf("checking for existing %s: %+v", id, err) + } + } + + if !response.WasNotFound(existing.HttpResponse) { + return metadata.ResourceRequiresImport(r.ResourceType(), id) + } + + taggingCriteria, err := expandBackupPolicyKubernetesClusterTaggingCriteriaArray(model.RetentionRule) + if err != nil { + return err + } + + policyRules := make([]backuppolicies.BasePolicyRule, 0) + policyRules = append(policyRules, expandBackupPolicyKubernetesClusterAzureBackupRuleArray(model.BackupRepeatingTimeIntervals, model.TimeZone, taggingCriteria)...) + policyRules = append(policyRules, expandBackupPolicyKubernetesClusterDefaultRetentionRule(model.DefaultRetentionRule)) + policyRules = append(policyRules, expandBackupPolicyKubernetesClusterAzureRetentionRules(model.RetentionRule)...) 
+ + parameters := backuppolicies.BaseBackupPolicyResource{ + Properties: &backuppolicies.BackupPolicy{ + PolicyRules: policyRules, + DatasourceTypes: []string{"Microsoft.ContainerService/managedClusters"}, + }, + } + + if _, err := client.CreateOrUpdate(ctx, id, parameters); err != nil { + return fmt.Errorf("creating/updating DataProtection BackupPolicy (%q): %+v", id, err) + } + + metadata.SetID(id) + + return nil + }, + } +} + +func (r DataProtectionBackupPolicyKubernatesClusterResource) Read() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 5 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + client := metadata.Client.DataProtection.BackupPolicyClient + + id, err := backuppolicies.ParseBackupPolicyID(metadata.ResourceData.Id()) + if err != nil { + return err + } + + resp, err := client.Get(ctx, *id) + if err != nil { + if response.WasNotFound(resp.HttpResponse) { + return metadata.MarkAsGone(id) + } + + return fmt.Errorf("retrieving %s: %+v", *id, err) + } + + model := resp.Model + if model == nil { + return fmt.Errorf("retrieving %s: model was nil", id) + } + + state := BackupPolicyKubernatesClusterModel{ + Name: id.BackupPolicyName, + ResourceGroupName: id.ResourceGroupName, + VaultName: id.BackupVaultName, + } + + if properties, ok := model.Properties.(backuppolicies.BackupPolicy); ok { + state.DefaultRetentionRule = flattenBackupPolicyKubernetesClusterDefaultRetentionRule(&properties.PolicyRules) + state.RetentionRule = flattenBackupPolicyKubernetesClusterRetentionRules(&properties.PolicyRules) + state.BackupRepeatingTimeIntervals = flattenBackupPolicyKubernetesClusterBackupRuleArray(&properties.PolicyRules) + state.TimeZone = flattenBackupPolicyDiskBackupTimeZone(&properties.PolicyRules) + } + + return metadata.Encode(&state) + }, + } +} + +func (r DataProtectionBackupPolicyKubernatesClusterResource) Delete() sdk.ResourceFunc { + return sdk.ResourceFunc{ + Timeout: 30 * time.Minute, + Func: func(ctx context.Context, metadata sdk.ResourceMetaData) error { + client := metadata.Client.DataProtection.BackupPolicyClient + + id, err := backuppolicies.ParseBackupPolicyID(metadata.ResourceData.Id()) + if err != nil { + return err + } + + if _, err := client.Delete(ctx, *id); err != nil { + return fmt.Errorf("deleting %s: %+v", id, err) + } + + return nil + }, + } +} + +func expandBackupPolicyKubernetesClusterAzureBackupRuleArray(input []string, timeZone string, taggingCriteria *[]backuppolicies.TaggingCriteria) []backuppolicies.BasePolicyRule { + results := make([]backuppolicies.BasePolicyRule, 0) + results = append(results, backuppolicies.AzureBackupRule{ + Name: "BackupIntervals", + DataStore: backuppolicies.DataStoreInfoBase{ + DataStoreType: backuppolicies.DataStoreTypesOperationalStore, + ObjectType: "DataStoreInfoBase", + }, + BackupParameters: &backuppolicies.AzureBackupParams{ + BackupType: "Incremental", + }, + Trigger: backuppolicies.ScheduleBasedTriggerContext{ + Schedule: backuppolicies.BackupSchedule{ + RepeatingTimeIntervals: input, + TimeZone: pointer.To(timeZone), + }, + TaggingCriteria: *taggingCriteria, + }, + }) + + return results +} + +func expandBackupPolicyKubernetesClusterDefaultRetentionRule(input []DefaultRetentionRule) backuppolicies.BasePolicyRule { + results := backuppolicies.AzureRetentionRule{} + for _, item := range input { + lifeCycle := expandBackupPolicyKubernetesClusterLifeCycle(item.LifeCycle) + results.Name = "Default" + results.IsDefault = pointer.To(true) + results.Lifecycles = lifeCycle + } + return 
results +} + +func expandBackupPolicyKubernetesClusterAzureRetentionRules(input []RetentionRule) []backuppolicies.BasePolicyRule { + results := make([]backuppolicies.BasePolicyRule, 0) + for _, item := range input { + lifeCycle := expandBackupPolicyKubernetesClusterLifeCycle(item.LifeCycle) + + results = append(results, backuppolicies.AzureRetentionRule{ + Name: item.Name, + IsDefault: pointer.To(false), + Lifecycles: lifeCycle, + }) + } + return results +} + +func expandBackupPolicyKubernetesClusterLifeCycle(input []LifeCycle) []backuppolicies.SourceLifeCycle { + results := make([]backuppolicies.SourceLifeCycle, 0) + for _, item := range input { + targetCopySettingList := make([]backuppolicies.TargetCopySetting, 0) + if tcs := item.TargetCopySetting; len(tcs) > 0 { + copyAfter, err := expandTargetCopySettingFromJSON(tcs[0].CopyAfter) + if err != nil { + return results + } + + targetCopySetting := backuppolicies.TargetCopySetting{ + CopyAfter: copyAfter, + DataStore: backuppolicies.DataStoreInfoBase{ + DataStoreType: backuppolicies.DataStoreTypes(tcs[0].DataStoreType), + ObjectType: "DataStoreInfoBase", + }, + } + targetCopySettingList = append(targetCopySettingList, targetCopySetting) + } + + sourceLifeCycle := backuppolicies.SourceLifeCycle{ + DeleteAfter: backuppolicies.AbsoluteDeleteOption{ + Duration: item.Duration, + }, + SourceDataStore: backuppolicies.DataStoreInfoBase{ + DataStoreType: backuppolicies.DataStoreTypes(item.DataStoreType), + ObjectType: "DataStoreInfoBase", + }, + TargetDataStoreCopySettings: pointer.To(targetCopySettingList), + } + results = append(results, sourceLifeCycle) + } + + return results +} + +func expandTargetCopySettingFromJSON(input string) (backuppolicies.CopyOption, error) { + if input == "" { + return nil, nil + } + targetCopySetting := &backuppolicies.TargetCopySetting{} + err := targetCopySetting.UnmarshalJSON([]byte(fmt.Sprintf(`{ "copyAfter": %s }`, input))) + if err != nil { + return nil, err + } + return targetCopySetting.CopyAfter, nil +} + +func flattenTargetCopySettingFromJSON(input backuppolicies.CopyOption) (string, error) { + if input == nil { + return "", nil + } + result, err := json.Marshal(input) + return string(result), err +} + +func expandBackupPolicyKubernetesClusterTaggingCriteriaArray(input []RetentionRule) (*[]backuppolicies.TaggingCriteria, error) { + results := []backuppolicies.TaggingCriteria{ + { + Criteria: nil, + IsDefault: true, + TaggingPriority: 99, + TagInfo: backuppolicies.RetentionTag{ + Id: utils.String("Default_"), + TagName: "Default", + }, + }, + } + for _, item := range input { + result := backuppolicies.TaggingCriteria{ + IsDefault: false, + TaggingPriority: int64(item.Priority), + TagInfo: backuppolicies.RetentionTag{ + Id: utils.String(item.Name + "_"), + TagName: item.Name, + }, + } + + criteria, err := expandBackupPolicyKubernetesClusterCriteriaArray(item.Criteria) + if err != nil { + return nil, err + } + result.Criteria = criteria + results = append(results, result) + } + return &results, nil +} + +func expandBackupPolicyKubernetesClusterCriteriaArray(input []Criteria) (*[]backuppolicies.BackupCriteria, error) { + if len(input) == 0 { + return nil, fmt.Errorf("criteria is a required field, cannot leave blank") + } + + results := make([]backuppolicies.BackupCriteria, 0) + + for _, item := range input { + var absoluteCriteria []backuppolicies.AbsoluteMarker + if absoluteCriteriaRaw := item.AbsoluteCriteria; len(absoluteCriteriaRaw) > 0 { + absoluteCriteria = 
[]backuppolicies.AbsoluteMarker{backuppolicies.AbsoluteMarker(absoluteCriteriaRaw)} + } + + var daysOfWeek []backuppolicies.DayOfWeek + if len(item.DaysOfWeek) > 0 { + daysOfWeek = make([]backuppolicies.DayOfWeek, 0) + for _, value := range item.DaysOfWeek { + daysOfWeek = append(daysOfWeek, backuppolicies.DayOfWeek(value)) + } + } + + var monthsOfYear []backuppolicies.Month + if len(item.MonthsOfYear) > 0 { + monthsOfYear = make([]backuppolicies.Month, 0) + for _, value := range item.MonthsOfYear { + monthsOfYear = append(monthsOfYear, backuppolicies.Month(value)) + } + } + + var weeksOfMonth []backuppolicies.WeekNumber + if len(item.WeeksOfMonth) > 0 { + weeksOfMonth = make([]backuppolicies.WeekNumber, 0) + for _, value := range item.WeeksOfMonth { + weeksOfMonth = append(weeksOfMonth, backuppolicies.WeekNumber(value)) + } + } + + results = append(results, backuppolicies.ScheduleBasedBackupCriteria{ + AbsoluteCriteria: &absoluteCriteria, + DaysOfMonth: nil, + DaysOfTheWeek: &daysOfWeek, + MonthsOfYear: &monthsOfYear, + ScheduleTimes: pointer.To(item.ScheduledBackupTimes), + WeeksOfTheMonth: &weeksOfMonth, + }) + } + return &results, nil +} + +func flattenBackupPolicyKubernetesClusterBackupRuleArray(input *[]backuppolicies.BasePolicyRule) []string { + if input == nil { + return make([]string, 0) + } + for _, item := range *input { + if backupRule, ok := item.(backuppolicies.AzureBackupRule); ok { + if backupRule.Trigger != nil { + if scheduleBasedTrigger, ok := backupRule.Trigger.(backuppolicies.ScheduleBasedTriggerContext); ok { + return scheduleBasedTrigger.Schedule.RepeatingTimeIntervals + } + } + } + } + return make([]string, 0) +} + +func flattenBackupPolicyKubernetesClusterBackupTimeZone(input *[]backuppolicies.BasePolicyRule) string { + if input == nil { + return "" + } + for _, item := range *input { + if backupRule, ok := item.(backuppolicies.AzureBackupRule); ok { + if backupRule.Trigger != nil { + if scheduleBasedTrigger, ok := backupRule.Trigger.(backuppolicies.ScheduleBasedTriggerContext); ok { + return pointer.From(scheduleBasedTrigger.Schedule.TimeZone) + } + } + } + } + return "" +} + +func flattenBackupPolicyKubernetesClusterRetentionRules(input *[]backuppolicies.BasePolicyRule) []RetentionRule { + results := make([]RetentionRule, 0) + if input == nil { + return results + } + + var taggingCriterias []backuppolicies.TaggingCriteria + for _, item := range *input { + if backupRule, ok := item.(backuppolicies.AzureBackupRule); ok { + if trigger, ok := backupRule.Trigger.(backuppolicies.ScheduleBasedTriggerContext); ok { + if trigger.TaggingCriteria != nil { + taggingCriterias = trigger.TaggingCriteria + } + } + } + } + + for _, item := range *input { + if retentionRule, ok := item.(backuppolicies.AzureRetentionRule); ok { + var name string + var taggingPriority int + var taggingCriteria []Criteria + if retentionRule.IsDefault == nil || !*retentionRule.IsDefault { + name = retentionRule.Name + for _, criteria := range taggingCriterias { + if strings.EqualFold(criteria.TagInfo.TagName, name) { + taggingPriority = int(criteria.TaggingPriority) + taggingCriteria = flattenBackupPolicyKubernetesClusterBackupCriteriaArray(criteria.Criteria) + } + } + + var lifeCycle []LifeCycle + if v := retentionRule.Lifecycles; len(v) > 0 { + lifeCycle = flattenBackupPolicyKubernetesClusterBackupLifeCycleArray(v) + } + results = append(results, RetentionRule{ + Name: name, + Priority: taggingPriority, + Criteria: taggingCriteria, + LifeCycle: lifeCycle, + }) + } + } + } + return results +} + +func 
flattenBackupPolicyKubernetesClusterDefaultRetentionRule(input *[]backuppolicies.BasePolicyRule) []DefaultRetentionRule { + results := make([]DefaultRetentionRule, 0) + if input == nil { + return results + } + + for _, item := range *input { + if retentionRule, ok := item.(backuppolicies.AzureRetentionRule); ok { + if pointer.From(retentionRule.IsDefault) { + var lifeCycle []LifeCycle + if v := retentionRule.Lifecycles; len(v) > 0 { + lifeCycle = flattenBackupPolicyKubernetesClusterBackupLifeCycleArray(v) + } + + results = append(results, DefaultRetentionRule{ + LifeCycle: lifeCycle, + }) + } + } + } + return results +} + +func flattenBackupPolicyKubernetesClusterBackupCriteriaArray(input *[]backuppolicies.BackupCriteria) []Criteria { + results := make([]Criteria, 0) + if input == nil { + return results + } + + for _, item := range *input { + if criteria, ok := item.(backuppolicies.ScheduleBasedBackupCriteria); ok { + var absoluteCriteria string + if criteria.AbsoluteCriteria != nil && len(*criteria.AbsoluteCriteria) > 0 { + absoluteCriteria = string((*criteria.AbsoluteCriteria)[0]) + } + var daysOfWeek []string + if criteria.DaysOfTheWeek != nil { + daysOfWeek = make([]string, 0) + for _, item := range *criteria.DaysOfTheWeek { + daysOfWeek = append(daysOfWeek, (string)(item)) + } + } + var monthsOfYear []string + if criteria.MonthsOfYear != nil { + monthsOfYear = make([]string, 0) + for _, item := range *criteria.MonthsOfYear { + monthsOfYear = append(monthsOfYear, (string)(item)) + } + } + var weeksOfMonth []string + if criteria.WeeksOfTheMonth != nil { + weeksOfMonth = make([]string, 0) + for _, item := range *criteria.WeeksOfTheMonth { + weeksOfMonth = append(weeksOfMonth, (string)(item)) + } + } + var scheduleTimes []string + if criteria.ScheduleTimes != nil { + scheduleTimes = make([]string, 0) + scheduleTimes = append(scheduleTimes, *criteria.ScheduleTimes...) 
+ } + + results = append(results, Criteria{ + AbsoluteCriteria: absoluteCriteria, + DaysOfWeek: daysOfWeek, + MonthsOfYear: monthsOfYear, + WeeksOfMonth: weeksOfMonth, + ScheduledBackupTimes: scheduleTimes, + }) + } + } + return results +} + +func flattenBackupPolicyKubernetesClusterBackupLifeCycleArray(input []backuppolicies.SourceLifeCycle) []LifeCycle { + results := make([]LifeCycle, 0) + if input == nil { + return results + } + + for _, item := range input { + var targetDataStoreCopySetting []TargetCopySetting + var duration string + var dataStoreType string + if v := item.TargetDataStoreCopySettings; v != nil && len(*v) > 0 { + targetDataStoreCopySetting = flattenBackupPolicyKubernetesClusterBackupTargetDataStoreCopySettingArray(v) + } + if deleteOption, ok := item.DeleteAfter.(backuppolicies.AbsoluteDeleteOption); ok { + duration = deleteOption.Duration + } + dataStoreType = string(item.SourceDataStore.DataStoreType) + + results = append(results, LifeCycle{ + Duration: duration, + TargetCopySetting: targetDataStoreCopySetting, + DataStoreType: dataStoreType, + }) + } + return results +} + +func flattenBackupPolicyKubernetesClusterBackupTargetDataStoreCopySettingArray(input *[]backuppolicies.TargetCopySetting) []TargetCopySetting { + results := make([]TargetCopySetting, 0) + if input == nil || len(*input) == 0 { + return results + } + + for _, item := range *input { + copyAfter, err := flattenTargetCopySettingFromJSON(item.CopyAfter) + if err != nil { + return nil + } + dataStoreType := string(item.DataStore.DataStoreType) + + results = append(results, TargetCopySetting{ + CopyAfter: copyAfter, + DataStoreType: dataStoreType, + }) + } + return results +} diff --git a/internal/services/dataprotection/azurerm_data_protection_backup_policy_kubernetes_cluster_resource_test.go b/internal/services/dataprotection/azurerm_data_protection_backup_policy_kubernetes_cluster_resource_test.go new file mode 100644 index 000000000000..54cf7fd9811d --- /dev/null +++ b/internal/services/dataprotection/azurerm_data_protection_backup_policy_kubernetes_cluster_resource_test.go @@ -0,0 +1,250 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package dataprotection_test + +import ( + "context" + "fmt" + "testing" + + "github.com/hashicorp/go-azure-helpers/lang/response" + "github.com/hashicorp/go-azure-sdk/resource-manager/dataprotection/2023-05-01/backuppolicies" + "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance" + "github.com/hashicorp/terraform-provider-azurerm/internal/acceptance/check" + "github.com/hashicorp/terraform-provider-azurerm/internal/clients" + "github.com/hashicorp/terraform-provider-azurerm/internal/tf/pluginsdk" + "github.com/hashicorp/terraform-provider-azurerm/utils" +) + +type DataProtectionBackupPolicyKubernatesClusterTestResource struct{} + +func TestAccDataProtectionBackupPolicyKubernatesCluster_basic(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_protection_backup_policy_kubernetes_cluster", "test") + r := DataProtectionBackupPolicyKubernatesClusterTestResource{} + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccDataProtectionBackupPolicyKubernatesCluster_requiresImport(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_protection_backup_policy_kubernetes_cluster", "test") + r := DataProtectionBackupPolicyKubernatesClusterTestResource{} + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.RequiresImportErrorStep(r.requiresImport), + }) +} + +func TestAccDataProtectionBackupPolicyKubernatesCluster_complete(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_protection_backup_policy_kubernetes_cluster", "test") + r := DataProtectionBackupPolicyKubernatesClusterTestResource{} + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func TestAccDataProtectionBackupPolicyKubernatesCluster_update(t *testing.T) { + data := acceptance.BuildTestData(t, "azurerm_data_protection_backup_policy_kubernetes_cluster", "test") + r := DataProtectionBackupPolicyKubernatesClusterTestResource{} + data.ResourceTest(t, r, []acceptance.TestStep{ + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.complete(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + { + Config: r.basic(data), + Check: acceptance.ComposeTestCheckFunc( + check.That(data.ResourceName).ExistsInAzure(r), + ), + }, + data.ImportStep(), + }) +} + +func (r DataProtectionBackupPolicyKubernatesClusterTestResource) Exists(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, error) { + id, err := backuppolicies.ParseBackupPolicyID(state.ID) + if err != nil { + return nil, err + } + resp, err := client.DataProtection.BackupPolicyClient.Get(ctx, *id) + if err != nil { + if response.WasNotFound(resp.HttpResponse) { + return utils.Bool(false), nil + } + return nil, fmt.Errorf("retrieving DataProtection BackupPolicy (%q): %+v", id, err) + } + return utils.Bool(true), nil +} + +func (r DataProtectionBackupPolicyKubernatesClusterTestResource) template(data acceptance.TestData) 
string { + return fmt.Sprintf(` +provider "azurerm" { + features {} +} + +resource "azurerm_resource_group" "test" { + name = "acctest-dataprotection-%d" + location = "%s" +} + +resource "azurerm_data_protection_backup_vault" "test" { + name = "acctest-dbv-%d" + resource_group_name = azurerm_resource_group.test.name + location = azurerm_resource_group.test.location + datastore_type = "VaultStore" + redundancy = "LocallyRedundant" +} +`, data.RandomInteger, data.Locations.Primary, data.RandomInteger) +} + +func (r DataProtectionBackupPolicyKubernatesClusterTestResource) basic(data acceptance.TestData) string { + template := r.template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_data_protection_backup_policy_kubernetes_cluster" "test" { + name = "acctest-aks-%d" + resource_group_name = azurerm_resource_group.test.name + vault_name = azurerm_data_protection_backup_vault.test.name + + backup_repeating_time_intervals = ["R/2021-05-23T02:30:00+00:00/P1W"] + + retention_rule { + name = "Daily" + priority = 25 + + life_cycle { + duration = "P84D" + data_store_type = "OperationalStore" + } + + criteria { + days_of_week = ["Thursday"] + months_of_year = ["November"] + weeks_of_month = ["First"] + scheduled_backup_times = ["2021-05-23T02:30:00Z"] + } + } + + default_retention_rule { + life_cycle { + duration = "P7D" + data_store_type = "OperationalStore" + } + } +} +`, template, data.RandomInteger) +} + +func (r DataProtectionBackupPolicyKubernatesClusterTestResource) requiresImport(data acceptance.TestData) string { + config := r.basic(data) + return fmt.Sprintf(` +%s + +resource "azurerm_data_protection_backup_policy_kubernetes_cluster" "import" { + name = azurerm_data_protection_backup_policy_kubernetes_cluster.test.name + resource_group_name = azurerm_data_protection_backup_policy_kubernetes_cluster.test.resource_group_name + vault_name = azurerm_data_protection_backup_policy_kubernetes_cluster.test.vault_name + + backup_repeating_time_intervals = ["R/2021-05-23T02:30:00+00:00/P1W"] + + retention_rule { + name = "Daily" + priority = 25 + + life_cycle { + duration = "P84D" + data_store_type = "OperationalStore" + } + + criteria { + days_of_week = ["Thursday"] + months_of_year = ["November"] + scheduled_backup_times = ["2021-05-23T02:30:00Z"] + } + } + + default_retention_rule { + life_cycle { + duration = "P7D" + data_store_type = "OperationalStore" + } + } +} +`, config) +} + +func (r DataProtectionBackupPolicyKubernatesClusterTestResource) complete(data acceptance.TestData) string { + template := r.template(data) + return fmt.Sprintf(` +%s + +resource "azurerm_data_protection_backup_policy_kubernetes_cluster" "test" { + name = "acctest-aks-%d" + resource_group_name = azurerm_resource_group.test.name + vault_name = azurerm_data_protection_backup_vault.test.name + backup_repeating_time_intervals = ["R/2021-05-23T02:30:00+00:00/P1W"] + time_zone = "India Standard Time" + + retention_rule { + name = "Daily" + priority = 25 + + life_cycle { + duration = "P7D" + data_store_type = "OperationalStore" + target_copy_setting { + copy_after = jsonencode({ + objectType = "ImmediateCopyOption" + }) + data_store_type = "OperationalStore" + } + } + + life_cycle { + duration = "P84D" + data_store_type = "OperationalStore" + } + + criteria { + absolute_criteria = "FirstOfDay" + } + } + + default_retention_rule { + life_cycle { + duration = "P7D" + data_store_type = "OperationalStore" + } + } +} +`, template, data.RandomInteger) +} diff --git a/internal/services/dataprotection/registration.go 
b/internal/services/dataprotection/registration.go index e58d277d9e70..a6ab55f74a20 100644 --- a/internal/services/dataprotection/registration.go +++ b/internal/services/dataprotection/registration.go @@ -10,6 +10,7 @@ import ( type Registration struct{} +var _ sdk.TypedServiceRegistration = Registration{} var _ sdk.UntypedServiceRegistrationWithAGitHubLabel = Registration{} func (r Registration) AssociatedGitHubLabel() string { @@ -48,3 +49,15 @@ func (r Registration) SupportedResources() map[string]*pluginsdk.Resource { "azurerm_data_protection_resource_guard": resourceDataProtectionResourceGuard(), } } + +// DataSources returns a list of Data Sources supported by this Service +func (r Registration) DataSources() []sdk.DataSource { + return []sdk.DataSource{} +} + +// Resources returns a list of Resources supported by this Service +func (r Registration) Resources() []sdk.Resource { + return []sdk.Resource{ + DataProtectionBackupPolicyKubernatesClusterResource{}, + } +} diff --git a/website/docs/r/data_protection_backup_policy_kubernetes_cluster.html.markdown b/website/docs/r/data_protection_backup_policy_kubernetes_cluster.html.markdown new file mode 100644 index 000000000000..ecde245f0ec8 --- /dev/null +++ b/website/docs/r/data_protection_backup_policy_kubernetes_cluster.html.markdown @@ -0,0 +1,149 @@ +--- +subcategory: "DataProtection" +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_data_protection_backup_policy_kubernetes_cluster" +description: |- + Manages a Backup Policy to back up Kubernetes Cluster. +--- + +# azurerm_data_protection_backup_policy_kubernetes_cluster + +Manages a Backup Policy to back up Kubernetes Cluster. + +## Example Usage + +```hcl +resource "azurerm_resource_group" "example" { + name = "example-resources" + location = "West Europe" +} + +resource "azurerm_data_protection_backup_vault" "example" { + name = "example-backup-vault" + resource_group_name = azurerm_resource_group.example.name + location = azurerm_resource_group.example.location + datastore_type = "VaultStore" + redundancy = "LocallyRedundant" +} + +resource "azurerm_data_protection_backup_policy_kubernetes_cluster" "example" { + name = "example-backup-policy" + resource_group_name = azurerm_resource_group.example.name + vault_name = azurerm_data_protection_backup_vault.example.name + + backup_repeating_time_intervals = ["R/2021-05-23T02:30:00+00:00/P1W"] + time_zone = "India Standard Time" + default_retention_duration = "P4M" + + retention_rule { + name = "Daily" + priority = 25 + + life_cycle { + duration = "P84D" + data_store_type = "OperationalStore" + } + + criteria { + absolute_criteria = "FirstOfDay" + } + } + + default_retention_rule { + life_cycle { + duration = "P7D" + data_store_type = "OperationalStore" + } + } +} +``` + +## Arguments Reference + +The following arguments are supported: + +* `name` - (Required) The name which should be used for the Backup Policy Kubernetes Cluster. Changing this forces a new resource to be created. + +* `resource_group_name` - (Required) The name of the Resource Group where the Backup Policy Kubernetes Cluster should exist. Changing this forces a new resource to be created. + +* `vault_name` - (Required) The name of the Backup Vault where the Backup Policy Kubernetes Cluster should exist. Changing this forces a new resource to be created. + +* `backup_repeating_time_intervals` - (Required) Specifies a list of repeating time interval. It supports weekly back. It should follow `ISO 8601` repeating time interval. 
Changing this forces a new resource to be created.
+
+* `default_retention_rule` - (Required) A `default_retention_rule` block as defined below. Changing this forces a new resource to be created.
+
+* `retention_rule` - (Optional) One or more `retention_rule` blocks as defined below. Changing this forces a new resource to be created.
+
+* `time_zone` - (Optional) Specifies the Time Zone which should be used by the backup schedule. Changing this forces a new resource to be created.
+
+---
+
+A `default_retention_rule` block supports the following:
+
+* `life_cycle` - (Required) A `life_cycle` block as defined below. Changing this forces a new resource to be created.
+
+---
+
+A `retention_rule` block supports the following:
+
+* `name` - (Required) The name which should be used for this retention rule. Changing this forces a new resource to be created.
+
+* `criteria` - (Required) A `criteria` block as defined below. Changing this forces a new resource to be created.
+
+* `life_cycle` - (Required) A `life_cycle` block as defined below. Changing this forces a new resource to be created.
+
+* `priority` - (Required) Specifies the priority of the rule. The priority number must be unique for each rule. The lower the priority number, the higher the priority of the rule. Changing this forces a new resource to be created.
+
+---
+
+A `criteria` block supports the following:
+
+* `absolute_criteria` - (Optional) Possible values are `AllBackup`, `FirstOfDay`, `FirstOfWeek`, `FirstOfMonth` and `FirstOfYear`. These values mean the first successful backup of the day/week/month/year. Changing this forces a new resource to be created.
+
+* `days_of_week` - (Optional) Possible values are `Monday`, `Tuesday`, `Wednesday`, `Thursday`, `Friday`, `Saturday` and `Sunday`. Changing this forces a new resource to be created.
+
+* `months_of_year` - (Optional) Possible values are `January`, `February`, `March`, `April`, `May`, `June`, `July`, `August`, `September`, `October`, `November` and `December`. Changing this forces a new resource to be created.
+
+* `scheduled_backup_times` - (Optional) Specifies a list of backup times for backup in the `RFC3339` format. Changing this forces a new resource to be created.
+
+* `weeks_of_month` - (Optional) Possible values are `First`, `Second`, `Third`, `Fourth` and `Last`. Changing this forces a new resource to be created.
+
+---
+
+A `life_cycle` block supports the following:
+
+* `data_store_type` - (Required) The type of data store. Possible values is `OperationalStore`.
+
+* `duration` - (Required) The retention duration up to which the backups are to be retained in the data stores. It should follow `ISO 8601` duration format. Changing this forces a new resource to be created.
+
+* `target_copy_setting` - (Optional) A `target_copy_setting` block as defined below. Changing this forces a new resource to be created.
+
+---
+
+A `target_copy_setting` block supports the following:
+
+* `copy_after` - (Required) Specifies when the backups are tiered across two or more selected data stores as a json encoded string. Changing this forces a new resource to be created.
+
+* `data_store_type` - (Required) The target copy data store type. Possible values is `OperationalStore`.
+
+## Attributes Reference
+
+In addition to the Arguments listed above - the following Attributes are exported:
+
+* `id` - The ID of the Backup Policy Kubernetes Cluster.
+ +## Timeouts + +The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/language/resources/syntax#operation-timeouts) for certain actions: + +* `create` - (Defaults to 30 minutes) Used when creating the Backup Policy Kubernetes Cluster. +* `read` - (Defaults to 5 minutes) Used when retrieving the Backup Policy Kubernetes Cluster. +* `delete` - (Defaults to 30 minutes) Used when deleting the Backup Policy Kubernetes Cluster. + +## Import + +Backup Policy Kubernetes Cluster's can be imported using the `resource id`, e.g. + +```shell +terraform import azurerm_data_protection_backup_policy_kubernetes_cluster.example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.DataProtection/backupVaults/vault1/backupPolicies/backupPolicy1 +``` diff --git a/website/docs/r/data_protection_backup_policy_postgresql.html.markdown b/website/docs/r/data_protection_backup_policy_postgresql.html.markdown index f5bdbac99422..3407d655344e 100644 --- a/website/docs/r/data_protection_backup_policy_postgresql.html.markdown +++ b/website/docs/r/data_protection_backup_policy_postgresql.html.markdown @@ -83,7 +83,7 @@ The following arguments are supported: * `retention_rule` - (Optional) One or more `retention_rule` blocks as defined below. Changing this forces a new Backup Policy PostgreSQL to be created. -* `time_zone` - (Optional) Specifies the Time Zone which should be used by the backup schedule. Changing this forces a new Backup Policy Disk to be created. +* `time_zone` - (Optional) Specifies the Time Zone which should be used by the backup schedule. Changing this forces a new Backup Policy PostgreSQL to be created. --- From 7a7760377033a9bc7e46df1a623cccc8739335b8 Mon Sep 17 00:00:00 2001 From: "Elena Xin (Centific Technologies Inc)" Date: Wed, 31 Jan 2024 15:04:02 +0800 Subject: [PATCH 2/5] update code --- ...data_protection_backup_policy_kubernetes_cluster_resource.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/services/dataprotection/azurerm_data_protection_backup_policy_kubernetes_cluster_resource.go b/internal/services/dataprotection/azurerm_data_protection_backup_policy_kubernetes_cluster_resource.go index 7872afa5fe7d..9dd6b6ea761c 100644 --- a/internal/services/dataprotection/azurerm_data_protection_backup_policy_kubernetes_cluster_resource.go +++ b/internal/services/dataprotection/azurerm_data_protection_backup_policy_kubernetes_cluster_resource.go @@ -421,7 +421,7 @@ func (r DataProtectionBackupPolicyKubernatesClusterResource) Read() sdk.Resource state.DefaultRetentionRule = flattenBackupPolicyKubernetesClusterDefaultRetentionRule(&properties.PolicyRules) state.RetentionRule = flattenBackupPolicyKubernetesClusterRetentionRules(&properties.PolicyRules) state.BackupRepeatingTimeIntervals = flattenBackupPolicyKubernetesClusterBackupRuleArray(&properties.PolicyRules) - state.TimeZone = flattenBackupPolicyDiskBackupTimeZone(&properties.PolicyRules) + state.TimeZone = flattenBackupPolicyKubernetesClusterBackupTimeZone(&properties.PolicyRules) } return metadata.Encode(&state) From 6cfcae3e3ae68df916f2d68f30ddb07915a4af92 Mon Sep 17 00:00:00 2001 From: "Elena Xin (Centific Technologies Inc)" Date: Thu, 1 Feb 2024 14:50:44 +0800 Subject: [PATCH 3/5] update code to fix internal review feedback --- ...ckup_policy_kubernetes_cluster_resource.go | 170 ++---------------- ...policy_kubernetes_cluster_resource_test.go | 34 ---- ...up_policy_kubernetes_cluster.html.markdown | 12 +- 3 files changed, 17 insertions(+), 199 
deletions(-) diff --git a/internal/services/dataprotection/azurerm_data_protection_backup_policy_kubernetes_cluster_resource.go b/internal/services/dataprotection/azurerm_data_protection_backup_policy_kubernetes_cluster_resource.go index 9dd6b6ea761c..1f8a6930332f 100644 --- a/internal/services/dataprotection/azurerm_data_protection_backup_policy_kubernetes_cluster_resource.go +++ b/internal/services/dataprotection/azurerm_data_protection_backup_policy_kubernetes_cluster_resource.go @@ -5,7 +5,6 @@ package dataprotection import ( "context" - "encoding/json" "fmt" "regexp" "strings" @@ -44,14 +43,8 @@ type RetentionRule struct { } type LifeCycle struct { - DataStoreType string `tfschema:"data_store_type"` - Duration string `tfschema:"duration"` - TargetCopySetting []TargetCopySetting `tfschema:"target_copy_setting"` -} - -type TargetCopySetting struct { - CopyAfter string `tfschema:"copy_after"` DataStoreType string `tfschema:"data_store_type"` + Duration string `tfschema:"duration"` } type Criteria struct { @@ -138,33 +131,6 @@ func (r DataProtectionBackupPolicyKubernatesClusterResource) Arguments() map[str ForceNew: true, ValidateFunc: validate.ISO8601Duration, }, - - "target_copy_setting": { - Type: pluginsdk.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - Elem: &pluginsdk.Resource{ - Schema: map[string]*pluginsdk.Schema{ - "copy_after": { - Type: pluginsdk.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringIsJSON, - }, - "data_store_type": { - Type: pluginsdk.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{ - // confirmed with the service team that currently only `OperationalStore` is supported. - // However, since `VaultStore` is in public preview and will be supported in the future, it is open to user specification. 
- string(backuppolicies.DataStoreTypesOperationalStore), - }, false), - }, - }, - }, - }, }, }, }, @@ -195,13 +161,8 @@ func (r DataProtectionBackupPolicyKubernatesClusterResource) Arguments() map[str Type: pluginsdk.TypeString, Optional: true, ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{ - string(backuppolicies.AbsoluteMarkerAllBackup), - string(backuppolicies.AbsoluteMarkerFirstOfDay), - string(backuppolicies.AbsoluteMarkerFirstOfMonth), - string(backuppolicies.AbsoluteMarkerFirstOfWeek), - string(backuppolicies.AbsoluteMarkerFirstOfYear), - }, false), + ValidateFunc: validation.StringInSlice( + backuppolicies.PossibleValuesForAbsoluteMarker(), false), }, "days_of_week": { @@ -243,14 +204,8 @@ func (r DataProtectionBackupPolicyKubernatesClusterResource) Arguments() map[str ForceNew: true, MinItems: 1, Elem: &pluginsdk.Schema{ - Type: pluginsdk.TypeString, - ValidateFunc: validation.StringInSlice([]string{ - string(backuppolicies.WeekNumberFirst), - string(backuppolicies.WeekNumberSecond), - string(backuppolicies.WeekNumberThird), - string(backuppolicies.WeekNumberFourth), - string(backuppolicies.WeekNumberLast), - }, false), + Type: pluginsdk.TypeString, + ValidateFunc: validation.StringInSlice(backuppolicies.PossibleValuesForWeekNumber(), false), }, }, }, @@ -280,33 +235,6 @@ func (r DataProtectionBackupPolicyKubernatesClusterResource) Arguments() map[str ForceNew: true, ValidateFunc: validate.ISO8601Duration, }, - - "target_copy_setting": { - Type: pluginsdk.TypeList, - Optional: true, - ForceNew: true, - MaxItems: 1, - Elem: &pluginsdk.Resource{ - Schema: map[string]*pluginsdk.Schema{ - "copy_after": { - Type: pluginsdk.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringIsJSON, - }, - "data_store_type": { - Type: pluginsdk.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{ - /// confirmed with the service team that currently only `OperationalStore` is supported. - // However, since `VaultStore` is in public preview and will be supported in the future, it is open to user specification. 
- string(backuppolicies.DataStoreTypesOperationalStore), - }, false), - }, - }, - }, - }, }, }, }, @@ -400,28 +328,25 @@ func (r DataProtectionBackupPolicyKubernatesClusterResource) Read() sdk.Resource resp, err := client.Get(ctx, *id) if err != nil { if response.WasNotFound(resp.HttpResponse) { - return metadata.MarkAsGone(id) + return metadata.MarkAsGone(*id) } return fmt.Errorf("retrieving %s: %+v", *id, err) } - model := resp.Model - if model == nil { - return fmt.Errorf("retrieving %s: model was nil", id) - } - state := BackupPolicyKubernatesClusterModel{ Name: id.BackupPolicyName, ResourceGroupName: id.ResourceGroupName, VaultName: id.BackupVaultName, } - if properties, ok := model.Properties.(backuppolicies.BackupPolicy); ok { - state.DefaultRetentionRule = flattenBackupPolicyKubernetesClusterDefaultRetentionRule(&properties.PolicyRules) - state.RetentionRule = flattenBackupPolicyKubernetesClusterRetentionRules(&properties.PolicyRules) - state.BackupRepeatingTimeIntervals = flattenBackupPolicyKubernetesClusterBackupRuleArray(&properties.PolicyRules) - state.TimeZone = flattenBackupPolicyKubernetesClusterBackupTimeZone(&properties.PolicyRules) + if model := resp.Model; model != nil { + if properties, ok := model.Properties.(backuppolicies.BackupPolicy); ok { + state.DefaultRetentionRule = flattenBackupPolicyKubernetesClusterDefaultRetentionRule(&properties.PolicyRules) + state.RetentionRule = flattenBackupPolicyKubernetesClusterRetentionRules(&properties.PolicyRules) + state.BackupRepeatingTimeIntervals = flattenBackupPolicyKubernetesClusterBackupRuleArray(&properties.PolicyRules) + state.TimeZone = flattenBackupPolicyKubernetesClusterBackupTimeZone(&properties.PolicyRules) + } } return metadata.Encode(&state) @@ -441,7 +366,7 @@ func (r DataProtectionBackupPolicyKubernatesClusterResource) Delete() sdk.Resour } if _, err := client.Delete(ctx, *id); err != nil { - return fmt.Errorf("deleting %s: %+v", id, err) + return fmt.Errorf("deleting %s: %+v", *id, err) } return nil @@ -500,23 +425,6 @@ func expandBackupPolicyKubernetesClusterAzureRetentionRules(input []RetentionRul func expandBackupPolicyKubernetesClusterLifeCycle(input []LifeCycle) []backuppolicies.SourceLifeCycle { results := make([]backuppolicies.SourceLifeCycle, 0) for _, item := range input { - targetCopySettingList := make([]backuppolicies.TargetCopySetting, 0) - if tcs := item.TargetCopySetting; len(tcs) > 0 { - copyAfter, err := expandTargetCopySettingFromJSON(tcs[0].CopyAfter) - if err != nil { - return results - } - - targetCopySetting := backuppolicies.TargetCopySetting{ - CopyAfter: copyAfter, - DataStore: backuppolicies.DataStoreInfoBase{ - DataStoreType: backuppolicies.DataStoreTypes(tcs[0].DataStoreType), - ObjectType: "DataStoreInfoBase", - }, - } - targetCopySettingList = append(targetCopySettingList, targetCopySetting) - } - sourceLifeCycle := backuppolicies.SourceLifeCycle{ DeleteAfter: backuppolicies.AbsoluteDeleteOption{ Duration: item.Duration, @@ -525,7 +433,7 @@ func expandBackupPolicyKubernetesClusterLifeCycle(input []LifeCycle) []backuppol DataStoreType: backuppolicies.DataStoreTypes(item.DataStoreType), ObjectType: "DataStoreInfoBase", }, - TargetDataStoreCopySettings: pointer.To(targetCopySettingList), + TargetDataStoreCopySettings: &[]backuppolicies.TargetCopySetting{}, } results = append(results, sourceLifeCycle) } @@ -533,26 +441,6 @@ func expandBackupPolicyKubernetesClusterLifeCycle(input []LifeCycle) []backuppol return results } -func expandTargetCopySettingFromJSON(input string) 
(backuppolicies.CopyOption, error) { - if input == "" { - return nil, nil - } - targetCopySetting := &backuppolicies.TargetCopySetting{} - err := targetCopySetting.UnmarshalJSON([]byte(fmt.Sprintf(`{ "copyAfter": %s }`, input))) - if err != nil { - return nil, err - } - return targetCopySetting.CopyAfter, nil -} - -func flattenTargetCopySettingFromJSON(input backuppolicies.CopyOption) (string, error) { - if input == nil { - return "", nil - } - result, err := json.Marshal(input) - return string(result), err -} - func expandBackupPolicyKubernetesClusterTaggingCriteriaArray(input []RetentionRule) (*[]backuppolicies.TaggingCriteria, error) { results := []backuppolicies.TaggingCriteria{ { @@ -794,41 +682,15 @@ func flattenBackupPolicyKubernetesClusterBackupLifeCycleArray(input []backuppoli } for _, item := range input { - var targetDataStoreCopySetting []TargetCopySetting var duration string var dataStoreType string - if v := item.TargetDataStoreCopySettings; v != nil && len(*v) > 0 { - targetDataStoreCopySetting = flattenBackupPolicyKubernetesClusterBackupTargetDataStoreCopySettingArray(v) - } if deleteOption, ok := item.DeleteAfter.(backuppolicies.AbsoluteDeleteOption); ok { duration = deleteOption.Duration } dataStoreType = string(item.SourceDataStore.DataStoreType) results = append(results, LifeCycle{ - Duration: duration, - TargetCopySetting: targetDataStoreCopySetting, - DataStoreType: dataStoreType, - }) - } - return results -} - -func flattenBackupPolicyKubernetesClusterBackupTargetDataStoreCopySettingArray(input *[]backuppolicies.TargetCopySetting) []TargetCopySetting { - results := make([]TargetCopySetting, 0) - if input == nil || len(*input) == 0 { - return results - } - - for _, item := range *input { - copyAfter, err := flattenTargetCopySettingFromJSON(item.CopyAfter) - if err != nil { - return nil - } - dataStoreType := string(item.DataStore.DataStoreType) - - results = append(results, TargetCopySetting{ - CopyAfter: copyAfter, + Duration: duration, DataStoreType: dataStoreType, }) } diff --git a/internal/services/dataprotection/azurerm_data_protection_backup_policy_kubernetes_cluster_resource_test.go b/internal/services/dataprotection/azurerm_data_protection_backup_policy_kubernetes_cluster_resource_test.go index 54cf7fd9811d..9ada9a330186 100644 --- a/internal/services/dataprotection/azurerm_data_protection_backup_policy_kubernetes_cluster_resource_test.go +++ b/internal/services/dataprotection/azurerm_data_protection_backup_policy_kubernetes_cluster_resource_test.go @@ -61,34 +61,6 @@ func TestAccDataProtectionBackupPolicyKubernatesCluster_complete(t *testing.T) { }) } -func TestAccDataProtectionBackupPolicyKubernatesCluster_update(t *testing.T) { - data := acceptance.BuildTestData(t, "azurerm_data_protection_backup_policy_kubernetes_cluster", "test") - r := DataProtectionBackupPolicyKubernatesClusterTestResource{} - data.ResourceTest(t, r, []acceptance.TestStep{ - { - Config: r.basic(data), - Check: acceptance.ComposeTestCheckFunc( - check.That(data.ResourceName).ExistsInAzure(r), - ), - }, - data.ImportStep(), - { - Config: r.complete(data), - Check: acceptance.ComposeTestCheckFunc( - check.That(data.ResourceName).ExistsInAzure(r), - ), - }, - data.ImportStep(), - { - Config: r.basic(data), - Check: acceptance.ComposeTestCheckFunc( - check.That(data.ResourceName).ExistsInAzure(r), - ), - }, - data.ImportStep(), - }) -} - func (r DataProtectionBackupPolicyKubernatesClusterTestResource) Exists(ctx context.Context, client *clients.Client, state *pluginsdk.InstanceState) (*bool, 
error) { id, err := backuppolicies.ParseBackupPolicyID(state.ID) if err != nil { @@ -221,12 +193,6 @@ resource "azurerm_data_protection_backup_policy_kubernetes_cluster" "test" { life_cycle { duration = "P7D" data_store_type = "OperationalStore" - target_copy_setting { - copy_after = jsonencode({ - objectType = "ImmediateCopyOption" - }) - data_store_type = "OperationalStore" - } } life_cycle { diff --git a/website/docs/r/data_protection_backup_policy_kubernetes_cluster.html.markdown b/website/docs/r/data_protection_backup_policy_kubernetes_cluster.html.markdown index ecde245f0ec8..637d57f8de54 100644 --- a/website/docs/r/data_protection_backup_policy_kubernetes_cluster.html.markdown +++ b/website/docs/r/data_protection_backup_policy_kubernetes_cluster.html.markdown @@ -112,20 +112,10 @@ A `criteria` block supports the following: A `life_cycle` block supports the following: -* `data_store_type` - (Required) The type of data store. Possible values is `OperationalStore`. +* `data_store_type` - (Required) The type of data store. The only possible value is `OperationalStore`. * `duration` - (Required) The retention duration up to which the backups are to be retained in the data stores. It should follow `ISO 8601` duration format. Changing this forces a new resource to be created. -* `target_copy_setting` - (Optional) A `target_copy_setting` block as defined below. Changing this forces a new resource to be created. - ---- - -A `target_copy_setting` block supports the following: - -* `copy_after` - (Required) Specifies when the backups are tiered across two or more selected data stores as a json encoded string. Changing this forces a new resource to be created. - -* `data_store_type` - (Required) The target copy data store type. Possible values is `OperationalStore`. - ## Attributes Reference In addition to the Arguments listed above - the following Attributes are exported: From 17b42dbae8bc3279d53117b06c3415f211ca4033 Mon Sep 17 00:00:00 2001 From: "Elena Xin (Centific Technologies Inc)" Date: Thu, 1 Feb 2024 15:51:47 +0800 Subject: [PATCH 4/5] update code --- ...ackup_policy_kubernetes_cluster_resource.go | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/internal/services/dataprotection/azurerm_data_protection_backup_policy_kubernetes_cluster_resource.go b/internal/services/dataprotection/azurerm_data_protection_backup_policy_kubernetes_cluster_resource.go index 1f8a6930332f..8dac4e1a9cf5 100644 --- a/internal/services/dataprotection/azurerm_data_protection_backup_policy_kubernetes_cluster_resource.go +++ b/internal/services/dataprotection/azurerm_data_protection_backup_policy_kubernetes_cluster_resource.go @@ -399,12 +399,15 @@ func expandBackupPolicyKubernetesClusterAzureBackupRuleArray(input []string, tim func expandBackupPolicyKubernetesClusterDefaultRetentionRule(input []DefaultRetentionRule) backuppolicies.BasePolicyRule { results := backuppolicies.AzureRetentionRule{} - for _, item := range input { - lifeCycle := expandBackupPolicyKubernetesClusterLifeCycle(item.LifeCycle) - results.Name = "Default" - results.IsDefault = pointer.To(true) - results.Lifecycles = lifeCycle + if len(input) == 0 { + return results } + + lifeCycle := expandBackupPolicyKubernetesClusterLifeCycle(input[0].LifeCycle) + results.Name = "Default" + results.IsDefault = pointer.To(true) + results.Lifecycles = lifeCycle + return results } @@ -564,9 +567,7 @@ func flattenBackupPolicyKubernetesClusterRetentionRules(input *[]backuppolicies. 
for _, item := range *input { if backupRule, ok := item.(backuppolicies.AzureBackupRule); ok { if trigger, ok := backupRule.Trigger.(backuppolicies.ScheduleBasedTriggerContext); ok { - if trigger.TaggingCriteria != nil { - taggingCriterias = trigger.TaggingCriteria - } + taggingCriterias = trigger.TaggingCriteria } } } @@ -582,6 +583,7 @@ func flattenBackupPolicyKubernetesClusterRetentionRules(input *[]backuppolicies. if strings.EqualFold(criteria.TagInfo.TagName, name) { taggingPriority = int(criteria.TaggingPriority) taggingCriteria = flattenBackupPolicyKubernetesClusterBackupCriteriaArray(criteria.Criteria) + break } } From 3c757cd11a1d82003e2c1100551d7b15b3d18f94 Mon Sep 17 00:00:00 2001 From: "Elena Xin (Centific Technologies Inc)" Date: Thu, 1 Feb 2024 18:17:05 +0800 Subject: [PATCH 5/5] update code --- ...ckup_policy_kubernetes_cluster_resource.go | 21 +++++++++---------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/internal/services/dataprotection/azurerm_data_protection_backup_policy_kubernetes_cluster_resource.go b/internal/services/dataprotection/azurerm_data_protection_backup_policy_kubernetes_cluster_resource.go index 8dac4e1a9cf5..3073a7392555 100644 --- a/internal/services/dataprotection/azurerm_data_protection_backup_policy_kubernetes_cluster_resource.go +++ b/internal/services/dataprotection/azurerm_data_protection_backup_policy_kubernetes_cluster_resource.go @@ -293,7 +293,9 @@ func (r DataProtectionBackupPolicyKubernatesClusterResource) Create() sdk.Resour policyRules := make([]backuppolicies.BasePolicyRule, 0) policyRules = append(policyRules, expandBackupPolicyKubernetesClusterAzureBackupRuleArray(model.BackupRepeatingTimeIntervals, model.TimeZone, taggingCriteria)...) - policyRules = append(policyRules, expandBackupPolicyKubernetesClusterDefaultRetentionRule(model.DefaultRetentionRule)) + if v := expandBackupPolicyKubernetesClusterDefaultRetentionRule(model.DefaultRetentionRule); v != nil { + policyRules = append(policyRules, pointer.From(v)) + } policyRules = append(policyRules, expandBackupPolicyKubernetesClusterAzureRetentionRules(model.RetentionRule)...) parameters := backuppolicies.BaseBackupPolicyResource{ @@ -397,18 +399,15 @@ func expandBackupPolicyKubernetesClusterAzureBackupRuleArray(input []string, tim return results } -func expandBackupPolicyKubernetesClusterDefaultRetentionRule(input []DefaultRetentionRule) backuppolicies.BasePolicyRule { - results := backuppolicies.AzureRetentionRule{} +func expandBackupPolicyKubernetesClusterDefaultRetentionRule(input []DefaultRetentionRule) *backuppolicies.AzureRetentionRule { if len(input) == 0 { - return results + return nil + } + return &backuppolicies.AzureRetentionRule{ + Name: "Default", + IsDefault: pointer.To(true), + Lifecycles: expandBackupPolicyKubernetesClusterLifeCycle(input[0].LifeCycle), } - - lifeCycle := expandBackupPolicyKubernetesClusterLifeCycle(input[0].LifeCycle) - results.Name = "Default" - results.IsDefault = pointer.To(true) - results.Lifecycles = lifeCycle - - return results } func expandBackupPolicyKubernetesClusterAzureRetentionRules(input []RetentionRule) []backuppolicies.BasePolicyRule {