diff --git a/go.mod b/go.mod
index 1af90fb0..03408d1b 100644
--- a/go.mod
+++ b/go.mod
@@ -19,6 +19,7 @@ require (
 	// github.com/nutanix-core/ntnx-api-golang-sdk-internal/vmm-go-client/v16 v16.9.0-9781
 	// github.com/nutanix-core/ntnx-api-golang-sdk-internal/volumes-go-client/v16 v16.9.0-9552
 	github.com/nutanix/ntnx-api-golang-clients/clustermgmt-go-client/v4 v4.0.1
+	github.com/nutanix/ntnx-api-golang-clients/datapolicies-go-client/v4 v4.0.1
 	github.com/nutanix/ntnx-api-golang-clients/dataprotection-go-client/v4 v4.0.1
 	github.com/nutanix/ntnx-api-golang-clients/iam-go-client/v4 v4.0.1
 	github.com/nutanix/ntnx-api-golang-clients/microseg-go-client/v4 v4.0.1
diff --git a/go.sum b/go.sum
index f33583c3..50db4e99 100644
--- a/go.sum
+++ b/go.sum
@@ -458,6 +458,8 @@ github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce h1:RPclfga2SEJmgMmz2k
 github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce/go.mod h1:uFMI8w+ref4v2r9jz+c9i1IfIttS/OkmLfrk1jne5hs=
 github.com/nutanix/ntnx-api-golang-clients/clustermgmt-go-client/v4 v4.0.1 h1:OmOuXNY2DSsR4GUwECV2N6YK5OywXjwEFQSZou6x2HQ=
 github.com/nutanix/ntnx-api-golang-clients/clustermgmt-go-client/v4 v4.0.1/go.mod h1:sd4Fnk6MVfEDVY+8WyRoQTmLhi2SgZ3riySWErVHf8E=
+github.com/nutanix/ntnx-api-golang-clients/datapolicies-go-client/v4 v4.0.1 h1:hxPJFARcKD2G9WrPhfX8YlzSoL73/MtWdtIfEQopnw4=
+github.com/nutanix/ntnx-api-golang-clients/datapolicies-go-client/v4 v4.0.1/go.mod h1:rucOCp3ocrHS9juBpJGQYPfftCiTlI4fXvy5dirKlz8=
 github.com/nutanix/ntnx-api-golang-clients/dataprotection-go-client/v4 v4.0.1 h1:IaTTfK9Uy7uUE70b3h8G55/p6LHTwt2lHU33subKyJQ=
 github.com/nutanix/ntnx-api-golang-clients/dataprotection-go-client/v4 v4.0.1/go.mod h1:S07bx9/6uUbMOY/OUJsaIUdvZ/LDaE46Kx9VX7Pt7Ek=
 github.com/nutanix/ntnx-api-golang-clients/iam-go-client/v4 v4.0.1 h1:zWbA2qtSJt0WsBcEhqqv6FQTSz8pIwBnHA5etaQg4qo=
diff --git a/nutanix/config.go b/nutanix/config.go
index 03e5eb49..9774d1b5 100644
--- a/nutanix/config.go
+++ b/nutanix/config.go
@@ -2,6 +2,7 @@ package nutanix
 
 import (
 	"fmt"
+	"github.com/terraform-providers/terraform-provider-nutanix/nutanix/sdks/v4/datapolicies"
 
 	"github.com/terraform-providers/terraform-provider-nutanix/nutanix/client"
 	era "github.com/terraform-providers/terraform-provider-nutanix/nutanix/sdks/v3/era"
@@ -111,6 +112,10 @@ func (c *Config) Client() (*Client, error) {
 	if err != nil {
 		return nil, err
 	}
+	dataPoliciesClient, err := datapolicies.NewDataPoliciesClient(configCreds)
+	if err != nil {
+		return nil, err
+	}
 	return &Client{
 		WaitTimeout:       c.WaitTimeout,
 		API:               v3Client,
@@ -126,6 +131,7 @@ func (c *Config) Client() (*Client, error) {
 		VolumeAPI:         volumeClient,
 		DataProtectionAPI: dataprotectionClient,
 		VmmAPI:            vmmClient,
+		DataPoliciesAPI:   dataPoliciesClient,
 	}, nil
 }
 
@@ -145,4 +151,5 @@ type Client struct {
 	VolumeAPI         *volumes.Client
 	DataProtectionAPI *dataprotection.Client
 	VmmAPI            *vmm.Client
+	DataPoliciesAPI   *datapolicies.Client
 }
diff --git a/nutanix/provider/provider.go b/nutanix/provider/provider.go
index cf370544..6293d4d2 100644
--- a/nutanix/provider/provider.go
+++ b/nutanix/provider/provider.go
@@ -3,6 +3,7 @@ package provider
 import (
 	"context"
 	"fmt"
+	"github.com/terraform-providers/terraform-provider-nutanix/nutanix/services/datapoliciesv2"
 	"log"
 	"strings"
 
@@ -379,6 +380,7 @@ func Provider() *schema.Provider {
 			"nutanix_recovery_points_v2":          dataprotectionv2.ResourceNutanixRecoveryPointsV2(),
 			"nutanix_recovery_point_replicate_v2": dataprotectionv2.ResourceNutanixRecoveryPointReplicateV2(),
 			"nutanix_recovery_point_restore_v2":   dataprotectionv2.ResourceNutanixRecoveryPointRestoreV2(),
+			"nutanix_protection_policy_v2":        datapoliciesv2.ResourceNutanixProtectionPoliciesV2(),
 			"nutanix_vm_revert_v2":                vmmv2.ResourceNutanixRevertVMRecoveryPointV2(),
 			"nutanix_virtual_machine_v2":          vmmv2.ResourceNutanixVirtualMachineV2(),
 			"nutanix_vm_shutdown_action_v2":       vmmv2.ResourceNutanixVmsShutdownActionV2(),
diff --git a/nutanix/sdks/v4/datapolicies/datapolicies.go b/nutanix/sdks/v4/datapolicies/datapolicies.go
new file mode 100644
index 00000000..942512d0
--- /dev/null
+++ b/nutanix/sdks/v4/datapolicies/datapolicies.go
@@ -0,0 +1,37 @@
+package datapolicies
+
+import (
+	"github.com/nutanix/ntnx-api-golang-clients/datapolicies-go-client/v4/api"
+	datapolicies "github.com/nutanix/ntnx-api-golang-clients/datapolicies-go-client/v4/client"
+	"github.com/terraform-providers/terraform-provider-nutanix/nutanix/client"
+)
+
+type Client struct {
+	APIClientInstance  *datapolicies.ApiClient
+	ProtectionPolicies *api.ProtectionPoliciesApi
+}
+
+func NewDataPoliciesClient(credentials client.Credentials) (*Client, error) {
+	var baseClient *datapolicies.ApiClient
+
+	// Configure the client only when all required credentials are present;
+	// otherwise fall back to an empty (nil) base client.
+	if credentials.Username != "" && credentials.Password != "" && credentials.Endpoint != "" {
+		pcClient := datapolicies.NewApiClient()
+
+		pcClient.Host = credentials.Endpoint
+		pcClient.Password = credentials.Password
+		pcClient.Username = credentials.Username
+		pcClient.Port = 9440
+		pcClient.VerifySSL = false
+
+		baseClient = pcClient
+	}
+
+	f := &Client{
+		APIClientInstance:  baseClient,
+		ProtectionPolicies: api.NewProtectionPoliciesApi(baseClient),
+	}
+
+	return f, nil
+}
diff --git a/nutanix/services/datapoliciesv2/helper.go b/nutanix/services/datapoliciesv2/helper.go
new file mode 100644
index 00000000..91e208e7
--- /dev/null
+++ b/nutanix/services/datapoliciesv2/helper.go
@@ -0,0 +1,10 @@
+package datapoliciesv2
+
+// expandListOfString converts a Terraform list attribute into a []string.
+func expandListOfString(list []interface{}) []string {
+	out := make([]string, len(list))
+	for i, v := range list {
+		out[i] = v.(string)
+	}
+	return out
+}
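For orientation before the resource implementation, here is a minimal sketch of how this new SDK wrapper would be exercised on its own. It is not part of the diff: the endpoint and credentials are placeholders, and the spec is deliberately incomplete (a real policy also needs the replication locations and configurations that the resource schema below builds). Only symbols introduced in this change set are used.

```go
package main

import (
	"fmt"

	"github.com/nutanix/ntnx-api-golang-clients/datapolicies-go-client/v4/models/datapolicies/v4/config"
	prism "github.com/nutanix/ntnx-api-golang-clients/datapolicies-go-client/v4/models/prism/v4/config"
	"github.com/terraform-providers/terraform-provider-nutanix/nutanix/client"
	"github.com/terraform-providers/terraform-provider-nutanix/nutanix/sdks/v4/datapolicies"
	"github.com/terraform-providers/terraform-provider-nutanix/utils"
)

func main() {
	// Placeholder credentials; NewDataPoliciesClient reads exactly these three fields.
	creds := client.Credentials{
		Endpoint: "pc.example.com",
		Username: "admin",
		Password: "secret",
	}

	dp, err := datapolicies.NewDataPoliciesClient(creds)
	if err != nil {
		panic(err)
	}

	// Build a (deliberately incomplete) policy spec; the Terraform resource
	// below populates replication locations/configurations the same way.
	spec := config.NewProtectionPolicy()
	spec.Name = utils.StringPtr("example-policy")

	resp, err := dp.ProtectionPolicies.CreateProtectionPolicy(spec)
	if err != nil {
		panic(err)
	}

	// The create call is asynchronous and returns a prism task reference,
	// which the resource polls to completion.
	task := resp.Data.GetValue().(prism.TaskReference)
	fmt.Println("task ext_id:", utils.StringValue(task.ExtId))
}
```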
diff --git a/nutanix/services/datapoliciesv2/resource_nutanix_protection_policies_v2.go b/nutanix/services/datapoliciesv2/resource_nutanix_protection_policies_v2.go
new file mode 100644
index 00000000..15cea9d7
--- /dev/null
+++ b/nutanix/services/datapoliciesv2/resource_nutanix_protection_policies_v2.go
@@ -0,0 +1,571 @@
+package datapoliciesv2
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"log"
+
+	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
+	"github.com/nutanix/ntnx-api-golang-clients/datapolicies-go-client/v4/models/datapolicies/v4/config"
+	"github.com/nutanix/ntnx-api-golang-clients/datapolicies-go-client/v4/models/dataprotection/v4/common"
+	prism "github.com/nutanix/ntnx-api-golang-clients/datapolicies-go-client/v4/models/prism/v4/config"
+	import2 "github.com/nutanix/ntnx-api-golang-clients/prism-go-client/v4/models/prism/v4/config"
+
+	conns "github.com/terraform-providers/terraform-provider-nutanix/nutanix"
+	prismSdk "github.com/terraform-providers/terraform-provider-nutanix/nutanix/sdks/v4/prism"
+	"github.com/terraform-providers/terraform-provider-nutanix/utils"
+)
+
+func ResourceNutanixProtectionPoliciesV2() *schema.Resource {
+	return &schema.Resource{
+		CreateContext: ResourceNutanixProtectionPoliciesV2Create,
+		ReadContext:   ResourceNutanixProtectionPoliciesV2Read,
+		UpdateContext: ResourceNutanixProtectionPoliciesV2Update,
+		DeleteContext: ResourceNutanixProtectionPoliciesV2Delete,
+		Schema: map[string]*schema.Schema{
+			"ext_id": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"name": {
+				Type:     schema.TypeString,
+				Required: true,
+			},
+			"description": {
+				Type:     schema.TypeString,
+				Optional: true,
+				Computed: true,
+			},
+			"replication_locations": {
+				Type:     schema.TypeList,
+				Required: true,
+				MaxItems: 3, //nolint:gomnd
+				Elem:     schemaReplicationLocations(),
+			},
+			"replication_configurations": {
+				Type:     schema.TypeList,
+				Required: true,
+				MaxItems: 9, //nolint:gomnd
+				Elem:     schemaReplicationConfigurations(),
+			},
+			"category_ids": {
+				Type:     schema.TypeList,
+				Required: true,
+				MaxItems: 10, //nolint:gomnd
+				Elem: &schema.Schema{
+					Type: schema.TypeString,
+				},
+			},
+		},
+	}
+}
+
+func ResourceNutanixProtectionPoliciesV2Create(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+	conn := meta.(*conns.Client).DataPoliciesAPI
+
+	bodySpec := config.NewProtectionPolicy()
+
+	if name, ok := d.GetOk("name"); ok {
+		bodySpec.Name = utils.StringPtr(name.(string))
+	}
+	if description, ok := d.GetOk("description"); ok {
+		bodySpec.Description = utils.StringPtr(description.(string))
+	}
+	if replicationLocations, ok := d.GetOk("replication_locations"); ok {
+		bodySpec.ReplicationLocations = expandReplicationLocations(replicationLocations.([]interface{}))
+	}
+	if replicationConfigurations, ok := d.GetOk("replication_configurations"); ok {
+		bodySpec.ReplicationConfigurations = expandReplicationConfigurations(replicationConfigurations.([]interface{}))
+	}
+	if categoryIds, ok := d.GetOk("category_ids"); ok {
+		bodySpec.CategoryIds = expandListOfString(categoryIds.([]interface{}))
+	}
+
+	aJSON, _ := json.MarshalIndent(bodySpec, "", "  ")
+	log.Printf("[DEBUG] Create Protection Policy Body Spec: %s", string(aJSON))
+
+	resp, err := conn.ProtectionPolicies.CreateProtectionPolicy(bodySpec)
+	if err != nil {
+		return diag.Errorf("error while creating Protection Policy: %v", err)
+	}
+
+	taskRef := resp.Data.GetValue().(prism.TaskReference)
+	taskUUID := taskRef.ExtId
+
+	taskconn := meta.(*conns.Client).PrismAPI
+	// Wait for the protection policy create task to complete
+	stateConf := &resource.StateChangeConf{
+		Pending: []string{"QUEUED", "RUNNING", "PENDING"},
+		Target:  []string{"SUCCEEDED"},
+		Refresh: taskStateRefreshPrismTaskGroupFunc(ctx, taskconn, utils.StringValue(taskUUID)),
+		Timeout: d.Timeout(schema.TimeoutCreate),
+	}
+
+	if _, errWaitTask := stateConf.WaitForStateContext(ctx); errWaitTask != nil {
+		return diag.Errorf("error waiting for Protection Policy (%s) to create: %s", utils.StringValue(taskUUID), errWaitTask)
+	}
+
+	// Fetch the UUID of the new policy from the task's affected entities
+	resourceUUID, err := taskconn.TaskRefAPI.GetTaskById(taskUUID, nil)
+	if err != nil {
+		return diag.Errorf("error while fetching Protection Policy create task (%s): %v", utils.StringValue(taskUUID), err)
+	}
+	rUUID := resourceUUID.Data.GetValue().(import2.Task)
+	aJSON, _ = json.MarshalIndent(rUUID, "", "  ")
+	log.Printf("[DEBUG] Create Protection Policy Task Response Details: %s", string(aJSON))
+
+	if len(rUUID.EntitiesAffected) == 0 {
+		return diag.Errorf("no entities affected by Protection Policy create task (%s)", utils.StringValue(taskUUID))
+	}
+	uuid := rUUID.EntitiesAffected[0].ExtId
+	d.SetId(*uuid)
+	d.Set("ext_id", *uuid)
+
+	return ResourceNutanixProtectionPoliciesV2Read(ctx, d, meta)
+}
+
+func ResourceNutanixProtectionPoliciesV2Read(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+	return nil
+}
+
+func ResourceNutanixProtectionPoliciesV2Update(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+	return ResourceNutanixProtectionPoliciesV2Read(ctx, d, meta)
+}
+
+func ResourceNutanixProtectionPoliciesV2Delete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
+	return nil
+}
+
+// schemas funcs
+func schemaReplicationLocations() *schema.Resource {
+	return &schema.Resource{
+		Schema: map[string]*schema.Schema{
+			"label": {
+				Type:     schema.TypeString,
+				Required: true,
+			},
+			"domain_manager_ext_id": {
+				Type:     schema.TypeString,
+				Required: true,
+			},
+			"replication_sub_location": {
+				Type:     schema.TypeList,
+				Required: true,
+				MaxItems: 1, //nolint:gomnd
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"cluster_ext_ids": {
+							Type:     schema.TypeList,
+							Required: true,
+							MinItems: 1,   //nolint:gomnd
+							MaxItems: 200, //nolint:gomnd
+							Elem: &schema.Schema{
+								Type: schema.TypeString,
+							},
+						},
+					},
+				},
+			},
+			"is_primary": {
+				Type:     schema.TypeBool,
+				Optional: true,
+			},
+		},
+	}
+}
+
+func schemaReplicationConfigurations() *schema.Resource {
+	return &schema.Resource{
+		Schema: map[string]*schema.Schema{
+			"source_location_label": {
+				Type:     schema.TypeString,
+				Required: true,
+			},
+			"remote_location_label": {
+				Type:     schema.TypeString,
+				Optional: true,
+			},
+			"schedule": {
+				Type:     schema.TypeList,
+				Required: true,
+				MaxItems: 1, //nolint:gomnd
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"recovery_point_type": {
+							Type:         schema.TypeString,
+							Optional:     true,
+							ValidateFunc: validation.StringInSlice([]string{"CRASH_CONSISTENT", "APPLICATION_CONSISTENT"}, false),
+						},
+						"recovery_point_objective_time_seconds": {
+							Type:         schema.TypeInt,
+							Optional:     true,
+							ValidateFunc: validation.IntAtLeast(0),
+						},
+						"retention": {
+							Type:     schema.TypeList,
+							Optional: true,
+							MaxItems: 1, //nolint:gomnd
+							Elem: &schema.Resource{
+								Schema: map[string]*schema.Schema{
+									"linear_retention": {
+										Type:     schema.TypeList,
+										Optional: true,
+										MaxItems: 1, //nolint:gomnd
+										Elem: &schema.Resource{
+											Schema: map[string]*schema.Schema{
+												"local": {
+													Type:     schema.TypeInt,
+													Required: true,
+												},
+												"remote": {
+													Type:     schema.TypeInt,
+													Optional: true,
+												},
+											},
+										},
+									},
+									"auto_rollup_retention": {
+										Type:     schema.TypeList,
+										Optional: true,
+										MaxItems: 1, //nolint:gomnd
+										Elem: &schema.Resource{
+											Schema: map[string]*schema.Schema{
+												"local": {
+													Type:     schema.TypeList,
+													Required: true,
+													MaxItems: 1, //nolint:gomnd
+													Elem: &schema.Resource{
+														Schema: map[string]*schema.Schema{
+															"snapshot_interval_type": {
+																Type:         schema.TypeString,
+																Optional:     true,
+																ValidateFunc: validation.StringInSlice([]string{"YEARLY", "WEEKLY", "DAILY", "MONTHLY", "HOURLY"}, false),
+															},
+															"frequency": {
+																Type:         schema.TypeInt,
+																Required:     true,
+																ValidateFunc: validation.IntBetween(1, 24),
+															},
+														},
+													},
+												},
+												"remote": {
+													Type:     schema.TypeList,
+													Required: true,
+													MaxItems: 1, //nolint:gomnd
+													Elem: &schema.Resource{
+														Schema: map[string]*schema.Schema{
+															"snapshot_interval_type": {
+																Type:         schema.TypeString,
+																Optional:     true,
+																ValidateFunc: validation.StringInSlice([]string{"YEARLY", "WEEKLY", "DAILY", "MONTHLY", "HOURLY"}, false),
+															},
+															"frequency": {
+																Type:         schema.TypeInt,
+																Required:     true,
+																ValidateFunc: validation.IntBetween(1, 24),
+															},
+														},
+													},
+												},
+											},
+										},
+									},
+								},
+							},
+						},
+						"start_time": {
+							Type:     schema.TypeString,
+							Optional: true,
+						},
+						"sync_replication_auto_suspend_timeout_seconds": {
+							Type:         schema.TypeInt,
+							Optional:     true,
+							ValidateFunc: validation.IntAtMost(300),
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
+// expander funcs
+func expandReplicationLocations(replicationLocations []interface{}) []config.ReplicationLocation {
+	if len(replicationLocations) == 0 {
+		return nil
+	}
+
+	replicationLocationsSpec := make([]config.ReplicationLocation, 0)
+
+	for _, replicationLocation := range replicationLocations {
+		replicationLocationVal := replicationLocation.(map[string]interface{})
+
+		replicationLocationSpec := config.NewReplicationLocation()
+		if label, ok := replicationLocationVal["label"]; ok {
+			replicationLocationSpec.Label = utils.StringPtr(label.(string))
+		}
+		if domainManagerExtId, ok := replicationLocationVal["domain_manager_ext_id"]; ok {
+			replicationLocationSpec.DomainManagerExtId = utils.StringPtr(domainManagerExtId.(string))
+		}
+		if replicationSubLocation, ok := replicationLocationVal["replication_sub_location"]; ok {
+			replicationLocationSpec.ReplicationSubLocation = expandOneOfReplicationLocationReplicationSubLocation(replicationSubLocation.([]interface{}))
+		}
+		if isPrimary, ok := replicationLocationVal["is_primary"]; ok {
+			replicationLocationSpec.IsPrimary = utils.BoolPtr(isPrimary.(bool))
+		}
+		replicationLocationsSpec = append(replicationLocationsSpec, *replicationLocationSpec)
+	}
+
+	return replicationLocationsSpec
+}
+
+func expandOneOfReplicationLocationReplicationSubLocation(oneOfReplicationLocationReplicationSubLocations []interface{}) *config.OneOfReplicationLocationReplicationSubLocation {
+	if len(oneOfReplicationLocationReplicationSubLocations) == 0 {
+		return nil
+	}
+
+	oneOfReplicationLocationReplicationSubLocationI := oneOfReplicationLocationReplicationSubLocations[0]
+	oneOfReplicationLocationReplicationSubLocationVal := oneOfReplicationLocationReplicationSubLocationI.(map[string]interface{})
+
+	oneOfReplicationLocationReplicationSubLocationSpec := config.NewOneOfReplicationLocationReplicationSubLocation()
+
+	nutanixCluster := config.NewNutanixCluster()
+
+	if clusterExtIds, ok := oneOfReplicationLocationReplicationSubLocationVal["cluster_ext_ids"]; ok {
+		nutanixCluster.ClusterExtIds = expandListOfString(clusterExtIds.([]interface{}))
+	}
+
+	err := oneOfReplicationLocationReplicationSubLocationSpec.SetValue(*nutanixCluster)
+	if err != nil {
+		log.Printf("[ERROR] Error while setting value for OneOfReplicationLocationReplicationSubLocation: %v", err)
+		return nil
+	}
+
+	return oneOfReplicationLocationReplicationSubLocationSpec
+}
+
+func expandReplicationConfigurations(replicationConfigurationsData []interface{}) []config.ReplicationConfiguration {
+	if len(replicationConfigurationsData) == 0 {
+		return nil
+	}
+
+	replicationConfigurations := make([]config.ReplicationConfiguration, 0)
+
+	for _, replicationConfigurationData := range replicationConfigurationsData {
+		replicationConfigurationDataMap := replicationConfigurationData.(map[string]interface{})
+
+		replicationConfiguration := config.ReplicationConfiguration{}
+		if sourceLocationLabel, ok := replicationConfigurationDataMap["source_location_label"]; ok {
+			replicationConfiguration.SourceLocationLabel = utils.StringPtr(sourceLocationLabel.(string))
+		}
+		if remoteLocationLabel, ok := replicationConfigurationDataMap["remote_location_label"]; ok {
+			replicationConfiguration.RemoteLocationLabel = utils.StringPtr(remoteLocationLabel.(string))
+		}
+		if schedule, ok := replicationConfigurationDataMap["schedule"]; ok {
+			replicationConfiguration.Schedule = expandSchedule(schedule.([]interface{}))
+		}
+		replicationConfigurations = append(replicationConfigurations, replicationConfiguration)
+	}
+
+	return replicationConfigurations
+}
+
+func expandSchedule(scheduleData []interface{}) *config.Schedule {
+	if len(scheduleData) == 0 {
+		return nil
+	}
+
+	scheduleDataMap := scheduleData[0].(map[string]interface{})
+
+	schedule := config.NewSchedule()
+	if recoveryPointType, ok := scheduleDataMap["recovery_point_type"]; ok {
+		schedule.RecoveryPointType = expandRecoveryPointType(recoveryPointType.(string))
+	}
+	if recoveryPointObjectiveTimeSeconds, ok := scheduleDataMap["recovery_point_objective_time_seconds"]; ok {
+		schedule.RecoveryPointObjectiveTimeSeconds = utils.IntPtr(recoveryPointObjectiveTimeSeconds.(int))
+	}
+	if retention, ok := scheduleDataMap["retention"]; ok {
+		schedule.Retention = expandRetention(retention.([]interface{}))
+	}
+	if startTime, ok := scheduleDataMap["start_time"]; ok {
+		schedule.StartTime = utils.StringPtr(startTime.(string))
+	}
+	if syncReplicationAutoSuspendTimeoutSeconds, ok := scheduleDataMap["sync_replication_auto_suspend_timeout_seconds"]; ok {
+		schedule.SyncReplicationAutoSuspendTimeoutSeconds = utils.IntPtr(syncReplicationAutoSuspendTimeoutSeconds.(int))
+	}
+
+	return schedule
+}
+
+func expandRecoveryPointType(recoveryPointType string) *common.RecoveryPointType {
+	if recoveryPointType == "" {
+		return nil
+	}
+
+	// Values follow the RecoveryPointType enum ordinals in the v4 SDK.
+	const CrashConsistent, ApplicationConsistent = 2, 3
+	switch recoveryPointType {
+	case "CRASH_CONSISTENT":
+		p := common.RecoveryPointType(CrashConsistent)
+		return &p
+	case "APPLICATION_CONSISTENT":
+		p := common.RecoveryPointType(ApplicationConsistent)
+		return &p
+	}
+	return nil
+}
+
+func expandRetention(retention []interface{}) *config.OneOfScheduleRetention {
+	if len(retention) == 0 {
+		return nil
+	}
+
+	retentionData := retention[0].(map[string]interface{})
+
+	retentionSpec := config.NewOneOfScheduleRetention()
+
+	if linearRetention, ok := retentionData["linear_retention"]; ok && len(linearRetention.([]interface{})) > 0 {
+		linearRetentionSpec := expandLinearRetention(linearRetention.([]interface{}))
+		err := retentionSpec.SetValue(*linearRetentionSpec)
+		if err != nil {
+			log.Printf("[ERROR] Error while setting value for LinearRetention: %v", err)
+			return nil
+		}
+	} else if autoRollupRetention, ok := retentionData["auto_rollup_retention"]; ok && len(autoRollupRetention.([]interface{})) > 0 {
+		autoRollupRetentionSpec := expandAutoRollupRetention(autoRollupRetention.([]interface{}))
+		err := retentionSpec.SetValue(*autoRollupRetentionSpec)
+		if err != nil {
+			log.Printf("[ERROR] Error while setting value for AutoRollupRetention: %v", err)
+			return nil
+		}
+	}
+
+	return retentionSpec
+}
+
+func expandLinearRetention(linearRetentionData []interface{}) *config.LinearRetention {
+	if len(linearRetentionData) == 0 {
+		return nil
+	}
+
+	linearRetentionDataMap := linearRetentionData[0].(map[string]interface{})
+
+	linearRetention := config.NewLinearRetention()
+	if local, ok := linearRetentionDataMap["local"]; ok {
+		linearRetention.Local = utils.IntPtr(local.(int))
+	}
+	if remote, ok := linearRetentionDataMap["remote"]; ok {
+		linearRetention.Remote = utils.IntPtr(remote.(int))
+	}
+
+	return linearRetention
+}
+
+func expandAutoRollupRetention(autoRollupRetentionData []interface{}) *config.AutoRollupRetention {
+	if len(autoRollupRetentionData) == 0 {
+		return nil
+	}
+
+	autoRollupRetentionDataMap := autoRollupRetentionData[0].(map[string]interface{})
+
+	autoRollupRetention := config.NewAutoRollupRetention()
+	if local, ok := autoRollupRetentionDataMap["local"]; ok {
+		autoRollupRetention.Local = expandAutoRollupRetentionDetails(local.([]interface{}))
+	}
+	if remote, ok := autoRollupRetentionDataMap["remote"]; ok {
+		autoRollupRetention.Remote = expandAutoRollupRetentionDetails(remote.([]interface{}))
+	}
+
+	return autoRollupRetention
+}
+
+func expandAutoRollupRetentionDetails(autoRollupRetentionLocal []interface{}) *config.AutoRollupRetentionDetails {
+	if len(autoRollupRetentionLocal) == 0 {
+		return nil
+	}
+
+	autoRollupRetentionLocalDataMap := autoRollupRetentionLocal[0].(map[string]interface{})
+
+	autoRollupRetentionLocalSpec := config.AutoRollupRetentionDetails{}
+	if snapshotIntervalType, ok := autoRollupRetentionLocalDataMap["snapshot_interval_type"]; ok {
+		autoRollupRetentionLocalSpec.SnapshotIntervalType = expandSnapshotIntervalType(snapshotIntervalType.(string))
+	}
+	if frequency, ok := autoRollupRetentionLocalDataMap["frequency"]; ok {
+		autoRollupRetentionLocalSpec.Frequency = utils.IntPtr(frequency.(int))
+	}
+
+	return &autoRollupRetentionLocalSpec
+}
+
+func expandSnapshotIntervalType(snapshotIntervalType string) *config.SnapshotIntervalType {
+	if snapshotIntervalType == "" {
+		return nil
+	}
+
+	// Values follow the SnapshotIntervalType enum ordinals in the v4 SDK.
+	const HOURLY, DAILY, WEEKLY, MONTHLY, YEARLY = 2, 3, 4, 5, 6
+	switch snapshotIntervalType {
+	case "YEARLY":
+		p := config.SnapshotIntervalType(YEARLY)
+		return &p
+	case "WEEKLY":
+		p := config.SnapshotIntervalType(WEEKLY)
+		return &p
+	case "DAILY":
+		p := config.SnapshotIntervalType(DAILY)
+		return &p
+	case "MONTHLY":
+		p := config.SnapshotIntervalType(MONTHLY)
+		return &p
+	case "HOURLY":
+		p := config.SnapshotIntervalType(HOURLY)
+		return &p
+	}
+	return nil
+}
+
+func taskStateRefreshPrismTaskGroupFunc(ctx context.Context, client *prismSdk.Client, taskUUID string) resource.StateRefreshFunc {
+	return func() (interface{}, string, error) {
+		vresp, err := client.TaskRefAPI.GetTaskById(utils.StringPtr(taskUUID), nil)
+		if err != nil {
+			return "", "", fmt.Errorf("error while polling prism task: %v", err)
+		}
+
+		v := vresp.Data.GetValue().(import2.Task)
+
+		status := getTaskStatus(v.Status)
+		if status == "CANCELED" || status == "FAILED" {
+			errMsg := "no error details returned"
+			if len(v.ErrorMessages) > 0 {
+				errMsg = utils.StringValue(v.ErrorMessages[0].Message)
+			}
+			return v, status, fmt.Errorf("error_detail: %s, progress_percentage: %d", errMsg, utils.IntValue(v.ProgressPercentage))
+		}
+		return v, status, nil
+	}
+}
+
+func getTaskStatus(pr *import2.TaskStatus) string {
+	// Values follow the prism TaskStatus enum ordinals in the v4 SDK.
+	const queued, running, succeeded, failed, canceled = 2, 3, 5, 6, 7
+	if pr == nil {
+		return "UNKNOWN"
+	}
+	switch *pr {
+	case import2.TaskStatus(queued):
+		return "QUEUED"
+	case import2.TaskStatus(running):
+		return "RUNNING"
+	case import2.TaskStatus(succeeded):
+		return "SUCCEEDED"
+	case import2.TaskStatus(failed):
+		return "FAILED"
+	case import2.TaskStatus(canceled):
+		return "CANCELED"
+	}
+	return "UNKNOWN"
+}
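Both getTaskStatus and the expand helpers for enum fields depend on the SDK's integer enum ordinals, which can drift silently across SDK upgrades. A small table-driven test such as the hypothetical sketch below (a suggested companion file, not part of this diff) would pin that mapping:

```go
package datapoliciesv2

import (
	"testing"

	import2 "github.com/nutanix/ntnx-api-golang-clients/prism-go-client/v4/models/prism/v4/config"
)

// TestGetTaskStatus pins the ordinal-to-string mapping that the task
// polling loop in taskStateRefreshPrismTaskGroupFunc relies on.
func TestGetTaskStatus(t *testing.T) {
	cases := map[int]string{
		2:  "QUEUED",
		3:  "RUNNING",
		5:  "SUCCEEDED",
		6:  "FAILED",
		7:  "CANCELED",
		99: "UNKNOWN", // any unmapped ordinal falls through
	}
	for ordinal, want := range cases {
		status := import2.TaskStatus(ordinal)
		if got := getTaskStatus(&status); got != want {
			t.Errorf("ordinal %d: got %s, want %s", ordinal, got, want)
		}
	}
	// A nil status pointer must also degrade gracefully.
	if got := getTaskStatus(nil); got != "UNKNOWN" {
		t.Errorf("nil status: got %s, want UNKNOWN", got)
	}
}
```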