From 7365db50f5fc275c7ed5526cace0edb734ba979a Mon Sep 17 00:00:00 2001
From: awsaxeman <34073510+awsaxeman@users.noreply.github.com>
Date: Wed, 15 Dec 2021 11:54:10 -0500
Subject: [PATCH 01/12] add openzfs file system

---
 internal/provider/provider.go               |   1 +
 internal/service/fsx/openzfs_file_system.go | 844 ++++++++++++++++++++
 2 files changed, 845 insertions(+)
 create mode 100644 internal/service/fsx/openzfs_file_system.go

diff --git a/internal/provider/provider.go b/internal/provider/provider.go
index 6f7d62dc80f..4c952674431 100644
--- a/internal/provider/provider.go
+++ b/internal/provider/provider.go
@@ -1218,6 +1218,7 @@ func Provider() *schema.Provider {
 			"aws_fsx_ontap_file_system":             fsx.ResourceOntapFileSystem(),
 			"aws_fsx_ontap_storage_virtual_machine": fsx.ResourceOntapStorageVirtualMachine(),
 			"aws_fsx_ontap_volume":                  fsx.ResourceOntapVolume(),
+			"aws_fsx_openzfs_file_system":           fsx.ResourceOpenzfsFileSystem(),
 			"aws_fsx_windows_file_system":           fsx.ResourceWindowsFileSystem(),
 
 			"aws_gamelift_alias": gamelift.ResourceAlias(),
diff --git a/internal/service/fsx/openzfs_file_system.go b/internal/service/fsx/openzfs_file_system.go
new file mode 100644
index 00000000000..74ab7a7d754
--- /dev/null
+++ b/internal/service/fsx/openzfs_file_system.go
@@ -0,0 +1,844 @@
+package fsx
+
+import (
+	"fmt"
+	"log"
+	"regexp"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/fsx"
+	"github.com/hashicorp/aws-sdk-go-base/tfawserr"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
+	"github.com/hashicorp/terraform-provider-aws/internal/conns"
+	"github.com/hashicorp/terraform-provider-aws/internal/flex"
+	tftags "github.com/hashicorp/terraform-provider-aws/internal/tags"
+	"github.com/hashicorp/terraform-provider-aws/internal/tfresource"
+	"github.com/hashicorp/terraform-provider-aws/internal/verify"
+)
+
+func ResourceOpenzfsFileSystem() *schema.Resource {
+	return &schema.Resource{
+		Create: resourceOpenzfsFileSystemCreate,
+		Read:   resourceOpenzfsFileSystemRead,
+		Update: resourceOpenzfsFileSystemUpdate,
+		Delete: resourceOpenzfsFileSystemDelete,
+		Importer: &schema.ResourceImporter{
+			State: schema.ImportStatePassthrough,
+		},
+
+		Timeouts: &schema.ResourceTimeout{
+			Create: schema.DefaultTimeout(60 * time.Minute),
+			Update: schema.DefaultTimeout(60 * time.Minute),
+			Delete: schema.DefaultTimeout(60 * time.Minute),
+		},
+
+		Schema: map[string]*schema.Schema{
+			"arn": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"automatic_backup_retention_days": {
+				Type:         schema.TypeInt,
+				Optional:     true,
+				Default:      0,
+				ValidateFunc: validation.IntBetween(0, 90),
+			},
+			"backup_id": {
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+			},
+			"copy_tags_to_backups": {
+				Type:     schema.TypeBool,
+				Optional: true,
+				Default:  false,
+			},
+			"copy_tags_to_volumes": {
+				Type:     schema.TypeBool,
+				Optional: true,
+				Default:  false,
+			},
+			"daily_automatic_backup_start_time": {
+				Type:     schema.TypeString,
+				Optional: true,
+				Computed: true,
+				ValidateFunc: validation.All(
+					validation.StringLenBetween(5, 5),
+					validation.StringMatch(regexp.MustCompile(`^([01]\d|2[0-3]):?([0-5]\d)$`), "must be in the format HH:MM"),
+				),
+			},
+			"deployment_type": {
+				Type:         schema.TypeString,
+				Required:     true,
+				ForceNew:     true,
+				ValidateFunc: validation.StringInSlice(fsx.OpenZFSDeploymentType_Values(), false),
+			},
+			"disk_iops_configuration": {
+				Type:
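+				// With mode AUTOMATIC (the default), FSx provisions 3 SSD IOPS per GiB of
+				// storage, so a 64 GiB file system gets 192 IOPS (the basic acceptance
+				// test below asserts exactly that); with USER_PROVISIONED, "iops" is set
+				// explicitly, up to the 160000 cap validated below.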
schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "iops": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntBetween(0, 160000), + }, + "mode": { + Type: schema.TypeString, + Optional: true, + Default: fsx.DiskIopsConfigurationModeAutomatic, + ValidateFunc: validation.StringInSlice(fsx.DiskIopsConfigurationMode_Values(), false), + }, + }, + }, + }, + "root_volume_configuration": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "copy_tags_to_snapshots": { + Type: schema.TypeBool, + Optional: true, + }, + "data_compression_type": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice(fsx.DataCompressionType_Values(), false), + }, + "nfs_exports": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "clinet_configurations": { + Type: schema.TypeList, + Required: true, + MaxItems: 25, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "clients": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 128), + validation.StringMatch(regexp.MustCompile(`^[ -~]{1,128}$`), "must be either IP Address or CIDR"), + ), + }, + "options": { + Type: schema.TypeList, + Required: true, + MinItems: 1, + MaxItems: 20, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringLenBetween(1, 128), + }, + }, + }, + }, + }, + }, + }, + }, + "read_only": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + "user_and_group_quotas": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 100, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(0, 2147483647), + }, + "storage_capacity_quota_gib": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(0, 2147483647), + }, + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(fsx.OpenZFSQuotaType_Values(), false), + }, + }, + }, + }, + }, + }, + }, + "dns_name": { + Type: schema.TypeString, + Computed: true, + }, + "kms_key_id": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + ValidateFunc: verify.ValidARN, + }, + "network_interface_ids": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "owner_id": { + Type: schema.TypeString, + Computed: true, + }, + "root_volume_id": { + Type: schema.TypeString, + Computed: true, + }, + "security_group_ids": { + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + MaxItems: 50, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "storage_capacity": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(64, 512*1024), + }, + "storage_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Default: fsx.StorageTypeSsd, + ValidateFunc: validation.StringInSlice(fsx.StorageType_Values(), false), + }, + "subnet_ids": { + Type: schema.TypeList, + Required: true, + ForceNew: true, + MinItems: 1, + MaxItems: 1, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "tags": tftags.TagsSchema(), + "tags_all": tftags.TagsSchemaComputed(), + "throughput_capacity": { + Type: 
schema.TypeInt,
+				Required:     true,
+				ForceNew:     true,
+				ValidateFunc: validation.IntInSlice([]int{64, 128, 256, 512, 1024, 2048, 3072, 4096}),
+			},
+			"vpc_id": {
+				Type:     schema.TypeString,
+				Computed: true,
+			},
+			"weekly_maintenance_start_time": {
+				Type:     schema.TypeString,
+				Optional: true,
+				Computed: true,
+				ValidateFunc: validation.All(
+					validation.StringLenBetween(7, 7),
+					validation.StringMatch(regexp.MustCompile(`^[1-7]:([01]\d|2[0-3]):?([0-5]\d)$`), "must be in the format d:HH:MM"),
+				),
+			},
+		},
+
+		CustomizeDiff: verify.SetTagsDiff,
+	}
+}
+
+func resourceOpenzfsFileSystemCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*conns.AWSClient).FSxConn
+	defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig
+	tags := defaultTagsConfig.MergeTags(tftags.New(d.Get("tags").(map[string]interface{})))
+
+	input := &fsx.CreateFileSystemInput{
+		ClientRequestToken: aws.String(resource.UniqueId()),
+		FileSystemType:     aws.String(fsx.FileSystemTypeOpenzfs),
+		StorageCapacity:    aws.Int64(int64(d.Get("storage_capacity").(int))),
+		StorageType:        aws.String(d.Get("storage_type").(string)),
+		SubnetIds:          flex.ExpandStringList(d.Get("subnet_ids").([]interface{})),
+		OpenZFSConfiguration: &fsx.CreateFileSystemOpenZFSConfiguration{
+			DeploymentType: aws.String(d.Get("deployment_type").(string)),
+		},
+	}
+
+	backupInput := &fsx.CreateFileSystemFromBackupInput{
+		ClientRequestToken: aws.String(resource.UniqueId()),
+		StorageType:        aws.String(d.Get("storage_type").(string)),
+		SubnetIds:          flex.ExpandStringList(d.Get("subnet_ids").([]interface{})),
+		OpenZFSConfiguration: &fsx.CreateFileSystemOpenZFSConfiguration{
+			DeploymentType: aws.String(d.Get("deployment_type").(string)),
+		},
+	}
+
+	if v, ok := d.GetOk("disk_iops_configuration"); ok {
+		input.OpenZFSConfiguration.DiskIopsConfiguration = expandFsxOpenzfsFileDiskIopsConfiguration(v.([]interface{}))
+		backupInput.OpenZFSConfiguration.DiskIopsConfiguration = expandFsxOpenzfsFileDiskIopsConfiguration(v.([]interface{}))
+	}
+
+	if v, ok := d.GetOk("root_volume_configuration"); ok {
+		input.OpenZFSConfiguration.RootVolumeConfiguration = expandFsxOpenzfsRootVolumeConfiguration(v.([]interface{}))
+		backupInput.OpenZFSConfiguration.RootVolumeConfiguration = expandFsxOpenzfsRootVolumeConfiguration(v.([]interface{}))
+	}
+
+	if v, ok := d.GetOk("kms_key_id"); ok {
+		input.KmsKeyId = aws.String(v.(string))
+		backupInput.KmsKeyId = aws.String(v.(string))
+	}
+
+	if v, ok := d.GetOk("automatic_backup_retention_days"); ok {
+		input.OpenZFSConfiguration.AutomaticBackupRetentionDays = aws.Int64(int64(v.(int)))
+		backupInput.OpenZFSConfiguration.AutomaticBackupRetentionDays = aws.Int64(int64(v.(int)))
+	}
+
+	if v, ok := d.GetOk("daily_automatic_backup_start_time"); ok {
+		input.OpenZFSConfiguration.DailyAutomaticBackupStartTime = aws.String(v.(string))
+		backupInput.OpenZFSConfiguration.DailyAutomaticBackupStartTime = aws.String(v.(string))
+	}
+
+	if v, ok := d.GetOk("security_group_ids"); ok {
+		input.SecurityGroupIds = flex.ExpandStringSet(v.(*schema.Set))
+		backupInput.SecurityGroupIds = flex.ExpandStringSet(v.(*schema.Set))
+	}
+
+	if len(tags) > 0 {
+		input.Tags = Tags(tags.IgnoreAWS())
+		backupInput.Tags = Tags(tags.IgnoreAWS())
+	}
+
+	if v, ok := d.GetOk("weekly_maintenance_start_time"); ok {
+		input.OpenZFSConfiguration.WeeklyMaintenanceStartTime = aws.String(v.(string))
+		backupInput.OpenZFSConfiguration.WeeklyMaintenanceStartTime = aws.String(v.(string))
+	}
+
+	if v, ok := d.GetOk("throughput_capacity"); ok {
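+		// Like the other optional arguments above, throughput is copied onto both
+		// request payloads so the create-from-backup path stays in sync with the
+		// standard create path.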
+		input.OpenZFSConfiguration.ThroughputCapacity = aws.Int64(int64(v.(int)))
+		backupInput.OpenZFSConfiguration.ThroughputCapacity = aws.Int64(int64(v.(int)))
+	}
+
+	if v, ok := d.GetOk("copy_tags_to_backups"); ok {
+		input.OpenZFSConfiguration.CopyTagsToBackups = aws.Bool(v.(bool))
+		backupInput.OpenZFSConfiguration.CopyTagsToBackups = aws.Bool(v.(bool))
+	}
+
+	if v, ok := d.GetOk("copy_tags_to_volumes"); ok {
+		input.OpenZFSConfiguration.CopyTagsToVolumes = aws.Bool(v.(bool))
+		backupInput.OpenZFSConfiguration.CopyTagsToVolumes = aws.Bool(v.(bool))
+	}
+
+	if v, ok := d.GetOk("backup_id"); ok {
+		backupInput.BackupId = aws.String(v.(string))
+
+		log.Printf("[DEBUG] Creating FSx OpenZFS File System: %s", backupInput)
+		result, err := conn.CreateFileSystemFromBackup(backupInput)
+
+		if err != nil {
+			return fmt.Errorf("error creating FSx OpenZFS File System from backup: %w", err)
+		}
+
+		d.SetId(aws.StringValue(result.FileSystem.FileSystemId))
+	} else {
+		log.Printf("[DEBUG] Creating FSx OpenZFS File System: %s", input)
+		result, err := conn.CreateFileSystem(input)
+
+		if err != nil {
+			return fmt.Errorf("error creating FSx OpenZFS File System: %w", err)
+		}
+
+		d.SetId(aws.StringValue(result.FileSystem.FileSystemId))
+	}
+
+	if _, err := waitFileSystemCreated(conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil {
+		return fmt.Errorf("error waiting for FSx OpenZFS File System (%s) create: %w", d.Id(), err)
+	}
+
+	return resourceOpenzfsFileSystemRead(d, meta)
+}
+
+func resourceOpenzfsFileSystemRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*conns.AWSClient).FSxConn
+	defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig
+	ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig
+
+	filesystem, err := FindFileSystemByID(conn, d.Id())
+	if !d.IsNewResource() && tfresource.NotFound(err) {
+		log.Printf("[WARN] FSx OpenZFS File System (%s) not found, removing from state", d.Id())
+		d.SetId("")
+		return nil
+	}
+
+	if err != nil {
+		return fmt.Errorf("error reading FSx OpenZFS File System (%s): %w", d.Id(), err)
+	}
+
+	openzfsConfig := filesystem.OpenZFSConfiguration
+
+	if filesystem.WindowsConfiguration != nil {
+		return fmt.Errorf("expected FSx OpenZFS File System, found FSx Windows File System: %s", d.Id())
+	}
+
+	if filesystem.LustreConfiguration != nil {
+		return fmt.Errorf("expected FSx OpenZFS File System, found FSx Lustre File System: %s", d.Id())
+	}
+
+	if filesystem.OntapConfiguration != nil {
+		return fmt.Errorf("expected FSx OpenZFS File System, found FSx ONTAP File System: %s", d.Id())
+	}
+
+	if openzfsConfig == nil {
+		return fmt.Errorf("error describing FSx OpenZFS File System (%s): empty OpenZFS configuration", d.Id())
+	}
+
+	d.Set("arn", filesystem.ResourceARN)
+	d.Set("dns_name", filesystem.DNSName)
+	d.Set("deployment_type", openzfsConfig.DeploymentType)
+	if openzfsConfig.ThroughputCapacity != nil {
+		d.Set("throughput_capacity", openzfsConfig.ThroughputCapacity)
+	}
+	d.Set("storage_type", filesystem.StorageType)
+
+	if filesystem.KmsKeyId != nil {
+		d.Set("kms_key_id", filesystem.KmsKeyId)
+	}
+
+	if err := d.Set("network_interface_ids", aws.StringValueSlice(filesystem.NetworkInterfaceIds)); err != nil {
+		return fmt.Errorf("error setting network_interface_ids: %w", err)
+	}
+
+	d.Set("owner_id", filesystem.OwnerId)
+	d.Set("root_volume_id", openzfsConfig.RootVolumeId)
+	d.Set("storage_capacity", filesystem.StorageCapacity)
+
+	if err := d.Set("subnet_ids", aws.StringValueSlice(filesystem.SubnetIds)); err != nil {
+		return
fmt.Errorf("error setting subnet_ids: %w", err) + } + + tags := KeyValueTags(filesystem.Tags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig) + + //lintignore:AWSR002 + if err := d.Set("tags", tags.RemoveDefaultConfig(defaultTagsConfig).Map()); err != nil { + return fmt.Errorf("error setting tags: %w", err) + } + + if err := d.Set("tags_all", tags.Map()); err != nil { + return fmt.Errorf("error setting tags_all: %w", err) + } + + if err := d.Set("disk_iops_configuration", flattenFsxOpenzfsFileDiskIopsConfiguration(openzfsConfig.DiskIopsConfiguration)); err != nil { + return fmt.Errorf("error setting disk_iops_configuration: %w", err) + } + + d.Set("vpc_id", filesystem.VpcId) + d.Set("weekly_maintenance_start_time", openzfsConfig.WeeklyMaintenanceStartTime) + d.Set("automatic_backup_retention_days", openzfsConfig.AutomaticBackupRetentionDays) + d.Set("daily_automatic_backup_start_time", openzfsConfig.DailyAutomaticBackupStartTime) + d.Set("copy_tags_to_backups", openzfsConfig.CopyTagsToBackups) + d.Set("copy_tags_to_volumes", openzfsConfig.CopyTagsToVolumes) + + rootVolume, err := FindVolumeByID(conn, *openzfsConfig.RootVolumeId) + + if err != nil { + return fmt.Errorf("error reading FSx OpenZFS Root Volume Configuration (%s): %w", *openzfsConfig.RootVolumeId, err) + } + + if err := d.Set("root_volume_configuration", flattenFsxOpenzfsRootVolumeConfiguration(rootVolume)); err != nil { + return fmt.Errorf("error setting root_volume_configuration: %w", err) + } + + return nil +} + +func resourceOpenzfsFileSystemUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*conns.AWSClient).FSxConn + + if d.HasChange("tags_all") { + o, n := d.GetChange("tags_all") + + if err := UpdateTags(conn, d.Get("arn").(string), o, n); err != nil { + return fmt.Errorf("error updating FSx OpenZFS File System (%s) tags: %w", d.Get("arn").(string), err) + } + } + + if d.HasChangesExcept("tags_all", "tags") { + input := &fsx.UpdateFileSystemInput{ + ClientRequestToken: aws.String(resource.UniqueId()), + FileSystemId: aws.String(d.Id()), + OpenZFSConfiguration: &fsx.UpdateFileSystemOpenZFSConfiguration{}, + } + + if d.HasChange("automatic_backup_retention_days") { + input.OpenZFSConfiguration.AutomaticBackupRetentionDays = aws.Int64(int64(d.Get("automatic_backup_retention_days").(int))) + } + + if d.HasChange("copy_tags_to_backups") { + input.OpenZFSConfiguration.CopyTagsToBackups = aws.Bool(d.Get("copy_tags_to_backups").(bool)) + } + + if d.HasChange("copy_tags_to_volumes") { + input.OpenZFSConfiguration.CopyTagsToVolumes = aws.Bool(d.Get("copy_tags_to_volumes").(bool)) + } + + if d.HasChange("daily_automatic_backup_start_time") { + input.OpenZFSConfiguration.DailyAutomaticBackupStartTime = aws.String(d.Get("daily_automatic_backup_start_time").(string)) + } + + if d.HasChange("disk_iops_configuration") { + input.OpenZFSConfiguration.DiskIopsConfiguration = expandFsxOpenzfsFileDiskIopsConfiguration(d.Get("throughput_capacity").([]interface{})) + } + + if d.HasChange("throughput_capacity") { + input.OpenZFSConfiguration.ThroughputCapacity = aws.Int64(int64(d.Get("throughput_capacity").(int))) + } + + if d.HasChange("weekly_maintenance_start_time") { + input.OpenZFSConfiguration.WeeklyMaintenanceStartTime = aws.String(d.Get("weekly_maintenance_start_time").(string)) + } + + _, err := conn.UpdateFileSystem(input) + + if err != nil { + return fmt.Errorf("error updating FSx OpenZFS File System (%s): %w", d.Id(), err) + } + + if _, err := waitFileSystemUpdated(conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err 
!= nil { + return fmt.Errorf("error waiting for FSx OpenZFS File System (%s) update: %w", d.Id(), err) + } + + if d.HasChange("root_volume_configuration") { + input := &fsx.UpdateVolumeInput{ + ClientRequestToken: aws.String(resource.UniqueId()), + VolumeId: aws.String(d.Get("root_volume_id").(string)), + OpenZFSConfiguration: &fsx.UpdateOpenZFSVolumeConfiguration{}, + } + + input.OpenZFSConfiguration = expandFsxOpenzfsUpdateRootVolumeConfiguration(d.Get("root_volume_configuration").([]interface{})) + + _, err := conn.UpdateVolume(input) + + if err != nil { + return fmt.Errorf("error updating FSx OpenZFS Root Volume (%s): %w", d.Get("root_volume_id").(string), err) + } + + if _, err := waitVolumeUpdated(conn, d.Get("root_volume_id").(string), d.Timeout(schema.TimeoutUpdate)); err != nil { + return fmt.Errorf("error waiting for FSx OpenZFS Root Volume (%s) update: %w", d.Get("root_volume_id").(string), err) + } + } + + } + + return resourceOpenzfsFileSystemRead(d, meta) +} + +func resourceOpenzfsFileSystemDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*conns.AWSClient).FSxConn + + log.Printf("[DEBUG] Deleting FSx OpenZFS File System: %s", d.Id()) + _, err := conn.DeleteFileSystem(&fsx.DeleteFileSystemInput{ + FileSystemId: aws.String(d.Id()), + }) + + if tfawserr.ErrCodeEquals(err, fsx.ErrCodeFileSystemNotFound) { + return nil + } + + if err != nil { + return fmt.Errorf("error deleting FSx OpenZFS File System (%s): %w", d.Id(), err) + } + + if _, err := waitFileSystemDeleted(conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { + return fmt.Errorf("error waiting for FSx OpenZFS File System (%s) delete: %w", d.Id(), err) + } + + return nil +} + +func expandFsxOpenzfsFileDiskIopsConfiguration(cfg []interface{}) *fsx.DiskIopsConfiguration { + if len(cfg) < 1 { + return nil + } + + conf := cfg[0].(map[string]interface{}) + + out := fsx.DiskIopsConfiguration{} + + if v, ok := conf["mode"].(string); ok && len(v) > 0 { + out.Mode = aws.String(v) + } + + if v, ok := conf["iops"].(int); ok { + out.Iops = aws.Int64(int64(v)) + } + + return &out +} + +func expandFsxOpenzfsRootVolumeConfiguration(cfg []interface{}) *fsx.OpenZFSCreateRootVolumeConfiguration { + if len(cfg) < 1 { + return nil + } + + conf := cfg[0].(map[string]interface{}) + + out := fsx.OpenZFSCreateRootVolumeConfiguration{} + + if v, ok := conf["copy_tags_to_snapshots"].(bool); ok { + out.CopyTagsToSnapshots = aws.Bool(v) + } + + if v, ok := conf["data_compression_type"].(string); ok { + out.DataCompressionType = aws.String(v) + } + + if v, ok := conf["read_only"].(bool); ok { + out.ReadOnly = aws.Bool(v) + } + + if v, ok := conf["user_and_group_quotas"].([]interface{}); ok { + out.UserAndGroupQuotas = expandFsxOpenzfsUserAndGroupQuotas(v) + } + + if v, ok := conf["nfs_exports"].([]interface{}); ok { + out.NfsExports = expandFsxOpenzfsNfsExports(v) + } + + return &out +} + +func expandFsxOpenzfsUpdateRootVolumeConfiguration(cfg []interface{}) *fsx.UpdateOpenZFSVolumeConfiguration { + if len(cfg) < 1 { + return nil + } + + conf := cfg[0].(map[string]interface{}) + + out := fsx.UpdateOpenZFSVolumeConfiguration{} + + if v, ok := conf["data_compression_type"].(string); ok { + out.DataCompressionType = aws.String(v) + } + + if v, ok := conf["read_only"].(bool); ok { + out.ReadOnly = aws.Bool(v) + } + + if v, ok := conf["user_and_group_quotas"].([]interface{}); ok { + out.UserAndGroupQuotas = expandFsxOpenzfsUserAndGroupQuotas(v) + } + + if v, ok := conf["nfs_exports"].([]interface{}); ok { + out.NfsExports = 
expandFsxOpenzfsNfsExports(v) + } + + return &out +} + +func expandFsxOpenzfsUserAndGroupQuotas(cfg []interface{}) []*fsx.OpenZFSUserOrGroupQuota { + quotas := []*fsx.OpenZFSUserOrGroupQuota{} + + for _, quota := range cfg { + expandedQuota := expandFsxOpenzfsUserAndGroupQuota(quota.([]interface{})) + if expandedQuota != nil { + quotas = append(quotas, expandedQuota) + } + } + + return quotas + +} + +func expandFsxOpenzfsUserAndGroupQuota(cfg []interface{}) *fsx.OpenZFSUserOrGroupQuota { + if len(cfg) < 1 { + return nil + } + + conf := cfg[0].(map[string]interface{}) + + out := fsx.OpenZFSUserOrGroupQuota{} + + if v, ok := conf["id"].(int); ok { + out.Id = aws.Int64(int64(v)) + } + + if v, ok := conf["storage_capacity_quota_gib"].(int); ok { + out.StorageCapacityQuotaGiB = aws.Int64(int64(v)) + } + + if v, ok := conf["type"].(string); ok { + out.Type = aws.String(v) + } + + return &out + +} + +func expandFsxOpenzfsNfsExports(cfg []interface{}) []*fsx.OpenZFSNfsExport { + exports := []*fsx.OpenZFSNfsExport{} + + for _, export := range cfg { + expandedExport := expandFsxOpenzfsNfsExport(export.([]interface{})) + if expandedExport != nil { + exports = append(exports, expandedExport) + } + } + + return exports + +} + +func expandFsxOpenzfsNfsExport(cfg []interface{}) *fsx.OpenZFSNfsExport { + if len(cfg) < 1 { + return nil + } + + conf := cfg[0].(map[string]interface{}) + + out := fsx.OpenZFSNfsExport{} + + if v, ok := conf["clinet_configurations"].([]interface{}); ok { + out.ClientConfigurations = expandFsxOpenzfsClinetConfigurations(v) + } + + return &out +} + +func expandFsxOpenzfsClinetConfigurations(cfg []interface{}) []*fsx.OpenZFSClientConfiguration { + configurations := []*fsx.OpenZFSClientConfiguration{} + + for _, configuration := range cfg { + expandedConfiguration := expandFsxOpenzfsClientConfiguration(configuration.([]interface{})) + if expandedConfiguration != nil { + configurations = append(configurations, expandedConfiguration) + } + } + + return configurations + +} + +func expandFsxOpenzfsClientConfiguration(cfg []interface{}) *fsx.OpenZFSClientConfiguration { + if len(cfg) < 1 { + return nil + } + + conf := cfg[0].(map[string]interface{}) + + out := fsx.OpenZFSClientConfiguration{} + + if v, ok := conf["clients"].(string); ok && len(v) > 0 { + out.Clients = aws.String(v) + } + + if v, ok := conf["options"].(*schema.Set); ok { + out.Options = flex.ExpandStringSet(v) + } + + return &out +} + +func flattenFsxOpenzfsFileDiskIopsConfiguration(rs *fsx.DiskIopsConfiguration) []interface{} { + if rs == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + if rs.Mode != nil { + m["mode"] = aws.StringValue(rs.Mode) + } + if rs.Iops != nil { + m["iops"] = aws.Int64Value(rs.Iops) + } + + return []interface{}{m} +} + +func flattenFsxOpenzfsRootVolumeConfiguration(rs *fsx.Volume) []interface{} { + if rs == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + if rs.OpenZFSConfiguration.CopyTagsToSnapshots != nil { + m["copy_tags_to_snapshots"] = *rs.OpenZFSConfiguration.CopyTagsToSnapshots + } + if rs.OpenZFSConfiguration.DataCompressionType != nil { + m["data_compression_type"] = aws.StringValue(rs.OpenZFSConfiguration.DataCompressionType) + } + if rs.OpenZFSConfiguration.NfsExports != nil { + m["nfs_exports"] = flattenFsxOpenzfsFileNfsExports(rs.OpenZFSConfiguration.NfsExports) + } + if rs.OpenZFSConfiguration.ReadOnly != nil { + m["read_only"] = *rs.OpenZFSConfiguration.ReadOnly + } + if rs.OpenZFSConfiguration.UserAndGroupQuotas != nil { + 
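+		// Flatten the nested quota structs into the schema's list form so the
+		// API response for the root volume round-trips cleanly back into state.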
m["user_and_group_quotas"] = flattenFsxOpenzfsFileUserAndGroupQuotas(rs.OpenZFSConfiguration.UserAndGroupQuotas) + } + + return []interface{}{m} +} + +func flattenFsxOpenzfsFileNfsExports(rs []*fsx.OpenZFSNfsExport) []map[string]interface{} { + exports := make([]map[string]interface{}, 0) + + for _, export := range rs { + cfg := make(map[string]interface{}) + cfg["clinet_configurations"] = flattenFsxOpenzfsClientConfigurations(export.ClientConfigurations) + exports = append(exports, cfg) + } + + if len(exports) > 0 { + return exports + } + + return nil +} + +func flattenFsxOpenzfsClientConfigurations(rs []*fsx.OpenZFSClientConfiguration) []map[string]interface{} { + configurations := make([]map[string]interface{}, 0) + + for _, configuration := range rs { + cfg := make(map[string]interface{}) + cfg["clients"] = aws.StringValue(configuration.Clients) + cfg["options"] = flex.FlattenStringList(configuration.Options) + configurations = append(configurations, cfg) + } + + if len(configurations) > 0 { + return configurations + } + + return nil +} + +func flattenFsxOpenzfsFileUserAndGroupQuotas(rs []*fsx.OpenZFSUserOrGroupQuota) []map[string]interface{} { + quotas := make([]map[string]interface{}, 0) + + for _, quota := range rs { + cfg := make(map[string]interface{}) + cfg["id"] = aws.Int64(*quota.Id) + cfg["storage_capacity_quota_gib"] = aws.Int64(*quota.StorageCapacityQuotaGiB) + cfg["type"] = aws.StringValue(quota.Type) + quotas = append(quotas, cfg) + } + + if len(quotas) > 0 { + return quotas + } + + return nil +} From 0e66bd7e0d118dbbf08d1bde016246a32959bf94 Mon Sep 17 00:00:00 2001 From: awsaxeman <34073510+awsaxeman@users.noreply.github.com> Date: Fri, 17 Dec 2021 11:57:17 -0500 Subject: [PATCH 02/12] add tests and docs --- .changelog/22234.txt | 3 + internal/service/ec2/sweep.go | 1 + internal/service/fsx/openzfs_file_system.go | 63 +- .../service/fsx/openzfs_file_system_test.go | 999 ++++++++++++++++++ internal/service/fsx/sweep.go | 5 + internal/service/fsx/wait.go | 2 +- .../r/fsx_openzfs_file_system.html.markdown | 112 ++ 7 files changed, 1148 insertions(+), 37 deletions(-) create mode 100644 .changelog/22234.txt create mode 100644 internal/service/fsx/openzfs_file_system_test.go create mode 100644 website/docs/r/fsx_openzfs_file_system.html.markdown diff --git a/.changelog/22234.txt b/.changelog/22234.txt new file mode 100644 index 00000000000..8d43ac42edd --- /dev/null +++ b/.changelog/22234.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_fsx_openzfs_file_system +``` \ No newline at end of file diff --git a/internal/service/ec2/sweep.go b/internal/service/ec2/sweep.go index 19222527cff..c7930de57af 100644 --- a/internal/service/ec2/sweep.go +++ b/internal/service/ec2/sweep.go @@ -183,6 +183,7 @@ func init() { "aws_emr_studio", "aws_fsx_lustre_file_system", "aws_fsx_ontap_file_system", + "aws_fsx_openzfs_file_system", "aws_fsx_windows_file_system", "aws_lambda_function", "aws_lb", diff --git a/internal/service/fsx/openzfs_file_system.go b/internal/service/fsx/openzfs_file_system.go index 74ab7a7d754..1882c5e0dbf 100644 --- a/internal/service/fsx/openzfs_file_system.go +++ b/internal/service/fsx/openzfs_file_system.go @@ -112,7 +112,7 @@ func ResourceOpenzfsFileSystem() *schema.Resource { "data_compression_type": { Type: schema.TypeString, Optional: true, - ValidateFunc: validation.StringInSlice(fsx.DataCompressionType_Values(), false), + ValidateFunc: validation.StringInSlice(fsx.OpenZFSDataCompressionType_Values(), false), }, "nfs_exports": { Type: schema.TypeList, @@ 
-120,7 +120,7 @@ func ResourceOpenzfsFileSystem() *schema.Resource { MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "clinet_configurations": { + "client_configurations": { Type: schema.TypeList, Required: true, MaxItems: 25, @@ -240,7 +240,6 @@ func ResourceOpenzfsFileSystem() *schema.Resource { "throughput_capacity": { Type: schema.TypeInt, Required: true, - ForceNew: true, ValidateFunc: validation.IntInSlice([]int{64, 128, 256, 512, 1024, 2048, 3072, 4096}), }, "vpc_id": { @@ -498,10 +497,6 @@ func resourceOpenzfsFileSystemUpdate(d *schema.ResourceData, meta interface{}) e input.OpenZFSConfiguration.DailyAutomaticBackupStartTime = aws.String(d.Get("daily_automatic_backup_start_time").(string)) } - if d.HasChange("disk_iops_configuration") { - input.OpenZFSConfiguration.DiskIopsConfiguration = expandFsxOpenzfsFileDiskIopsConfiguration(d.Get("throughput_capacity").([]interface{})) - } - if d.HasChange("throughput_capacity") { input.OpenZFSConfiguration.ThroughputCapacity = aws.Int64(int64(d.Get("throughput_capacity").(int))) } @@ -593,6 +588,8 @@ func expandFsxOpenzfsRootVolumeConfiguration(cfg []interface{}) *fsx.OpenZFSCrea return nil } + log.Printf("[WARN] Root Volume Info (%v) ", cfg) + conf := cfg[0].(map[string]interface{}) out := fsx.OpenZFSCreateRootVolumeConfiguration{} @@ -652,7 +649,7 @@ func expandFsxOpenzfsUserAndGroupQuotas(cfg []interface{}) []*fsx.OpenZFSUserOrG quotas := []*fsx.OpenZFSUserOrGroupQuota{} for _, quota := range cfg { - expandedQuota := expandFsxOpenzfsUserAndGroupQuota(quota.([]interface{})) + expandedQuota := expandFsxOpenzfsUserAndGroupQuota(quota.(map[string]interface{})) if expandedQuota != nil { quotas = append(quotas, expandedQuota) } @@ -662,13 +659,11 @@ func expandFsxOpenzfsUserAndGroupQuotas(cfg []interface{}) []*fsx.OpenZFSUserOrG } -func expandFsxOpenzfsUserAndGroupQuota(cfg []interface{}) *fsx.OpenZFSUserOrGroupQuota { - if len(cfg) < 1 { +func expandFsxOpenzfsUserAndGroupQuota(conf map[string]interface{}) *fsx.OpenZFSUserOrGroupQuota { + if len(conf) < 1 { return nil } - conf := cfg[0].(map[string]interface{}) - out := fsx.OpenZFSUserOrGroupQuota{} if v, ok := conf["id"].(int); ok { @@ -690,8 +685,10 @@ func expandFsxOpenzfsUserAndGroupQuota(cfg []interface{}) *fsx.OpenZFSUserOrGrou func expandFsxOpenzfsNfsExports(cfg []interface{}) []*fsx.OpenZFSNfsExport { exports := []*fsx.OpenZFSNfsExport{} + log.Printf("[WARN] NFS Info (%v) ", cfg) + for _, export := range cfg { - expandedExport := expandFsxOpenzfsNfsExport(export.([]interface{})) + expandedExport := expandFsxOpenzfsNfsExport(export.(map[string]interface{})) if expandedExport != nil { exports = append(exports, expandedExport) } @@ -701,16 +698,12 @@ func expandFsxOpenzfsNfsExports(cfg []interface{}) []*fsx.OpenZFSNfsExport { } -func expandFsxOpenzfsNfsExport(cfg []interface{}) *fsx.OpenZFSNfsExport { - if len(cfg) < 1 { - return nil - } - - conf := cfg[0].(map[string]interface{}) - +func expandFsxOpenzfsNfsExport(conf map[string]interface{}) *fsx.OpenZFSNfsExport { out := fsx.OpenZFSNfsExport{} - if v, ok := conf["clinet_configurations"].([]interface{}); ok { + log.Printf("[DEBUG] NFS Export Info (%v) ", conf) + + if v, ok := conf["client_configurations"].([]interface{}); ok { out.ClientConfigurations = expandFsxOpenzfsClinetConfigurations(v) } @@ -720,8 +713,10 @@ func expandFsxOpenzfsNfsExport(cfg []interface{}) *fsx.OpenZFSNfsExport { func expandFsxOpenzfsClinetConfigurations(cfg []interface{}) []*fsx.OpenZFSClientConfiguration { configurations := 
[]*fsx.OpenZFSClientConfiguration{} + log.Printf("[DEBUG] Client Configs (%v) ", cfg) + for _, configuration := range cfg { - expandedConfiguration := expandFsxOpenzfsClientConfiguration(configuration.([]interface{})) + expandedConfiguration := expandFsxOpenzfsClientConfiguration(configuration.(map[string]interface{})) if expandedConfiguration != nil { configurations = append(configurations, expandedConfiguration) } @@ -731,21 +726,17 @@ func expandFsxOpenzfsClinetConfigurations(cfg []interface{}) []*fsx.OpenZFSClien } -func expandFsxOpenzfsClientConfiguration(cfg []interface{}) *fsx.OpenZFSClientConfiguration { - if len(cfg) < 1 { - return nil - } - - conf := cfg[0].(map[string]interface{}) - +func expandFsxOpenzfsClientConfiguration(conf map[string]interface{}) *fsx.OpenZFSClientConfiguration { out := fsx.OpenZFSClientConfiguration{} + log.Printf("[DEBUG] Client Config (%v) ", conf) + if v, ok := conf["clients"].(string); ok && len(v) > 0 { out.Clients = aws.String(v) } - if v, ok := conf["options"].(*schema.Set); ok { - out.Options = flex.ExpandStringSet(v) + if v, ok := conf["options"].([]interface{}); ok { + out.Options = flex.ExpandStringList(v) } return &out @@ -774,7 +765,7 @@ func flattenFsxOpenzfsRootVolumeConfiguration(rs *fsx.Volume) []interface{} { m := make(map[string]interface{}) if rs.OpenZFSConfiguration.CopyTagsToSnapshots != nil { - m["copy_tags_to_snapshots"] = *rs.OpenZFSConfiguration.CopyTagsToSnapshots + m["copy_tags_to_snapshots"] = aws.BoolValue(rs.OpenZFSConfiguration.CopyTagsToSnapshots) } if rs.OpenZFSConfiguration.DataCompressionType != nil { m["data_compression_type"] = aws.StringValue(rs.OpenZFSConfiguration.DataCompressionType) @@ -783,7 +774,7 @@ func flattenFsxOpenzfsRootVolumeConfiguration(rs *fsx.Volume) []interface{} { m["nfs_exports"] = flattenFsxOpenzfsFileNfsExports(rs.OpenZFSConfiguration.NfsExports) } if rs.OpenZFSConfiguration.ReadOnly != nil { - m["read_only"] = *rs.OpenZFSConfiguration.ReadOnly + m["read_only"] = aws.BoolValue(rs.OpenZFSConfiguration.ReadOnly) } if rs.OpenZFSConfiguration.UserAndGroupQuotas != nil { m["user_and_group_quotas"] = flattenFsxOpenzfsFileUserAndGroupQuotas(rs.OpenZFSConfiguration.UserAndGroupQuotas) @@ -797,7 +788,7 @@ func flattenFsxOpenzfsFileNfsExports(rs []*fsx.OpenZFSNfsExport) []map[string]in for _, export := range rs { cfg := make(map[string]interface{}) - cfg["clinet_configurations"] = flattenFsxOpenzfsClientConfigurations(export.ClientConfigurations) + cfg["client_configurations"] = flattenFsxOpenzfsClientConfigurations(export.ClientConfigurations) exports = append(exports, cfg) } @@ -830,8 +821,8 @@ func flattenFsxOpenzfsFileUserAndGroupQuotas(rs []*fsx.OpenZFSUserOrGroupQuota) for _, quota := range rs { cfg := make(map[string]interface{}) - cfg["id"] = aws.Int64(*quota.Id) - cfg["storage_capacity_quota_gib"] = aws.Int64(*quota.StorageCapacityQuotaGiB) + cfg["id"] = aws.Int64Value(quota.Id) + cfg["storage_capacity_quota_gib"] = aws.Int64Value(quota.StorageCapacityQuotaGiB) cfg["type"] = aws.StringValue(quota.Type) quotas = append(quotas, cfg) } diff --git a/internal/service/fsx/openzfs_file_system_test.go b/internal/service/fsx/openzfs_file_system_test.go new file mode 100644 index 00000000000..12ea935d8c8 --- /dev/null +++ b/internal/service/fsx/openzfs_file_system_test.go @@ -0,0 +1,999 @@ +package fsx_test + +import ( + "fmt" + "regexp" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/fsx" + sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tffsx "github.com/hashicorp/terraform-provider-aws/internal/service/fsx" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" +) + +func TestAccFSxOpenzfsFileSystem_basic(t *testing.T) { + var filesystem fsx.FileSystem + resourceName := "aws_fsx_openzfs_file_system.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckFsxOpenzfsFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccOpenzfsFileSystemBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsFileSystemExists(resourceName, &filesystem), + acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "fsx", regexp.MustCompile(`file-system/fs-.+`)), + resource.TestCheckResourceAttr(resourceName, "network_interface_ids.#", "1"), + acctest.CheckResourceAttrAccountID(resourceName, "owner_id"), + resource.TestCheckResourceAttrSet(resourceName, "root_volume_id"), + resource.TestCheckResourceAttrSet(resourceName, "dns_name"), + resource.TestCheckResourceAttr(resourceName, "storage_capacity", "64"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "subnet_ids.*", "aws_subnet.test1", "id"), + resource.TestCheckResourceAttr(resourceName, "security_group_ids.#", "0"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttrPair(resourceName, "vpc_id", "aws_vpc.test", "id"), + resource.TestMatchResourceAttr(resourceName, "weekly_maintenance_start_time", regexp.MustCompile(`^\d:\d\d:\d\d$`)), + resource.TestCheckResourceAttr(resourceName, "deployment_type", fsx.OpenZFSDeploymentTypeSingleAz1), + resource.TestCheckResourceAttr(resourceName, "automatic_backup_retention_days", "0"), + resource.TestCheckResourceAttr(resourceName, "storage_type", fsx.StorageTypeSsd), + resource.TestCheckResourceAttrSet(resourceName, "kms_key_id"), + resource.TestCheckResourceAttr(resourceName, "throughput_capacity", "64"), + resource.TestCheckResourceAttr(resourceName, "disk_iops_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "disk_iops_configuration.0.mode", "AUTOMATIC"), + resource.TestCheckResourceAttr(resourceName, "disk_iops_configuration.0.iops", "192"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.data_compression_type", "NONE"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.nfs_exports.#", "1"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.nfs_exports.0.client_configurations.#", "1"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.nfs_exports.0.client_configurations.0.clients", "*"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.nfs_exports.0.client_configurations.0.options.#", "1"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.nfs_exports.0.client_configurations.0.options.0", "crossmnt"), + resource.TestCheckResourceAttr(resourceName, 
"root_volume_configuration.0.read_only", "false"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"security_group_ids"}, + }, + }, + }) +} + +func TestAccFSxOpenzfsFileSystem_diskIops(t *testing.T) { + var filesystem fsx.FileSystem + resourceName := "aws_fsx_openzfs_file_system.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckFsxOpenzfsFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccOpenzfsFileSystemDiskIopsConfigurationConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsFileSystemExists(resourceName, &filesystem), + resource.TestCheckResourceAttr(resourceName, "disk_iops_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "disk_iops_configuration.0.mode", "USER_PROVISIONED"), + resource.TestCheckResourceAttr(resourceName, "disk_iops_configuration.0.iops", "3072"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"security_group_ids"}, + }, + }, + }) +} + +func TestAccFSxOpenzfsFileSystem_disappears(t *testing.T) { + var filesystem fsx.FileSystem + resourceName := "aws_fsx_openzfs_file_system.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckFsxOpenzfsFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccOpenzfsFileSystemBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsFileSystemExists(resourceName, &filesystem), + acctest.CheckResourceDisappears(acctest.Provider, tffsx.ResourceOpenzfsFileSystem(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccFSxOpenzfsFileSystem_rootVolume(t *testing.T) { + var filesystem1, filesystem2, filesystem3 fsx.FileSystem + resourceName := "aws_fsx_openzfs_file_system.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckFsxOpenzfsFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccOpenzfsFileSystemRootVolumeConfig(rName, "NONE", "sync", "false", 128), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsFileSystemExists(resourceName, &filesystem1), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.data_compression_type", "NONE"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.nfs_exports.#", "1"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.nfs_exports.0.client_configurations.#", "1"), + 
resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.nfs_exports.0.client_configurations.0.clients", "10.0.1.0/24"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.nfs_exports.0.client_configurations.0.options.#", "2"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.nfs_exports.0.client_configurations.0.options.0", "sync"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.nfs_exports.0.client_configurations.0.options.1", "rw"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.read_only", "false"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.#", "1"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.0.id", "10"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.0.storage_capacity_quota_gib", "128"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.0.type", "USER"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"security_group_ids"}, + }, + { + Config: testAccOpenzfsFileSystemRootVolumeConfig(rName, "ZSTD", "async", "true", 256), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsFileSystemExists(resourceName, &filesystem2), + testAccCheckFsxOpenzfsFileSystemNotRecreated(&filesystem1, &filesystem2), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.data_compression_type", "ZSTD"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.nfs_exports.#", "1"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.nfs_exports.0.client_configurations.#", "1"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.nfs_exports.0.client_configurations.0.clients", "10.0.1.0/24"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.nfs_exports.0.client_configurations.0.options.#", "2"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.nfs_exports.0.client_configurations.0.options.0", "async"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.nfs_exports.0.client_configurations.0.options.1", "rw"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.read_only", "true"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.#", "1"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.0.id", "10"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.0.storage_capacity_quota_gib", "256"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.0.type", "USER"), + ), + }, + { + Config: testAccOpenzfsFileSystemRootVolume2ClientConfig(rName, "NONE", "async", "false", 128, 1024), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsFileSystemExists(resourceName, &filesystem3), + testAccCheckFsxOpenzfsFileSystemNotRecreated(&filesystem1, &filesystem3), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.data_compression_type", 
"NONE"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.nfs_exports.#", "1"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.nfs_exports.0.client_configurations.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "root_volume_configuration.0.nfs_exports.0.client_configurations.*", map[string]string{ + "clients": "10.0.1.0/24", + "options.0": "async", + "options.1": "rw", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "root_volume_configuration.0.nfs_exports.0.client_configurations.*", map[string]string{ + "clients": "*", + "options.0": "sync", + "options.1": "rw", + }), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.read_only", "false"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.#", "2"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.0.id", "10"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.0.storage_capacity_quota_gib", "128"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.0.type", "USER"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.1.id", "20"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.1.storage_capacity_quota_gib", "1024"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.1.type", "GROUP"), + ), + }, + }, + }) +} + +func TestAccFSxOpenzfsFileSystem_securityGroupIDs(t *testing.T) { + var filesystem1, filesystem2 fsx.FileSystem + resourceName := "aws_fsx_openzfs_file_system.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckFsxOpenzfsFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccOpenzfsFileSystemSecurityGroupIds1Config(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsFileSystemExists(resourceName, &filesystem1), + resource.TestCheckResourceAttr(resourceName, "security_group_ids.#", "1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"security_group_ids"}, + }, + { + Config: testAccOpenzfsFileSystemSecurityGroupIds2Config(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsFileSystemExists(resourceName, &filesystem2), + testAccCheckFsxOpenzfsFileSystemRecreated(&filesystem1, &filesystem2), + resource.TestCheckResourceAttr(resourceName, "security_group_ids.#", "2"), + ), + }, + }, + }) +} + +func TestAccFSxOpenzfsFileSystem_tags(t *testing.T) { + var filesystem1, filesystem2, filesystem3 fsx.FileSystem + resourceName := "aws_fsx_openzfs_file_system.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckFsxOpenzfsFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: 
testAccOpenzfsFileSystemTags1Config(rName, "key1", "value1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsFileSystemExists(resourceName, &filesystem1), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"security_group_ids"}, + }, + { + Config: testAccOpenzfsFileSystemTags2Config(rName, "key1", "value1updated", "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsFileSystemExists(resourceName, &filesystem2), + testAccCheckFsxOpenzfsFileSystemNotRecreated(&filesystem1, &filesystem2), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + { + Config: testAccOpenzfsFileSystemTags1Config(rName, "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsFileSystemExists(resourceName, &filesystem3), + testAccCheckFsxOpenzfsFileSystemNotRecreated(&filesystem2, &filesystem3), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + }, + }) +} + +func TestAccFSxOpenzfsFileSystem_copyTags(t *testing.T) { + var filesystem1, filesystem2 fsx.FileSystem + resourceName := "aws_fsx_openzfs_file_system.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckFsxOpenzfsFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccOpenzfsFileSystemCopyTagsConfig(rName, "key1", "value1", "true"), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsFileSystemExists(resourceName, &filesystem1), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + resource.TestCheckResourceAttr(resourceName, "copy_tags_to_backups", "true"), + resource.TestCheckResourceAttr(resourceName, "copy_tags_to_volumes", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"security_group_ids"}, + }, + { + Config: testAccOpenzfsFileSystemCopyTagsConfig(rName, "key1", "value1", "false"), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsFileSystemExists(resourceName, &filesystem2), + testAccCheckFsxOpenzfsFileSystemNotRecreated(&filesystem1, &filesystem2), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + resource.TestCheckResourceAttr(resourceName, "copy_tags_to_backups", "false"), + resource.TestCheckResourceAttr(resourceName, "copy_tags_to_volumes", "false"), + ), + }, + }, + }) +} + +func TestAccFSxOpenzfsFileSystem_throughput(t *testing.T) { + var filesystem1, filesystem2 fsx.FileSystem + resourceName := "aws_fsx_openzfs_file_system.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + Providers: 
acctest.Providers,
+		CheckDestroy: testAccCheckFsxOpenzfsFileSystemDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccOpenzfsFileSystemThroughputConfig(rName, 64),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckFsxOpenzfsFileSystemExists(resourceName, &filesystem1),
+					resource.TestCheckResourceAttr(resourceName, "throughput_capacity", "64"),
+				),
+			},
+			{
+				ResourceName:            resourceName,
+				ImportState:             true,
+				ImportStateVerify:       true,
+				ImportStateVerifyIgnore: []string{"security_group_ids"},
+			},
+			{
+				Config: testAccOpenzfsFileSystemThroughputConfig(rName, 128),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckFsxOpenzfsFileSystemExists(resourceName, &filesystem2),
+					testAccCheckFsxOpenzfsFileSystemNotRecreated(&filesystem1, &filesystem2),
+					resource.TestCheckResourceAttr(resourceName, "throughput_capacity", "128"),
+				),
+			},
+		},
+	})
+}
+
+func TestAccFSxOpenzfsFileSystem_storageType(t *testing.T) {
+	var filesystem1, filesystem2 fsx.FileSystem
+	resourceName := "aws_fsx_openzfs_file_system.test"
+	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) },
+		ErrorCheck:   acctest.ErrorCheck(t, fsx.EndpointsID),
+		Providers:    acctest.Providers,
+		CheckDestroy: testAccCheckFsxOpenzfsFileSystemDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccOpenzfsFileSystemStorageTypeConfig(rName, "HDD"),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckFsxOpenzfsFileSystemExists(resourceName, &filesystem1),
+					resource.TestCheckResourceAttr(resourceName, "storage_type", "HDD"),
+				),
+			},
+			{
+				ResourceName:            resourceName,
+				ImportState:             true,
+				ImportStateVerify:       true,
+				ImportStateVerifyIgnore: []string{"security_group_ids"},
+			},
+			{
+				Config: testAccOpenzfsFileSystemStorageTypeConfig(rName, "SSD"),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckFsxOpenzfsFileSystemExists(resourceName, &filesystem2),
+					testAccCheckFsxOpenzfsFileSystemRecreated(&filesystem1, &filesystem2),
+					resource.TestCheckResourceAttr(resourceName, "storage_type", "SSD"),
+				),
+			},
+		},
+	})
+}
+
+func TestAccFSxOpenzfsFileSystem_weeklyMaintenanceStartTime(t *testing.T) {
+	var filesystem1, filesystem2 fsx.FileSystem
+	resourceName := "aws_fsx_openzfs_file_system.test"
+	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) },
+		ErrorCheck:   acctest.ErrorCheck(t, fsx.EndpointsID),
+		Providers:    acctest.Providers,
+		CheckDestroy: testAccCheckFsxOpenzfsFileSystemDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccOpenzfsFileSystemWeeklyMaintenanceStartTimeConfig(rName, "1:01:01"),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckFsxOpenzfsFileSystemExists(resourceName, &filesystem1),
+					resource.TestCheckResourceAttr(resourceName, "weekly_maintenance_start_time", "1:01:01"),
+				),
+			},
+			{
+				ResourceName:            resourceName,
+				ImportState:             true,
+				ImportStateVerify:       true,
+				ImportStateVerifyIgnore: []string{"security_group_ids"},
+			},
+			{
+				Config: testAccOpenzfsFileSystemWeeklyMaintenanceStartTimeConfig(rName, "2:02:02"),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckFsxOpenzfsFileSystemExists(resourceName, &filesystem2),
+					testAccCheckFsxOpenzfsFileSystemNotRecreated(&filesystem1, &filesystem2),
+					resource.TestCheckResourceAttr(resourceName, "weekly_maintenance_start_time",
"2:02:02"), + ), + }, + }, + }) +} + +func TestAccFSxOpenzfsFileSystem_automaticBackupRetentionDays(t *testing.T) { + var filesystem1, filesystem2 fsx.FileSystem + resourceName := "aws_fsx_openzfs_file_system.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckFsxOpenzfsFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccOpenzfsFileSystemAutomaticBackupRetentionDaysConfig(rName, 90), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsFileSystemExists(resourceName, &filesystem1), + resource.TestCheckResourceAttr(resourceName, "automatic_backup_retention_days", "90"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"security_group_ids"}, + }, + { + Config: testAccOpenzfsFileSystemAutomaticBackupRetentionDaysConfig(rName, 0), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsFileSystemExists(resourceName, &filesystem2), + testAccCheckFsxOpenzfsFileSystemNotRecreated(&filesystem1, &filesystem2), + resource.TestCheckResourceAttr(resourceName, "automatic_backup_retention_days", "0"), + ), + }, + { + Config: testAccOpenzfsFileSystemAutomaticBackupRetentionDaysConfig(rName, 1), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsFileSystemExists(resourceName, &filesystem1), + resource.TestCheckResourceAttr(resourceName, "automatic_backup_retention_days", "1"), + ), + }, + }, + }) +} + +func TestAccFSxOpenzfsFileSystem_kmsKeyID(t *testing.T) { + var filesystem fsx.FileSystem + resourceName := "aws_fsx_openzfs_file_system.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckFsxOpenzfsFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccOpenzfsFileSystemKMSKeyIDConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsFileSystemExists(resourceName, &filesystem), + resource.TestCheckResourceAttrPair(resourceName, "kms_key_id", "aws_kms_key.test", "arn"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"security_group_ids"}, + }, + }, + }) +} + +func TestAccFSxOpenzfsFileSystem_dailyAutomaticBackupStartTime(t *testing.T) { + var filesystem1, filesystem2 fsx.FileSystem + resourceName := "aws_fsx_openzfs_file_system.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckFsxOpenzfsFileSystemDestroy, + Steps: []resource.TestStep{ + { + Config: testAccOpenzfsFileSystemDailyAutomaticBackupStartTimeConfig(rName, "01:01"), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsFileSystemExists(resourceName, &filesystem1), + resource.TestCheckResourceAttr(resourceName, "daily_automatic_backup_start_time", "01:01"), + ), + }, + { + 
ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"security_group_ids"}, + }, + { + Config: testAccOpenzfsFileSystemDailyAutomaticBackupStartTimeConfig(rName, "02:02"), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsFileSystemExists(resourceName, &filesystem2), + testAccCheckFsxOpenzfsFileSystemNotRecreated(&filesystem1, &filesystem2), + resource.TestCheckResourceAttr(resourceName, "daily_automatic_backup_start_time", "02:02"), + ), + }, + }, + }) +} + +func testAccCheckFsxOpenzfsFileSystemExists(resourceName string, fs *fsx.FileSystem) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not found: %s", resourceName) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn + + filesystem, err := tffsx.FindFileSystemByID(conn, rs.Primary.ID) + if err != nil { + return err + } + + if filesystem == nil { + return fmt.Errorf("FSx Openzfs File System (%s) not found", rs.Primary.ID) + } + + *fs = *filesystem + + return nil + } +} + +func testAccCheckFsxOpenzfsFileSystemNotRecreated(i, j *fsx.FileSystem) resource.TestCheckFunc { + return func(s *terraform.State) error { + if aws.StringValue(i.FileSystemId) != aws.StringValue(j.FileSystemId) { + return fmt.Errorf("FSx OpenZFS File System (%s) recreated", aws.StringValue(i.FileSystemId)) + } + + return nil + } +} + +func testAccCheckFsxOpenzfsFileSystemRecreated(i, j *fsx.FileSystem) resource.TestCheckFunc { + return func(s *terraform.State) error { + if aws.StringValue(i.FileSystemId) == aws.StringValue(j.FileSystemId) { + return fmt.Errorf("FSx OpenZFS File System (%s) not recreated", aws.StringValue(i.FileSystemId)) + } + + return nil + } +} + +func testAccCheckFsxOpenzfsFileSystemDestroy(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_fsx_openzfs_file_system" { + continue + } + + filesystem, err := tffsx.FindFileSystemByID(conn, rs.Primary.ID) + if tfresource.NotFound(err) { + continue + } + + if filesystem != nil { + return fmt.Errorf("FSx OpenZFS File System (%s) still exists", rs.Primary.ID) + } + } + return nil +} + +func testAccOpenzfsFileSystemBaseConfig(rName string) string { + return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptIn(), fmt.Sprintf(` +data "aws_partition" "current" {} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + tags = { + Name = %[1]q + } +} + +resource "aws_subnet" "test1" { + vpc_id = aws_vpc.test.id + cidr_block = "10.0.1.0/24" + availability_zone = data.aws_availability_zones.available.names[0] + + tags = { + Name = %[1]q + } +} + +`, rName)) +} + +func testAccOpenzfsFileSystemBasicConfig(rName string) string { + return acctest.ConfigCompose(testAccOpenzfsFileSystemBaseConfig(rName), ` +resource "aws_fsx_openzfs_file_system" "test" { + storage_capacity = 64 + subnet_ids = [aws_subnet.test1.id] + deployment_type = "SINGLE_AZ_1" + throughput_capacity = 64 +} +`) +} + +func testAccOpenzfsFileSystemDiskIopsConfigurationConfig(rName string) string { + return acctest.ConfigCompose(testAccOpenzfsFileSystemBaseConfig(rName), ` +resource "aws_fsx_openzfs_file_system" "test" { + storage_capacity = 64 + storage_type = "SSD" + subnet_ids = [aws_subnet.test1.id] + deployment_type = "SINGLE_AZ_1" + throughput_capacity = 64 + disk_iops_configuration { + mode = "USER_PROVISIONED" + iops = 3072 + } +} +`) +} + 
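If the `disk_iops_configuration` helper above needed a dedicated acceptance test, it could follow the same pattern as the other tests in this file. The sketch below is illustrative rather than part of the patch: it reuses only helpers already defined in this file, and the `disk_iops_configuration.*` attribute paths are assumptions based on the resource schema.

```go
// Illustrative sketch: exercises testAccOpenzfsFileSystemDiskIopsConfigurationConfig
// using the same TestCase layout as the surrounding acceptance tests.
func TestAccFSxOpenzfsFileSystem_diskIops(t *testing.T) {
	var filesystem fsx.FileSystem
	resourceName := "aws_fsx_openzfs_file_system.test"
	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)

	resource.ParallelTest(t, resource.TestCase{
		PreCheck:     func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) },
		ErrorCheck:   acctest.ErrorCheck(t, fsx.EndpointsID),
		Providers:    acctest.Providers,
		CheckDestroy: testAccCheckFsxOpenzfsFileSystemDestroy,
		Steps: []resource.TestStep{
			{
				Config: testAccOpenzfsFileSystemDiskIopsConfigurationConfig(rName),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckFsxOpenzfsFileSystemExists(resourceName, &filesystem),
					// Attribute paths below are assumed from the schema definition.
					resource.TestCheckResourceAttr(resourceName, "disk_iops_configuration.#", "1"),
					resource.TestCheckResourceAttr(resourceName, "disk_iops_configuration.0.mode", "USER_PROVISIONED"),
					resource.TestCheckResourceAttr(resourceName, "disk_iops_configuration.0.iops", "3072"),
				),
			},
		},
	})
}
```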
+func testAccOpenzfsFileSystemSecurityGroupIds1Config(rName string) string { + return acctest.ConfigCompose(testAccOpenzfsFileSystemBaseConfig(rName), fmt.Sprintf(` +resource "aws_security_group" "test1" { + description = "security group for FSx testing" + vpc_id = aws_vpc.test.id + + ingress { + cidr_blocks = [aws_vpc.test.cidr_block] + from_port = 0 + protocol = -1 + to_port = 0 + } + + egress { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + to_port = 0 + } + + tags = { + Name = %[1]q + } +} + +resource "aws_fsx_openzfs_file_system" "test" { + security_group_ids = [aws_security_group.test1.id] + storage_capacity = 1024 + subnet_ids = [aws_subnet.test1.id] + deployment_type = "SINGLE_AZ_1" + throughput_capacity = 512 + storage_type = "SSD" + + tags = { + Name = %[1]q + } +} +`, rName)) +} + +func testAccOpenzfsFileSystemSecurityGroupIds2Config(rName string) string { + return acctest.ConfigCompose(testAccOpenzfsFileSystemBaseConfig(rName), fmt.Sprintf(` +resource "aws_security_group" "test1" { + description = "security group for FSx testing" + vpc_id = aws_vpc.test.id + + ingress { + cidr_blocks = [aws_vpc.test.cidr_block] + from_port = 0 + protocol = -1 + to_port = 0 + } + + egress { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + to_port = 0 + } + + tags = { + Name = %[1]q + } +} + +resource "aws_security_group" "test2" { + description = "security group for FSx testing" + vpc_id = aws_vpc.test.id + + ingress { + cidr_blocks = [aws_vpc.test.cidr_block] + from_port = 0 + protocol = -1 + to_port = 0 + } + + egress { + cidr_blocks = ["0.0.0.0/0"] + from_port = 0 + protocol = "-1" + to_port = 0 + } + + tags = { + Name = %[1]q + } +} + +resource "aws_fsx_openzfs_file_system" "test" { + security_group_ids = [aws_security_group.test1.id] + storage_capacity = 1024 + subnet_ids = [aws_subnet.test1.id] + deployment_type = "SINGLE_AZ_1" + throughput_capacity = 512 + storage_type = "SSD" + + tags = { + Name = %[1]q + } +} +`, rName)) +} + +func testAccOpenzfsFileSystemTags1Config(rName, tagKey1, tagValue1 string) string { + return acctest.ConfigCompose(testAccOpenzfsFileSystemBaseConfig(rName), fmt.Sprintf(` +resource "aws_fsx_openzfs_file_system" "test" { + storage_capacity = 1024 + subnet_ids = [aws_subnet.test1.id] + deployment_type = "SINGLE_AZ_1" + throughput_capacity = 512 + storage_type = "SSD" + + tags = { + %[1]q = %[2]q + } +} +`, tagKey1, tagValue1)) +} + +func testAccOpenzfsFileSystemTags2Config(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return acctest.ConfigCompose(testAccOpenzfsFileSystemBaseConfig(rName), fmt.Sprintf(` +resource "aws_fsx_openzfs_file_system" "test" { + storage_capacity = 1024 + subnet_ids = [aws_subnet.test1.id] + deployment_type = "SINGLE_AZ_1" + throughput_capacity = 512 + storage_type = "SSD" + + tags = { + %[1]q = %[2]q + %[3]q = %[4]q + } +} +`, tagKey1, tagValue1, tagKey2, tagValue2)) +} + +func testAccOpenzfsFileSystemCopyTagsConfig(rName, tagKey1, tagValue1, copyTags string) string { + return acctest.ConfigCompose(testAccOpenzfsFileSystemBaseConfig(rName), fmt.Sprintf(` +resource "aws_fsx_openzfs_file_system" "test" { + storage_capacity = 1024 + subnet_ids = [aws_subnet.test1.id] + deployment_type = "SINGLE_AZ_1" + throughput_capacity = 512 + storage_type = "SSD" + copy_tags_to_backups = %[3]s + copy_tags_to_volumes = %[3]s + + tags = { + %[1]q = %[2]q + } +} +`, tagKey1, tagValue1, copyTags)) +} + +func testAccOpenzfsFileSystemWeeklyMaintenanceStartTimeConfig(rName, weeklyMaintenanceStartTime string) 
string { + return acctest.ConfigCompose(testAccOpenzfsFileSystemBaseConfig(rName), fmt.Sprintf(` +resource "aws_fsx_openzfs_file_system" "test" { + storage_capacity = 1024 + subnet_ids = [aws_subnet.test1.id] + deployment_type = "SINGLE_AZ_1" + throughput_capacity = 512 + storage_type = "SSD" + weekly_maintenance_start_time = %[2]q + + tags = { + Name = %[1]q + } +} +`, rName, weeklyMaintenanceStartTime)) +} + +func testAccOpenzfsFileSystemDailyAutomaticBackupStartTimeConfig(rName, dailyAutomaticBackupStartTime string) string { + return acctest.ConfigCompose(testAccOpenzfsFileSystemBaseConfig(rName), fmt.Sprintf(` +resource "aws_fsx_openzfs_file_system" "test" { + storage_capacity = 1024 + subnet_ids = [aws_subnet.test1.id] + deployment_type = "SINGLE_AZ_1" + throughput_capacity = 512 + storage_type = "SSD" + daily_automatic_backup_start_time = %[2]q + automatic_backup_retention_days = 1 + + tags = { + Name = %[1]q + } +} +`, rName, dailyAutomaticBackupStartTime)) +} + +func testAccOpenzfsFileSystemAutomaticBackupRetentionDaysConfig(rName string, retention int) string { + return acctest.ConfigCompose(testAccOpenzfsFileSystemBaseConfig(rName), fmt.Sprintf(` +resource "aws_fsx_openzfs_file_system" "test" { + storage_capacity = 1024 + subnet_ids = [aws_subnet.test1.id] + deployment_type = "SINGLE_AZ_1" + throughput_capacity = 512 + storage_type = "SSD" + automatic_backup_retention_days = %[2]d + + tags = { + Name = %[1]q + } +} +`, rName, retention)) +} + +func testAccOpenzfsFileSystemKMSKeyIDConfig(rName string) string { + return acctest.ConfigCompose(testAccOpenzfsFileSystemBaseConfig(rName), fmt.Sprintf(` +resource "aws_kms_key" "test" { + description = %[1]q + deletion_window_in_days = 7 +} + +resource "aws_fsx_openzfs_file_system" "test" { + storage_capacity = 1024 + subnet_ids = [aws_subnet.test1.id] + deployment_type = "SINGLE_AZ_1" + throughput_capacity = 64 + storage_type = "SSD" + kms_key_id = aws_kms_key.test.arn + + tags = { + Name = %[1]q + } +} +`, rName)) +} + +func testAccOpenzfsFileSystemThroughputConfig(rName string, throughput int) string { + return acctest.ConfigCompose(testAccOpenzfsFileSystemBaseConfig(rName), fmt.Sprintf(` +resource "aws_fsx_openzfs_file_system" "test" { + storage_capacity = 1024 + subnet_ids = [aws_subnet.test1.id] + deployment_type = "SINGLE_AZ_1" + throughput_capacity = %[2]d + + tags = { + Name = %[1]q + } +} +`, rName, throughput)) +} + +func testAccOpenzfsFileSystemStorageTypeConfig(rName, storageType string) string { + return acctest.ConfigCompose(testAccOpenzfsFileSystemBaseConfig(rName), fmt.Sprintf(` +resource "aws_fsx_openzfs_file_system" "test" { + storage_capacity = 1024 + subnet_ids = [aws_subnet.test1.id] + deployment_type = "SINGLE_AZ_1" + storage_type = %[2]q + + tags = { + Name = %[1]q + } +} +`, rName, storageType)) +} + +func testAccOpenzfsFileSystemRootVolumeConfig(rName, dataCompression, exportOption, readOnly string, quotaSize int) string { + return acctest.ConfigCompose(testAccOpenzfsFileSystemBaseConfig(rName), fmt.Sprintf(` +resource "aws_fsx_openzfs_file_system" "test" { + storage_capacity = 1024 + subnet_ids = [aws_subnet.test1.id] + deployment_type = "SINGLE_AZ_1" + throughput_capacity = 64 + root_volume_configuration { + copy_tags_to_snapshots = true + data_compression_type = %[2]q + nfs_exports { + client_configurations { + clients = "10.0.1.0/24" + options = [%[3]q,"rw"] + } + } + read_only = %[4]s + user_and_group_quotas { + id = 10 + storage_capacity_quota_gib = %[5]d + type = "USER" + } + } + + tags = { + Name = %[1]q 
+ } +} +`, rName, dataCompression, exportOption, readOnly, quotaSize)) +} + +func testAccOpenzfsFileSystemRootVolume2ClientConfig(rName, dataCompression, exportOption, readOnly string, userQuota, groupQuota int) string { + return acctest.ConfigCompose(testAccOpenzfsFileSystemBaseConfig(rName), fmt.Sprintf(` +resource "aws_fsx_openzfs_file_system" "test" { + storage_capacity = 1024 + subnet_ids = [aws_subnet.test1.id] + deployment_type = "SINGLE_AZ_1" + throughput_capacity = 64 + root_volume_configuration { + copy_tags_to_snapshots = true + data_compression_type = %[2]q + nfs_exports { + client_configurations { + clients = "10.0.1.0/24" + options = [%[3]q,"rw"] + } + client_configurations { + clients = "*" + options = ["sync","rw"] + } + } + read_only = %[4]s + user_and_group_quotas { + id = 10 + storage_capacity_quota_gib = %[5]d + type = "USER" + } + user_and_group_quotas { + id = 20 + storage_capacity_quota_gib = %[6]d + type = "GROUP" + } + } + + tags = { + Name = %[1]q + } +} +`, rName, dataCompression, exportOption, readOnly, userQuota, groupQuota)) +} diff --git a/internal/service/fsx/sweep.go b/internal/service/fsx/sweep.go index 8e6c664359a..5aa751eb0ec 100644 --- a/internal/service/fsx/sweep.go +++ b/internal/service/fsx/sweep.go @@ -43,6 +43,11 @@ func init() { F: sweepFSXOntapVolume, }) + resource.AddTestSweepers("aws_fsx_openzfs_file_system", &resource.Sweeper{ + Name: "aws_fsx_openzfs_file_system", + F: sweepFSXOpenzfsFileSystems, + }) + resource.AddTestSweepers("aws_fsx_windows_file_system", &resource.Sweeper{ Name: "aws_fsx_windows_file_system", F: sweepFSXWindowsFileSystems, diff --git a/internal/service/fsx/wait.go b/internal/service/fsx/wait.go index 0ec842b4a98..834c5577be1 100644 --- a/internal/service/fsx/wait.go +++ b/internal/service/fsx/wait.go @@ -228,7 +228,7 @@ func waitVolumeCreated(conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Vo func waitVolumeUpdated(conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Volume, error) { stateConf := &resource.StateChangeConf{ Pending: []string{fsx.VolumeLifecyclePending}, - Target: []string{fsx.VolumeLifecycleCreated, fsx.VolumeLifecycleMisconfigured}, + Target: []string{fsx.VolumeLifecycleCreated, fsx.VolumeLifecycleMisconfigured, fsx.VolumeLifecycleAvailable}, Refresh: statusVolume(conn, id), Timeout: timeout, Delay: 150 * time.Second, diff --git a/website/docs/r/fsx_openzfs_file_system.html.markdown b/website/docs/r/fsx_openzfs_file_system.html.markdown new file mode 100644 index 00000000000..196a52caf39 --- /dev/null +++ b/website/docs/r/fsx_openzfs_file_system.html.markdown @@ -0,0 +1,112 @@ +--- +subcategory: "File System (FSx)" +layout: "aws" +page_title: "AWS: aws_fsx_openzfs_file_system" +description: |- + Manages an Amazon FSx for NetApp OpenZFS file system. +--- + +# Resource: aws_fsx_ontap_file_system + +Manages an Amazon FSx for NetApp OpenZFS file system. +See the [FSx OpenZFS User Guide](https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/what-is-fsx.html) for more information. + +## Example Usage + +```terraform +resource "aws_fsx_openzfs_file_system" "test" { + storage_capacity = 64 + subnet_ids = [aws_subnet.test1.id] + deployment_type = "SINGLE_AZ_1" + throughput_capacity = 64 +} +``` + +## Argument Reference + +The following arguments are supported: + +* `deployment_type` - (Required) - The filesystem deployment type. Only `SINGLE_AZ_1` is supported. +* `storage_capacity` - (Required) The storage capacity (GiB) of the file system. Valid values between `64` and `524288`. 
+* `subnet_ids` - (Required) A list of IDs for the subnets that the file system will be accessible from. Exactly 1 subnet needs to be provided.
+* `throughput_capacity` - (Required) Throughput (megabytes per second) of the file system in powers of 2 increments. Minimum of `64` and maximum of `4096`.
+* `automatic_backup_retention_days` - (Optional) The number of days to retain automatic backups. Setting this to 0 disables automatic backups. You can retain automatic backups for a maximum of 90 days.
+* `backup_id` - (Optional) The ID of the source backup to create the filesystem from.
+* `copy_tags_to_backups` - (Optional) A boolean flag indicating whether tags for the file system should be copied to backups. The default value is false.
+* `copy_tags_to_volumes` - (Optional) A boolean flag indicating whether tags for the file system should be copied to volumes. The default value is false.
+* `daily_automatic_backup_start_time` - (Optional) A recurring daily time, in the format HH:MM. HH is the zero-padded hour of the day (0-23), and MM is the zero-padded minute of the hour. For example, 05:00 specifies 5 AM daily. Requires `automatic_backup_retention_days` to be set.
+* `disk_iops_configuration` - (Optional) The SSD IOPS configuration for the Amazon FSx for OpenZFS file system. See [Disk Iops Configuration](#disk-iops-configuration) Below.
+* `kms_key_id` - (Optional) ARN for the KMS Key to encrypt the file system at rest. Defaults to an AWS managed KMS Key.
+* `root_volume_configuration` - (Optional) The configuration for the root volume of the file system. All other volumes are children of the root volume. See [Root Volume Configuration](#root-volume-configuration) Below.
+* `security_group_ids` - (Optional) A list of IDs for the security groups that apply to the specified network interfaces created for file system access. These security groups will apply to all network interfaces.
+* `storage_type` - (Optional) The filesystem storage type. defaults to `SSD`.
+* `tags` - (Optional) A map of tags to assign to the file system. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
+* `weekly_maintenance_start_time` - (Optional) The preferred start time (in `d:HH:MM` format) to perform weekly maintenance, in the UTC time zone.
+
+### Disk Iops Configuration
+
+* `iops` - (Optional) - The total number of SSD IOPS provisioned for the file system.
+* `mode` - (Optional) - Specifies whether the number of IOPS for the file system is determined automatically or provisioned by the user. Valid values are `AUTOMATIC` and `USER_PROVISIONED`. Default value is `AUTOMATIC`.
+
+### Root Volume Configuration
+* `copy_tags_to_snapshots` - (Optional) - A boolean flag indicating whether tags for the file system should be copied to snapshots. The default value is false.
+* `data_compression_type` - (Optional) - Method used to compress the data on the volume. Valid values are `NONE` or `ZSTD`. Child volumes that don't specify compession option will inherit from parent volume. This option on file system applies to the root volume.
+* `nfs_exports` - (Optional) - NFS export configuration for the root volume. Exactly 1 item. See [NFS Exports](#nfs-exports) Below.
+* `read_only` - (Optional) - specifies whether the volume is read-only. Default is false.
+* `user_and_group_quotas` - (Optional) - Specify how much storage users or groups can use on the volume. Maximum of 100 items. 
See [User and Group Quotas](#user-and-group-quotas) Below.
+
+### NFS Exports
+* `client_configurations` - (Required) - A list of configuration objects that contain the client and options for mounting the OpenZFS file system. Maximum of 25 items. See [Client Configurations](#client-configurations) Below.
+
+### Client Configurations
+* `clients` - (Required) - A value that specifies who can mount the file system. You can provide a wildcard character (*), an IP address (0.0.0.0), or a CIDR address (192.0.2.0/24). By default, Amazon FSx uses the wildcard character when specifying the client.
+* `options` - (Required) - The options to use when mounting the file system. Maximum of 20 items. See the [Linux NFS exports man page](https://linux.die.net/man/5/exports) for more information. `crossmount` and `sync` are used by default.
+
+### User and Group Quotas
+* `id` - (Required) - The ID of the user or group. Valid values between `0` and `2147483647`
+* `storage_capacity_quota_gib` - (Required) - The amount of storage that the user or group can use in gibibytes (GiB). Valid values between `0` and `2147483647`
+* `type` - (Required) - A value that specifies whether the quota applies to a user or group. Valid values are `USER` or `GROUP`.
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `arn` - Amazon Resource Name of the file system.
+* `dns_name` - DNS name for the file system, e.g., `fs-12345678.fsx.us-west-2.amazonaws.com`
+* `id` - Identifier of the file system, e.g., `fs-12345678`
+* `network_interface_ids` - Set of Elastic Network Interface identifiers from which the file system is accessible. The first network interface returned is the primary network interface.
+* `root_volume_id` - Identifier of the root volume, e.g., `fsvol-12345678`
+* `owner_id` - AWS account identifier that created the file system.
+* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block).
+* `vpc_id` - Identifier of the Virtual Private Cloud for the file system.
+
+## Timeouts
+
+`aws_fsx_openzfs_file_system` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts)
+configuration options:
+
+* `create` - (Default `60m`) How long to wait for the file system to be created.
+* `update` - (Default `60m`) How long to wait for the file system to be updated.
+* `delete` - (Default `60m`) How long to wait for the file system to be deleted.
+
+## Import
+
+FSx File Systems can be imported using the `id`, e.g.,
+
+```
+$ terraform import aws_fsx_openzfs_file_system.example fs-543ab12b1ca672f33
+```
+
+Certain resource arguments, like `security_group_ids`, do not have an FSx API method for reading the information after creation. If the argument is set in the Terraform configuration on an imported resource, Terraform will always show a difference. To work around this behavior, either omit the argument from the Terraform configuration or use [`ignore_changes`](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html#ignore_changes) to hide the difference, e.g.,
+
+```terraform
+resource "aws_fsx_openzfs_file_system" "example" {
+  # ... other configuration ... 
+ security_group_ids = [aws_security_group.example.id] + + # There is no FSx API for reading security_group_ids + lifecycle { + ignore_changes = [security_group_ids] + } +} +``` From 79152dca6c5c4c55b8949ca6c412f231265af9a7 Mon Sep 17 00:00:00 2001 From: awsaxeman <34073510+awsaxeman@users.noreply.github.com> Date: Sun, 19 Dec 2021 22:54:14 -0500 Subject: [PATCH 03/12] fix formating, testing, and nfs_exports --- internal/service/fsx/openzfs_file_system.go | 29 +-- .../service/fsx/openzfs_file_system_test.go | 194 +++++++++++------- internal/service/fsx/sweep.go | 48 +++++ internal/service/fsx/wait.go | 2 +- .../r/fsx_openzfs_file_system.html.markdown | 10 +- 5 files changed, 179 insertions(+), 104 deletions(-) diff --git a/internal/service/fsx/openzfs_file_system.go b/internal/service/fsx/openzfs_file_system.go index 1882c5e0dbf..9ee07650a7f 100644 --- a/internal/service/fsx/openzfs_file_system.go +++ b/internal/service/fsx/openzfs_file_system.go @@ -121,7 +121,7 @@ func ResourceOpenzfsFileSystem() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "client_configurations": { - Type: schema.TypeList, + Type: schema.TypeSet, Required: true, MaxItems: 25, Elem: &schema.Resource{ @@ -273,7 +273,8 @@ func resourceOepnzfsFileSystemCreate(d *schema.ResourceData, meta interface{}) e StorageType: aws.String(d.Get("storage_type").(string)), SubnetIds: flex.ExpandStringList(d.Get("subnet_ids").([]interface{})), OpenZFSConfiguration: &fsx.CreateFileSystemOpenZFSConfiguration{ - DeploymentType: aws.String(d.Get("deployment_type").(string)), + DeploymentType: aws.String(d.Get("deployment_type").(string)), + AutomaticBackupRetentionDays: aws.Int64(int64(d.Get("automatic_backup_retention_days").(int))), }, } @@ -282,7 +283,8 @@ func resourceOepnzfsFileSystemCreate(d *schema.ResourceData, meta interface{}) e StorageType: aws.String(d.Get("storage_type").(string)), SubnetIds: flex.ExpandStringList(d.Get("subnet_ids").([]interface{})), OpenZFSConfiguration: &fsx.CreateFileSystemOpenZFSConfiguration{ - DeploymentType: aws.String(d.Get("deployment_type").(string)), + DeploymentType: aws.String(d.Get("deployment_type").(string)), + AutomaticBackupRetentionDays: aws.Int64(int64(d.Get("automatic_backup_retention_days").(int))), }, } @@ -301,11 +303,6 @@ func resourceOepnzfsFileSystemCreate(d *schema.ResourceData, meta interface{}) e backupInput.KmsKeyId = aws.String(v.(string)) } - if v, ok := d.GetOk("automatic_backup_retention_days"); ok { - input.OpenZFSConfiguration.AutomaticBackupRetentionDays = aws.Int64(int64(v.(int))) - backupInput.OpenZFSConfiguration.AutomaticBackupRetentionDays = aws.Int64(int64(v.(int))) - } - if v, ok := d.GetOk("daily_automatic_backup_start_time"); ok { input.OpenZFSConfiguration.DailyAutomaticBackupStartTime = aws.String(v.(string)) backupInput.OpenZFSConfiguration.DailyAutomaticBackupStartTime = aws.String(v.(string)) @@ -588,8 +585,6 @@ func expandFsxOpenzfsRootVolumeConfiguration(cfg []interface{}) *fsx.OpenZFSCrea return nil } - log.Printf("[WARN] Root Volume Info (%v) ", cfg) - conf := cfg[0].(map[string]interface{}) out := fsx.OpenZFSCreateRootVolumeConfiguration{} @@ -685,8 +680,6 @@ func expandFsxOpenzfsUserAndGroupQuota(conf map[string]interface{}) *fsx.OpenZFS func expandFsxOpenzfsNfsExports(cfg []interface{}) []*fsx.OpenZFSNfsExport { exports := []*fsx.OpenZFSNfsExport{} - log.Printf("[WARN] NFS Info (%v) ", cfg) - for _, export := range cfg { expandedExport := expandFsxOpenzfsNfsExport(export.(map[string]interface{})) if expandedExport != nil 
{ @@ -698,13 +691,11 @@ func expandFsxOpenzfsNfsExports(cfg []interface{}) []*fsx.OpenZFSNfsExport { } -func expandFsxOpenzfsNfsExport(conf map[string]interface{}) *fsx.OpenZFSNfsExport { +func expandFsxOpenzfsNfsExport(cfg map[string]interface{}) *fsx.OpenZFSNfsExport { out := fsx.OpenZFSNfsExport{} - log.Printf("[DEBUG] NFS Export Info (%v) ", conf) - - if v, ok := conf["client_configurations"].([]interface{}); ok { - out.ClientConfigurations = expandFsxOpenzfsClinetConfigurations(v) + if v, ok := cfg["client_configurations"]; ok { + out.ClientConfigurations = expandFsxOpenzfsClinetConfigurations(v.(*schema.Set).List()) } return &out @@ -713,8 +704,6 @@ func expandFsxOpenzfsNfsExport(conf map[string]interface{}) *fsx.OpenZFSNfsExpor func expandFsxOpenzfsClinetConfigurations(cfg []interface{}) []*fsx.OpenZFSClientConfiguration { configurations := []*fsx.OpenZFSClientConfiguration{} - log.Printf("[DEBUG] Client Configs (%v) ", cfg) - for _, configuration := range cfg { expandedConfiguration := expandFsxOpenzfsClientConfiguration(configuration.(map[string]interface{})) if expandedConfiguration != nil { @@ -729,8 +718,6 @@ func expandFsxOpenzfsClinetConfigurations(cfg []interface{}) []*fsx.OpenZFSClien func expandFsxOpenzfsClientConfiguration(conf map[string]interface{}) *fsx.OpenZFSClientConfiguration { out := fsx.OpenZFSClientConfiguration{} - log.Printf("[DEBUG] Client Config (%v) ", conf) - if v, ok := conf["clients"].(string); ok && len(v) > 0 { out.Clients = aws.String(v) } diff --git a/internal/service/fsx/openzfs_file_system_test.go b/internal/service/fsx/openzfs_file_system_test.go index 12ea935d8c8..4fc42e16101 100644 --- a/internal/service/fsx/openzfs_file_system_test.go +++ b/internal/service/fsx/openzfs_file_system_test.go @@ -136,7 +136,7 @@ func TestAccFSxOpenzfsFileSystem_rootVolume(t *testing.T) { CheckDestroy: testAccCheckFsxOpenzfsFileSystemDestroy, Steps: []resource.TestStep{ { - Config: testAccOpenzfsFileSystemRootVolumeConfig(rName, "NONE", "sync", "false", 128), + Config: testAccOpenzfsFileSystemRootVolume1Config(rName, "NONE", "false", 128), Check: resource.ComposeTestCheckFunc( testAccCheckFsxOpenzfsFileSystemExists(resourceName, &filesystem1), resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.#", "1"), @@ -161,7 +161,7 @@ func TestAccFSxOpenzfsFileSystem_rootVolume(t *testing.T) { ImportStateVerifyIgnore: []string{"security_group_ids"}, }, { - Config: testAccOpenzfsFileSystemRootVolumeConfig(rName, "ZSTD", "async", "true", 256), + Config: testAccOpenzfsFileSystemRootVolume2Config(rName, "ZSTD", "true", 256), Check: resource.ComposeTestCheckFunc( testAccCheckFsxOpenzfsFileSystemExists(resourceName, &filesystem2), testAccCheckFsxOpenzfsFileSystemNotRecreated(&filesystem1, &filesystem2), @@ -181,7 +181,7 @@ func TestAccFSxOpenzfsFileSystem_rootVolume(t *testing.T) { ), }, { - Config: testAccOpenzfsFileSystemRootVolume2ClientConfig(rName, "NONE", "async", "false", 128, 1024), + Config: testAccOpenzfsFileSystemRootVolume3ClientConfig(rName, "NONE", "false", 128, 1024), Check: resource.ComposeTestCheckFunc( testAccCheckFsxOpenzfsFileSystemExists(resourceName, &filesystem3), testAccCheckFsxOpenzfsFileSystemNotRecreated(&filesystem1, &filesystem3), @@ -200,13 +200,19 @@ func TestAccFSxOpenzfsFileSystem_rootVolume(t *testing.T) { "options.1": "rw", }), resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.read_only", "false"), - resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.#", "2"), 
+ resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.#", "4"), resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.0.id", "10"), resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.0.storage_capacity_quota_gib", "128"), resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.0.type", "USER"), resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.1.id", "20"), resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.1.storage_capacity_quota_gib", "1024"), resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.1.type", "GROUP"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.2.id", "5"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.2.storage_capacity_quota_gib", "1024"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.2.type", "GROUP"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.3.id", "100"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.3.storage_capacity_quota_gib", "128"), + resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.3.type", "USER"), ), }, }, @@ -375,7 +381,7 @@ func TestAccFSxOpenzfsFileSystem_throughput(t *testing.T) { } func TestAccFSxOpenzfsFileSystem_storageType(t *testing.T) { - var filesystem1, filesystem2 fsx.FileSystem + var filesystem1 fsx.FileSystem resourceName := "aws_fsx_openzfs_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -386,10 +392,10 @@ func TestAccFSxOpenzfsFileSystem_storageType(t *testing.T) { CheckDestroy: testAccCheckFsxOpenzfsFileSystemDestroy, Steps: []resource.TestStep{ { - Config: testAccOpenzfsFileSystemStorageTypeConfig(rName, "HDD"), + Config: testAccOpenzfsFileSystemStorageTypeConfig(rName, "SSD"), Check: resource.ComposeTestCheckFunc( testAccCheckFsxOpenzfsFileSystemExists(resourceName, &filesystem1), - resource.TestCheckResourceAttr(resourceName, "storage_capacity", "HDD"), + resource.TestCheckResourceAttr(resourceName, "storage_type", "SSD"), ), }, { @@ -398,14 +404,6 @@ func TestAccFSxOpenzfsFileSystem_storageType(t *testing.T) { ImportStateVerify: true, ImportStateVerifyIgnore: []string{"security_group_ids"}, }, - { - Config: testAccOpenzfsFileSystemStorageTypeConfig(rName, "SSD"), - Check: resource.ComposeTestCheckFunc( - testAccCheckFsxOpenzfsFileSystemExists(resourceName, &filesystem2), - testAccCheckFsxOpenzfsFileSystemRecreated(&filesystem1, &filesystem2), - resource.TestCheckResourceAttr(resourceName, "storage_capacity", "SSD"), - ), - }, }, }) } @@ -662,8 +660,8 @@ resource "aws_fsx_openzfs_file_system" "test" { deployment_type = "SINGLE_AZ_1" throughput_capacity = 64 disk_iops_configuration { - mode = "USER_PROVISIONED" - iops = 3072 + mode = "USER_PROVISIONED" + iops = 3072 } } `) @@ -696,7 +694,7 @@ resource "aws_security_group" "test1" { resource "aws_fsx_openzfs_file_system" "test" { security_group_ids = [aws_security_group.test1.id] - storage_capacity = 1024 + storage_capacity = 64 subnet_ids = [aws_subnet.test1.id] deployment_type = "SINGLE_AZ_1" throughput_capacity = 512 @@ -758,8 +756,8 @@ resource 
"aws_security_group" "test2" { } resource "aws_fsx_openzfs_file_system" "test" { - security_group_ids = [aws_security_group.test1.id] - storage_capacity = 1024 + security_group_ids = [aws_security_group.test1.id, aws_security_group.test2.id] + storage_capacity = 64 subnet_ids = [aws_subnet.test1.id] deployment_type = "SINGLE_AZ_1" throughput_capacity = 512 @@ -775,7 +773,7 @@ resource "aws_fsx_openzfs_file_system" "test" { func testAccOpenzfsFileSystemTags1Config(rName, tagKey1, tagValue1 string) string { return acctest.ConfigCompose(testAccOpenzfsFileSystemBaseConfig(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_file_system" "test" { - storage_capacity = 1024 + storage_capacity = 64 subnet_ids = [aws_subnet.test1.id] deployment_type = "SINGLE_AZ_1" throughput_capacity = 512 @@ -791,7 +789,7 @@ resource "aws_fsx_openzfs_file_system" "test" { func testAccOpenzfsFileSystemTags2Config(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { return acctest.ConfigCompose(testAccOpenzfsFileSystemBaseConfig(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_file_system" "test" { - storage_capacity = 1024 + storage_capacity = 64 subnet_ids = [aws_subnet.test1.id] deployment_type = "SINGLE_AZ_1" throughput_capacity = 512 @@ -808,11 +806,11 @@ resource "aws_fsx_openzfs_file_system" "test" { func testAccOpenzfsFileSystemCopyTagsConfig(rName, tagKey1, tagValue1, copyTags string) string { return acctest.ConfigCompose(testAccOpenzfsFileSystemBaseConfig(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_file_system" "test" { - storage_capacity = 1024 - subnet_ids = [aws_subnet.test1.id] - deployment_type = "SINGLE_AZ_1" - throughput_capacity = 512 - storage_type = "SSD" + storage_capacity = 64 + subnet_ids = [aws_subnet.test1.id] + deployment_type = "SINGLE_AZ_1" + throughput_capacity = 512 + storage_type = "SSD" copy_tags_to_backups = %[3]s copy_tags_to_volumes = %[3]s @@ -826,7 +824,7 @@ resource "aws_fsx_openzfs_file_system" "test" { func testAccOpenzfsFileSystemWeeklyMaintenanceStartTimeConfig(rName, weeklyMaintenanceStartTime string) string { return acctest.ConfigCompose(testAccOpenzfsFileSystemBaseConfig(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_file_system" "test" { - storage_capacity = 1024 + storage_capacity = 64 subnet_ids = [aws_subnet.test1.id] deployment_type = "SINGLE_AZ_1" throughput_capacity = 512 @@ -843,7 +841,7 @@ resource "aws_fsx_openzfs_file_system" "test" { func testAccOpenzfsFileSystemDailyAutomaticBackupStartTimeConfig(rName, dailyAutomaticBackupStartTime string) string { return acctest.ConfigCompose(testAccOpenzfsFileSystemBaseConfig(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_file_system" "test" { - storage_capacity = 1024 + storage_capacity = 64 subnet_ids = [aws_subnet.test1.id] deployment_type = "SINGLE_AZ_1" throughput_capacity = 512 @@ -861,11 +859,11 @@ resource "aws_fsx_openzfs_file_system" "test" { func testAccOpenzfsFileSystemAutomaticBackupRetentionDaysConfig(rName string, retention int) string { return acctest.ConfigCompose(testAccOpenzfsFileSystemBaseConfig(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_file_system" "test" { - storage_capacity = 1024 + storage_capacity = 64 subnet_ids = [aws_subnet.test1.id] deployment_type = "SINGLE_AZ_1" throughput_capacity = 512 - storage_type = "SSD" + storage_type = "SSD" automatic_backup_retention_days = %[2]d tags = { @@ -883,7 +881,7 @@ resource "aws_kms_key" "test" { } resource "aws_fsx_openzfs_file_system" "test" { - storage_capacity = 1024 + storage_capacity = 64 subnet_ids = [aws_subnet.test1.id] 
deployment_type = "SINGLE_AZ_1" throughput_capacity = 64 @@ -900,7 +898,7 @@ resource "aws_fsx_openzfs_file_system" "test" { func testAccOpenzfsFileSystemThroughputConfig(rName string, throughput int) string { return acctest.ConfigCompose(testAccOpenzfsFileSystemBaseConfig(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_file_system" "test" { - storage_capacity = 1024 + storage_capacity = 64 subnet_ids = [aws_subnet.test1.id] deployment_type = "SINGLE_AZ_1" throughput_capacity = %[2]d @@ -915,10 +913,11 @@ resource "aws_fsx_openzfs_file_system" "test" { func testAccOpenzfsFileSystemStorageTypeConfig(rName, storageType string) string { return acctest.ConfigCompose(testAccOpenzfsFileSystemBaseConfig(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_file_system" "test" { - storage_capacity = 1024 + storage_capacity = 64 subnet_ids = [aws_subnet.test1.id] deployment_type = "SINGLE_AZ_1" storage_type = %[2]q + throughput_capacity = 64 tags = { Name = %[1]q @@ -927,73 +926,114 @@ resource "aws_fsx_openzfs_file_system" "test" { `, rName, storageType)) } -func testAccOpenzfsFileSystemRootVolumeConfig(rName, dataCompression, exportOption, readOnly string, quotaSize int) string { +func testAccOpenzfsFileSystemRootVolume1Config(rName, dataCompression, readOnly string, quotaSize int) string { return acctest.ConfigCompose(testAccOpenzfsFileSystemBaseConfig(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_file_system" "test" { - storage_capacity = 1024 + storage_capacity = 64 subnet_ids = [aws_subnet.test1.id] deployment_type = "SINGLE_AZ_1" throughput_capacity = 64 root_volume_configuration { - copy_tags_to_snapshots = true - data_compression_type = %[2]q - nfs_exports { - client_configurations { - clients = "10.0.1.0/24" - options = [%[3]q,"rw"] - } - } - read_only = %[4]s - user_and_group_quotas { - id = 10 - storage_capacity_quota_gib = %[5]d - type = "USER" - } + copy_tags_to_snapshots = true + data_compression_type = %[2]q + nfs_exports { + client_configurations { + clients = "10.0.1.0/24" + options = ["sync", "rw"] + } + } + read_only = %[3]s + user_and_group_quotas { + id = 10 + storage_capacity_quota_gib = %[4]d + type = "USER" + } } tags = { Name = %[1]q } } -`, rName, dataCompression, exportOption, readOnly, quotaSize)) +`, rName, dataCompression, readOnly, quotaSize)) } -func testAccOpenzfsFileSystemRootVolume2ClientConfig(rName, dataCompression, exportOption, readOnly string, userQuota, groupQuota int) string { +func testAccOpenzfsFileSystemRootVolume2Config(rName, dataCompression, readOnly string, quotaSize int) string { return acctest.ConfigCompose(testAccOpenzfsFileSystemBaseConfig(rName), fmt.Sprintf(` resource "aws_fsx_openzfs_file_system" "test" { - storage_capacity = 1024 + storage_capacity = 64 subnet_ids = [aws_subnet.test1.id] deployment_type = "SINGLE_AZ_1" throughput_capacity = 64 root_volume_configuration { - copy_tags_to_snapshots = true - data_compression_type = %[2]q - nfs_exports { - client_configurations { - clients = "10.0.1.0/24" - options = [%[3]q,"rw"] - } - client_configurations { - clients = "*" - options = ["sync","rw"] - } - } - read_only = %[4]s - user_and_group_quotas { - id = 10 - storage_capacity_quota_gib = %[5]d - type = "USER" - } - user_and_group_quotas { - id = 20 - storage_capacity_quota_gib = %[6]d - type = "GROUP" - } + copy_tags_to_snapshots = true + data_compression_type = %[2]q + nfs_exports { + client_configurations { + clients = "10.0.1.0/24" + options = ["async", "rw"] + } + } + read_only = %[3]s + user_and_group_quotas { + id = 10 + 
storage_capacity_quota_gib = %[4]d + type = "USER" + } + } + + tags = { + Name = %[1]q + } +} +`, rName, dataCompression, readOnly, quotaSize)) +} + +func testAccOpenzfsFileSystemRootVolume3ClientConfig(rName, dataCompression, readOnly string, userQuota, groupQuota int) string { + return acctest.ConfigCompose(testAccOpenzfsFileSystemBaseConfig(rName), fmt.Sprintf(` +resource "aws_fsx_openzfs_file_system" "test" { + storage_capacity = 64 + subnet_ids = [aws_subnet.test1.id] + deployment_type = "SINGLE_AZ_1" + throughput_capacity = 64 + root_volume_configuration { + copy_tags_to_snapshots = true + data_compression_type = %[2]q + nfs_exports { + client_configurations { + clients = "10.0.1.0/24" + options = ["async", "rw"] + } + client_configurations { + clients = "*" + options = ["sync", "rw"] + } + } + read_only = %[3]s + user_and_group_quotas { + id = 10 + storage_capacity_quota_gib = %[4]d + type = "USER" + } + user_and_group_quotas { + id = 20 + storage_capacity_quota_gib = %[5]d + type = "GROUP" + } + user_and_group_quotas { + id = 5 + storage_capacity_quota_gib = %[5]d + type = "GROUP" + } + user_and_group_quotas { + id = 100 + storage_capacity_quota_gib = %[4]d + type = "USER" + } } tags = { Name = %[1]q } } -`, rName, dataCompression, exportOption, readOnly, userQuota, groupQuota)) +`, rName, dataCompression, readOnly, userQuota, groupQuota)) } diff --git a/internal/service/fsx/sweep.go b/internal/service/fsx/sweep.go index 5aa751eb0ec..5eebb79b609 100644 --- a/internal/service/fsx/sweep.go +++ b/internal/service/fsx/sweep.go @@ -289,6 +289,54 @@ func sweepFSXOntapVolume(region string) error { return errs.ErrorOrNil() } +func sweepFSXOpenzfsFileSystems(region string) error { + client, err := sweep.SharedRegionalSweepClient(region) + + if err != nil { + return fmt.Errorf("error getting client: %w", err) + } + + conn := client.(*conns.AWSClient).FSxConn + sweepResources := make([]*sweep.SweepResource, 0) + var errs *multierror.Error + input := &fsx.DescribeFileSystemsInput{} + + err = conn.DescribeFileSystemsPages(input, func(page *fsx.DescribeFileSystemsOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, fs := range page.FileSystems { + if aws.StringValue(fs.FileSystemType) != fsx.FileSystemTypeOpenZFS { + continue + } + + r := ResourceOpenzfsFileSystem() + d := r.Data(nil) + d.SetId(aws.StringValue(fs.FileSystemId)) + + sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client)) + } + + return !lastPage + }) + + if err != nil { + errs = multierror.Append(errs, fmt.Errorf("error listing FSx OpenZFS File Systems for %s: %w", region, err)) + } + + if err = sweep.SweepOrchestrator(sweepResources); err != nil { + errs = multierror.Append(errs, fmt.Errorf("error sweeping FSx OpenZFS File Systems for %s: %w", region, err)) + } + + if sweep.SkipSweepError(errs.ErrorOrNil()) { + log.Printf("[WARN] Skipping FSx OpenZFS File System sweep for %s: %s", region, errs) + return nil + } + + return errs.ErrorOrNil() +} + func sweepFSXWindowsFileSystems(region string) error { client, err := sweep.SharedRegionalSweepClient(region) diff --git a/internal/service/fsx/wait.go b/internal/service/fsx/wait.go index 834c5577be1..198ef6d7429 100644 --- a/internal/service/fsx/wait.go +++ b/internal/service/fsx/wait.go @@ -225,7 +225,7 @@ func waitVolumeCreated(conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Vo return nil, err } -func waitVolumeUpdated(conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Volume, error) { +func waitVolumeUpdated(conn 
*fsx.FSx, id string, timeout time.Duration) (*fsx.Volume, error) { //nolint:unparam stateConf := &resource.StateChangeConf{ Pending: []string{fsx.VolumeLifecyclePending}, Target: []string{fsx.VolumeLifecycleCreated, fsx.VolumeLifecycleMisconfigured, fsx.VolumeLifecycleAvailable}, diff --git a/website/docs/r/fsx_openzfs_file_system.html.markdown b/website/docs/r/fsx_openzfs_file_system.html.markdown index 196a52caf39..0c82c50c500 100644 --- a/website/docs/r/fsx_openzfs_file_system.html.markdown +++ b/website/docs/r/fsx_openzfs_file_system.html.markdown @@ -3,12 +3,12 @@ subcategory: "File System (FSx)" layout: "aws" page_title: "AWS: aws_fsx_openzfs_file_system" description: |- - Manages an Amazon FSx for NetApp OpenZFS file system. + Manages an Amazon FSx for OpenZFS file system. --- -# Resource: aws_fsx_ontap_file_system +# Resource: aws_fsx_openzfs_file_system -Manages an Amazon FSx for NetApp OpenZFS file system. +Manages an Amazon FSx for OpenZFS file system. See the [FSx OpenZFS User Guide](https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/what-is-fsx.html) for more information. ## Example Usage @@ -39,7 +39,7 @@ The following arguments are supported: * `kms_key_id` - (Optional) ARN for the KMS Key to encrypt the file system at rest, Defaults to an AWS managed KMS Key. * `root_volume_configuration` - (Optional) The configuration for the root volume of the file system. All other volumes are children or the root volume. See [Root Volume Configuration](#root-volume-configuration) Below. * `security_group_ids` - (Optional) A list of IDs for the security groups that apply to the specified network interfaces created for file system access. These security groups will apply to all network interfaces. -* `storage_type` - (Optional) The filesystem storage type. defaults to `SSD`. +* `storage_type` - (Optional) The filesystem storage type. Only `SSD` is supported. * `tags` - (Optional) A map of tags to assign to the file system. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `weekly_maintenance_start_time` - (Optional) The preferred start time (in `d:HH:MM` format) to perform weekly maintenance, in the UTC time zone. @@ -50,7 +50,7 @@ The following arguments are supported: ### Root Volume Configuration * `copy_tags_to_snapshots` - (Optional) - A boolean flag indicating whether tags for the file system should be copied to snapshots. The default value is false. -* `data_compression_type` - (Optional) - Method used to compress the data on the volume. Valid values are `NONE` or `ZSTD`. Child volumes that don't specify compession option will inherit from parent volume. This option on file system applies to the root volume. +* `data_compression_type` - (Optional) - Method used to compress the data on the volume. Valid values are `NONE` or `ZSTD`. Child volumes that don't specify compression option will inherit from parent volume. This option on file system applies to the root volume. * `nfs_exports` - (Optional) - NFS export configuration for the root volume. Exactly 1 item. See [NFS Exports](#nfs-exports) Below. * `read_only` - (Optional) - specifies whether the volume is read-only. Default is false. * `user_and_group_quotas` - (Optional) - Specify how much storage users or groups can use on the volume. Maximum of 100 items. See [User and Group Quotas](#user-and-group-quotas) Below. 
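A subtle change in this patch is that the create path now populates `AutomaticBackupRetentionDays` unconditionally in the input structs instead of inside a `GetOk` guard. The reason is a Plugin SDK v2 quirk: `GetOk` reports `ok == false` when an attribute holds its type's zero value, so an explicit `automatic_backup_retention_days = 0` was previously indistinguishable from the attribute being unset. A minimal sketch of the difference, assuming SDK v2 semantics and the schema's default of `0`:

```go
// Before: an explicit automatic_backup_retention_days = 0 is silently
// dropped, because 0 is the zero value for TypeInt and GetOk then
// returns ok == false.
if v, ok := d.GetOk("automatic_backup_retention_days"); ok {
	input.OpenZFSConfiguration.AutomaticBackupRetentionDays = aws.Int64(int64(v.(int)))
}

// After: the value is read unconditionally, so 0 (disable automatic
// backups) reaches the API just like any other retention value.
input.OpenZFSConfiguration.AutomaticBackupRetentionDays = aws.Int64(int64(d.Get("automatic_backup_retention_days").(int)))
```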
From f43691c5b4c2dac6498271005d071b9cc9995f7b Mon Sep 17 00:00:00 2001 From: awsaxeman <34073510+awsaxeman@users.noreply.github.com> Date: Mon, 20 Dec 2021 00:14:49 -0500 Subject: [PATCH 04/12] fix errors --- internal/service/fsx/openzfs_file_system_test.go | 2 +- internal/service/fsx/sweep.go | 2 +- website/docs/r/fsx_openzfs_file_system.html.markdown | 10 +++++++--- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/internal/service/fsx/openzfs_file_system_test.go b/internal/service/fsx/openzfs_file_system_test.go index 4fc42e16101..9f8a600e806 100644 --- a/internal/service/fsx/openzfs_file_system_test.go +++ b/internal/service/fsx/openzfs_file_system_test.go @@ -408,7 +408,7 @@ func TestAccFSxOpenzfsFileSystem_storageType(t *testing.T) { }) } -func TestAccFSxOnpenzfsFileSystem_weeklyMaintenanceStartTime(t *testing.T) { +func TestAccFSxOpenzfsFileSystem_weeklyMaintenanceStartTime(t *testing.T) { var filesystem1, filesystem2 fsx.FileSystem resourceName := "aws_fsx_openzfs_file_system.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) diff --git a/internal/service/fsx/sweep.go b/internal/service/fsx/sweep.go index 5eebb79b609..bed57fc16eb 100644 --- a/internal/service/fsx/sweep.go +++ b/internal/service/fsx/sweep.go @@ -307,7 +307,7 @@ func sweepFSXOpenzfsFileSystems(region string) error { } for _, fs := range page.FileSystems { - if aws.StringValue(fs.FileSystemType) != fsx.FileSystemTypeOpenZFS { + if aws.StringValue(fs.FileSystemType) != fsx.FileSystemTypeOpenzfs { continue } diff --git a/website/docs/r/fsx_openzfs_file_system.html.markdown b/website/docs/r/fsx_openzfs_file_system.html.markdown index 0c82c50c500..e780ddc1bfc 100644 --- a/website/docs/r/fsx_openzfs_file_system.html.markdown +++ b/website/docs/r/fsx_openzfs_file_system.html.markdown @@ -39,7 +39,7 @@ The following arguments are supported: * `kms_key_id` - (Optional) ARN for the KMS Key to encrypt the file system at rest, Defaults to an AWS managed KMS Key. * `root_volume_configuration` - (Optional) The configuration for the root volume of the file system. All other volumes are children or the root volume. See [Root Volume Configuration](#root-volume-configuration) Below. * `security_group_ids` - (Optional) A list of IDs for the security groups that apply to the specified network interfaces created for file system access. These security groups will apply to all network interfaces. -* `storage_type` - (Optional) The filesystem storage type. Only `SSD` is supported. +* `storage_type` - (Optional) The filesystem storage type. Only `SSD` is supported. * `tags` - (Optional) A map of tags to assign to the file system. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `weekly_maintenance_start_time` - (Optional) The preferred start time (in `d:HH:MM` format) to perform weekly maintenance, in the UTC time zone. @@ -49,20 +49,24 @@ The following arguments are supported: * `mode` - (Optional) - Specifies whether the number of IOPS for the file system is using the system. Valid values are `AUTOMATIC` and `USER_PROVISIONED`. Default value is `AUTOMATIC`. ### Root Volume Configuration + * `copy_tags_to_snapshots` - (Optional) - A boolean flag indicating whether tags for the file system should be copied to snapshots. The default value is false. -* `data_compression_type` - (Optional) - Method used to compress the data on the volume. 
Valid values are `NONE` or `ZSTD`. Child volumes that don't specify compression option will inherit from parent volume. This option on file system applies to the root volume.
+* `data_compression_type` - (Optional) - Method used to compress the data on the volume. Valid values are `NONE` or `ZSTD`. Child volumes that don't specify a compression option will inherit it from the parent volume. On a file system, this option applies to the root volume.
 * `nfs_exports` - (Optional) - NFS export configuration for the root volume. Exactly 1 item. See [NFS Exports](#nfs-exports) Below.
 * `read_only` - (Optional) - specifies whether the volume is read-only. Default is false.
 * `user_and_group_quotas` - (Optional) - Specify how much storage users or groups can use on the volume. Maximum of 100 items. See [User and Group Quotas](#user-and-group-quotas) Below.
 
 ### NFS Exports
+
 * `client_configurations` - (Required) - A list of configuration objects that contain the client and options for mounting the OpenZFS file system. Maximum of 25 items. See [Client Configurations](#client-configurations) Below.
 
 ### Client Configurations
+
 * `clients` - (Required) - A value that specifies who can mount the file system. You can provide a wildcard character (*), an IP address (0.0.0.0), or a CIDR address (192.0.2.0/24). By default, Amazon FSx uses the wildcard character when specifying the client.
-* `options` - (Required) - The options to use when mounting the file system. Maximum of 20 items. See the [Linux NFS exports man page](https://linux.die.net/man/5/exports) for more information. `crossmount` and `sync` are used by default.
+* `options` - (Required) - The options to use when mounting the file system. Maximum of 20 items. See the [Linux NFS exports man page](https://linux.die.net/man/5/exports) for more information. `crossmount` and `sync` are used by default.
 
 ### User and Group Quotas
+
 * `id` - (Required) - The ID of the user or group. Valid values between `0` and `2147483647`
 * `storage_capacity_quota_gib` - (Required) - The amount of storage that the user or group can use in gibibytes (GiB). Valid values between `0` and `2147483647`
 * `type` - (Required) - A value that specifies whether the quota applies to a user or group. Valid values are `USER` or `GROUP`.
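With the documentation in its final form for this series, the root volume arguments above compose as in the following sketch. It is illustrative only: the subnet reference is a placeholder, and the values mirror the acceptance tests earlier in the series.

```terraform
resource "aws_fsx_openzfs_file_system" "example" {
  storage_capacity    = 64
  subnet_ids          = [aws_subnet.example.id]
  deployment_type     = "SINGLE_AZ_1"
  throughput_capacity = 64

  root_volume_configuration {
    copy_tags_to_snapshots = true
    data_compression_type  = "ZSTD"
    read_only              = false

    nfs_exports {
      client_configurations {
        clients = "10.0.1.0/24"
        options = ["sync", "rw"]
      }
    }

    user_and_group_quotas {
      id                         = 10
      storage_capacity_quota_gib = 128
      type                       = "USER"
    }
  }
}
```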
From 091df9d7caea8e89de6a188a6df76bbc66bcbb50 Mon Sep 17 00:00:00 2001 From: awsaxeman <34073510+awsaxeman@users.noreply.github.com> Date: Wed, 29 Dec 2021 00:21:53 -0500 Subject: [PATCH 05/12] added openzfs volume --- .changelog/22234.txt | 4 + internal/provider/provider.go | 1 + internal/service/fsx/openzfs_volume.go | 565 ++++++++++++++ internal/service/fsx/openzfs_volume_test.go | 702 ++++++++++++++++++ internal/service/fsx/sweep.go | 56 ++ internal/service/fsx/wait.go | 4 +- .../docs/r/fsx_openzfs_volume.html.markdown | 77 ++ 7 files changed, 1407 insertions(+), 2 deletions(-) create mode 100644 internal/service/fsx/openzfs_volume.go create mode 100644 internal/service/fsx/openzfs_volume_test.go create mode 100644 website/docs/r/fsx_openzfs_volume.html.markdown diff --git a/.changelog/22234.txt b/.changelog/22234.txt index 8d43ac42edd..721da0fa251 100644 --- a/.changelog/22234.txt +++ b/.changelog/22234.txt @@ -1,3 +1,7 @@ ```release-note:new-resource aws_fsx_openzfs_file_system +``` + +```release-note:new-resource +aws_fsx_openzfs_volume ``` \ No newline at end of file diff --git a/internal/provider/provider.go b/internal/provider/provider.go index 4c952674431..9961f0449bd 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -1219,6 +1219,7 @@ func Provider() *schema.Provider { "aws_fsx_ontap_storage_virtual_machine": fsx.ResourceOntapStorageVirtualMachine(), "aws_fsx_ontap_volume": fsx.ResourceOntapVolume(), "aws_fsx_openzfs_file_system": fsx.ResourceOpenzfsFileSystem(), + "aws_fsx_openzfs_volume": fsx.ResourceOpenzfsVolume(), "aws_fsx_windows_file_system": fsx.ResourceWindowsFileSystem(), "aws_gamelift_alias": gamelift.ResourceAlias(), diff --git a/internal/service/fsx/openzfs_volume.go b/internal/service/fsx/openzfs_volume.go new file mode 100644 index 00000000000..5700e6b481c --- /dev/null +++ b/internal/service/fsx/openzfs_volume.go @@ -0,0 +1,565 @@ +package fsx + +import ( + "fmt" + "log" + "regexp" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/fsx" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/flex" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/internal/verify" +) + +func ResourceOpenzfsVolume() *schema.Resource { + return &schema.Resource{ + Create: resourceOepnzfsVolumeCreate, + Read: resourceOpenzfsVolumeRead, + Update: resourceOpenzfsVolumeUpdate, + Delete: resourceOpenzfsVolumeDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "copy_tags_to_snapshots": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + "data_compression_type": { + Type: schema.TypeString, + Optional: true, + Default: "NONE", + ValidateFunc: validation.StringInSlice(fsx.OpenZFSDataCompressionType_Values(), false), + }, + "name": { + Type: 
schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 203), + }, + "nfs_exports": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + DiffSuppressFunc: verify.SuppressMissingOptionalConfigurationBlock, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "client_configurations": { + Type: schema.TypeSet, + Required: true, + MaxItems: 25, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "clients": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(1, 128), + validation.StringMatch(regexp.MustCompile(`^[ -~]{1,128}$`), "must be either IP Address or CIDR"), + ), + }, + "options": { + Type: schema.TypeList, + Required: true, + MinItems: 1, + MaxItems: 20, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringLenBetween(1, 128), + }, + }, + }, + }, + }, + }, + }, + }, + "origin_snapshot": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "copy_strategy": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(fsx.OpenZFSCopyStrategy_Values(), false), + }, + "snapshot_arn": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(8, 512), + validation.StringMatch(regexp.MustCompile(`^arn:.*`), "must specify the full ARN of the snapshot"), + ), + }, + }, + }, + }, + "parent_volume_id": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.All( + validation.StringLenBetween(23, 23), + validation.StringMatch(regexp.MustCompile(`^(fsvol-[0-9a-f]{17,})$`), "must specify a filesystem id i.e. fs-12345678"), + ), + }, + "read_only": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + "storage_capacity_quota_gib": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntBetween(0, 2147483647), + }, + "storage_capacity_reservation_gib": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validation.IntBetween(0, 2147483647), + }, + "user_and_group_quotas": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 100, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(0, 2147483647), + }, + "storage_capacity_quota_gib": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validation.IntBetween(0, 2147483647), + }, + "type": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(fsx.OpenZFSQuotaType_Values(), false), + }, + }, + }, + }, + "tags": tftags.TagsSchema(), + "tags_all": tftags.TagsSchemaComputed(), + "volume_type": { + Type: schema.TypeString, + Default: fsx.VolumeTypeOpenzfs, + Optional: true, + ValidateFunc: validation.StringInSlice(fsx.VolumeType_Values(), false), + }, + }, + + CustomizeDiff: verify.SetTagsDiff, + } +} + +func resourceOepnzfsVolumeCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*conns.AWSClient).FSxConn + defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig + tags := defaultTagsConfig.MergeTags(tftags.New(d.Get("tags").(map[string]interface{}))) + + input := &fsx.CreateVolumeInput{ + ClientRequestToken: aws.String(resource.UniqueId()), + Name: aws.String(d.Get("name").(string)), + VolumeType: aws.String(d.Get("volume_type").(string)), + OpenZFSConfiguration: 
&fsx.CreateOpenZFSVolumeConfiguration{
+			ParentVolumeId: aws.String(d.Get("parent_volume_id").(string)),
+		},
+	}
+
+	if v, ok := d.GetOk("copy_tags_to_snapshots"); ok {
+		input.OpenZFSConfiguration.CopyTagsToSnapshots = aws.Bool(v.(bool))
+	}
+
+	if v, ok := d.GetOk("data_compression_type"); ok {
+		input.OpenZFSConfiguration.DataCompressionType = aws.String(v.(string))
+	}
+
+	if v, ok := d.GetOk("nfs_exports"); ok {
+		input.OpenZFSConfiguration.NfsExports = expandFsxOpenzfsVolumeNfsExports(v.([]interface{}))
+	}
+
+	if v, ok := d.GetOk("read_only"); ok {
+		input.OpenZFSConfiguration.ReadOnly = aws.Bool(v.(bool))
+	}
+
+	if v, ok := d.GetOk("storage_capacity_quota_gib"); ok {
+		input.OpenZFSConfiguration.StorageCapacityQuotaGiB = aws.Int64(int64(v.(int)))
+	}
+
+	if v, ok := d.GetOk("storage_capacity_reservation_gib"); ok {
+		input.OpenZFSConfiguration.StorageCapacityReservationGiB = aws.Int64(int64(v.(int)))
+	}
+
+	if len(tags) > 0 {
+		input.Tags = Tags(tags.IgnoreAWS())
+	}
+
+	if v, ok := d.GetOk("user_and_group_quotas"); ok {
+		input.OpenZFSConfiguration.UserAndGroupQuotas = expandFsxOpenzfsVolumeUserAndGroupQuotas(v.([]interface{}))
+	}
+
+	if v, ok := d.GetOk("origin_snapshot"); ok {
+		input.OpenZFSConfiguration.OriginSnapshot = expandFsxOpenzfsCreateVolumeOriginSnapshot(v.([]interface{}))
+
+		log.Printf("[DEBUG] Creating FSx OpenZFS Volume: %s", input)
+		result, err := conn.CreateVolume(input)
+
+		if err != nil {
+			return fmt.Errorf("error creating FSx OpenZFS Volume from snapshot: %w", err)
+		}
+
+		d.SetId(aws.StringValue(result.Volume.VolumeId))
+	} else {
+		log.Printf("[DEBUG] Creating FSx OpenZFS Volume: %s", input)
+		result, err := conn.CreateVolume(input)
+
+		if err != nil {
+			return fmt.Errorf("error creating FSx OpenZFS Volume: %w", err)
+		}
+
+		d.SetId(aws.StringValue(result.Volume.VolumeId))
+	}
+
+	if _, err := waitVolumeCreated(conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil {
+		return fmt.Errorf("error waiting for FSx OpenZFS Volume (%s) create: %w", d.Id(), err)
+	}
+
+	return resourceOpenzfsVolumeRead(d, meta)
+}
+
+func resourceOpenzfsVolumeRead(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*conns.AWSClient).FSxConn
+	defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig
+	ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig
+
+	volume, err := FindVolumeByID(conn, d.Id())
+	if !d.IsNewResource() && tfresource.NotFound(err) {
+		log.Printf("[WARN] FSx OpenZFS volume (%s) not found, removing from state", d.Id())
+		d.SetId("")
+		return nil
+	}
+
+	if err != nil {
+		return fmt.Errorf("error reading FSx OpenZFS Volume (%s): %w", d.Id(), err)
+	}
+
+	openzfsConfig := volume.OpenZFSConfiguration
+
+	if volume.OntapConfiguration != nil {
+		return fmt.Errorf("expected FSx OpenZFS Volume, found FSx ONTAP Volume: %s", d.Id())
+	}
+
+	if openzfsConfig == nil {
+		return fmt.Errorf("error describing FSx OpenZFS Volume (%s): empty OpenZFS configuration", d.Id())
+	}
+
+	d.Set("arn", volume.ResourceARN)
+	d.Set("copy_tags_to_snapshots", openzfsConfig.CopyTagsToSnapshots)
+	d.Set("data_compression_type", openzfsConfig.DataCompressionType)
+	d.Set("name", volume.Name)
+	d.Set("origin_snapshot", openzfsConfig.OriginSnapshot)
+	d.Set("parent_volume_id", openzfsConfig.ParentVolumeId)
+	d.Set("read_only", openzfsConfig.ReadOnly)
+	d.Set("storage_capacity_quota_gib", openzfsConfig.StorageCapacityQuotaGiB)
+	d.Set("storage_capacity_reservation_gib", openzfsConfig.StorageCapacityReservationGiB)
+	d.Set("volume_type", volume.VolumeType)
+
+	// Volume tags are not returned by the describe call, so make a separate ListTags call
+	tags, tagserr := ListTags(conn, *volume.ResourceARN)
+
+	if tagserr != nil {
+		return fmt.Errorf("error reading Tags for FSx OpenZFS Volume (%s): %w", d.Id(), tagserr)
+	} else {
+		tags = tags.IgnoreAWS().IgnoreConfig(ignoreTagsConfig)
+	}
+
+	//lintignore:AWSR002
+	if err := d.Set("tags", tags.RemoveDefaultConfig(defaultTagsConfig).Map()); err != nil {
+		return fmt.Errorf("error setting tags: %w", err)
+	}
+
+	if err := d.Set("tags_all", tags.Map()); err != nil {
+		return fmt.Errorf("error setting tags_all: %w", err)
+	}
+
+	if err := d.Set("nfs_exports", flattenFsxOpenzfsFileNfsExports(openzfsConfig.NfsExports)); err != nil {
+		return fmt.Errorf("error setting nfs_exports: %w", err)
+	}
+
+	if err := d.Set("user_and_group_quotas", flattenFsxOpenzfsFileUserAndGroupQuotas(openzfsConfig.UserAndGroupQuotas)); err != nil {
+		return fmt.Errorf("error setting user_and_group_quotas: %w", err)
+	}
+
+	return nil
+}
+
+func resourceOpenzfsVolumeUpdate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*conns.AWSClient).FSxConn
+
+	if d.HasChange("tags_all") {
+		o, n := d.GetChange("tags_all")
+
+		if err := UpdateTags(conn, d.Get("arn").(string), o, n); err != nil {
+			return fmt.Errorf("error updating FSx OpenZFS Volume (%s) tags: %w", d.Get("arn").(string), err)
+		}
+	}
+
+	if d.HasChangesExcept("tags_all", "tags") {
+		input := &fsx.UpdateVolumeInput{
+			ClientRequestToken:   aws.String(resource.UniqueId()),
+			VolumeId:             aws.String(d.Id()),
+			OpenZFSConfiguration: &fsx.UpdateOpenZFSVolumeConfiguration{},
+		}
+
+		if d.HasChange("data_compression_type") {
+			input.OpenZFSConfiguration.DataCompressionType = aws.String(d.Get("data_compression_type").(string))
+		}
+
+		if d.HasChange("name") {
+			input.Name = aws.String(d.Get("name").(string))
+		}
+
+		if d.HasChange("nfs_exports") {
+			input.OpenZFSConfiguration.NfsExports = expandFsxOpenzfsVolumeNfsExports(d.Get("nfs_exports").([]interface{}))
+		}
+
+		if d.HasChange("read_only") {
+			input.OpenZFSConfiguration.ReadOnly = aws.Bool(d.Get("read_only").(bool))
+		}
+
+		if d.HasChange("storage_capacity_quota_gib") {
+			input.OpenZFSConfiguration.StorageCapacityQuotaGiB = aws.Int64(int64(d.Get("storage_capacity_quota_gib").(int)))
+		}
+
+		if d.HasChange("storage_capacity_reservation_gib") {
+			input.OpenZFSConfiguration.StorageCapacityReservationGiB = aws.Int64(int64(d.Get("storage_capacity_reservation_gib").(int)))
+		}
+
+		if d.HasChange("user_and_group_quotas") {
+			input.OpenZFSConfiguration.UserAndGroupQuotas = expandFsxOpenzfsVolumeUserAndGroupQuotas(d.Get("user_and_group_quotas").([]interface{}))
+		}
+
+		_, err := conn.UpdateVolume(input)
+
+		if err != nil {
+			return fmt.Errorf("error updating FSx OpenZFS Volume (%s): %w", d.Id(), err)
+		}
+
+		if _, err := waitVolumeUpdated(conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil {
+			return fmt.Errorf("error waiting for FSx OpenZFS Volume (%s) update: %w", d.Id(), err)
+		}
+
+	}
+
+	return resourceOpenzfsVolumeRead(d, meta)
+}
+
+func resourceOpenzfsVolumeDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*conns.AWSClient).FSxConn
+
+	log.Printf("[DEBUG] Deleting FSx OpenZFS Volume: %s", d.Id())
+	_, err := conn.DeleteVolume(&fsx.DeleteVolumeInput{
+		VolumeId: aws.String(d.Id()),
+	})
+
+	if tfawserr.ErrCodeEquals(err, fsx.ErrCodeVolumeNotFound) {
+		return nil
+	}
+
+	if err != nil {
+		return fmt.Errorf("error deleting FSx OpenZFS Volume (%s): %w", d.Id(), err)
+	}
+
+	if _, err := 
waitVolumeDeleted(conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { + return fmt.Errorf("error waiting for FSx OpenZFS Volume (%s) delete: %w", d.Id(), err) + } + + return nil +} + +func expandFsxOpenzfsVolumeUserAndGroupQuotas(cfg []interface{}) []*fsx.OpenZFSUserOrGroupQuota { + quotas := []*fsx.OpenZFSUserOrGroupQuota{} + + for _, quota := range cfg { + expandedQuota := expandFsxOpenzfsVolumeUserAndGroupQuota(quota.(map[string]interface{})) + if expandedQuota != nil { + quotas = append(quotas, expandedQuota) + } + } + + return quotas + +} + +func expandFsxOpenzfsVolumeUserAndGroupQuota(conf map[string]interface{}) *fsx.OpenZFSUserOrGroupQuota { + if len(conf) < 1 { + return nil + } + + out := fsx.OpenZFSUserOrGroupQuota{} + + if v, ok := conf["id"].(int); ok { + out.Id = aws.Int64(int64(v)) + } + + if v, ok := conf["storage_capacity_quota_gib"].(int); ok { + out.StorageCapacityQuotaGiB = aws.Int64(int64(v)) + } + + if v, ok := conf["type"].(string); ok { + out.Type = aws.String(v) + } + + return &out + +} + +func expandFsxOpenzfsVolumeNfsExports(cfg []interface{}) []*fsx.OpenZFSNfsExport { + exports := []*fsx.OpenZFSNfsExport{} + + for _, export := range cfg { + expandedExport := expandFsxOpenzfsVolumeNfsExport(export.(map[string]interface{})) + if expandedExport != nil { + exports = append(exports, expandedExport) + } + } + + return exports + +} + +func expandFsxOpenzfsVolumeNfsExport(cfg map[string]interface{}) *fsx.OpenZFSNfsExport { + out := fsx.OpenZFSNfsExport{} + + if v, ok := cfg["client_configurations"]; ok { + out.ClientConfigurations = expandFsxOpenzfsVolumeClinetConfigurations(v.(*schema.Set).List()) + } + + return &out +} + +func expandFsxOpenzfsVolumeClinetConfigurations(cfg []interface{}) []*fsx.OpenZFSClientConfiguration { + configurations := []*fsx.OpenZFSClientConfiguration{} + + for _, configuration := range cfg { + expandedConfiguration := expandFsxOpenzfsVolumeClientConfiguration(configuration.(map[string]interface{})) + if expandedConfiguration != nil { + configurations = append(configurations, expandedConfiguration) + } + } + + return configurations + +} + +func expandFsxOpenzfsVolumeClientConfiguration(conf map[string]interface{}) *fsx.OpenZFSClientConfiguration { + out := fsx.OpenZFSClientConfiguration{} + + if v, ok := conf["clients"].(string); ok && len(v) > 0 { + out.Clients = aws.String(v) + } + + if v, ok := conf["options"].([]interface{}); ok { + out.Options = flex.ExpandStringList(v) + } + + return &out +} + +func expandFsxOpenzfsCreateVolumeOriginSnapshot(cfg []interface{}) *fsx.CreateOpenZFSOriginSnapshotConfiguration { + if len(cfg) < 1 { + return nil + } + + conf := cfg[0].(map[string]interface{}) + + out := fsx.CreateOpenZFSOriginSnapshotConfiguration{} + + if v, ok := conf["copy_strategy"].(string); ok { + out.CopyStrategy = aws.String(v) + } + + if v, ok := conf["snapshot_arn"].(string); ok { + out.SnapshotARN = aws.String(v) + } + + return &out +} + +func flattenFsxOpenzfsVolumeNfsExports(rs []*fsx.OpenZFSNfsExport) []map[string]interface{} { + exports := make([]map[string]interface{}, 0) + + for _, export := range rs { + cfg := make(map[string]interface{}) + cfg["client_configurations"] = flattenFsxOpenzfsVolumeClientConfigurations(export.ClientConfigurations) + exports = append(exports, cfg) + } + + if len(exports) > 0 { + return exports + } + + return nil +} + +func flattenFsxOpenzfsVolumeClientConfigurations(rs []*fsx.OpenZFSClientConfiguration) []map[string]interface{} { + configurations := make([]map[string]interface{}, 0) + + for 
_, configuration := range rs { + cfg := make(map[string]interface{}) + cfg["clients"] = aws.StringValue(configuration.Clients) + cfg["options"] = flex.FlattenStringList(configuration.Options) + configurations = append(configurations, cfg) + } + + if len(configurations) > 0 { + return configurations + } + + return nil +} + +func flattenFsxOpenzfsVolumeUserAndGroupQuotas(rs []*fsx.OpenZFSUserOrGroupQuota) []map[string]interface{} { + quotas := make([]map[string]interface{}, 0) + + for _, quota := range rs { + cfg := make(map[string]interface{}) + cfg["id"] = aws.Int64Value(quota.Id) + cfg["storage_capacity_quota_gib"] = aws.Int64Value(quota.StorageCapacityQuotaGiB) + cfg["type"] = aws.StringValue(quota.Type) + quotas = append(quotas, cfg) + } + + if len(quotas) > 0 { + return quotas + } + + return nil +} diff --git a/internal/service/fsx/openzfs_volume_test.go b/internal/service/fsx/openzfs_volume_test.go new file mode 100644 index 00000000000..1a05e7dd6d7 --- /dev/null +++ b/internal/service/fsx/openzfs_volume_test.go @@ -0,0 +1,702 @@ +package fsx_test + +import ( + "fmt" + "regexp" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/fsx" + sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tffsx "github.com/hashicorp/terraform-provider-aws/internal/service/fsx" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" +) + +func TestAccFSxOpenzfsVolume_basic(t *testing.T) { + var volume fsx.Volume + resourceName := "aws_fsx_openzfs_volume.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckFsxOpenzfsVolumeDestroy, + Steps: []resource.TestStep{ + { + Config: testAccOpenzfsVolumeBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsVolumeExists(resourceName, &volume), + acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "fsx", regexp.MustCompile(`volume/fs-.+/fsvol-.+`)), + resource.TestCheckResourceAttr(resourceName, "copy_tags_to_snapshots", "false"), + resource.TestCheckResourceAttr(resourceName, "data_compression_type", "NONE"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "nfs_exports.#", "1"), + resource.TestCheckResourceAttr(resourceName, "nfs_exports.0.client_configurations.#", "1"), + resource.TestCheckResourceAttr(resourceName, "nfs_exports.0.client_configurations.0.clients", "*"), + resource.TestCheckResourceAttr(resourceName, "nfs_exports.0.client_configurations.0.options.#", "1"), + resource.TestCheckResourceAttr(resourceName, "nfs_exports.0.client_configurations.0.options.0", "crossmnt"), + resource.TestCheckResourceAttrSet(resourceName, "parent_volume_id"), + resource.TestCheckResourceAttr(resourceName, "read_only", "false"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttr(resourceName, "user_and_group_quotas.#", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func 
TestAccFSxOpenzfsVolume_parentVolume(t *testing.T) { + var volume, volume2 fsx.Volume + var volumeId string + resourceName := "aws_fsx_openzfs_volume.test" + resourceName2 := "aws_fsx_openzfs_volume.test2" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckFsxOpenzfsVolumeDestroy, + Steps: []resource.TestStep{ + { + Config: testAccOpenzfsVolumeParentVolumeConfig(rName, rName2), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsVolumeExists(resourceName, &volume), + testAccCheckFsxOpenzfsVolumeExists(resourceName2, &volume2), + testAccCheckFsxOpenzfsVolumeGetId(resourceName, &volumeId), + acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "fsx", regexp.MustCompile(`volume/fs-.+/fsvol-.+`)), + acctest.MatchResourceAttrRegionalARN(resourceName2, "arn", "fsx", regexp.MustCompile(`volume/fs-.+/fsvol-.+`)), + resource.TestCheckResourceAttrPtr(resourceName2, "parent_volume_id", &volumeId), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccFSxOpenzfsVolume_tags(t *testing.T) { + var volume1, volume2, volume3 fsx.Volume + resourceName := "aws_fsx_openzfs_volume.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckFsxOpenzfsVolumeDestroy, + Steps: []resource.TestStep{ + { + Config: testAccOpenzfsVolumeTags1Config(rName, "key1", "value1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsVolumeExists(resourceName, &volume1), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccOpenzfsVolumeTags2Config(rName, "key1", "value1updated", "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsVolumeExists(resourceName, &volume2), + testAccCheckFsxOpenzfsVolumeNotRecreated(&volume1, &volume2), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + { + Config: testAccOpenzfsVolumeTags1Config(rName, "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsVolumeExists(resourceName, &volume3), + testAccCheckFsxOpenzfsVolumeNotRecreated(&volume2, &volume3), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + }, + }) +} + +func TestAccFSxOpenzfsVolume_copyTags(t *testing.T) { + var volume1, volume2 fsx.Volume + resourceName := "aws_fsx_openzfs_volume.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, + ErrorCheck: 
acctest.ErrorCheck(t, fsx.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckFsxOpenzfsVolumeDestroy, + Steps: []resource.TestStep{ + { + Config: testAccOpenzfsVolumeCopyTagsConfig(rName, "key1", "value1", "true"), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsVolumeExists(resourceName, &volume1), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + resource.TestCheckResourceAttr(resourceName, "copy_tags_to_snapshots", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccOpenzfsVolumeCopyTagsConfig(rName, "key1", "value1", "false"), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsVolumeExists(resourceName, &volume2), + testAccCheckFsxOpenzfsVolumeRecreated(&volume1, &volume2), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + resource.TestCheckResourceAttr(resourceName, "copy_tags_to_snapshots", "false"), + ), + }, + }, + }) +} + +func TestAccFSxOpenzfsVolume_name(t *testing.T) { + var volume1, volume2 fsx.Volume + resourceName := "aws_fsx_openzfs_volume.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckFsxOpenzfsVolumeDestroy, + Steps: []resource.TestStep{ + { + Config: testAccOpenzfsVolumeBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsVolumeExists(resourceName, &volume1), + resource.TestCheckResourceAttr(resourceName, "name", rName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccOpenzfsVolumeBasicConfig(rName2), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsVolumeExists(resourceName, &volume2), + testAccCheckFsxOpenzfsVolumeNotRecreated(&volume1, &volume2), + resource.TestCheckResourceAttr(resourceName, "name", rName2), + ), + }, + }, + }) +} + +func TestAccFSxOpenzfsVolume_dataCompressionType(t *testing.T) { + var volume1, volume2 fsx.Volume + resourceName := "aws_fsx_openzfs_volume.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckFsxOpenzfsVolumeDestroy, + Steps: []resource.TestStep{ + { + Config: testAccOpenzfsVolumeDataCompressionConfig(rName, "ZSTD"), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsVolumeExists(resourceName, &volume1), + resource.TestCheckResourceAttr(resourceName, "data_compression_type", "ZSTD"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccOpenzfsVolumeDataCompressionConfig(rName, "NONE"), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsVolumeExists(resourceName, &volume2), + testAccCheckFsxOpenzfsVolumeNotRecreated(&volume1, &volume2), + resource.TestCheckResourceAttr(resourceName, "data_compression_type", "NONE"), + ), + }, + }, + }) +} + +func TestAccFSxOpenzfsVolume_readOnly(t 
*testing.T) { + var volume1, volume2 fsx.Volume + resourceName := "aws_fsx_openzfs_volume.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckFsxOpenzfsVolumeDestroy, + Steps: []resource.TestStep{ + { + Config: testAccOpenzfsVolumeReadOnlyConfig(rName, "false"), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsVolumeExists(resourceName, &volume1), + resource.TestCheckResourceAttr(resourceName, "read_only", "false"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccOpenzfsVolumeReadOnlyConfig(rName, "true"), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsVolumeExists(resourceName, &volume2), + testAccCheckFsxOpenzfsVolumeNotRecreated(&volume1, &volume2), + resource.TestCheckResourceAttr(resourceName, "read_only", "true"), + ), + }, + }, + }) +} + +func TestAccFSxOpenzfsVolume_storageCapacity(t *testing.T) { + var volume1, volume2 fsx.Volume + resourceName := "aws_fsx_openzfs_volume.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckFsxOpenzfsVolumeDestroy, + Steps: []resource.TestStep{ + { + Config: testAccOpenzfsVolumeStorageCapacityConfig(rName, 30, 20), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsVolumeExists(resourceName, &volume1), + resource.TestCheckResourceAttr(resourceName, "storage_capacity_quota_gib", "30"), + resource.TestCheckResourceAttr(resourceName, "storage_capacity_reservation_gib", "20"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccOpenzfsVolumeStorageCapacityConfig(rName, 40, 30), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsVolumeExists(resourceName, &volume2), + testAccCheckFsxOpenzfsVolumeNotRecreated(&volume1, &volume2), + resource.TestCheckResourceAttr(resourceName, "storage_capacity_quota_gib", "40"), + resource.TestCheckResourceAttr(resourceName, "storage_capacity_reservation_gib", "30"), + ), + }, + }, + }) +} + +func TestAccFSxOpenzfsVolume_nfsExports(t *testing.T) { + var volume1, volume2 fsx.Volume + resourceName := "aws_fsx_openzfs_volume.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckFsxOpenzfsVolumeDestroy, + Steps: []resource.TestStep{ + { + Config: testAccOpenzfsVolumeNFSExports1Config(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsVolumeExists(resourceName, &volume1), + resource.TestCheckResourceAttr(resourceName, "nfs_exports.#", "1"), + resource.TestCheckResourceAttr(resourceName, "nfs_exports.0.client_configurations.#", "1"), + resource.TestCheckResourceAttr(resourceName, "nfs_exports.0.client_configurations.0.clients", "10.0.1.0/24"), + resource.TestCheckResourceAttr(resourceName, 
"nfs_exports.0.client_configurations.0.options.#", "2"), + resource.TestCheckResourceAttr(resourceName, "nfs_exports.0.client_configurations.0.options.0", "async"), + resource.TestCheckResourceAttr(resourceName, "nfs_exports.0.client_configurations.0.options.1", "rw"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccOpenzfsVolumeNFSExports2Config(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsVolumeExists(resourceName, &volume2), + testAccCheckFsxOpenzfsVolumeNotRecreated(&volume1, &volume2), + resource.TestCheckResourceAttr(resourceName, "nfs_exports.#", "1"), + resource.TestCheckResourceAttr(resourceName, "nfs_exports.0.client_configurations.#", "2"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "nfs_exports.0.client_configurations.*", map[string]string{ + "clients": "10.0.1.0/24", + "options.0": "async", + "options.1": "rw", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "nfs_exports.0.client_configurations.*", map[string]string{ + "clients": "*", + "options.0": "sync", + "options.1": "rw", + }), + ), + }, + }, + }) +} + +func TestAccFSxOpenzfsVolume_userAndGroupQuotas(t *testing.T) { + var volume1, volume2 fsx.Volume + resourceName := "aws_fsx_openzfs_volume.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckFsxOpenzfsVolumeDestroy, + Steps: []resource.TestStep{ + { + Config: testAccOpenzfsVolumeUserAndGroupQuotas1Config(rName, 256), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsVolumeExists(resourceName, &volume1), + resource.TestCheckResourceAttr(resourceName, "user_and_group_quotas.#", "1"), + resource.TestCheckResourceAttr(resourceName, "user_and_group_quotas.0.id", "10"), + resource.TestCheckResourceAttr(resourceName, "user_and_group_quotas.0.storage_capacity_quota_gib", "256"), + resource.TestCheckResourceAttr(resourceName, "user_and_group_quotas.0.type", "USER"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccOpenzfsVolumeUserAndGroupQuotas2Config(rName, 128, 1024), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsVolumeExists(resourceName, &volume2), + testAccCheckFsxOpenzfsVolumeNotRecreated(&volume1, &volume2), + resource.TestCheckResourceAttr(resourceName, "user_and_group_quotas.#", "4"), + resource.TestCheckResourceAttr(resourceName, "user_and_group_quotas.0.id", "10"), + resource.TestCheckResourceAttr(resourceName, "user_and_group_quotas.0.storage_capacity_quota_gib", "128"), + resource.TestCheckResourceAttr(resourceName, "user_and_group_quotas.0.type", "USER"), + resource.TestCheckResourceAttr(resourceName, "user_and_group_quotas.1.id", "20"), + resource.TestCheckResourceAttr(resourceName, "user_and_group_quotas.1.storage_capacity_quota_gib", "1024"), + resource.TestCheckResourceAttr(resourceName, "user_and_group_quotas.1.type", "GROUP"), + resource.TestCheckResourceAttr(resourceName, "user_and_group_quotas.2.id", "5"), + resource.TestCheckResourceAttr(resourceName, "user_and_group_quotas.2.storage_capacity_quota_gib", "1024"), + resource.TestCheckResourceAttr(resourceName, "user_and_group_quotas.2.type", "GROUP"), + resource.TestCheckResourceAttr(resourceName, 
"user_and_group_quotas.3.id", "100"), + resource.TestCheckResourceAttr(resourceName, "user_and_group_quotas.3.storage_capacity_quota_gib", "128"), + resource.TestCheckResourceAttr(resourceName, "user_and_group_quotas.3.type", "USER"), + ), + }, + }, + }) +} + +func testAccCheckFsxOpenzfsVolumeExists(resourceName string, volume *fsx.Volume) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not found: %s", resourceName) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn + + volume1, err := tffsx.FindVolumeByID(conn, rs.Primary.ID) + if err != nil { + return err + } + + if volume == nil { + return fmt.Errorf("FSx OpenZFS Volume (%s) not found", rs.Primary.ID) + } + + *volume = *volume1 + + return nil + } +} + +func testAccCheckFsxOpenzfsVolumeDestroy(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_fsx_openzfs_volume" { + continue + } + + volume, err := tffsx.FindVolumeByID(conn, rs.Primary.ID) + if tfresource.NotFound(err) { + continue + } + + if volume != nil { + return fmt.Errorf("FSx OpenZFS Volume (%s) still exists", rs.Primary.ID) + } + } + return nil +} + +func testAccCheckFsxOpenzfsVolumeGetId(resourceName string, volumeId *string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not found: %s", resourceName) + } + + *volumeId = rs.Primary.ID + + return nil + } +} + +func testAccCheckFsxOpenzfsVolumeNotRecreated(i, j *fsx.Volume) resource.TestCheckFunc { + return func(s *terraform.State) error { + if aws.StringValue(i.VolumeId) != aws.StringValue(j.VolumeId) { + return fmt.Errorf("FSx OpenZFS Volume (%s) recreated", aws.StringValue(i.VolumeId)) + } + + return nil + } +} + +func testAccCheckFsxOpenzfsVolumeRecreated(i, j *fsx.Volume) resource.TestCheckFunc { + return func(s *terraform.State) error { + if aws.StringValue(i.VolumeId) == aws.StringValue(j.VolumeId) { + return fmt.Errorf("FSx OpenZFS Volume (%s) not recreated", aws.StringValue(i.VolumeId)) + } + + return nil + } +} + +func testAccOpenzfsVolumeBaseConfig(rName string) string { + return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptIn(), fmt.Sprintf(` +data "aws_partition" "current" {} + +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" + + tags = { + Name = %[1]q + } +} + +resource "aws_subnet" "test1" { + vpc_id = aws_vpc.test.id + cidr_block = "10.0.1.0/24" + availability_zone = data.aws_availability_zones.available.names[0] + + tags = { + Name = %[1]q + } +} + +resource "aws_fsx_openzfs_file_system" "test" { + storage_capacity = 64 + subnet_ids = [aws_subnet.test1.id] + deployment_type = "SINGLE_AZ_1" + throughput_capacity = 64 + } + +`, rName)) +} + +func testAccOpenzfsVolumeBasicConfig(rName string) string { + return acctest.ConfigCompose(testAccOpenzfsVolumeBaseConfig(rName), fmt.Sprintf(` +resource "aws_fsx_openzfs_volume" "test" { + name = %[1]q + parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id +} +`, rName)) +} + +func testAccOpenzfsVolumeParentVolumeConfig(rName, rName2 string) string { + return acctest.ConfigCompose(testAccOpenzfsVolumeBaseConfig(rName), fmt.Sprintf(` +resource "aws_fsx_openzfs_volume" "test" { + name = %[1]q + parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id +} + +resource "aws_fsx_openzfs_volume" "test2" { + name = %[2]q 
+ parent_volume_id = aws_fsx_openzfs_volume.test.id + } +`, rName, rName2)) +} + +func testAccOpenzfsVolumeTags1Config(rName, tagKey1, tagValue1 string) string { + return acctest.ConfigCompose(testAccOpenzfsVolumeBaseConfig(rName), fmt.Sprintf(` +resource "aws_fsx_openzfs_volume" "test" { + name = %[1]q + parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id + + tags = { + %[2]q = %[3]q + } +} +`, rName, tagKey1, tagValue1)) +} + +func testAccOpenzfsVolumeTags2Config(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return acctest.ConfigCompose(testAccOpenzfsVolumeBaseConfig(rName), fmt.Sprintf(` +resource "aws_fsx_openzfs_volume" "test" { + name = %[1]q + parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id + + + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } +} +`, rName, tagKey1, tagValue1, tagKey2, tagValue2)) +} + +func testAccOpenzfsVolumeCopyTagsConfig(rName, tagKey1, tagValue1, copyTags string) string { + return acctest.ConfigCompose(testAccOpenzfsVolumeBaseConfig(rName), fmt.Sprintf(` +resource "aws_fsx_openzfs_volume" "test" { + name = %[1]q + parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id + copy_tags_to_snapshots = %[4]s + + tags = { + %[2]q = %[3]q + } +} +`, rName, tagKey1, tagValue1, copyTags)) +} + +func testAccOpenzfsVolumeDataCompressionConfig(rName, dType string) string { + return acctest.ConfigCompose(testAccOpenzfsVolumeBaseConfig(rName), fmt.Sprintf(` +resource "aws_fsx_openzfs_volume" "test" { + name = %[1]q + parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id + data_compression_type = %[2]q +} +`, rName, dType)) +} + +func testAccOpenzfsVolumeReadOnlyConfig(rName, readOnly string) string { + return acctest.ConfigCompose(testAccOpenzfsVolumeBaseConfig(rName), fmt.Sprintf(` +resource "aws_fsx_openzfs_volume" "test" { + name = %[1]q + parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id + read_only = %[2]s +} +`, rName, readOnly)) +} + +func testAccOpenzfsVolumeStorageCapacityConfig(rName string, storageQuota, storageReservation int) string { + return acctest.ConfigCompose(testAccOpenzfsVolumeBaseConfig(rName), fmt.Sprintf(` +resource "aws_fsx_openzfs_volume" "test" { + name = %[1]q + parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id + storage_capacity_quota_gib = %[2]d + storage_capacity_reservation_gib = %[3]d +} +`, rName, storageQuota, storageReservation)) +} + +func testAccOpenzfsVolumeNFSExports1Config(rName string) string { + return acctest.ConfigCompose(testAccOpenzfsVolumeBaseConfig(rName), fmt.Sprintf(` +resource "aws_fsx_openzfs_volume" "test" { + name = %[1]q + parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id + nfs_exports { + client_configurations { + clients = "10.0.1.0/24" + options = ["async", "rw"] + } + } + +} +`, rName)) +} + +func testAccOpenzfsVolumeNFSExports2Config(rName string) string { + return acctest.ConfigCompose(testAccOpenzfsVolumeBaseConfig(rName), fmt.Sprintf(` +resource "aws_fsx_openzfs_volume" "test" { + name = %[1]q + parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id + nfs_exports { + client_configurations { + clients = "10.0.1.0/24" + options = ["async", "rw"] + } + client_configurations { + clients = "*" + options = ["sync", "rw"] + } + } +} +`, rName)) +} + +func testAccOpenzfsVolumeUserAndGroupQuotas1Config(rName string, quotaSize int) string { + return acctest.ConfigCompose(testAccOpenzfsVolumeBaseConfig(rName), fmt.Sprintf(` +resource "aws_fsx_openzfs_volume" "test" { + name = %[1]q + 
parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id
+  user_and_group_quotas {
+    id = 10
+    storage_capacity_quota_gib = %[2]d
+    type = "USER"
+  }
+}
+`, rName, quotaSize))
+}
+
+func testAccOpenzfsVolumeUserAndGroupQuotas2Config(rName string, userQuota, groupQuota int) string {
+	return acctest.ConfigCompose(testAccOpenzfsVolumeBaseConfig(rName), fmt.Sprintf(`
+resource "aws_fsx_openzfs_volume" "test" {
+  name             = %[1]q
+  parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id
+  user_and_group_quotas {
+    id = 10
+    storage_capacity_quota_gib = %[2]d
+    type = "USER"
+  }
+  user_and_group_quotas {
+    id = 20
+    storage_capacity_quota_gib = %[3]d
+    type = "GROUP"
+  }
+  user_and_group_quotas {
+    id = 5
+    storage_capacity_quota_gib = %[3]d
+    type = "GROUP"
+  }
+  user_and_group_quotas {
+    id = 100
+    storage_capacity_quota_gib = %[2]d
+    type = "USER"
+  }
+}
+`, rName, userQuota, groupQuota))
+}
diff --git a/internal/service/fsx/sweep.go b/internal/service/fsx/sweep.go
index bed57fc16eb..284c070996d 100644
--- a/internal/service/fsx/sweep.go
+++ b/internal/service/fsx/sweep.go
@@ -48,6 +48,11 @@ func init() {
 		F:    sweepFSXOpenzfsFileSystems,
 	})
 
+	resource.AddTestSweepers("aws_fsx_openzfs_volume", &resource.Sweeper{
+		Name: "aws_fsx_openzfs_volume",
+		F:    sweepFSXOpenzfsVolume,
+	})
+
 	resource.AddTestSweepers("aws_fsx_windows_file_system", &resource.Sweeper{
 		Name: "aws_fsx_windows_file_system",
 		F:    sweepFSXWindowsFileSystems,
@@ -337,6 +342,57 @@ func sweepFSXOpenzfsFileSystems(region string) error {
 	return errs.ErrorOrNil()
 }
 
+func sweepFSXOpenzfsVolume(region string) error {
+	client, err := sweep.SharedRegionalSweepClient(region)
+
+	if err != nil {
+		return fmt.Errorf("error getting client: %w", err)
+	}
+
+	conn := client.(*conns.AWSClient).FSxConn
+	sweepResources := make([]*sweep.SweepResource, 0)
+	var errs *multierror.Error
+	input := &fsx.DescribeVolumesInput{}
+
+	err = conn.DescribeVolumesPages(input, func(page *fsx.DescribeVolumesOutput, lastPage bool) bool {
+		if page == nil {
+			return !lastPage
+		}
+
+		for _, v := range page.Volumes {
+			if aws.StringValue(v.VolumeType) != fsx.VolumeTypeOpenzfs {
+				continue
+			}
+			if v.OpenzfsConfiguration != nil && aws.BoolValue(v.OpenzfsConfiguration.RootVolumeId) {
+				continue
+			}
+
+			r := ResourceOpenzfsVolume()
+			d := r.Data(nil)
+			d.SetId(aws.StringValue(v.VolumeId))
+
+			sweepResources = append(sweepResources, sweep.NewSweepResource(r, d, client))
+		}
+
+		return !lastPage
+	})
+
+	if err != nil {
+		errs = multierror.Append(errs, fmt.Errorf("error listing FSx OpenZFS Volume for %s: %w", region, err))
+	}
+
+	if err = sweep.SweepOrchestrator(sweepResources); err != nil {
+		errs = multierror.Append(errs, fmt.Errorf("error sweeping FSx OpenZFS Volume for %s: %w", region, err))
+	}
+
+	if sweep.SkipSweepError(errs.ErrorOrNil()) {
+		log.Printf("[WARN] Skipping FSx OpenZFS Volume sweep for %s: %s", region, errs)
+		return nil
+	}
+
+	return errs.ErrorOrNil()
+}
+
 func sweepFSXWindowsFileSystems(region string) error {
 	client, err := sweep.SharedRegionalSweepClient(region)
 
diff --git a/internal/service/fsx/wait.go b/internal/service/fsx/wait.go
index 198ef6d7429..16dc6e61772 100644
--- a/internal/service/fsx/wait.go
+++ b/internal/service/fsx/wait.go
@@ -206,7 +206,7 @@ func waitStorageVirtualMachineDeleted(conn *fsx.FSx, id string, timeout time.Dur
 func waitVolumeCreated(conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Volume, error) {
 	stateConf := &resource.StateChangeConf{
 		Pending: []string{fsx.VolumeLifecycleCreating, 
fsx.VolumeLifecyclePending},
-		Target:  []string{fsx.VolumeLifecycleCreated, fsx.VolumeLifecycleMisconfigured},
+		Target:  []string{fsx.VolumeLifecycleCreated, fsx.VolumeLifecycleMisconfigured, fsx.VolumeLifecycleAvailable},
 		Refresh: statusVolume(conn, id),
 		Timeout: timeout,
 		Delay:   30 * time.Second,
@@ -249,7 +249,7 @@ func waitVolumeUpdated(conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Vo
 func waitVolumeDeleted(conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Volume, error) {
 	stateConf := &resource.StateChangeConf{
-		Pending: []string{fsx.VolumeLifecycleCreated, fsx.VolumeLifecycleMisconfigured, fsx.VolumeLifecycleDeleting},
+		Pending: []string{fsx.VolumeLifecycleCreated, fsx.VolumeLifecycleMisconfigured, fsx.VolumeLifecycleAvailable, fsx.VolumeLifecycleDeleting},
 		Target:  []string{},
 		Refresh: statusVolume(conn, id),
 		Timeout: timeout,
diff --git a/website/docs/r/fsx_openzfs_volume.html.markdown b/website/docs/r/fsx_openzfs_volume.html.markdown
new file mode 100644
index 00000000000..576136430c6
--- /dev/null
+++ b/website/docs/r/fsx_openzfs_volume.html.markdown
@@ -0,0 +1,77 @@
+---
+subcategory: "File System (FSx)"
+layout: "aws"
+page_title: "AWS: aws_fsx_openzfs_volume"
+description: |-
+  Manages an Amazon FSx for OpenZFS volume.
+---
+
+# Resource: aws_fsx_openzfs_volume
+
+Manages an Amazon FSx for OpenZFS volume.
+See the [FSx OpenZFS User Guide](https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/what-is-fsx.html) for more information.
+
+## Example Usage
+
+```terraform
+resource "aws_fsx_openzfs_volume" "test" {
+  name             = "testvolume"
+  parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) The name of the volume. You can use a maximum of 203 alphanumeric characters, plus the underscore (_) special character.
+* `parent_volume_id` - (Required) The ID of the volume that will be the parent of the volume being created. This can be the `root_volume_id` of an `aws_fsx_openzfs_file_system` resource or the `id` of another `aws_fsx_openzfs_volume`.
+* `origin_snapshot` - (Optional) Configuration block for creating the volume from a snapshot. Contains the `copy_strategy` and the `snapshot_arn` (full ARN) of the source snapshot. Maximum of 1 item.
+* `copy_tags_to_snapshots` - (Optional) A boolean flag indicating whether tags for the volume should be copied to snapshots. The default value is `false`.
+* `data_compression_type` - (Optional) Method used to compress the data on the volume. Valid values are `NONE` or `ZSTD`. Child volumes that don't specify a compression option inherit it from the parent volume.
+* `nfs_exports` - (Optional) NFS export configuration for the volume. Exactly 1 item. See [NFS Exports](#nfs-exports) below.
+* `read_only` - (Optional) Specifies whether the volume is read-only. Default is `false`.
+* `storage_capacity_quota_gib` - (Optional) The maximum amount of storage in gibibytes (GiB) that the volume can use from its parent.
+* `storage_capacity_reservation_gib` - (Optional) The amount of storage in gibibytes (GiB) to reserve from the parent volume.
+* `user_and_group_quotas` - (Optional) Specifies how much storage users or groups can use on the volume. Maximum of 100 items. See [User and Group Quotas](#user-and-group-quotas) below.
+* `tags` - (Optional) A map of tags to assign to the volume. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
+
+### NFS Exports
+
+* `client_configurations` - (Required) A list of configuration objects that contain the client and options for mounting the OpenZFS file system. Maximum of 25 items. See [Client Configurations](#client-configurations) below.
+
+### Client Configurations
+
+* `clients` - (Required) A value that specifies who can mount the file system. You can provide a wildcard character (`*`), an IP address (`0.0.0.0`), or a CIDR address (`192.0.2.0/24`). By default, Amazon FSx uses the wildcard character when specifying the client.
+* `options` - (Required) The options to use when mounting the file system. Maximum of 20 items. See the [Linux NFS exports man page](https://linux.die.net/man/5/exports) for more information. `crossmnt` and `sync` are used by default.
+
+### User and Group Quotas
+
+* `id` - (Required) The ID of the user or group. Valid values are between `0` and `2147483647`.
+* `storage_capacity_quota_gib` - (Required) The amount of storage that the user or group can use in gibibytes (GiB). Valid values are between `0` and `2147483647`.
+* `type` - (Required) A value that specifies whether the quota applies to a user or group. Valid values are `USER` or `GROUP`.
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `arn` - Amazon Resource Name of the volume.
+* `id` - Identifier of the volume, e.g., `fsvol-12345678`
+* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block).
+
+## Timeouts
+
+`aws_fsx_openzfs_volume` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts)
+configuration options:
+
+* `create` - (Default `30m`) How long to wait for the volume to be created.
+* `update` - (Default `30m`) How long to wait for the volume to be updated.
+* `delete` - (Default `30m`) How long to wait for the volume to be deleted.
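+
+## Example Usage with NFS Exports and Quotas
+
+A fuller sketch, adapted from the acceptance tests in this change (the resource name `example` and the quota values are placeholders, not prescribed settings), grants a CIDR-scoped NFS export and a per-user quota on a child volume:
+
+```terraform
+resource "aws_fsx_openzfs_volume" "example" {
+  # Parent here is the file system's root volume; another volume's id also works.
+  name             = "example"
+  parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id
+
+  # One nfs_exports block; client_configurations may repeat (up to 25).
+  nfs_exports {
+    client_configurations {
+      clients = "10.0.1.0/24"
+      options = ["async", "rw"]
+    }
+  }
+
+  # Cap user 10 at 128 GiB; up to 100 quota blocks are accepted.
+  user_and_group_quotas {
+    id                         = 10
+    storage_capacity_quota_gib = 128
+    type                       = "USER"
+  }
+}
+```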
+ +## Import + +FSx Volumes can be imported using the `id`, e.g., + +``` +$ terraform import aws_fsx_openzfs_volume.example fsvol-543ab12b1ca672f33 +``` \ No newline at end of file From a9fdc45acdbfd5ed8569d06d06154489e436346e Mon Sep 17 00:00:00 2001 From: awsaxeman <34073510+awsaxeman@users.noreply.github.com> Date: Wed, 29 Dec 2021 01:07:40 -0500 Subject: [PATCH 06/12] fix errors --- internal/service/fsx/openzfs_volume.go | 25 ++++++++++++++-- internal/service/fsx/openzfs_volume_test.go | 33 ++++++++++----------- internal/service/fsx/sweep.go | 2 +- internal/service/fsx/wait.go | 4 +-- 4 files changed, 41 insertions(+), 23 deletions(-) diff --git a/internal/service/fsx/openzfs_volume.go b/internal/service/fsx/openzfs_volume.go index 5700e6b481c..7639d6c754b 100644 --- a/internal/service/fsx/openzfs_volume.go +++ b/internal/service/fsx/openzfs_volume.go @@ -284,7 +284,6 @@ func resourceOpenzfsVolumeRead(d *schema.ResourceData, meta interface{}) error { d.Set("copy_tags_to_snapshots", openzfsConfig.CopyTagsToSnapshots) d.Set("data_compression_type", openzfsConfig.DataCompressionType) d.Set("name", volume.Name) - d.Set("origin_snapshot", openzfsConfig.OriginSnapshot) d.Set("parent_volume_id", openzfsConfig.ParentVolumeId) d.Set("read_only", openzfsConfig.ReadOnly) d.Set("storage_capacity_quota_gib", openzfsConfig.StorageCapacityQuotaGiB) @@ -309,11 +308,15 @@ func resourceOpenzfsVolumeRead(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("error setting tags_all: %w", err) } - if err := d.Set("nfs_exports", flattenFsxOpenzfsFileNfsExports(openzfsConfig.NfsExports)); err != nil { + if err := d.Set("origin_snapshot", flattenFsxOpenzfsVolumeOriginSnapshot(openzfsConfig.OriginSnapshot)); err != nil { return fmt.Errorf("error setting nfs_exports: %w", err) } - if err := d.Set("user_and_group_quotas", flattenFsxOpenzfsFileUserAndGroupQuotas(openzfsConfig.UserAndGroupQuotas)); err != nil { + if err := d.Set("nfs_exports", flattenFsxOpenzfsVolumeNfsExports(openzfsConfig.NfsExports)); err != nil { + return fmt.Errorf("error setting nfs_exports: %w", err) + } + + if err := d.Set("user_and_group_quotas", flattenFsxOpenzfsVolumeUserAndGroupQuotas(openzfsConfig.UserAndGroupQuotas)); err != nil { return fmt.Errorf("error setting user_and_group_quotas: %w", err) } @@ -563,3 +566,19 @@ func flattenFsxOpenzfsVolumeUserAndGroupQuotas(rs []*fsx.OpenZFSUserOrGroupQuota return nil } + +func flattenFsxOpenzfsVolumeOriginSnapshot(rs *fsx.OpenZFSOriginSnapshotConfiguration) []interface{} { + if rs == nil { + return []interface{}{} + } + + m := make(map[string]interface{}) + if rs.CopyStrategy != nil { + m["copy_strategy"] = aws.StringValue(rs.CopyStrategy) + } + if rs.SnapshotARN != nil { + m["snapshot_arn"] = aws.StringValue(rs.SnapshotARN) + } + + return []interface{}{m} +} diff --git a/internal/service/fsx/openzfs_volume_test.go b/internal/service/fsx/openzfs_volume_test.go index 1a05e7dd6d7..ad106bb3e47 100644 --- a/internal/service/fsx/openzfs_volume_test.go +++ b/internal/service/fsx/openzfs_volume_test.go @@ -522,8 +522,7 @@ resource "aws_fsx_openzfs_file_system" "test" { subnet_ids = [aws_subnet.test1.id] deployment_type = "SINGLE_AZ_1" throughput_capacity = 64 - } - +} `, rName)) } @@ -546,7 +545,7 @@ resource "aws_fsx_openzfs_volume" "test" { resource "aws_fsx_openzfs_volume" "test2" { name = %[2]q parent_volume_id = aws_fsx_openzfs_volume.test.id - } +} `, rName, rName2)) } @@ -568,7 +567,7 @@ func testAccOpenzfsVolumeTags2Config(rName, tagKey1, tagValue1, tagKey2, tagValu resource 
"aws_fsx_openzfs_volume" "test" { name = %[1]q parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id - + tags = { %[2]q = %[3]q @@ -634,7 +633,7 @@ resource "aws_fsx_openzfs_volume" "test" { options = ["async", "rw"] } } - + } `, rName)) } @@ -644,16 +643,16 @@ func testAccOpenzfsVolumeNFSExports2Config(rName string) string { resource "aws_fsx_openzfs_volume" "test" { name = %[1]q parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id - nfs_exports { - client_configurations { - clients = "10.0.1.0/24" - options = ["async", "rw"] - } - client_configurations { - clients = "*" - options = ["sync", "rw"] - } + nfs_exports { + client_configurations { + clients = "10.0.1.0/24" + options = ["async", "rw"] + } + client_configurations { + clients = "*" + options = ["sync", "rw"] } + } } `, rName)) } @@ -664,9 +663,9 @@ resource "aws_fsx_openzfs_volume" "test" { name = %[1]q parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id user_and_group_quotas { - id = 10 - storage_capacity_quota_gib = %[2]d - type = "USER" + id = 10 + storage_capacity_quota_gib = %[2]d + type = "USER" } } `, rName, quotaSize)) diff --git a/internal/service/fsx/sweep.go b/internal/service/fsx/sweep.go index 284c070996d..772fc7f74fe 100644 --- a/internal/service/fsx/sweep.go +++ b/internal/service/fsx/sweep.go @@ -363,7 +363,7 @@ func sweepFSXOpenzfsVolume(region string) error { if aws.StringValue(v.VolumeType) != fsx.VolumeTypeOpenzfs { continue } - if v.OpenzfsConfiguration != nil && aws.BoolValue(v.OpenzfsConfiguration.RootVolumeId) { + if v.OpenZFSConfiguration != nil && aws.StringValue(v.OpenZFSConfiguration.ParentVolumeId) == nil { continue } diff --git a/internal/service/fsx/wait.go b/internal/service/fsx/wait.go index 16dc6e61772..642ddc2acca 100644 --- a/internal/service/fsx/wait.go +++ b/internal/service/fsx/wait.go @@ -203,7 +203,7 @@ func waitStorageVirtualMachineDeleted(conn *fsx.FSx, id string, timeout time.Dur return nil, err } -func waitVolumeCreated(conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Volume, error) { +func waitVolumeCreated(conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Volume, error) { //nolint:unparam stateConf := &resource.StateChangeConf{ Pending: []string{fsx.VolumeLifecycleCreating, fsx.VolumeLifecyclePending}, Target: []string{fsx.VolumeLifecycleCreated, fsx.VolumeLifecycleMisconfigured, fsx.VolumeLifecycleAvailable}, @@ -247,7 +247,7 @@ func waitVolumeUpdated(conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Vo return nil, err } -func waitVolumeDeleted(conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Volume, error) { +func waitVolumeDeleted(conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Volume, error) { //nolint:unparam stateConf := &resource.StateChangeConf{ Pending: []string{fsx.VolumeLifecycleCreated, fsx.VolumeLifecycleMisconfigured, fsx.VolumeLifecycleAvailable, fsx.VolumeLifecycleDeleting}, Target: []string{}, From 19a0d598877aab2f926af42eca37a13b3c1d5a81 Mon Sep 17 00:00:00 2001 From: awsaxeman <34073510+awsaxeman@users.noreply.github.com> Date: Wed, 29 Dec 2021 01:50:17 -0500 Subject: [PATCH 07/12] add backup for openzfs --- internal/service/fsx/backup_test.go | 57 +++++++++++++++++++++++++++++ internal/service/fsx/sweep.go | 2 +- 2 files changed, 58 insertions(+), 1 deletion(-) diff --git a/internal/service/fsx/backup_test.go b/internal/service/fsx/backup_test.go index c66d549c3aa..caf405c7b89 100644 --- a/internal/service/fsx/backup_test.go +++ b/internal/service/fsx/backup_test.go @@ -76,6 +76,35 @@ func 
TestAccFSxBackup_ontapBasic(t *testing.T) { }) } +func TestAccFSxBackup_openzfsBasic(t *testing.T) { + var backup fsx.Backup + resourceName := "aws_fsx_backup.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckFsxBackupDestroy, + Steps: []resource.TestStep{ + { + Config: testAccBackupOpenzfsBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxBackupExists(resourceName, &backup), + acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "fsx", regexp.MustCompile(`backup/.+`)), + acctest.CheckResourceAttrAccountID(resourceName, "owner_id"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccFSxBackup_windowsBasic(t *testing.T) { var backup fsx.Backup resourceName := "aws_fsx_backup.test" @@ -333,6 +362,22 @@ resource "aws_fsx_ontap_volume" "test" { `, rName, vName)) } +func testAccBackupOpenzfsBaseConfig(rName string) string { + return acctest.ConfigCompose(testAccBackupBaseConfig(), fmt.Sprintf(` +resource "aws_fsx_openzfs_file_system" "test" { + storage_capacity = 64 + subnet_ids = [aws_subnet.test1.id] + deployment_type = "SINGLE_AZ_1" + throughput_capacity = 64 + + + tags = { + Name = %[1]q + } +} +`, rName)) +} + func testAccBackupWindowsBaseConfig(rName string) string { return acctest.ConfigCompose(testAccBackupBaseConfig(), fmt.Sprintf(` resource "aws_directory_service_directory" "test" { @@ -386,6 +431,18 @@ resource "aws_fsx_backup" "test" { `, rName)) } +func testAccBackupOpenzfsBasicConfig(rName string) string { + return acctest.ConfigCompose(testAccBackupOpenzfsBaseConfig(rName), fmt.Sprintf(` +resource "aws_fsx_backup" "test" { + file_system_id = aws_fsx_openzfs_file_system.test.id + + tags = { + Name = %[1]q + } +} +`, rName)) +} + func testAccBackupWindowsBasicConfig(rName string) string { return acctest.ConfigCompose(testAccBackupWindowsBaseConfig(rName), fmt.Sprintf(` resource "aws_fsx_backup" "test" { diff --git a/internal/service/fsx/sweep.go b/internal/service/fsx/sweep.go index 772fc7f74fe..d06bb065af9 100644 --- a/internal/service/fsx/sweep.go +++ b/internal/service/fsx/sweep.go @@ -363,7 +363,7 @@ func sweepFSXOpenzfsVolume(region string) error { if aws.StringValue(v.VolumeType) != fsx.VolumeTypeOpenzfs { continue } - if v.OpenZFSConfiguration != nil && aws.StringValue(v.OpenZFSConfiguration.ParentVolumeId) == nil { + if v.OpenZFSConfiguration != nil && aws.StringValue(v.OpenZFSConfiguration.ParentVolumeId) == "" { continue } From 28dedea205c51f57e81b3fbf6e6cf426b0b69948 Mon Sep 17 00:00:00 2001 From: awsaxeman <34073510+awsaxeman@users.noreply.github.com> Date: Wed, 29 Dec 2021 08:31:23 -0500 Subject: [PATCH 08/12] test lint --- internal/service/fsx/backup_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/fsx/backup_test.go b/internal/service/fsx/backup_test.go index caf405c7b89..74d5e01397c 100644 --- a/internal/service/fsx/backup_test.go +++ b/internal/service/fsx/backup_test.go @@ -369,7 +369,7 @@ resource "aws_fsx_openzfs_file_system" "test" { subnet_ids = [aws_subnet.test1.id] deployment_type = "SINGLE_AZ_1" throughput_capacity = 64 - + tags = { Name = %[1]q From 
a8d658f2ee8fee654966ec9bfd6c5e224fe1424f Mon Sep 17 00:00:00 2001 From: awsaxeman <34073510+awsaxeman@users.noreply.github.com> Date: Wed, 29 Dec 2021 23:24:36 -0500 Subject: [PATCH 09/12] add openzfs snapshot --- .changelog/22234.txt | 4 + internal/provider/provider.go | 1 + internal/service/fsx/find.go | 25 ++ internal/service/fsx/openzfs_snapshot.go | 201 +++++++++ internal/service/fsx/openzfs_snapshot_test.go | 388 ++++++++++++++++++ internal/service/fsx/status.go | 16 + internal/service/fsx/wait.go | 54 +++ website/docs/r/fsx_backup.html.markdown | 15 + .../docs/r/fsx_openzfs_snapshot.html.markdown | 83 ++++ 9 files changed, 787 insertions(+) create mode 100644 internal/service/fsx/openzfs_snapshot.go create mode 100644 internal/service/fsx/openzfs_snapshot_test.go create mode 100644 website/docs/r/fsx_openzfs_snapshot.html.markdown diff --git a/.changelog/22234.txt b/.changelog/22234.txt index 721da0fa251..444be827cd8 100644 --- a/.changelog/22234.txt +++ b/.changelog/22234.txt @@ -4,4 +4,8 @@ aws_fsx_openzfs_file_system ```release-note:new-resource aws_fsx_openzfs_volume +``` + +```release-note:new-resource +aws_fsx_openzfs_snapshot ``` \ No newline at end of file diff --git a/internal/provider/provider.go b/internal/provider/provider.go index 9961f0449bd..8588ccd577b 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -1220,6 +1220,7 @@ func Provider() *schema.Provider { "aws_fsx_ontap_volume": fsx.ResourceOntapVolume(), "aws_fsx_openzfs_file_system": fsx.ResourceOpenzfsFileSystem(), "aws_fsx_openzfs_volume": fsx.ResourceOpenzfsVolume(), + "aws_fsx_openzfs_snapshot": fsx.ResourceOpenzfsSnapshot(), "aws_fsx_windows_file_system": fsx.ResourceWindowsFileSystem(), "aws_gamelift_alias": gamelift.ResourceAlias(), diff --git a/internal/service/fsx/find.go b/internal/service/fsx/find.go index ccc4db48ae4..3cfd9361950 100644 --- a/internal/service/fsx/find.go +++ b/internal/service/fsx/find.go @@ -170,3 +170,28 @@ func FindVolumeByID(conn *fsx.FSx, id string) (*fsx.Volume, error) { return volumes[0], nil } + +func FindSnapshotByID(conn *fsx.FSx, id string) (*fsx.Snapshot, error) { + input := &fsx.DescribeSnapshotsInput{ + SnapshotIds: aws.StringSlice([]string{id}), + } + + output, err := conn.DescribeSnapshots(input) + + if tfawserr.ErrCodeEquals(err, fsx.ErrCodeVolumeNotFound) || tfawserr.ErrCodeEquals(err, fsx.ErrCodeSnapshotNotFound) { + return nil, &resource.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || len(output.Snapshots) == 0 || output.Snapshots[0] == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.Snapshots[0], nil +} diff --git a/internal/service/fsx/openzfs_snapshot.go b/internal/service/fsx/openzfs_snapshot.go new file mode 100644 index 00000000000..246393c88c2 --- /dev/null +++ b/internal/service/fsx/openzfs_snapshot.go @@ -0,0 +1,201 @@ +package fsx + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/fsx" + "github.com/hashicorp/aws-sdk-go-base/tfawserr" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + 
"github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/internal/verify" +) + +func ResourceOpenzfsSnapshot() *schema.Resource { + return &schema.Resource{ + Create: resourceOpenzfsSnapshotCreate, + Read: resourceOpenzfsSnapshotRead, + Update: resourceOpenzfsSnapshotUpdate, + Delete: resourceOpenzfsSnapshotDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + Read: schema.DefaultTimeout(30 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "creation_time": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringLenBetween(1, 203), + }, + "tags": tftags.TagsSchemaComputed(), + "tags_all": tftags.TagsSchemaComputed(), + "volume_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(23, 23), + }, + }, + + CustomizeDiff: customdiff.Sequence( + verify.SetTagsDiff, + ), + } +} + +func resourceOpenzfsSnapshotCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*conns.AWSClient).FSxConn + defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig + tags := defaultTagsConfig.MergeTags(tftags.New(d.Get("tags").(map[string]interface{}))) + + input := &fsx.CreateSnapshotInput{ + ClientRequestToken: aws.String(resource.UniqueId()), + Name: aws.String(d.Get("name").(string)), + VolumeId: aws.String(d.Get("volume_id").(string)), + } + + if len(tags) > 0 { + input.Tags = Tags(tags.IgnoreAWS()) + } + + result, err := conn.CreateSnapshot(input) + if err != nil { + return fmt.Errorf("error creating FSx OpenZFS Snapshot: %w", err) + } + + d.SetId(aws.StringValue(result.Snapshot.SnapshotId)) + + log.Println("[DEBUG] Waiting for FSx OpenZFS Snapshot to become available") + if _, err := waitSnapshotCreated(conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + return fmt.Errorf("error waiting for FSx OpenZFS Snapshot (%s) to be available: %w", d.Id(), err) + } + + return resourceOpenzfsSnapshotRead(d, meta) +} + +func resourceOpenzfsSnapshotRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*conns.AWSClient).FSxConn + defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig + ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig + + snapshot, err := FindSnapshotByID(conn, d.Id()) + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] FSx Snapshot (%s) not found, removing from state", d.Id()) + d.SetId("") + return nil + } + + if err != nil { + return fmt.Errorf("error reading FSx Snapshot (%s): %w", d.Id(), err) + } + + d.Set("arn", snapshot.ResourceARN) + d.Set("volume_id", snapshot.VolumeId) + d.Set("name", snapshot.Name) + + if err := d.Set("creation_time", snapshot.CreationTime.Format(time.RFC3339)); err != nil { + return fmt.Errorf("error setting creation_time: %w", err) + } + + //Snapshot tags do not get returned with describe call so need to make a separate list tags call + tags, tagserr := ListTags(conn, *snapshot.ResourceARN) + + if tagserr != nil { + return fmt.Errorf("error reading Tags for FSx OpenZFS Snapshot (%s): %w", d.Id(), err) + } else { + tags = tags.IgnoreAWS().IgnoreConfig(ignoreTagsConfig) + } + 
+	//lintignore:AWSR002
+	if err := d.Set("tags", tags.RemoveDefaultConfig(defaultTagsConfig).Map()); err != nil {
+		return fmt.Errorf("error setting tags: %w", err)
+	}
+
+	if err := d.Set("tags_all", tags.Map()); err != nil {
+		return fmt.Errorf("error setting tags_all: %w", err)
+	}
+
+	return nil
+}
+
+func resourceOpenzfsSnapshotUpdate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*conns.AWSClient).FSxConn
+
+	if d.HasChange("tags_all") {
+		o, n := d.GetChange("tags_all")
+
+		if err := UpdateTags(conn, d.Get("arn").(string), o, n); err != nil {
+			return fmt.Errorf("error updating FSx Snapshot (%s) tags: %w", d.Get("arn").(string), err)
+		}
+	}
+
+	if d.HasChangesExcept("tags_all", "tags") {
+		input := &fsx.UpdateSnapshotInput{
+			ClientRequestToken: aws.String(resource.UniqueId()),
+			SnapshotId:         aws.String(d.Id()),
+		}
+
+		if d.HasChange("name") {
+			input.Name = aws.String(d.Get("name").(string))
+		}
+
+		_, err := conn.UpdateSnapshot(input)
+
+		if err != nil {
+			return fmt.Errorf("error updating FSx OpenZFS Snapshot (%s): %w", d.Id(), err)
+		}
+
+		if _, err := waitSnapshotUpdated(conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil {
+			return fmt.Errorf("error waiting for FSx OpenZFS Snapshot (%s) update: %w", d.Id(), err)
+		}
+	}
+
+	return resourceOpenzfsSnapshotRead(d, meta)
+}
+
+func resourceOpenzfsSnapshotDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*conns.AWSClient).FSxConn
+
+	request := &fsx.DeleteSnapshotInput{
+		SnapshotId: aws.String(d.Id()),
+	}
+
+	log.Printf("[INFO] Deleting FSx Snapshot: %s", d.Id())
+	_, err := conn.DeleteSnapshot(request)
+
+	if err != nil {
+		if tfawserr.ErrCodeEquals(err, fsx.ErrCodeSnapshotNotFound) {
+			return nil
+		}
+		return fmt.Errorf("error deleting FSx Snapshot (%s): %w", d.Id(), err)
+	}
+
+	log.Println("[DEBUG] Waiting for snapshot to delete")
+	if _, err := waitSnapshotDeleted(conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil {
+		return fmt.Errorf("error waiting for FSx Snapshot (%s) to be deleted: %w", d.Id(), err)
+	}
+
+	return nil
+}
diff --git a/internal/service/fsx/openzfs_snapshot_test.go b/internal/service/fsx/openzfs_snapshot_test.go
new file mode 100644
index 00000000000..b639a3b273c
--- /dev/null
+++ b/internal/service/fsx/openzfs_snapshot_test.go
@@ -0,0 +1,388 @@
+package fsx_test
+
+import (
+	"fmt"
+	"regexp"
+	"testing"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/fsx"
+	sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
+	"github.com/hashicorp/terraform-provider-aws/internal/acctest"
+	"github.com/hashicorp/terraform-provider-aws/internal/conns"
+	tffsx "github.com/hashicorp/terraform-provider-aws/internal/service/fsx"
+	"github.com/hashicorp/terraform-provider-aws/internal/tfresource"
+)
+
+func TestAccFSxOpenzfsSnapshot_basic(t *testing.T) {
+	var snapshot fsx.Snapshot
+	resourceName := "aws_fsx_openzfs_snapshot.test"
+	rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix)
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) },
+		ErrorCheck:   acctest.ErrorCheck(t, fsx.EndpointsID),
+		Providers:    acctest.Providers,
+		CheckDestroy: testAccCheckFsxOpenzfsSnapshotDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccOpenzfsSnapshotBasicConfig(rName),
+				Check: resource.ComposeTestCheckFunc(
+					
testAccCheckFsxOpenzfsSnapshotExists(resourceName, &snapshot), + acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "fsx", regexp.MustCompile(`snapshot/.+`)), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttrSet(resourceName, "volume_id"), + resource.TestCheckResourceAttrSet(resourceName, "creation_time"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccFSxOpenzfsSnapshot_disappears(t *testing.T) { + var snapshot fsx.Snapshot + resourceName := "aws_fsx_openzfs_snapshot.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckFsxOpenzfsSnapshotDestroy, + Steps: []resource.TestStep{ + { + Config: testAccOpenzfsSnapshotBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsSnapshotExists(resourceName, &snapshot), + acctest.CheckResourceDisappears(acctest.Provider, tffsx.ResourceOpenzfsSnapshot(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccFSxOpenzfsSnapshot_tags(t *testing.T) { + var snapshot fsx.Snapshot + resourceName := "aws_fsx_openzfs_snapshot.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckFsxOpenzfsSnapshotDestroy, + Steps: []resource.TestStep{ + { + Config: testAccOpenzfsSnapshotTags1Config(rName, "key1", "value1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsSnapshotExists(resourceName, &snapshot), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccOpenzfsSnapshotTags2Config(rName, "key1", "value1updated", "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsSnapshotExists(resourceName, &snapshot), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + { + Config: testAccOpenzfsSnapshotTags1Config(rName, "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsSnapshotExists(resourceName, &snapshot), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + }, + }) +} + +func TestAccFSxOpenzfsSnapshot_name(t *testing.T) { + var snapshot1, snapshot2 fsx.Snapshot + resourceName := "aws_fsx_openzfs_snapshot.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + Providers: 
acctest.Providers, + CheckDestroy: testAccCheckFsxOpenzfsSnapshotDestroy, + Steps: []resource.TestStep{ + { + Config: testAccOpenzfsSnapshotBasicConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsSnapshotExists(resourceName, &snapshot1), + resource.TestCheckResourceAttr(resourceName, "name", rName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccOpenzfsSnapshotBasicConfig(rName2), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsSnapshotExists(resourceName, &snapshot2), + testAccCheckFsxOpenzfsSnapshotNotRecreated(&snapshot1, &snapshot2), + resource.TestCheckResourceAttr(resourceName, "name", rName2), + ), + }, + }, + }) +} + +func TestAccFSxOpenzfsSnapshot_childVolume(t *testing.T) { + var snapshot fsx.Snapshot + resourceName := "aws_fsx_openzfs_snapshot.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckFsxOpenzfsSnapshotDestroy, + Steps: []resource.TestStep{ + { + Config: testAccOpenzfsSnapshotChildVolumeConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsSnapshotExists(resourceName, &snapshot), + acctest.MatchResourceAttrRegionalARN(resourceName, "arn", "fsx", regexp.MustCompile(`snapshot/.+`)), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccFSxOpenzfsSnapshot_volumeId(t *testing.T) { + var snapshot1, snapshot2 fsx.Snapshot + resourceName := "aws_fsx_openzfs_snapshot.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t); acctest.PreCheckPartitionHasService(fsx.EndpointsID, t) }, + ErrorCheck: acctest.ErrorCheck(t, fsx.EndpointsID), + Providers: acctest.Providers, + CheckDestroy: testAccCheckFsxOpenzfsSnapshotDestroy, + Steps: []resource.TestStep{ + { + Config: testAccOpenzfsSnapshotVolumeId1Config(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsSnapshotExists(resourceName, &snapshot1), + resource.TestCheckResourceAttr(resourceName, "name", rName), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccOpenzfsSnapshotVolumeId2Config(rName2), + Check: resource.ComposeTestCheckFunc( + testAccCheckFsxOpenzfsSnapshotExists(resourceName, &snapshot2), + testAccCheckFsxOpenzfsSnapshotRecreated(&snapshot1, &snapshot2), + resource.TestCheckResourceAttr(resourceName, "name", rName2), + ), + }, + }, + }) +} + +func testAccCheckFsxOpenzfsSnapshotExists(resourceName string, fs *fsx.Snapshot) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return fmt.Errorf("Not found: %s", resourceName) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn + + output, err := tffsx.FindSnapshotByID(conn, rs.Primary.ID) + if err != nil { + return err + } + + if output == nil { + return fmt.Errorf("FSx OpenZFS Snapshot (%s) not found", rs.Primary.ID) + } + + 
*fs = *output + + return nil + } +} + +func testAccCheckFsxOpenzfsSnapshotDestroy(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).FSxConn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_fsx_openzfs_snapshot" { + continue + } + + _, err := tffsx.FindSnapshotByID(conn, rs.Primary.ID) + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + return fmt.Errorf("FSx OpenZFS snapshot %s still exists", rs.Primary.ID) + } + return nil +} + +func testAccCheckFsxOpenzfsSnapshotNotRecreated(i, j *fsx.Snapshot) resource.TestCheckFunc { + return func(s *terraform.State) error { + if aws.StringValue(i.SnapshotId) != aws.StringValue(j.SnapshotId) { + return fmt.Errorf("FSx OpenZFS Snapshot (%s) recreated", aws.StringValue(i.SnapshotId)) + } + + return nil + } +} + +func testAccCheckFsxOpenzfsSnapshotRecreated(i, j *fsx.Snapshot) resource.TestCheckFunc { + return func(s *terraform.State) error { + if aws.StringValue(i.SnapshotId) == aws.StringValue(j.SnapshotId) { + return fmt.Errorf("FSx OpenZFS Snapshot (%s) not recreated", aws.StringValue(i.SnapshotId)) + } + + return nil + } +} + +func testAccOpenzfsSnapshotBaseConfig(rName string) string { + return acctest.ConfigCompose(acctest.ConfigAvailableAZsNoOptIn(), fmt.Sprintf(` +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_subnet" "test1" { + vpc_id = aws_vpc.test.id + cidr_block = "10.0.1.0/24" + availability_zone = data.aws_availability_zones.available.names[0] +} + +resource "aws_fsx_openzfs_file_system" "test" { + storage_capacity = 64 + subnet_ids = [aws_subnet.test1.id] + deployment_type = "SINGLE_AZ_1" + throughput_capacity = 64 + + + tags = { + Name = %[1]q + } +} +`, rName)) +} + +func testAccOpenzfsSnapshotBasicConfig(rName string) string { + return acctest.ConfigCompose(testAccOpenzfsSnapshotBaseConfig(rName), fmt.Sprintf(` +resource "aws_fsx_openzfs_snapshot" "test" { + name = %[1]q + volume_id = aws_fsx_openzfs_file_system.test.root_volume_id +} +`, rName)) +} + +func testAccOpenzfsSnapshotTags1Config(rName string, tagKey1, tagValue1 string) string { + return acctest.ConfigCompose(testAccOpenzfsSnapshotBaseConfig(rName), fmt.Sprintf(` +resource "aws_fsx_openzfs_snapshot" "test" { + name = %[1]q + volume_id = aws_fsx_openzfs_file_system.test.root_volume_id + + tags = { + %[2]q = %[3]q + } +} +`, rName, tagKey1, tagValue1)) +} + +func testAccOpenzfsSnapshotTags2Config(rName string, tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return acctest.ConfigCompose(testAccOpenzfsSnapshotBaseConfig(rName), fmt.Sprintf(` +resource "aws_fsx_openzfs_snapshot" "test" { + name = %[1]q + volume_id = aws_fsx_openzfs_file_system.test.root_volume_id + + + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } +} +`, rName, tagKey1, tagValue1, tagKey2, tagValue2)) +} + +func testAccOpenzfsSnapshotChildVolumeConfig(rName string) string { + return acctest.ConfigCompose(testAccOpenzfsSnapshotBaseConfig(rName), fmt.Sprintf(` +resource "aws_fsx_openzfs_volume" "test" { + name = %[1]q + parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id +} + +resource "aws_fsx_openzfs_snapshot" "test" { + name = %[1]q + volume_id = aws_fsx_openzfs_volume.test.id +} +`, rName)) +} + +func testAccOpenzfsSnapshotVolumeId1Config(rName string) string { + return acctest.ConfigCompose(testAccOpenzfsSnapshotBaseConfig(rName), fmt.Sprintf(` +resource "aws_fsx_openzfs_volume" "test1" { + name = %[1]q + parent_volume_id = 
aws_fsx_openzfs_file_system.test.root_volume_id +} + +resource "aws_fsx_openzfs_snapshot" "test" { + name = %[1]q + volume_id = aws_fsx_openzfs_volume.test1.id +} +`, rName)) +} + +func testAccOpenzfsSnapshotVolumeId2Config(rName string) string { + return acctest.ConfigCompose(testAccOpenzfsSnapshotBaseConfig(rName), fmt.Sprintf(` +resource "aws_fsx_openzfs_volume" "test2" { + name = %[1]q + parent_volume_id = aws_fsx_openzfs_file_system.test.root_volume_id +} + +resource "aws_fsx_openzfs_snapshot" "test" { + name = %[1]q + volume_id = aws_fsx_openzfs_volume.test2.id +} +`, rName)) +} diff --git a/internal/service/fsx/status.go b/internal/service/fsx/status.go index 6dc2211e9d0..dfb91b4f1d4 100644 --- a/internal/service/fsx/status.go +++ b/internal/service/fsx/status.go @@ -86,3 +86,19 @@ func statusVolume(conn *fsx.FSx, id string) resource.StateRefreshFunc { return output, aws.StringValue(output.Lifecycle), nil } } + +func statusSnapshot(conn *fsx.FSx, id string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + output, err := FindSnapshotByID(conn, id) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return output, aws.StringValue(output.Lifecycle), nil + } +} diff --git a/internal/service/fsx/wait.go b/internal/service/fsx/wait.go index 642ddc2acca..93bc90469dd 100644 --- a/internal/service/fsx/wait.go +++ b/internal/service/fsx/wait.go @@ -268,3 +268,57 @@ func waitVolumeDeleted(conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Vo return nil, err } + +func waitSnapshotCreated(conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Snapshot, error) { //nolint:unparam + stateConf := &resource.StateChangeConf{ + Pending: []string{fsx.SnapshotLifecycleCreating, fsx.SnapshotLifecyclePending}, + Target: []string{fsx.SnapshotLifecycleAvailable}, + Refresh: statusSnapshot(conn, id), + Timeout: timeout, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*fsx.Snapshot); ok { + return output, err + } + + return nil, err +} + +func waitSnapshotUpdated(conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Snapshot, error) { //nolint:unparam + stateConf := &resource.StateChangeConf{ + Pending: []string{fsx.SnapshotLifecyclePending}, + Target: []string{fsx.SnapshotLifecycleAvailable}, + Refresh: statusSnapshot(conn, id), + Timeout: timeout, + Delay: 150 * time.Second, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*fsx.Snapshot); ok { + return output, err + } + + return nil, err +} + +func waitSnapshotDeleted(conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Snapshot, error) { //nolint:unparam + stateConf := &resource.StateChangeConf{ + Pending: []string{fsx.SnapshotLifecyclePending, fsx.SnapshotLifecycleDeleting}, + Target: []string{}, + Refresh: statusSnapshot(conn, id), + Timeout: timeout, + Delay: 30 * time.Second, + } + + outputRaw, err := stateConf.WaitForState() + + if output, ok := outputRaw.(*fsx.Snapshot); ok { + return output, err + } + + return nil, err +} diff --git a/website/docs/r/fsx_backup.html.markdown b/website/docs/r/fsx_backup.html.markdown index 30cac8b0916..f0f3a48e55b 100644 --- a/website/docs/r/fsx_backup.html.markdown +++ b/website/docs/r/fsx_backup.html.markdown @@ -59,6 +59,21 @@ resource "aws_fsx_ontap_volume" "example" { } ``` +## OpenZFS Example + +```terraform +resource "aws_fsx_backup" "example" { + file_system_id = aws_fsx_openzfs_file_system.example.id +} + +resource 
"aws_fsx_openzfs_file_system" "example" { + storage_capacity = 64 + subnet_ids = [aws_subnet.example.id] + deployment_type = "SINGLE_AZ_1" + throughput_capacity = 64 +} +``` + ## Argument Reference The following arguments are supported: diff --git a/website/docs/r/fsx_openzfs_snapshot.html.markdown b/website/docs/r/fsx_openzfs_snapshot.html.markdown new file mode 100644 index 00000000000..0e5fae76691 --- /dev/null +++ b/website/docs/r/fsx_openzfs_snapshot.html.markdown @@ -0,0 +1,83 @@ +subcategory: "File System (FSx)" +layout: "aws" +page_title: "AWS: aws_fsx_openzfs_snapshot" +description: |- + Manages an Amazon FSx for OpenZFS snapshot. +--- + +# Resource: aws_fsx_openzfs_snapshot + +Manages an Amazon FSx for OpenZFS volume. +See the [FSx OpenZFS User Guide](https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/what-is-fsx.html) for more information. + + +## Example Usage + +### Root volume Example +```terraform +resource "aws_fsx_openzfs_snapshot" "example" { + name = "example" + volume_id = aws_fsx_openzfs_file_system.example.root_volume_id +} + +resource "aws_fsx_openzfs_file_system" "example" { + storage_capacity = 64 + subnet_ids = [aws_subnet.example.id] + deployment_type = "SINGLE_AZ_1" + throughput_capacity = 64 +} +``` + +### Child volume Example +```terraform +resource "aws_fsx_openzfs_snapshot" "example" { + name = "example" + volume_id = aws_fsx_openzfs_volume.example.id +} + +resource "aws_fsx_openzfs_volume" "example" { + name = "example" + parent_volume_id = aws_fsx_openzfs_file_system.example.root_volume_id +} + +resource "aws_fsx_openzfs_file_system" "example" { + storage_capacity = 64 + subnet_ids = [aws_subnet.example.id] + deployment_type = "SINGLE_AZ_1" + throughput_capacity = 64 +} +``` + + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The name of the Snapshot. You can use a maximum of 203 alphanumeric characters plus either _ or - or : or . for the name. +* `tags` - (Optional) A map of tags to assign to the file system. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. If you have set `copy_tags_to_backups` to true, and you specify one or more tags, no existing file system tags are copied from the file system to the backup. +* `volume_id` - (Optional) The ID of the volume to snapshot. This can be the root volume or a child volume. + +## Attributes Reference + +In addition to all arguments above, the following attributes are exported: + +* `arn` - Amazon Resource Name of the snapshot. +* `id` - Identifier of the snapshot, e.g., `fsvolsnap-12345678` +* `tags_all` - A map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). + +## Timeouts + +`aws_fsx_openzfs_snapshot` provides the following [Timeouts](https://www.terraform.io/docs/configuration/blocks/resources/syntax.html#operation-timeouts) +configuration options: + +* `create` - (Default `30m`) How long to wait for the backup to be created. +* `delete` - (Default `30m`) How long to wait for the backup to be deleted. +* `update` - (Default `30m`) How long to wait for the backup to be deleted. 
+
+## Import
+
+FSx OpenZFS snapshots can be imported using the `id`, e.g.,
+
+```
+$ terraform import aws_fsx_openzfs_snapshot.example fsvolsnap-543ab12b1ca672f33
+```

From 00f8cadda905c45c17afa1f88f5b4dd9d5317d07 Mon Sep 17 00:00:00 2001
From: awsaxeman <34073510+awsaxeman@users.noreply.github.com>
Date: Thu, 30 Dec 2021 00:11:54 -0500
Subject: [PATCH 10/12] fix lint

---
 internal/service/fsx/wait.go                      |  6 +++---
 website/docs/r/fsx_openzfs_snapshot.html.markdown | 11 +++++++----
 2 files changed, 10 insertions(+), 7 deletions(-)

diff --git a/internal/service/fsx/wait.go b/internal/service/fsx/wait.go
index 93bc90469dd..8de0a1eb2ad 100644
--- a/internal/service/fsx/wait.go
+++ b/internal/service/fsx/wait.go
@@ -269,7 +269,7 @@ func waitVolumeDeleted(conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Vo
 	return nil, err
 }
 
-func waitSnapshotCreated(conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Snapshot, error) { //nolint:unparam
+func waitSnapshotCreated(conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Snapshot, error) {
 	stateConf := &resource.StateChangeConf{
 		Pending: []string{fsx.SnapshotLifecycleCreating, fsx.SnapshotLifecyclePending},
 		Target:  []string{fsx.SnapshotLifecycleAvailable},
@@ -287,7 +287,7 @@ func waitSnapshotCreated(conn *fsx.FSx, id string, timeout time.Duration) (*fsx.
 	return nil, err
 }
 
-func waitSnapshotUpdated(conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Snapshot, error) { //nolint:unparam
+func waitSnapshotUpdated(conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Snapshot, error) {
 	stateConf := &resource.StateChangeConf{
 		Pending: []string{fsx.SnapshotLifecyclePending},
 		Target:  []string{fsx.SnapshotLifecycleAvailable},
@@ -305,7 +305,7 @@ func waitSnapshotUpdated(conn *fsx.FSx, id string, timeout time.Duration) (*fsx.
return nil, err } -func waitSnapshotDeleted(conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Snapshot, error) { //nolint:unparam +func waitSnapshotDeleted(conn *fsx.FSx, id string, timeout time.Duration) (*fsx.Snapshot, error) { stateConf := &resource.StateChangeConf{ Pending: []string{fsx.SnapshotLifecyclePending, fsx.SnapshotLifecycleDeleting}, Target: []string{}, diff --git a/website/docs/r/fsx_openzfs_snapshot.html.markdown b/website/docs/r/fsx_openzfs_snapshot.html.markdown index 0e5fae76691..efda65f6bed 100644 --- a/website/docs/r/fsx_openzfs_snapshot.html.markdown +++ b/website/docs/r/fsx_openzfs_snapshot.html.markdown @@ -1,3 +1,4 @@ +--- subcategory: "File System (FSx)" layout: "aws" page_title: "AWS: aws_fsx_openzfs_snapshot" @@ -13,10 +14,11 @@ See the [FSx OpenZFS User Guide](https://docs.aws.amazon.com/fsx/latest/OpenZFSG ## Example Usage -### Root volume Example +### Root volume Example + ```terraform resource "aws_fsx_openzfs_snapshot" "example" { - name = "example" + name = "example" volume_id = aws_fsx_openzfs_file_system.example.root_volume_id } @@ -28,10 +30,11 @@ resource "aws_fsx_openzfs_file_system" "example" { } ``` -### Child volume Example +### Child volume Example + ```terraform resource "aws_fsx_openzfs_snapshot" "example" { - name = "example" + name = "example" volume_id = aws_fsx_openzfs_volume.example.id } From 73d1bccc50cfdcb0dbd8f7dc31df89e327490763 Mon Sep 17 00:00:00 2001 From: awsaxeman <34073510+awsaxeman@users.noreply.github.com> Date: Tue, 4 Jan 2022 16:28:01 -0500 Subject: [PATCH 11/12] user_and_group_quotas to typeset --- internal/service/fsx/openzfs_file_system.go | 10 ++-- .../service/fsx/openzfs_file_system_test.go | 48 ++++++++++++------- internal/service/fsx/openzfs_volume.go | 6 +-- internal/service/fsx/openzfs_volume_test.go | 40 ++++++++++------ 4 files changed, 63 insertions(+), 41 deletions(-) diff --git a/internal/service/fsx/openzfs_file_system.go b/internal/service/fsx/openzfs_file_system.go index 9ee07650a7f..76af2e2b00c 100644 --- a/internal/service/fsx/openzfs_file_system.go +++ b/internal/service/fsx/openzfs_file_system.go @@ -156,7 +156,7 @@ func ResourceOpenzfsFileSystem() *schema.Resource { Computed: true, }, "user_and_group_quotas": { - Type: schema.TypeList, + Type: schema.TypeSet, Optional: true, Computed: true, MaxItems: 100, @@ -601,8 +601,8 @@ func expandFsxOpenzfsRootVolumeConfiguration(cfg []interface{}) *fsx.OpenZFSCrea out.ReadOnly = aws.Bool(v) } - if v, ok := conf["user_and_group_quotas"].([]interface{}); ok { - out.UserAndGroupQuotas = expandFsxOpenzfsUserAndGroupQuotas(v) + if v, ok := conf["user_and_group_quotas"]; ok { + out.UserAndGroupQuotas = expandFsxOpenzfsUserAndGroupQuotas(v.(*schema.Set).List()) } if v, ok := conf["nfs_exports"].([]interface{}); ok { @@ -629,8 +629,8 @@ func expandFsxOpenzfsUpdateRootVolumeConfiguration(cfg []interface{}) *fsx.Updat out.ReadOnly = aws.Bool(v) } - if v, ok := conf["user_and_group_quotas"].([]interface{}); ok { - out.UserAndGroupQuotas = expandFsxOpenzfsUserAndGroupQuotas(v) + if v, ok := conf["user_and_group_quotas"]; ok { + out.UserAndGroupQuotas = expandFsxOpenzfsUserAndGroupQuotas(v.(*schema.Set).List()) } if v, ok := conf["nfs_exports"].([]interface{}); ok { diff --git a/internal/service/fsx/openzfs_file_system_test.go b/internal/service/fsx/openzfs_file_system_test.go index 9f8a600e806..ba8476d3c38 100644 --- a/internal/service/fsx/openzfs_file_system_test.go +++ b/internal/service/fsx/openzfs_file_system_test.go @@ -149,9 +149,11 @@ func 
TestAccFSxOpenzfsFileSystem_rootVolume(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.nfs_exports.0.client_configurations.0.options.1", "rw"), resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.read_only", "false"), resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.#", "1"), - resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.0.id", "10"), - resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.0.storage_capacity_quota_gib", "128"), - resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.0.type", "USER"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "root_volume_configuration.0.user_and_group_quotas.*", map[string]string{ + "id": "10", + "storage_capacity_quota_gib": "128", + "type": "USER", + }), ), }, { @@ -175,9 +177,11 @@ func TestAccFSxOpenzfsFileSystem_rootVolume(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.nfs_exports.0.client_configurations.0.options.1", "rw"), resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.read_only", "true"), resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.#", "1"), - resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.0.id", "10"), - resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.0.storage_capacity_quota_gib", "256"), - resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.0.type", "USER"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "root_volume_configuration.0.user_and_group_quotas.*", map[string]string{ + "id": "10", + "storage_capacity_quota_gib": "256", + "type": "USER", + }), ), }, { @@ -201,18 +205,26 @@ func TestAccFSxOpenzfsFileSystem_rootVolume(t *testing.T) { }), resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.read_only", "false"), resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.#", "4"), - resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.0.id", "10"), - resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.0.storage_capacity_quota_gib", "128"), - resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.0.type", "USER"), - resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.1.id", "20"), - resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.1.storage_capacity_quota_gib", "1024"), - resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.1.type", "GROUP"), - resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.2.id", "5"), - resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.2.storage_capacity_quota_gib", "1024"), - resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.2.type", "GROUP"), - resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.3.id", "100"), - resource.TestCheckResourceAttr(resourceName, 
"root_volume_configuration.0.user_and_group_quotas.3.storage_capacity_quota_gib", "128"), - resource.TestCheckResourceAttr(resourceName, "root_volume_configuration.0.user_and_group_quotas.3.type", "USER"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "root_volume_configuration.0.user_and_group_quotas.*", map[string]string{ + "id": "10", + "storage_capacity_quota_gib": "128", + "type": "USER", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "root_volume_configuration.0.user_and_group_quotas.*", map[string]string{ + "id": "20", + "storage_capacity_quota_gib": "1024", + "type": "GROUP", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "root_volume_configuration.0.user_and_group_quotas.*", map[string]string{ + "id": "5", + "storage_capacity_quota_gib": "1024", + "type": "GROUP", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "root_volume_configuration.0.user_and_group_quotas.*", map[string]string{ + "id": "100", + "storage_capacity_quota_gib": "128", + "type": "USER", + }), ), }, }, diff --git a/internal/service/fsx/openzfs_volume.go b/internal/service/fsx/openzfs_volume.go index 7639d6c754b..b82359591d3 100644 --- a/internal/service/fsx/openzfs_volume.go +++ b/internal/service/fsx/openzfs_volume.go @@ -141,7 +141,7 @@ func ResourceOpenzfsVolume() *schema.Resource { ValidateFunc: validation.IntBetween(0, 2147483647), }, "user_and_group_quotas": { - Type: schema.TypeList, + Type: schema.TypeSet, Optional: true, Computed: true, MaxItems: 100, @@ -222,7 +222,7 @@ func resourceOepnzfsVolumeCreate(d *schema.ResourceData, meta interface{}) error } if v, ok := d.GetOk("user_and_group_quotas"); ok { - input.OpenZFSConfiguration.UserAndGroupQuotas = expandFsxOpenzfsVolumeUserAndGroupQuotas(v.([]interface{})) + input.OpenZFSConfiguration.UserAndGroupQuotas = expandFsxOpenzfsVolumeUserAndGroupQuotas(v.(*schema.Set).List()) } if v, ok := d.GetOk("origin_snapshot"); ok { @@ -366,7 +366,7 @@ func resourceOpenzfsVolumeUpdate(d *schema.ResourceData, meta interface{}) error } if d.HasChange("user_and_group_quotas") { - input.OpenZFSConfiguration.UserAndGroupQuotas = expandFsxOpenzfsVolumeUserAndGroupQuotas(d.Get("user_and_group_quotas").([]interface{})) + input.OpenZFSConfiguration.UserAndGroupQuotas = expandFsxOpenzfsVolumeUserAndGroupQuotas(d.Get("user_and_group_quotas").(*schema.Set).List()) } _, err := conn.UpdateVolume(input) diff --git a/internal/service/fsx/openzfs_volume_test.go b/internal/service/fsx/openzfs_volume_test.go index ad106bb3e47..64902ecd345 100644 --- a/internal/service/fsx/openzfs_volume_test.go +++ b/internal/service/fsx/openzfs_volume_test.go @@ -384,9 +384,11 @@ func TestAccFSxOpenzfsVolume_userAndGroupQuotas(t *testing.T) { Check: resource.ComposeTestCheckFunc( testAccCheckFsxOpenzfsVolumeExists(resourceName, &volume1), resource.TestCheckResourceAttr(resourceName, "user_and_group_quotas.#", "1"), - resource.TestCheckResourceAttr(resourceName, "user_and_group_quotas.0.id", "10"), - resource.TestCheckResourceAttr(resourceName, "user_and_group_quotas.0.storage_capacity_quota_gib", "256"), - resource.TestCheckResourceAttr(resourceName, "user_and_group_quotas.0.type", "USER"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "user_and_group_quotas.*", map[string]string{ + "id": "10", + "storage_capacity_quota_gib": "256", + "type": "USER", + }), ), }, { @@ -400,18 +402,26 @@ func TestAccFSxOpenzfsVolume_userAndGroupQuotas(t *testing.T) { testAccCheckFsxOpenzfsVolumeExists(resourceName, &volume2), 
testAccCheckFsxOpenzfsVolumeNotRecreated(&volume1, &volume2), resource.TestCheckResourceAttr(resourceName, "user_and_group_quotas.#", "4"), - resource.TestCheckResourceAttr(resourceName, "user_and_group_quotas.0.id", "10"), - resource.TestCheckResourceAttr(resourceName, "user_and_group_quotas.0.storage_capacity_quota_gib", "128"), - resource.TestCheckResourceAttr(resourceName, "user_and_group_quotas.0.type", "USER"), - resource.TestCheckResourceAttr(resourceName, "user_and_group_quotas.1.id", "20"), - resource.TestCheckResourceAttr(resourceName, "user_and_group_quotas.1.storage_capacity_quota_gib", "1024"), - resource.TestCheckResourceAttr(resourceName, "user_and_group_quotas.1.type", "GROUP"), - resource.TestCheckResourceAttr(resourceName, "user_and_group_quotas.2.id", "5"), - resource.TestCheckResourceAttr(resourceName, "user_and_group_quotas.2.storage_capacity_quota_gib", "1024"), - resource.TestCheckResourceAttr(resourceName, "user_and_group_quotas.2.type", "GROUP"), - resource.TestCheckResourceAttr(resourceName, "user_and_group_quotas.3.id", "100"), - resource.TestCheckResourceAttr(resourceName, "user_and_group_quotas.3.storage_capacity_quota_gib", "128"), - resource.TestCheckResourceAttr(resourceName, "user_and_group_quotas.3.type", "USER"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "user_and_group_quotas.*", map[string]string{ + "id": "10", + "storage_capacity_quota_gib": "128", + "type": "USER", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "user_and_group_quotas.*", map[string]string{ + "id": "20", + "storage_capacity_quota_gib": "1024", + "type": "GROUP", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "user_and_group_quotas.*", map[string]string{ + "id": "5", + "storage_capacity_quota_gib": "1024", + "type": "GROUP", + }), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "user_and_group_quotas.*", map[string]string{ + "id": "100", + "storage_capacity_quota_gib": "128", + "type": "USER", + }), ), }, }, From 34b4a86f6e345922f516766fb685a07ae4ae16da Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 5 Jan 2022 09:20:11 -0500 Subject: [PATCH 12/12] Skip GovCloud failures. --- internal/service/fsx/openzfs_file_system_test.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/internal/service/fsx/openzfs_file_system_test.go b/internal/service/fsx/openzfs_file_system_test.go index ba8476d3c38..82041800b28 100644 --- a/internal/service/fsx/openzfs_file_system_test.go +++ b/internal/service/fsx/openzfs_file_system_test.go @@ -16,6 +16,16 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) +func init() { + acctest.RegisterServiceErrorCheckFunc(fsx.EndpointsID, testAccErrorCheckSkipFSx) +} + +func testAccErrorCheckSkipFSx(t *testing.T) resource.ErrorCheckFunc { + return acctest.ErrorCheckSkipMessagesContaining(t, + "Amazon FSx does not currently support OpenZFS file system creation in the following Availability Zones", + ) +} + func TestAccFSxOpenzfsFileSystem_basic(t *testing.T) { var filesystem fsx.FileSystem resourceName := "aws_fsx_openzfs_file_system.test"