
Merge pull request #20614 from DrFaust92/fsx_lustre_backup
r/fsx_lustre_file_system - add support for creating filesystem from backup
ewbankkit authored Aug 20, 2021
2 parents ab944ce + 09821db commit ca8624f
Showing 4 changed files with 100 additions and 6 deletions.
3 changes: 3 additions & 0 deletions .changelog/20614.txt
@@ -0,0 +1,3 @@
+```release-note:enhancement
+resource/aws_fsx_lustre_filesystem: Allow creating filesystem from backup using `backup_id`.
+```
50 changes: 44 additions & 6 deletions aws/resource_aws_fsx_lustre_file_system.go
@@ -36,6 +36,11 @@ func resourceAwsFsxLustreFileSystem() *schema.Resource {
 				Type:     schema.TypeString,
 				Computed: true,
 			},
+			"backup_id": {
+				Type:     schema.TypeString,
+				Optional: true,
+				ForceNew: true,
+			},
 			"dns_name": {
 				Type:     schema.TypeString,
 				Computed: true,
@@ -90,7 +95,7 @@ func resourceAwsFsxLustreFileSystem() *schema.Resource {
 			},
 			"storage_capacity": {
 				Type:         schema.TypeInt,
-				Required:     true,
+				Optional:     true,
 				ValidateFunc: validation.IntAtLeast(1200),
 			},
 			"subnet_ids": {
@@ -227,69 +232,102 @@ func resourceAwsFsxLustreFileSystemCreate(d *schema.ResourceData, meta interface{}) error {
 		},
 	}
 
+	backupInput := &fsx.CreateFileSystemFromBackupInput{
+		ClientRequestToken: aws.String(resource.UniqueId()),
+		StorageType:        aws.String(d.Get("storage_type").(string)),
+		SubnetIds:          expandStringList(d.Get("subnet_ids").([]interface{})),
+		LustreConfiguration: &fsx.CreateFileSystemLustreConfiguration{
+			DeploymentType: aws.String(d.Get("deployment_type").(string)),
+		},
+	}
+
 	//Applicable only for TypePersistent1
 	if v, ok := d.GetOk("kms_key_id"); ok {
 		input.KmsKeyId = aws.String(v.(string))
+		backupInput.KmsKeyId = aws.String(v.(string))
 	}
 
 	if v, ok := d.GetOk("automatic_backup_retention_days"); ok {
 		input.LustreConfiguration.AutomaticBackupRetentionDays = aws.Int64(int64(v.(int)))
+		backupInput.LustreConfiguration.AutomaticBackupRetentionDays = aws.Int64(int64(v.(int)))
 	}
 
 	if v, ok := d.GetOk("daily_automatic_backup_start_time"); ok {
 		input.LustreConfiguration.DailyAutomaticBackupStartTime = aws.String(v.(string))
+		backupInput.LustreConfiguration.DailyAutomaticBackupStartTime = aws.String(v.(string))
 	}
 
 	if v, ok := d.GetOk("export_path"); ok {
 		input.LustreConfiguration.ExportPath = aws.String(v.(string))
+		backupInput.LustreConfiguration.ExportPath = aws.String(v.(string))
 	}
 
 	if v, ok := d.GetOk("import_path"); ok {
 		input.LustreConfiguration.ImportPath = aws.String(v.(string))
+		backupInput.LustreConfiguration.ImportPath = aws.String(v.(string))
 	}
 
 	if v, ok := d.GetOk("imported_file_chunk_size"); ok {
 		input.LustreConfiguration.ImportedFileChunkSize = aws.Int64(int64(v.(int)))
+		backupInput.LustreConfiguration.ImportedFileChunkSize = aws.Int64(int64(v.(int)))
 	}
 
 	if v, ok := d.GetOk("security_group_ids"); ok {
 		input.SecurityGroupIds = expandStringSet(v.(*schema.Set))
+		backupInput.SecurityGroupIds = expandStringSet(v.(*schema.Set))
 	}
 
 	if len(tags) > 0 {
 		input.Tags = tags.IgnoreAws().FsxTags()
+		backupInput.Tags = tags.IgnoreAws().FsxTags()
 	}
 
 	if v, ok := d.GetOk("weekly_maintenance_start_time"); ok {
 		input.LustreConfiguration.WeeklyMaintenanceStartTime = aws.String(v.(string))
+		backupInput.LustreConfiguration.WeeklyMaintenanceStartTime = aws.String(v.(string))
 	}
 
 	if v, ok := d.GetOk("per_unit_storage_throughput"); ok {
 		input.LustreConfiguration.PerUnitStorageThroughput = aws.Int64(int64(v.(int)))
+		backupInput.LustreConfiguration.PerUnitStorageThroughput = aws.Int64(int64(v.(int)))
 	}
 
 	if v, ok := d.GetOk("drive_cache_type"); ok {
 		input.LustreConfiguration.DriveCacheType = aws.String(v.(string))
+		backupInput.LustreConfiguration.DriveCacheType = aws.String(v.(string))
 	}
 
 	if v, ok := d.GetOk("auto_import_policy"); ok {
 		input.LustreConfiguration.AutoImportPolicy = aws.String(v.(string))
+		backupInput.LustreConfiguration.AutoImportPolicy = aws.String(v.(string))
 	}
 
 	if v, ok := d.GetOk("copy_tags_to_backups"); ok {
 		input.LustreConfiguration.CopyTagsToBackups = aws.Bool(v.(bool))
+		backupInput.LustreConfiguration.CopyTagsToBackups = aws.Bool(v.(bool))
 	}
 
 	if v, ok := d.GetOk("data_compression_type"); ok {
 		input.LustreConfiguration.DataCompressionType = aws.String(v.(string))
+		backupInput.LustreConfiguration.DataCompressionType = aws.String(v.(string))
 	}
 
-	result, err := conn.CreateFileSystem(input)
-	if err != nil {
-		return fmt.Errorf("Error creating FSx Lustre filesystem: %w", err)
-	}
+	if v, ok := d.GetOk("backup_id"); ok {
+		backupInput.BackupId = aws.String(v.(string))
+		result, err := conn.CreateFileSystemFromBackup(backupInput)
+		if err != nil {
+			return fmt.Errorf("Error creating FSx Lustre filesystem from backup: %w", err)
+		}
 
-	d.SetId(aws.StringValue(result.FileSystem.FileSystemId))
+		d.SetId(aws.StringValue(result.FileSystem.FileSystemId))
+	} else {
+		result, err := conn.CreateFileSystem(input)
+		if err != nil {
+			return fmt.Errorf("Error creating FSx Lustre filesystem: %w", err)
+		}
+
+		d.SetId(aws.StringValue(result.FileSystem.FileSystemId))
+	}
 
 	log.Println("[DEBUG] Waiting for filesystem to become available")
 
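Read in isolation, the flattened diff can obscure the control flow the commit adds. Below is a minimal standalone sketch, written against aws-sdk-go v1 (the SDK this provider uses), of the same branch: restore from a backup when a backup ID is present, otherwise create a fresh filesystem. The deployment type, throughput, capacity, and subnet ID here are placeholder assumptions, not values taken from the provider.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/fsx"
)

// createLustreFileSystem branches the same way the diff does: with a backup ID
// it calls CreateFileSystemFromBackup (storage capacity is inherited from the
// backup), otherwise it calls CreateFileSystem with an explicit capacity.
func createLustreFileSystem(conn *fsx.FSx, backupID string, subnetIDs []*string) (string, error) {
	lustreConfig := &fsx.CreateFileSystemLustreConfiguration{
		DeploymentType:           aws.String(fsx.LustreDeploymentTypePersistent1),
		PerUnitStorageThroughput: aws.Int64(50), // MB/s per TiB; placeholder value
	}

	if backupID != "" {
		result, err := conn.CreateFileSystemFromBackup(&fsx.CreateFileSystemFromBackupInput{
			BackupId:            aws.String(backupID),
			SubnetIds:           subnetIDs,
			LustreConfiguration: lustreConfig,
		})
		if err != nil {
			return "", fmt.Errorf("creating FSx Lustre filesystem from backup: %w", err)
		}
		return aws.StringValue(result.FileSystem.FileSystemId), nil
	}

	result, err := conn.CreateFileSystem(&fsx.CreateFileSystemInput{
		FileSystemType:      aws.String(fsx.FileSystemTypeLustre),
		StorageCapacity:     aws.Int64(1200), // GiB; required only on this path
		SubnetIds:           subnetIDs,
		LustreConfiguration: lustreConfig,
	})
	if err != nil {
		return "", fmt.Errorf("creating FSx Lustre filesystem: %w", err)
	}
	return aws.StringValue(result.FileSystem.FileSystemId), nil
}

func main() {
	conn := fsx.New(session.Must(session.NewSession()))
	// "subnet-0123456789abcdef0" is a placeholder, not a real subnet.
	id, err := createLustreFileSystem(conn, "", []*string{aws.String("subnet-0123456789abcdef0")})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created filesystem:", id)
}
```

The provider itself populates both input structs field by field from Terraform state before branching; collapsing that into one shared `LustreConfiguration`, as the sketch does, is purely for readability.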
52 changes: 52 additions & 0 deletions aws/resource_aws_fsx_lustre_file_system_test.go
@@ -600,6 +600,35 @@ func TestAccAWSFsxLustreFileSystem_DeploymentTypePersistent1(t *testing.T) {
 	})
 }
 
+func TestAccAWSFsxLustreFileSystem_fromBackup(t *testing.T) {
+	var filesystem fsx.FileSystem
+	resourceName := "aws_fsx_lustre_file_system.test"
+
+	resource.ParallelTest(t, resource.TestCase{
+		PreCheck:     func() { testAccPreCheck(t); testAccPartitionHasServicePreCheck(fsx.EndpointsID, t) },
+		ErrorCheck:   testAccErrorCheck(t, fsx.EndpointsID),
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckFsxLustreFileSystemDestroy,
+		Steps: []resource.TestStep{
+			{
+				Config: testAccAwsFsxLustreFileSystemFromBackup(),
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckFsxLustreFileSystemExists(resourceName, &filesystem),
+					resource.TestCheckResourceAttr(resourceName, "per_unit_storage_throughput", "50"),
+					resource.TestCheckResourceAttr(resourceName, "deployment_type", fsx.LustreDeploymentTypePersistent1),
+					resource.TestCheckResourceAttrPair(resourceName, "backup_id", "aws_fsx_backup.test", "id"),
+				),
+			},
+			{
+				ResourceName:            resourceName,
+				ImportState:             true,
+				ImportStateVerify:       true,
+				ImportStateVerifyIgnore: []string{"security_group_ids", "backup_id"},
+			},
+		},
+	})
+}
+
 func TestAccAWSFsxLustreFileSystem_KmsKeyId(t *testing.T) {
 	var filesystem1, filesystem2 fsx.FileSystem
 	resourceName := "aws_fsx_lustre_file_system.test"
@@ -1117,6 +1146,29 @@ resource "aws_fsx_lustre_file_system" "test" {
 `, perUnitStorageThroughput))
 }
 
+func testAccAwsFsxLustreFileSystemFromBackup() string {
+	return composeConfig(testAccAwsFsxLustreFileSystemConfigBase(), `
+resource "aws_fsx_lustre_file_system" "base" {
+  storage_capacity            = 1200
+  subnet_ids                  = [aws_subnet.test1.id]
+  deployment_type             = "PERSISTENT_1"
+  per_unit_storage_throughput = 50
+}
+
+resource "aws_fsx_backup" "test" {
+  file_system_id = aws_fsx_lustre_file_system.base.id
+}
+
+resource "aws_fsx_lustre_file_system" "test" {
+  storage_capacity            = 1200
+  subnet_ids                  = [aws_subnet.test1.id]
+  deployment_type             = "PERSISTENT_1"
+  per_unit_storage_throughput = 50
+  backup_id                   = aws_fsx_backup.test.id
+}
+`)
+}
+
 func testAccAwsFsxLustreFileSystemConfigKmsKeyId1() string {
 	return composeConfig(testAccAwsFsxLustreFileSystemConfigBase(), `
 resource "aws_kms_key" "test1" {
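The acceptance test exercises the full round trip: provision a base filesystem, back it up with `aws_fsx_backup`, then restore the backup into a second filesystem via `backup_id`. Outside Terraform, the backup half of that handshake looks roughly like the following aws-sdk-go v1 sketch; the filesystem ID, ten-minute deadline, and thirty-second poll interval are illustrative assumptions, and the provider uses its own waiter helpers rather than a hand-rolled loop like this one.

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/fsx"
)

// backupFileSystem creates a backup of an existing FSx filesystem and polls
// until its lifecycle reaches AVAILABLE, at which point it can serve as the
// source for CreateFileSystemFromBackup.
func backupFileSystem(conn *fsx.FSx, fileSystemID string) (string, error) {
	createOut, err := conn.CreateBackup(&fsx.CreateBackupInput{
		FileSystemId: aws.String(fileSystemID),
	})
	if err != nil {
		return "", fmt.Errorf("creating FSx backup: %w", err)
	}
	backupID := aws.StringValue(createOut.Backup.BackupId)

	deadline := time.Now().Add(10 * time.Minute) // illustrative deadline
	for time.Now().Before(deadline) {
		describeOut, err := conn.DescribeBackups(&fsx.DescribeBackupsInput{
			BackupIds: []*string{aws.String(backupID)},
		})
		if err != nil {
			return "", fmt.Errorf("describing FSx backup %s: %w", backupID, err)
		}
		if len(describeOut.Backups) > 0 &&
			aws.StringValue(describeOut.Backups[0].Lifecycle) == fsx.BackupLifecycleAvailable {
			return backupID, nil
		}
		time.Sleep(30 * time.Second) // illustrative poll interval
	}
	return "", fmt.Errorf("timed out waiting for FSx backup %s", backupID)
}

func main() {
	conn := fsx.New(session.Must(session.NewSession()))
	// "fs-0123456789abcdef0" is a placeholder, not a real filesystem ID.
	backupID, err := backupFileSystem(conn, "fs-0123456789abcdef0")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("backup ready:", backupID)
}
```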
1 change: 1 addition & 0 deletions website/docs/r/fsx_lustre_file_system.html.markdown
@@ -26,6 +26,7 @@ The following arguments are supported:
 
 * `storage_capacity` - (Required) The storage capacity (GiB) of the file system. Minimum of `1200`. See more details at [Allowed values for Fsx storage capacity](https://docs.aws.amazon.com/fsx/latest/APIReference/API_CreateFileSystem.html#FSx-CreateFileSystem-request-StorageCapacity). Update is allowed only for `SCRATCH_2` and `PERSISTENT_1` deployment types, See more details at [Fsx Storage Capacity Update](https://docs.aws.amazon.com/fsx/latest/APIReference/API_UpdateFileSystem.html#FSx-UpdateFileSystem-request-StorageCapacity).
 * `subnet_ids` - (Required) A list of IDs for the subnets that the file system will be accessible from. File systems currently support only one subnet. The file server is also launched in that subnet's Availability Zone.
+* `backup_id` - (Optional) The ID of the source backup to create the filesystem from.
 * `export_path` - (Optional) S3 URI (with optional prefix) where the root of your Amazon FSx file system is exported. Can only be specified with `import_path` argument and the path must use the same Amazon S3 bucket as specified in `import_path`. Set equal to `import_path` to overwrite files on export. Defaults to `s3://{IMPORT BUCKET}/FSxLustre{CREATION TIMESTAMP}`.
 * `import_path` - (Optional) S3 URI (with optional prefix) that you're using as the data repository for your FSx for Lustre file system. For example, `s3://example-bucket/optional-prefix/`.
 * `imported_file_chunk_size` - (Optional) For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. Can only be specified with `import_path` argument. Defaults to `1024`. Minimum of `1` and maximum of `512000`.
