Adds support for aws_finspace_dataview, aws_finspace_scaling_group & aws_finspace_volume for aws_finspace_cluster #34831

Merged
10 commits merged on Dec 15, 2023
6 changes: 6 additions & 0 deletions .changelog/34831.txt
@@ -0,0 +1,6 @@
```release-note:enhancement
resource/aws_finspace_kx_cluster: Add `database.dataview_name`, `scaling_group_configuration`, and `tickerplant_log_configuration` arguments.
```
```release-note:enhancement
resource/aws_finspace_kx_cluster: The `capacity_configuration` argument is now optional.
```
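
For orientation, a minimal configuration sketch exercising the new arguments follows. It is not taken from this PR's documentation or tests: every name, ID, and value is a placeholder, and the referenced environment, VPC, scaling group, and dataview are assumed to already exist.

```terraform
# Hypothetical usage sketch; all identifiers and values are placeholders.
resource "aws_finspace_kx_cluster" "example" {
  name                 = "example-cluster"
  environment_id       = aws_finspace_kx_environment.example.id
  type                 = "HDB"
  release_label        = "1.0"
  az_mode              = "SINGLE"
  availability_zone_id = "use1-az1"

  vpc_configuration {
    vpc_id             = aws_vpc.example.id
    security_group_ids = [aws_security_group.example.id]
    subnet_ids         = [aws_subnet.example.id]
    ip_address_type    = "IP_V4"
  }

  # New: run the cluster on a scaling group instead of a dedicated
  # capacity_configuration, which this change makes optional.
  scaling_group_configuration {
    scaling_group_name = "example-scaling-group"
    node_count         = 1
    memory_reservation = 6
  }

  database {
    database_name = "example-database"
    # New: attach the cluster to an existing dataview.
    dataview_name = "example-dataview"
  }
}
```

For tickerplant clusters, the new `tickerplant_log_configuration` block takes a `tickerplant_log_volumes` set of volume names in the same fashion; it is omitted above because it applies to a different cluster type.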
219 changes: 208 additions & 11 deletions internal/service/finspace/kx_cluster.go
@@ -132,7 +132,7 @@ func ResourceKxCluster() *schema.Resource {
},
"capacity_configuration": {
Type: schema.TypeList,
- Required: true,
Optional: true,
ForceNew: true,
MaxItems: 1,
Elem: &schema.Resource{
@@ -225,6 +225,12 @@ func ResourceKxCluster() *schema.Resource {
ForceNew: true,
ValidateFunc: validation.StringLenBetween(3, 63),
},
"dataview_name": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
ValidateFunc: validation.StringLenBetween(3, 63),
},
},
},
},
@@ -280,17 +286,23 @@ func ResourceKxCluster() *schema.Resource {
Schema: map[string]*schema.Schema{
"type": {
Type: schema.TypeString,
- Required: true,
Optional: true,
ForceNew: true,
ValidateFunc: validation.StringInSlice(
enum.Slice(types.KxSavedownStorageTypeSds01), true),
},
"size": {
Type: schema.TypeInt,
- Required: true,
Optional: true,
ForceNew: true,
ValidateFunc: validation.IntBetween(10, 16000),
},
"volume_name": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
ValidateFunc: validation.StringLenBetween(3, 63),
},
},
},
},
@@ -346,6 +358,64 @@ func ResourceKxCluster() *schema.Resource {
},
},
},
"scaling_group_configuration": {
Type: schema.TypeList,
Optional: true,
ForceNew: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"scaling_group_name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validation.StringLenBetween(3, 63),
},
"cpu": {
Type: schema.TypeFloat,
Optional: true,
ForceNew: true,
ValidateFunc: validation.FloatAtLeast(0.1),
},
"node_count": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
ValidateFunc: validation.IntAtLeast(1),
},
"memory_limit": {
Type: schema.TypeInt,
Optional: true,
ForceNew: true,
ValidateFunc: validation.IntAtLeast(6),
},
"memory_reservation": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
ValidateFunc: validation.IntAtLeast(6),
},
},
},
},
"tickerplant_log_configuration": {
Type: schema.TypeList,
Optional: true,
ForceNew: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"tickerplant_log_volumes": {
Type: schema.TypeSet,
Required: true,
ForceNew: true,
Elem: &schema.Schema{
Type: schema.TypeString,
ValidateFunc: validation.StringLenBetween(3, 63),
},
},
},
},
},
},

CustomizeDiff: verify.SetTagsDiff,
@@ -375,14 +445,13 @@ func resourceKxClusterCreate(ctx context.Context, d *schema.ResourceData, meta i
d.SetId(rID)

in := &finspace.CreateKxClusterInput{
- EnvironmentId: aws.String(environmentId),
- ClusterName: aws.String(clusterName),
- ClusterType: types.KxClusterType(d.Get("type").(string)),
- ReleaseLabel: aws.String(d.Get("release_label").(string)),
- AzMode: types.KxAzMode(d.Get("az_mode").(string)),
- CapacityConfiguration: expandCapacityConfiguration(d.Get("capacity_configuration").([]interface{})),
- ClientToken: aws.String(id.UniqueId()),
- Tags: getTagsIn(ctx),
EnvironmentId: aws.String(environmentId),
ClusterName: aws.String(clusterName),
ClusterType: types.KxClusterType(d.Get("type").(string)),
ReleaseLabel: aws.String(d.Get("release_label").(string)),
AzMode: types.KxAzMode(d.Get("az_mode").(string)),
ClientToken: aws.String(id.UniqueId()),
Tags: getTagsIn(ctx),
}

if v, ok := d.GetOk("description"); ok {
@@ -401,6 +470,10 @@ func resourceKxClusterCreate(ctx context.Context, d *schema.ResourceData, meta i
in.AvailabilityZoneId = aws.String(v.(string))
}

if v, ok := d.GetOk("capacity_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil {
in.CapacityConfiguration = expandCapacityConfiguration(v.([]interface{}))
}

if v, ok := d.GetOk("command_line_arguments"); ok && len(v.(map[string]interface{})) > 0 {
in.CommandLineArguments = expandCommandLineArguments(v.(map[string]interface{}))
}
@@ -429,6 +502,14 @@ func resourceKxClusterCreate(ctx context.Context, d *schema.ResourceData, meta i
in.Code = expandCode(v.([]interface{}))
}

if v, ok := d.GetOk("scaling_group_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil {
in.ScalingGroupConfiguration = expandScalingGroupConfiguration(v.([]interface{}))
}

if v, ok := d.GetOk("tickerplant_log_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil {
in.TickerplantLogConfiguration = expandTickerplantLogConfiguration(v.([]interface{}))
}

out, err := conn.CreateKxCluster(ctx, in)
if err != nil {
return create.AppendDiagError(diags, names.FinSpace, create.ErrActionCreating, ResNameKxCluster, d.Get("name").(string), err)
@@ -507,6 +588,14 @@ func resourceKxClusterRead(ctx context.Context, d *schema.ResourceData, meta int
return create.AppendDiagError(diags, names.FinSpace, create.ErrActionSetting, ResNameKxCluster, d.Id(), err)
}

if err := d.Set("scaling_group_configuration", flattenScalingGroupConfiguration(out.ScalingGroupConfiguration)); err != nil {
return create.AppendDiagError(diags, names.FinSpace, create.ErrActionSetting, ResNameKxCluster, d.Id(), err)
}

if err := d.Set("tickerplant_log_configuration", flattenTickerplantLogConfiguration(out.TickerplantLogConfiguration)); err != nil {
return create.AppendDiagError(diags, names.FinSpace, create.ErrActionSetting, ResNameKxCluster, d.Id(), err)
}

// compose cluster ARN using environment ARN
parts, err := flex.ExpandResourceId(d.Id(), kxUserIDPartCount, false)
if err != nil {
@@ -767,6 +856,38 @@ func expandAutoScalingConfiguration(tfList []interface{}) *types.AutoScalingConf
return a
}

func expandScalingGroupConfiguration(tfList []interface{}) *types.KxScalingGroupConfiguration {
if len(tfList) == 0 || tfList[0] == nil {
return nil
}

tfMap := tfList[0].(map[string]interface{})

a := &types.KxScalingGroupConfiguration{}

if v, ok := tfMap["scaling_group_name"].(string); ok && v != "" {
a.ScalingGroupName = aws.String(v)
}

if v, ok := tfMap["node_count"].(int); ok && v != 0 {
a.NodeCount = aws.Int32(int32(v))
}

if v, ok := tfMap["memory_limit"].(int); ok && v != 0 {
a.MemoryLimit = aws.Int32(int32(v))
}

if v, ok := tfMap["cpu"].(float64); ok && v != 0 {
a.Cpu = aws.Float64(v)
}

if v, ok := tfMap["memory_reservation"].(int); ok && v != 0 {
a.MemoryReservation = aws.Int32(int32(v))
}

return a
}

func expandSavedownStorageConfiguration(tfList []interface{}) *types.KxSavedownStorageConfiguration {
if len(tfList) == 0 || tfList[0] == nil {
return nil
@@ -784,6 +905,10 @@ func expandSavedownStorageConfiguration(tfList []interface{}) *types.KxSavedownS
a.Size = aws.Int32(int32(v))
}

if v, ok := tfMap["volume_name"].(string); ok && v != "" {
a.VolumeName = aws.String(v)
}

return a
}

@@ -815,6 +940,22 @@ func expandVPCConfiguration(tfList []interface{}) *types.VpcConfiguration {
return a
}

func expandTickerplantLogConfiguration(tfList []interface{}) *types.TickerplantLogConfiguration {
if len(tfList) == 0 || tfList[0] == nil {
return nil
}

tfMap := tfList[0].(map[string]interface{})

a := &types.TickerplantLogConfiguration{}

if v, ok := tfMap["tickerplant_log_volumes"].(*schema.Set); ok && v.Len() > 0 {
a.TickerplantLogVolumes = flex.ExpandStringValueSet(v)
}

return a
}

func expandCacheStorageConfiguration(tfMap map[string]interface{}) *types.KxCacheStorageConfiguration {
if tfMap == nil {
return nil
@@ -896,6 +1037,10 @@ func expandDatabase(tfMap map[string]interface{}) *types.KxDatabaseConfiguration
a.DatabaseName = aws.String(v)
}

if v, ok := tfMap["dataview_name"].(string); ok && v != "" {
a.DataviewName = aws.String(v)
}

if v, ok := tfMap["cache_configurations"]; ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil {
a.CacheConfigurations = expandCacheConfigurations(v.([]interface{}))
}
@@ -1059,6 +1204,50 @@ func flattenAutoScalingConfiguration(apiObject *types.AutoScalingConfiguration)
return []interface{}{m}
}

func flattenScalingGroupConfiguration(apiObject *types.KxScalingGroupConfiguration) []interface{} {
if apiObject == nil {
return nil
}

m := map[string]interface{}{}

if v := apiObject.ScalingGroupName; v != nil {
m["scaling_group_name"] = aws.ToString(v)
}

if v := apiObject.NodeCount; v != nil {
m["node_count"] = aws.ToInt32(v)
}

if v := apiObject.MemoryLimit; v != nil {
m["memory_limit"] = aws.ToInt32(v)
}

if v := apiObject.Cpu; v != nil {
m["cpu"] = aws.ToFloat64(v)
}

if v := apiObject.MemoryReservation; v != nil {
m["memory_reservation"] = aws.ToInt32(v)
}

return []interface{}{m}
}

func flattenTickerplantLogConfiguration(apiObject *types.TickerplantLogConfiguration) []interface{} {
if apiObject == nil {
return nil
}

m := map[string]interface{}{}

if v := apiObject.TickerplantLogVolumes; v != nil {
m["tickerplant_log_volumes"] = v
}

return []interface{}{m}
}

func flattenSavedownStorageConfiguration(apiObject *types.KxSavedownStorageConfiguration) []interface{} {
if apiObject == nil {
return nil
@@ -1074,6 +1263,10 @@ func flattenSavedownStorageConfi
m["size"] = v
}

if v := apiObject.VolumeName; v != nil {
m["volume_name"] = aws.ToString(v)
}

return []interface{}{m}
}

@@ -1200,6 +1393,10 @@ func flattenDatabase(apiObject *types.KxDatabaseConfiguration) map[string]interf
m["database_name"] = aws.ToString(v)
}

if v := apiObject.DataviewName; v != nil {
m["dataview_name"] = aws.ToString(v)
}

if v := apiObject.CacheConfigurations; v != nil {
m["cache_configurations"] = flattenCacheConfigurations(v)
}