diff --git a/.changelog/34831.txt b/.changelog/34831.txt new file mode 100644 index 00000000000..e7c0098c35a --- /dev/null +++ b/.changelog/34831.txt @@ -0,0 +1,6 @@ +```release-note:enhancement +resource/aws_finspace_kx_cluster: Add `database.dataview_name`, `scaling_group_configuration`, and `tickerplant_log_configuration` arguments. +``` +```release-note:enhancement +resource/aws_finspace_kx_cluster: The `capacity_configuration` argument is now optional. +``` diff --git a/internal/service/finspace/kx_cluster.go b/internal/service/finspace/kx_cluster.go index 2346f196a56..e00b091ffb4 100644 --- a/internal/service/finspace/kx_cluster.go +++ b/internal/service/finspace/kx_cluster.go @@ -132,7 +132,7 @@ func ResourceKxCluster() *schema.Resource { }, "capacity_configuration": { Type: schema.TypeList, - Required: true, + Optional: true, ForceNew: true, MaxItems: 1, Elem: &schema.Resource{ @@ -225,6 +225,12 @@ func ResourceKxCluster() *schema.Resource { ForceNew: true, ValidateFunc: validation.StringLenBetween(3, 63), }, + "dataview_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(3, 63), + }, }, }, }, @@ -280,17 +286,23 @@ func ResourceKxCluster() *schema.Resource { Schema: map[string]*schema.Schema{ "type": { Type: schema.TypeString, - Required: true, + Optional: true, ForceNew: true, ValidateFunc: validation.StringInSlice( enum.Slice(types.KxSavedownStorageTypeSds01), true), }, "size": { Type: schema.TypeInt, - Required: true, + Optional: true, ForceNew: true, ValidateFunc: validation.IntBetween(10, 16000), }, + "volume_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(3, 63), + }, }, }, }, @@ -346,6 +358,64 @@ func ResourceKxCluster() *schema.Resource { }, }, }, + "scaling_group_configuration": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "scaling_group_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(3, 63), + }, + "cpu": { + Type: schema.TypeFloat, + Optional: true, + ForceNew: true, + ValidateFunc: validation.FloatAtLeast(0.1), + }, + "node_count": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + ValidateFunc: validation.IntAtLeast(1), + }, + "memory_limit": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntAtLeast(6), + }, + "memory_reservation": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + ValidateFunc: validation.IntAtLeast(6), + }, + }, + }, + }, + "tickerplant_log_configuration": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "tickerplant_log_volumes": { + Type: schema.TypeSet, + Required: true, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringLenBetween(3, 63), + }, + }, + }, + }, + }, }, CustomizeDiff: verify.SetTagsDiff, @@ -375,14 +445,13 @@ func resourceKxClusterCreate(ctx context.Context, d *schema.ResourceData, meta i d.SetId(rID) in := &finspace.CreateKxClusterInput{ - EnvironmentId: aws.String(environmentId), - ClusterName: aws.String(clusterName), - ClusterType: types.KxClusterType(d.Get("type").(string)), - ReleaseLabel: aws.String(d.Get("release_label").(string)), - AzMode: types.KxAzMode(d.Get("az_mode").(string)), - CapacityConfiguration: 
expandCapacityConfiguration(d.Get("capacity_configuration").([]interface{})), - ClientToken: aws.String(id.UniqueId()), - Tags: getTagsIn(ctx), + EnvironmentId: aws.String(environmentId), + ClusterName: aws.String(clusterName), + ClusterType: types.KxClusterType(d.Get("type").(string)), + ReleaseLabel: aws.String(d.Get("release_label").(string)), + AzMode: types.KxAzMode(d.Get("az_mode").(string)), + ClientToken: aws.String(id.UniqueId()), + Tags: getTagsIn(ctx), } if v, ok := d.GetOk("description"); ok { @@ -401,6 +470,10 @@ func resourceKxClusterCreate(ctx context.Context, d *schema.ResourceData, meta i in.AvailabilityZoneId = aws.String(v.(string)) } + if v, ok := d.GetOk("capacity_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + in.CapacityConfiguration = expandCapacityConfiguration(v.([]interface{})) + } + if v, ok := d.GetOk("command_line_arguments"); ok && len(v.(map[string]interface{})) > 0 { in.CommandLineArguments = expandCommandLineArguments(v.(map[string]interface{})) } @@ -429,6 +502,14 @@ func resourceKxClusterCreate(ctx context.Context, d *schema.ResourceData, meta i in.Code = expandCode(v.([]interface{})) } + if v, ok := d.GetOk("scaling_group_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + in.ScalingGroupConfiguration = expandScalingGroupConfiguration(v.([]interface{})) + } + + if v, ok := d.GetOk("tickerplant_log_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + in.TickerplantLogConfiguration = expandTickerplantLogConfiguration(v.([]interface{})) + } + out, err := conn.CreateKxCluster(ctx, in) if err != nil { return create.AppendDiagError(diags, names.FinSpace, create.ErrActionCreating, ResNameKxCluster, d.Get("name").(string), err) @@ -507,6 +588,14 @@ func resourceKxClusterRead(ctx context.Context, d *schema.ResourceData, meta int return create.AppendDiagError(diags, names.FinSpace, create.ErrActionSetting, ResNameKxCluster, d.Id(), err) } + if err := d.Set("scaling_group_configuration", flattenScalingGroupConfiguration(out.ScalingGroupConfiguration)); err != nil { + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionSetting, ResNameKxCluster, d.Id(), err) + } + + if err := d.Set("tickerplant_log_configuration", flattenTickerplantLogConfiguration(out.TickerplantLogConfiguration)); err != nil { + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionSetting, ResNameKxCluster, d.Id(), err) + } + // compose cluster ARN using environment ARN parts, err := flex.ExpandResourceId(d.Id(), kxUserIDPartCount, false) if err != nil { @@ -767,6 +856,38 @@ func expandAutoScalingConfiguration(tfList []interface{}) *types.AutoScalingConf return a } +func expandScalingGroupConfiguration(tfList []interface{}) *types.KxScalingGroupConfiguration { + if len(tfList) == 0 || tfList[0] == nil { + return nil + } + + tfMap := tfList[0].(map[string]interface{}) + + a := &types.KxScalingGroupConfiguration{} + + if v, ok := tfMap["scaling_group_name"].(string); ok && v != "" { + a.ScalingGroupName = aws.String(v) + } + + if v, ok := tfMap["node_count"].(int); ok && v != 0 { + a.NodeCount = aws.Int32(int32(v)) + } + + if v, ok := tfMap["memory_limit"].(int); ok && v != 0 { + a.MemoryLimit = aws.Int32(int32(v)) + } + + if v, ok := tfMap["cpu"].(float64); ok && v != 0 { + a.Cpu = aws.Float64(v) + } + + if v, ok := tfMap["memory_reservation"].(int); ok && v != 0 { + a.MemoryReservation = aws.Int32(int32(v)) + } + + return a +} + func 
expandSavedownStorageConfiguration(tfList []interface{}) *types.KxSavedownStorageConfiguration { if len(tfList) == 0 || tfList[0] == nil { return nil @@ -784,6 +905,10 @@ func expandSavedownStorageConfiguration(tfList []interface{}) *types.KxSavedownS a.Size = aws.Int32(int32(v)) } + if v, ok := tfMap["volume_name"].(string); ok && v != "" { + a.VolumeName = aws.String(v) + } + return a } @@ -815,6 +940,22 @@ func expandVPCConfiguration(tfList []interface{}) *types.VpcConfiguration { return a } +func expandTickerplantLogConfiguration(tfList []interface{}) *types.TickerplantLogConfiguration { + if len(tfList) == 0 || tfList[0] == nil { + return nil + } + + tfMap := tfList[0].(map[string]interface{}) + + a := &types.TickerplantLogConfiguration{} + + if v, ok := tfMap["tickerplant_log_volumes"].(*schema.Set); ok && v.Len() > 0 { + a.TickerplantLogVolumes = flex.ExpandStringValueSet(v) + } + + return a +} + func expandCacheStorageConfiguration(tfMap map[string]interface{}) *types.KxCacheStorageConfiguration { if tfMap == nil { return nil @@ -896,6 +1037,10 @@ func expandDatabase(tfMap map[string]interface{}) *types.KxDatabaseConfiguration a.DatabaseName = aws.String(v) } + if v, ok := tfMap["dataview_name"].(string); ok && v != "" { + a.DataviewName = aws.String(v) + } + if v, ok := tfMap["cache_configurations"]; ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { a.CacheConfigurations = expandCacheConfigurations(v.([]interface{})) } @@ -1059,6 +1204,50 @@ func flattenAutoScalingConfiguration(apiObject *types.AutoScalingConfiguration) return []interface{}{m} } +func flattenScalingGroupConfiguration(apiObject *types.KxScalingGroupConfiguration) []interface{} { + if apiObject == nil { + return nil + } + + m := map[string]interface{}{} + + if v := apiObject.ScalingGroupName; v != nil { + m["scaling_group_name"] = aws.ToString(v) + } + + if v := apiObject.NodeCount; v != nil { + m["node_count"] = aws.ToInt32(v) + } + + if v := apiObject.MemoryLimit; v != nil { + m["memory_limit"] = aws.ToInt32(v) + } + + if v := apiObject.Cpu; v != nil { + m["cpu"] = aws.ToFloat64(v) + } + + if v := apiObject.MemoryReservation; v != nil { + m["memory_reservation"] = aws.ToInt32(v) + } + + return []interface{}{m} +} + +func flattenTickerplantLogConfiguration(apiObject *types.TickerplantLogConfiguration) []interface{} { + if apiObject == nil { + return nil + } + + m := map[string]interface{}{} + + if v := apiObject.TickerplantLogVolumes; v != nil { + m["tickerplant_log_volumes"] = v + } + + return []interface{}{m} +} + func flattenSavedownStorageConfiguration(apiObject *types.KxSavedownStorageConfiguration) []interface{} { if apiObject == nil { return nil @@ -1074,6 +1263,10 @@ func flattenSavedownStorageConfiguration(apiObject *types.KxSavedownStorageConfi m["size"] = v } + if v := apiObject.VolumeName; v != nil { + m["volume_name"] = aws.ToString(v) + } + return []interface{}{m} } @@ -1200,6 +1393,10 @@ func flattenDatabase(apiObject *types.KxDatabaseConfiguration) map[string]interf m["database_name"] = aws.ToString(v) } + if v := apiObject.DataviewName; v != nil { + m["dataview_name"] = aws.ToString(v) + } + if v := apiObject.CacheConfigurations; v != nil { m["cache_configurations"] = flattenCacheConfigurations(v) } diff --git a/internal/service/finspace/kx_cluster_test.go b/internal/service/finspace/kx_cluster_test.go index 6fc14e33133..9d69793554a 100644 --- a/internal/service/finspace/kx_cluster_test.go +++ b/internal/service/finspace/kx_cluster_test.go @@ -576,6 +576,134 @@ func 
TestAccFinSpaceKxCluster_tags(t *testing.T) { }) } +func TestAccFinSpaceKxCluster_ScalingGroup(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + ctx := acctest.Context(t) + var kxcluster finspace.GetKxClusterOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + testAccPreCheckManagedKxLicenseEnabled(t) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKxClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccKxClusterConfig_ScalingGroup(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxClusterExists(ctx, resourceName, &kxcluster), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "status", string(types.KxClusterStatusRunning)), + ), + }, + }, + }) +} + +func TestAccFinSpaceKxCluster_RDBInScalingGroupWithKxVolume(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + ctx := acctest.Context(t) + var kxcluster finspace.GetKxClusterOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + testAccPreCheckManagedKxLicenseEnabled(t) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKxClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccKxRDBClusterConfigInScalingGroup_withKxVolume(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxClusterExists(ctx, resourceName, &kxcluster), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "status", string(types.KxClusterStatusRunning)), + ), + }, + }, + }) +} + +func TestAccFinSpaceKxCluster_TPInScalingGroupWithKxVolume(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + ctx := acctest.Context(t) + var kxcluster finspace.GetKxClusterOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + testAccPreCheckManagedKxLicenseEnabled(t) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKxClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccKxTPClusterConfigInScalingGroup_withKxVolume(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxClusterExists(ctx, resourceName, &kxcluster), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "status", string(types.KxClusterStatusRunning)), + ), + }, + }, + }) +} + +func TestAccFinSpaceKxCluster_InScalingGroupWithKxDataview(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + ctx := acctest.Context(t) + var kxcluster 
finspace.GetKxClusterOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + testAccPreCheckManagedKxLicenseEnabled(t) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKxClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccKxClusterConfigInScalingGroup_withKxDataview(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxClusterExists(ctx, resourceName, &kxcluster), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "status", string(types.KxClusterStatusRunning)), + ), + }, + }, + }) +} + func testAccCheckKxClusterDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) @@ -746,6 +874,50 @@ resource "aws_route" "r" { `, rName) } +func testAccKxClusterConfigScalingGroupBase(rName string) string { + return fmt.Sprintf(` +resource "aws_finspace_kx_scaling_group" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + host_type = "kx.sg.4xlarge" +} + `, rName) +} + +func testAccKxClusterConfigKxVolumeBase(rName string) string { + return fmt.Sprintf(` +resource "aws_finspace_kx_volume" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + availability_zones = [aws_finspace_kx_environment.test.availability_zones[0]] + az_mode = "SINGLE" + type = "NAS_1" + nas1_configuration { + type = "SSD_1000" + size = 1200 + } +} + `, rName) +} + +func testAccKxClusterConfigKxDataviewBase(rName string) string { + return fmt.Sprintf(` +resource "aws_finspace_kx_database" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id +} + +resource "aws_finspace_kx_dataview" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + database_name = aws_finspace_kx_database.test.name + auto_update = true + az_mode = "SINGLE" + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] +} +`, rName) +} func testAccKxClusterConfig_basic(rName string) string { return acctest.ConfigCompose( testAccKxClusterConfigBase(rName), @@ -772,6 +944,149 @@ resource "aws_finspace_kx_cluster" "test" { `, rName)) } +func testAccKxClusterConfig_ScalingGroup(rName string) string { + return acctest.ConfigCompose( + testAccKxClusterConfigBase(rName), + testAccKxClusterConfigScalingGroupBase(rName), + fmt.Sprintf(` +resource "aws_finspace_kx_cluster" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + type = "HDB" + release_label = "1.0" + az_mode = "SINGLE" + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + vpc_configuration { + vpc_id = aws_vpc.test.id + security_group_ids = [aws_security_group.test.id] + subnet_ids = [aws_subnet.test.id] + ip_address_type = "IP_V4" + } + scaling_group_configuration { + scaling_group_name = aws_finspace_kx_scaling_group.test.name + memory_limit = 200 + memory_reservation = 100 + node_count = 1 + cpu = 0.5 + } +} +`, rName)) +} + +func testAccKxRDBClusterConfigInScalingGroup_withKxVolume(rName string) string { + return acctest.ConfigCompose( + 
testAccKxClusterConfigBase(rName), + testAccKxClusterConfigKxVolumeBase(rName), + testAccKxClusterConfigScalingGroupBase(rName), + fmt.Sprintf(` +resource "aws_finspace_kx_database" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id +} + +resource "aws_finspace_kx_cluster" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + type = "RDB" + release_label = "1.0" + az_mode = "SINGLE" + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + vpc_configuration { + vpc_id = aws_vpc.test.id + security_group_ids = [aws_security_group.test.id] + subnet_ids = [aws_subnet.test.id] + ip_address_type = "IP_V4" + } + scaling_group_configuration { + scaling_group_name = aws_finspace_kx_scaling_group.test.name + memory_limit = 200 + memory_reservation = 100 + node_count = 1 + cpu = 0.5 + } + database { + database_name = aws_finspace_kx_database.test.name + } + savedown_storage_configuration { + volume_name = aws_finspace_kx_volume.test.name + } +} +`, rName)) +} + +func testAccKxTPClusterConfigInScalingGroup_withKxVolume(rName string) string { + return acctest.ConfigCompose( + testAccKxClusterConfigBase(rName), + testAccKxClusterConfigKxVolumeBase(rName), + testAccKxClusterConfigScalingGroupBase(rName), + fmt.Sprintf(` +resource "aws_finspace_kx_cluster" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + type = "TICKERPLANT" + release_label = "1.0" + az_mode = "SINGLE" + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + vpc_configuration { + vpc_id = aws_vpc.test.id + security_group_ids = [aws_security_group.test.id] + subnet_ids = [aws_subnet.test.id] + ip_address_type = "IP_V4" + } + scaling_group_configuration { + scaling_group_name = aws_finspace_kx_scaling_group.test.name + memory_limit = 200 + memory_reservation = 100 + node_count = 1 + cpu = 0.5 + } + tickerplant_log_configuration { + tickerplant_log_volumes = [aws_finspace_kx_volume.test.name] + } +} +`, rName)) +} + +func testAccKxClusterConfigInScalingGroup_withKxDataview(rName string) string { + return acctest.ConfigCompose( + testAccKxClusterConfigBase(rName), + testAccKxClusterConfigScalingGroupBase(rName), + testAccKxClusterConfigKxDataviewBase(rName), + fmt.Sprintf(` +resource "aws_finspace_kx_cluster" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + type = "HDB" + release_label = "1.0" + az_mode = "SINGLE" + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + vpc_configuration { + vpc_id = aws_vpc.test.id + security_group_ids = [aws_security_group.test.id] + subnet_ids = [aws_subnet.test.id] + ip_address_type = "IP_V4" + } + + scaling_group_configuration { + scaling_group_name = aws_finspace_kx_scaling_group.test.name + memory_limit = 200 + memory_reservation = 100 + node_count = 1 + cpu = 0.5 + } + + database { + database_name = aws_finspace_kx_database.test.name + dataview_name = aws_finspace_kx_dataview.test.name + } + + lifecycle { + ignore_changes = [database] + } +} +`, rName)) +} + func testAccKxClusterConfig_description(rName, description string) string { return acctest.ConfigCompose( testAccKxClusterConfigBase(rName), diff --git a/website/docs/r/finspace_kx_cluster.html.markdown b/website/docs/r/finspace_kx_cluster.html.markdown index 52ed4105d4a..13b06c8e474 100644 --- a/website/docs/r/finspace_kx_cluster.html.markdown +++ b/website/docs/r/finspace_kx_cluster.html.markdown @@ -92,6 +92,8 @@ The following arguments are optional: * 
`execution_role` - (Optional) An IAM role that defines a set of permissions associated with a cluster. These permissions are assumed when a cluster attempts to access another cluster.
* `initialization_script` - (Optional) Path to Q program that will be run at launch of a cluster. This is a relative path within .zip file that contains the custom code, which will be loaded on the cluster. It must include the file name itself. For example, somedir/init.q.
* `savedown_storage_configuration` - (Optional) Size and type of the temporary storage that is used to hold data during the savedown process. This parameter is required when you choose `type` as RDB. All the data written to this storage space is lost when the cluster node is restarted. See [savedown_storage_configuration](#savedown_storage_configuration).
+* `scaling_group_configuration` - (Optional) The structure that stores the configuration details of a scaling group. See [scaling_group_configuration](#scaling_group_configuration).
+* `tickerplant_log_configuration` - (Optional) A configuration to store Tickerplant logs. It consists of a list of volumes that will be mounted to your cluster. For the cluster type `TICKERPLANT`, the location of the TP volume on the cluster is available via the global variable `.aws.tp_log_path`. See [tickerplant_log_configuration](#tickerplant_log_configuration).
* `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.

### auto_scaling_configuration

@@ -149,6 +151,7 @@ The database block supports the following arguments:

* `database_name` - (Required) Name of the KX database.
* `cache_configurations` - (Optional) Configuration details for the disk cache to increase performance reading from a KX database mounted to the cluster. See [cache_configurations](#cache_configurations).
* `changeset_id` - (Optional) A unique identifier of the changeset that is associated with the cluster.
+* `dataview_name` - (Optional) The name of the dataview to be used for caching historical data on disk. You cannot update to a different dataview name once a cluster is created. Use `lifecycle` [`ignore_changes`](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html#ignore_changes) for the `database` block to prevent any undesirable behaviors.

#### cache_configurations

@@ -161,9 +164,10 @@ The cache_configuration block supports the following arguments:

The savedown_storage_configuration block supports the following arguments:

-* `type` - (Required) Type of writeable storage space for temporarily storing your savedown data. The valid values are:
+* `type` - (Optional) Type of writeable storage space for temporarily storing your savedown data. The valid values are:
    * SDS01 - This type represents 3000 IOPS and io2 ebs volume type.
-* `size` - (Required) Size of temporary storage in gigabytes. Must be between 10 and 16000.
+* `size` - (Optional) Size of temporary storage in gigabytes. Must be between 10 and 16000.
+* `volume_name` - (Optional) The name of the kdb volume that you want to use as writeable save-down storage for clusters.

### vpc_configuration

@@ -174,6 +178,22 @@ The vpc_configuration block supports the following arguments:
* `subnet_ids `- (Required) Identifier of the subnet that the Privatelink VPC endpoint uses to connect to the cluster.
* `ip_address_type` - (Required) IP address type for cluster network configuration parameters. The following type is available: IP_V4 - IP address version 4.
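For orientation, the following minimal configuration sketch shows how the new arguments fit together. It is adapted from the acceptance test configuration `testAccKxClusterConfigInScalingGroup_withKxDataview` added in this change; the `example` resource references and the cluster name are placeholders, and the environment, VPC, scaling group, database, and dataview resources are assumed to be defined elsewhere.

```terraform
resource "aws_finspace_kx_cluster" "example" {
  name                 = "example-hdb-cluster"
  environment_id       = aws_finspace_kx_environment.example.id
  type                 = "HDB"
  release_label        = "1.0"
  az_mode              = "SINGLE"
  availability_zone_id = aws_finspace_kx_environment.example.availability_zones[0]

  vpc_configuration {
    vpc_id             = aws_vpc.example.id
    security_group_ids = [aws_security_group.example.id]
    subnet_ids         = [aws_subnet.example.id]
    ip_address_type    = "IP_V4"
  }

  # Capacity comes from the scaling group, so capacity_configuration
  # (now optional) is omitted.
  scaling_group_configuration {
    scaling_group_name = aws_finspace_kx_scaling_group.example.name
    memory_reservation = 100
    memory_limit       = 200
    node_count         = 1
    cpu                = 0.5
  }

  # Cache historical data through a dataview rather than a cache configuration.
  database {
    database_name = aws_finspace_kx_database.example.name
    dataview_name = aws_finspace_kx_dataview.example.name
  }

  # dataview_name cannot be changed after creation; ignore drift on database.
  lifecycle {
    ignore_changes = [database]
  }
}
```

A Tickerplant cluster would instead set `type = "TICKERPLANT"` together with a `tickerplant_log_configuration` block listing the log volumes, as exercised by `testAccKxTPClusterConfigInScalingGroup_withKxVolume` in this change.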
+### scaling_group_configuration
+
+The scaling_group_configuration block supports the following arguments:
+
+* `scaling_group_name` - (Required) A unique identifier for the kdb scaling group.
+* `memory_reservation` - (Required) A reservation of the minimum amount of memory that should be available on the scaling group for a kdb cluster to be successfully placed in a scaling group.
+* `node_count` - (Required) The number of kdb cluster nodes.
+* `cpu` - (Optional) The number of vCPUs that you want to reserve for each node of this kdb cluster on the scaling group host.
+* `memory_limit` - (Optional) A hard limit on the amount of memory a kdb cluster can use.
+
+### tickerplant_log_configuration
+
+The tickerplant_log_configuration block supports the following arguments:
+
+* `tickerplant_log_volumes` - (Required) The names of the volumes for Tickerplant logs.
+

## Attribute Reference

This resource exports the following attributes in addition to the arguments above: